From 17db4cb9703bcaac3a18ff1c2bbc43c57af9d6b6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:22:52 +0100 Subject: [PATCH 001/581] refact "cscli machines" (#2777) --- cmd/crowdsec-cli/bouncers.go | 5 +- cmd/crowdsec-cli/flag.go | 28 +++ cmd/crowdsec-cli/items.go | 2 - cmd/crowdsec-cli/machines.go | 420 ++++++++++++++++++----------------- cmd/crowdsec-cli/main.go | 22 +- cmd/crowdsec-cli/support.go | 5 +- test/bats/30_machines.bats | 11 +- 7 files changed, 268 insertions(+), 225 deletions(-) create mode 100644 cmd/crowdsec-cli/flag.go diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 410827b3159..d2685901ebb 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -16,7 +16,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -38,10 +37,10 @@ func askYesNo(message string, defaultAnswer bool) (bool, error) { type cliBouncers struct { db *database.Client - cfg func() *csconfig.Config + cfg configGetter } -func NewCLIBouncers(getconfig func() *csconfig.Config) *cliBouncers { +func NewCLIBouncers(getconfig configGetter) *cliBouncers { return &cliBouncers{ cfg: getconfig, } diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/flag.go new file mode 100644 index 00000000000..402302a1f64 --- /dev/null +++ b/cmd/crowdsec-cli/flag.go @@ -0,0 +1,28 @@ +package main + +// Custom types for flag validation and conversion. 
+ +import ( + "errors" +) + +type MachinePassword string + +func (p *MachinePassword) String() string { + return string(*p) +} + +func (p *MachinePassword) Set(v string) error { + // a password can't be more than 72 characters + // due to bcrypt limitations + if len(v) > 72 { + return errors.New("password too long (max 72 characters)") + } + *p = MachinePassword(v) + + return nil +} + +func (p *MachinePassword) Type() string { + return "string" +} diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index a1d079747fa..851be553f15 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -138,8 +138,6 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item } csvwriter.Flush() - default: - return fmt.Errorf("unknown output format '%s'", csConfig.Cscli.Output) } return nil diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 581683baa8f..0cabccf76f5 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -5,7 +5,6 @@ import ( "encoding/csv" "encoding/json" "fmt" - "io" "math/big" "os" "strings" @@ -101,85 +100,97 @@ func getLastHeartbeat(m *ent.Machine) (string, bool) { return hb, true } -func getAgents(out io.Writer, dbClient *database.Client) error { - machines, err := dbClient.ListMachines() +type cliMachines struct{ + db *database.Client + cfg configGetter +} + +func NewCLIMachines(getconfig configGetter) *cliMachines { + return &cliMachines{ + cfg: getconfig, + } +} + +func (cli *cliMachines) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "machines [action]", + Short: "Manage local API machines [requires local API]", + Long: `To list/add/delete/validate/prune machines. +Note: This command requires database direct access, so is intended to be run on the local API machine. 
+`, + Example: `cscli machines [action]`, + DisableAutoGenTag: true, + Aliases: []string{"machine"}, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + var err error + if err = require.LAPI(cli.cfg()); err != nil { + return err + } + cli.db, err = database.NewClient(cli.cfg().DbConfig) + if err != nil { + return fmt.Errorf("unable to create new database client: %s", err) + } + return nil + }, + } + + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newAddCmd()) + cmd.AddCommand(cli.newDeleteCmd()) + cmd.AddCommand(cli.newValidateCmd()) + cmd.AddCommand(cli.newPruneCmd()) + + return cmd +} + +func (cli *cliMachines) list() error { + out := color.Output + + machines, err := cli.db.ListMachines() if err != nil { return fmt.Errorf("unable to list machines: %s", err) } - switch csConfig.Cscli.Output { + switch cli.cfg().Cscli.Output { case "human": getAgentsTable(out, machines) case "json": enc := json.NewEncoder(out) enc.SetIndent("", " ") + if err := enc.Encode(machines); err != nil { return fmt.Errorf("failed to marshal") } + return nil case "raw": csvwriter := csv.NewWriter(out) + err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"}) if err != nil { return fmt.Errorf("failed to write header: %s", err) } + for _, m := range machines { validated := "false" if m.IsValidated { validated = "true" } + hb, _ := getLastHeartbeat(m) - err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb}) - if err != nil { + + if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb}); err != nil { return fmt.Errorf("failed to write raw output: %w", err) } } - csvwriter.Flush() - default: - return fmt.Errorf("unknown output '%s'", csConfig.Cscli.Output) - } - return nil -} - -type cliMachines struct{} - -func NewCLIMachines() *cliMachines { - return 
&cliMachines{} -} -func (cli cliMachines) NewCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "machines [action]", - Short: "Manage local API machines [requires local API]", - Long: `To list/add/delete/validate/prune machines. -Note: This command requires database direct access, so is intended to be run on the local API machine. -`, - Example: `cscli machines [action]`, - DisableAutoGenTag: true, - Aliases: []string{"machine"}, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - var err error - if err = require.LAPI(csConfig); err != nil { - return err - } - dbClient, err = database.NewClient(csConfig.DbConfig) - if err != nil { - return fmt.Errorf("unable to create new database client: %s", err) - } - return nil - }, + csvwriter.Flush() } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewAddCmd()) - cmd.AddCommand(cli.NewDeleteCmd()) - cmd.AddCommand(cli.NewValidateCmd()) - cmd.AddCommand(cli.NewPruneCmd()) - - return cmd + return nil } -func (cli cliMachines) NewListCmd() *cobra.Command { +func (cli *cliMachines) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list all machines in the database", @@ -188,84 +199,60 @@ func (cli cliMachines) NewListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - err := getAgents(color.Output, dbClient) - if err != nil { - return fmt.Errorf("unable to list machines: %s", err) - } - - return nil + return cli.list() }, } return cmd } -func (cli cliMachines) NewAddCmd() *cobra.Command { +func (cli *cliMachines) newAddCmd() *cobra.Command { + var ( + password MachinePassword + dumpFile string + apiURL string + interactive bool + autoAdd bool + force bool + ) + cmd := &cobra.Command{ Use: "add", Short: "add a single machine to the database", DisableAutoGenTag: true, Long: `Register a new machine in the database. 
cscli should be on the same machine as LAPI.`, - Example: ` -cscli machines add --auto + Example: `cscli machines add --auto cscli machines add MyTestMachine --auto cscli machines add MyTestMachine --password MyPassword -`, - RunE: cli.add, +cscli machines add -f- --auto > /tmp/mycreds.yaml`, + RunE: func(_ *cobra.Command, args []string) error { + return cli.add(args, string(password), dumpFile, apiURL, interactive, autoAdd, force) + }, } flags := cmd.Flags() - flags.StringP("password", "p", "", "machine password to login to the API") - flags.StringP("file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") - flags.StringP("url", "u", "", "URL of the local API") - flags.BoolP("interactive", "i", false, "interfactive mode to enter the password") - flags.BoolP("auto", "a", false, "automatically generate password (and username if not provided)") - flags.Bool("force", false, "will force add the machine if it already exist") + flags.VarP(&password, "password", "p", "machine password to login to the API") + flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") + flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API") + flags.BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password") + flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)") + flags.BoolVar(&force, "force", false, "will force add the machine if it already exist") return cmd } -func (cli cliMachines) add(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - machinePassword, err := flags.GetString("password") - if err != nil { - return err - } - - dumpFile, err := flags.GetString("file") - if err != nil { - return err - } - - apiURL, err := flags.GetString("url") - if err != nil { - return err - } - - interactive, err := 
flags.GetBool("interactive") - if err != nil { - return err - } - - autoAdd, err := flags.GetBool("auto") - if err != nil { - return err - } - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - var machineID string +func (cli *cliMachines) add(args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { + var ( + err error + machineID string + ) // create machineID if not specified by user if len(args) == 0 { if !autoAdd { - printHelp(cmd) - return nil + return fmt.Errorf("please specify a machine name to add, or use --auto") } + machineID, err = generateID("") if err != nil { return fmt.Errorf("unable to generate machine id: %s", err) @@ -274,15 +261,18 @@ func (cli cliMachines) add(cmd *cobra.Command, args []string) error { machineID = args[0] } + clientCfg := cli.cfg().API.Client + serverCfg := cli.cfg().API.Server + /*check if file already exists*/ - if dumpFile == "" && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" { - credFile := csConfig.API.Client.CredentialsFilePath + if dumpFile == "" && clientCfg != nil && clientCfg.CredentialsFilePath != "" { + credFile := clientCfg.CredentialsFilePath // use the default only if the file does not exist _, err = os.Stat(credFile) switch { case os.IsNotExist(err) || force: - dumpFile = csConfig.API.Client.CredentialsFilePath + dumpFile = credFile case err != nil: return fmt.Errorf("unable to stat '%s': %s", credFile, err) default: @@ -302,49 +292,85 @@ func (cli cliMachines) add(cmd *cobra.Command, args []string) error { machinePassword = generatePassword(passwordLength) } else if machinePassword == "" && interactive { qs := &survey.Password{ - Message: "Please provide a password for the machine", + Message: "Please provide a password for the machine:", } survey.AskOne(qs, &machinePassword) } + password := strfmt.Password(machinePassword) - _, err = dbClient.CreateMachine(&machineID, &password, "", true, 
force, types.PasswordAuthType) + + _, err = cli.db.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType) if err != nil { return fmt.Errorf("unable to create machine: %s", err) } - fmt.Printf("Machine '%s' successfully added to the local API.\n", machineID) + + fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID) if apiURL == "" { - if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil && csConfig.API.Client.Credentials.URL != "" { - apiURL = csConfig.API.Client.Credentials.URL - } else if csConfig.API.Server != nil && csConfig.API.Server.ListenURI != "" { - apiURL = "http://" + csConfig.API.Server.ListenURI + if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" { + apiURL = clientCfg.Credentials.URL + } else if serverCfg != nil && serverCfg.ListenURI != "" { + apiURL = "http://" + serverCfg.ListenURI } else { return fmt.Errorf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") } } + apiCfg := csconfig.ApiCredentialsCfg{ Login: machineID, Password: password.String(), URL: apiURL, } + apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { return fmt.Errorf("unable to marshal api credentials: %s", err) } + if dumpFile != "" && dumpFile != "-" { err = os.WriteFile(dumpFile, apiConfigDump, 0o600) if err != nil { return fmt.Errorf("write api credentials in '%s' failed: %s", dumpFile, err) } - fmt.Printf("API credentials written to '%s'.\n", dumpFile) + fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) } else { - fmt.Printf("%s\n", string(apiConfigDump)) + fmt.Print(string(apiConfigDump)) } return nil } -func (cli cliMachines) NewDeleteCmd() *cobra.Command { +func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + machines, err := cli.db.ListMachines() + if err != nil { + cobra.CompError("unable to list machines " + 
err.Error()) + } + + ret := []string{} + + for _, machine := range machines { + if strings.Contains(machine.MachineId, toComplete) && !slices.Contains(args, machine.MachineId) { + ret = append(ret, machine.MachineId) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp +} + +func (cli *cliMachines) delete(machines []string) error { + for _, machineID := range machines { + err := cli.db.DeleteWatcher(machineID) + if err != nil { + log.Errorf("unable to delete machine '%s': %s", machineID, err) + return nil + } + log.Infof("machine '%s' deleted successfully", machineID) + } + + return nil +} + +func (cli *cliMachines) newDeleteCmd() *cobra.Command { cmd := &cobra.Command{ Use: "delete [machine_name]...", Short: "delete machine(s) by name", @@ -352,40 +378,75 @@ func (cli cliMachines) NewDeleteCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - machines, err := dbClient.ListMachines() - if err != nil { - cobra.CompError("unable to list machines " + err.Error()) - } - ret := make([]string, 0) - for _, machine := range machines { - if strings.Contains(machine.MachineId, toComplete) && !slices.Contains(args, machine.MachineId) { - ret = append(ret, machine.MachineId) - } - } - return ret, cobra.ShellCompDirectiveNoFileComp + ValidArgsFunction: cli.deleteValid, + RunE: func(_ *cobra.Command, args []string) error { + return cli.delete(args) }, - RunE: cli.delete, } return cmd } -func (cli cliMachines) delete(_ *cobra.Command, args []string) error { - for _, machineID := range args { - err := dbClient.DeleteWatcher(machineID) - if err != nil { - log.Errorf("unable to delete machine '%s': %s", machineID, err) +func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { + if duration < 2*time.Minute && !notValidOnly { + if yes, err := askYesNo( + "The duration you 
provided is less than 2 minutes. " + + "This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. No changes were made.") return nil } - log.Infof("machine '%s' deleted successfully", machineID) } + machines := []*ent.Machine{} + if pending, err := cli.db.QueryPendingMachine(); err == nil { + machines = append(machines, pending...) + } + + if !notValidOnly { + if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(duration)); err == nil { + machines = append(machines, pending...) + } + } + + if len(machines) == 0 { + fmt.Println("no machines to prune") + return nil + } + + getAgentsTable(color.Output, machines) + + if !force { + if yes, err := askYesNo( + "You are about to PERMANENTLY remove the above machines from the database. " + + "These will NOT be recoverable. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. 
No changes were made.") + return nil + } + } + + deleted, err := cli.db.BulkDeleteWatchers(machines) + if err != nil { + return fmt.Errorf("unable to prune machines: %s", err) + } + + fmt.Fprintf(os.Stderr, "successfully delete %d machines\n", deleted) + return nil } -func (cli cliMachines) NewPruneCmd() *cobra.Command { - var parsedDuration time.Duration +func (cli *cliMachines) newPruneCmd() *cobra.Command { + var ( + duration time.Duration + notValidOnly bool + force bool + ) + + const defaultDuration = 10 * time.Minute + cmd := &cobra.Command{ Use: "prune", Short: "prune multiple machines from the database", @@ -395,76 +456,29 @@ cscli machines prune --duration 1h cscli machines prune --not-validated-only --force`, Args: cobra.NoArgs, DisableAutoGenTag: true, - PreRunE: func(cmd *cobra.Command, _ []string) error { - dur, _ := cmd.Flags().GetString("duration") - var err error - parsedDuration, err = time.ParseDuration(fmt.Sprintf("-%s", dur)) - if err != nil { - return fmt.Errorf("unable to parse duration '%s': %s", dur, err) - } - return nil - }, - RunE: func(cmd *cobra.Command, _ []string) error { - notValidOnly, _ := cmd.Flags().GetBool("not-validated-only") - force, _ := cmd.Flags().GetBool("force") - if parsedDuration >= 0-60*time.Second && !notValidOnly { - var answer bool - prompt := &survey.Confirm{ - Message: "The duration you provided is less than or equal 60 seconds this can break installations do you want to continue ?", - Default: false, - } - if err := survey.AskOne(prompt, &answer); err != nil { - return fmt.Errorf("unable to ask about prune check: %s", err) - } - if !answer { - fmt.Println("user aborted prune no changes were made") - return nil - } - } - machines := make([]*ent.Machine, 0) - if pending, err := dbClient.QueryPendingMachine(); err == nil { - machines = append(machines, pending...) 
- } - if !notValidOnly { - if pending, err := dbClient.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(parsedDuration)); err == nil { - machines = append(machines, pending...) - } - } - if len(machines) == 0 { - fmt.Println("no machines to prune") - return nil - } - getAgentsTable(color.Output, machines) - if !force { - var answer bool - prompt := &survey.Confirm{ - Message: "You are about to PERMANENTLY remove the above machines from the database these will NOT be recoverable, continue ?", - Default: false, - } - if err := survey.AskOne(prompt, &answer); err != nil { - return fmt.Errorf("unable to ask about prune check: %s", err) - } - if !answer { - fmt.Println("user aborted prune no changes were made") - return nil - } - } - nbDeleted, err := dbClient.BulkDeleteWatchers(machines) - if err != nil { - return fmt.Errorf("unable to prune machines: %s", err) - } - fmt.Printf("successfully delete %d machines\n", nbDeleted) - return nil + RunE: func(_ *cobra.Command, _ []string) error { + return cli.prune(duration, notValidOnly, force) }, } - cmd.Flags().StringP("duration", "d", "10m", "duration of time since validated machine last heartbeat") - cmd.Flags().Bool("not-validated-only", false, "only prune machines that are not validated") - cmd.Flags().Bool("force", false, "force prune without asking for confirmation") + + flags := cmd.Flags() + flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since validated machine last heartbeat") + flags.BoolVar(¬ValidOnly, "not-validated-only", false, "only prune machines that are not validated") + flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") return cmd } -func (cli cliMachines) NewValidateCmd() *cobra.Command { +func (cli *cliMachines) validate(machineID string) error { + if err := cli.db.ValidateMachine(machineID); err != nil { + return fmt.Errorf("unable to validate machine '%s': %s", machineID, err) + } + log.Infof("machine '%s' validated successfully", 
machineID) + + return nil +} + +func (cli *cliMachines) newValidateCmd() *cobra.Command { cmd := &cobra.Command{ Use: "validate", Short: "validate a machine to access the local API", @@ -472,14 +486,8 @@ func (cli cliMachines) NewValidateCmd() *cobra.Command { Example: `cscli machines validate "machine_name"`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - machineID := args[0] - if err := dbClient.ValidateMachine(machineID); err != nil { - return fmt.Errorf("unable to validate machine '%s': %s", machineID, err) - } - log.Infof("machine '%s' validated successfully", machineID) - - return nil + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(args[0]) }, } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index fda4cddc2bc..91e31a9778c 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -21,7 +21,7 @@ var ConfigFilePath string var csConfig *csconfig.Config var dbClient *database.Client -var OutputFormat string +var outputFormat string var OutputColor string var mergedConfig string @@ -29,6 +29,8 @@ var mergedConfig string // flagBranch overrides the value in csConfig.Cscli.HubBranch var flagBranch = "" +type configGetter func() *csconfig.Config + func initConfig() { var err error @@ -64,16 +66,18 @@ func initConfig() { csConfig.Cscli.HubBranch = flagBranch } - if OutputFormat != "" { - csConfig.Cscli.Output = OutputFormat - - if OutputFormat != "json" && OutputFormat != "raw" && OutputFormat != "human" { - log.Fatalf("output format %s unknown", OutputFormat) - } + if outputFormat != "" { + csConfig.Cscli.Output = outputFormat } + if csConfig.Cscli.Output == "" { csConfig.Cscli.Output = "human" } + + if csConfig.Cscli.Output != "human" && csConfig.Cscli.Output != "json" && csConfig.Cscli.Output != "raw" { + log.Fatalf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) + } + if csConfig.Cscli.Output == "json" 
{ log.SetFormatter(&log.JSONFormatter{}) log.SetLevel(log.ErrorLevel) @@ -146,7 +150,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.SetOut(color.Output) cmd.PersistentFlags().StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file") - cmd.PersistentFlags().StringVarP(&OutputFormat, "output", "o", "", "Output format: human, json, raw") + cmd.PersistentFlags().StringVarP(&outputFormat, "output", "o", "", "Output format: human, json, raw") cmd.PersistentFlags().StringVarP(&OutputColor, "color", "", "auto", "Output color: yes, no, auto") cmd.PersistentFlags().BoolVar(&dbg_lvl, "debug", false, "Set logging to debug") cmd.PersistentFlags().BoolVar(&nfo_lvl, "info", false, "Set logging to info") @@ -197,7 +201,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIAlerts().NewCommand()) cmd.AddCommand(NewCLISimulation().NewCommand()) cmd.AddCommand(NewCLIBouncers(getconfig).NewCommand()) - cmd.AddCommand(NewCLIMachines().NewCommand()) + cmd.AddCommand(NewCLIMachines(getconfig).NewCommand()) cmd.AddCommand(NewCLICapi().NewCommand()) cmd.AddCommand(NewLapiCmd()) cmd.AddCommand(NewCompletionCmd()) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 47768e7c2ee..ed7f7cf2ffd 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -159,10 +159,11 @@ func collectBouncers(dbClient *database.Client) ([]byte, error) { func collectAgents(dbClient *database.Client) ([]byte, error) { out := bytes.NewBuffer(nil) - err := getAgents(out, dbClient) + machines, err := dbClient.ListMachines() if err != nil { - return nil, err + return nil, fmt.Errorf("unable to list machines: %s", err) } + getAgentsTable(out, machines) return out.Bytes(), nil } diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index c7a72c334b1..f32c376e5b0 100644 --- a/test/bats/30_machines.bats +++ 
b/test/bats/30_machines.bats @@ -34,13 +34,18 @@ teardown() { rune -0 jq -r '.msg' <(stderr) assert_output --partial 'already exists: please remove it, use "--force" or specify a different file with "-f"' rune -0 cscli machines add local -a --force - assert_output --partial "Machine 'local' successfully added to the local API." + assert_stderr --partial "Machine 'local' successfully added to the local API." +} + +@test "passwords have a size limit" { + rune -1 cscli machines add local --password "$(printf '%73s' '' | tr ' ' x)" + assert_stderr --partial "password too long (max 72 characters)" } @test "add a new machine and delete it" { rune -0 cscli machines add -a -f /dev/null CiTestMachine -o human - assert_output --partial "Machine 'CiTestMachine' successfully added to the local API" - assert_output --partial "API credentials written to '/dev/null'" + assert_stderr --partial "Machine 'CiTestMachine' successfully added to the local API" + assert_stderr --partial "API credentials written to '/dev/null'" # we now have two machines rune -0 cscli machines list -o json From 785fce4dc752581613375b9edc23ee87d51cd98e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:24:00 +0100 Subject: [PATCH 002/581] refact "cscli alerts" (#2778) --- cmd/crowdsec-cli/alerts.go | 161 ++++++++++++++----------------------- 1 file changed, 59 insertions(+), 102 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 15824d2d067..4ab71be5bbf 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -11,7 +11,6 @@ import ( "strconv" "strings" "text/template" - "time" "github.com/fatih/color" "github.com/go-openapi/strfmt" @@ -48,52 +47,9 @@ func DecisionsFromAlert(alert *models.Alert) string { return ret } -func DateFromAlert(alert *models.Alert) string { - ts, err := time.Parse(time.RFC3339, alert.CreatedAt) - if err != nil { - log.Infof("while parsing %s with %s : %s", alert.CreatedAt, 
time.RFC3339, err) - return alert.CreatedAt - } - return ts.Format(time.RFC822) -} - -func SourceFromAlert(alert *models.Alert) string { - - //more than one item, just number and scope - if len(alert.Decisions) > 1 { - return fmt.Sprintf("%d %ss (%s)", len(alert.Decisions), *alert.Decisions[0].Scope, *alert.Decisions[0].Origin) - } - - //fallback on single decision information - if len(alert.Decisions) == 1 { - return fmt.Sprintf("%s:%s", *alert.Decisions[0].Scope, *alert.Decisions[0].Value) - } - - //try to compose a human friendly version - if *alert.Source.Value != "" && *alert.Source.Scope != "" { - scope := fmt.Sprintf("%s:%s", *alert.Source.Scope, *alert.Source.Value) - extra := "" - if alert.Source.Cn != "" { - extra = alert.Source.Cn - } - if alert.Source.AsNumber != "" { - extra += fmt.Sprintf("/%s", alert.Source.AsNumber) - } - if alert.Source.AsName != "" { - extra += fmt.Sprintf("/%s", alert.Source.AsName) - } - - if extra != "" { - scope += " (" + extra + ")" - } - return scope - } - return "" -} - -func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { - - if csConfig.Cscli.Output == "raw" { +func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { + switch csConfig.Cscli.Output { + case "raw": csvwriter := csv.NewWriter(os.Stdout) header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"} if printMachine { @@ -123,7 +79,7 @@ func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { } } csvwriter.Flush() - } else if csConfig.Cscli.Output == "json" { + case "json": if *alerts == nil { // avoid returning "null" in json // could be cleaner if we used slice of alerts directly @@ -131,8 +87,8 @@ func AlertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { return nil } x, _ := json.MarshalIndent(alerts, "", " ") - fmt.Printf("%s", string(x)) - } else if csConfig.Cscli.Output == "human" { + fmt.Print(string(x)) + case "human": if 
len(*alerts) == 0 { fmt.Println("No active alerts") return nil @@ -160,59 +116,60 @@ var alertTemplate = ` ` -func DisplayOneAlert(alert *models.Alert, withDetail bool) error { - if csConfig.Cscli.Output == "human" { - tmpl, err := template.New("alert").Parse(alertTemplate) - if err != nil { - return err - } - err = tmpl.Execute(os.Stdout, alert) - if err != nil { - return err - } - - alertDecisionsTable(color.Output, alert) +func displayOneAlert(alert *models.Alert, withDetail bool) error { + tmpl, err := template.New("alert").Parse(alertTemplate) + if err != nil { + return err + } + err = tmpl.Execute(os.Stdout, alert) + if err != nil { + return err + } - if len(alert.Meta) > 0 { - fmt.Printf("\n - Context :\n") - sort.Slice(alert.Meta, func(i, j int) bool { - return alert.Meta[i].Key < alert.Meta[j].Key - }) - table := newTable(color.Output) - table.SetRowLines(false) - table.SetHeaders("Key", "Value") - for _, meta := range alert.Meta { - var valSlice []string - if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil { - return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err) - } - for _, value := range valSlice { - table.AddRow( - meta.Key, - value, - ) - } + alertDecisionsTable(color.Output, alert) + + if len(alert.Meta) > 0 { + fmt.Printf("\n - Context :\n") + sort.Slice(alert.Meta, func(i, j int) bool { + return alert.Meta[i].Key < alert.Meta[j].Key + }) + table := newTable(color.Output) + table.SetRowLines(false) + table.SetHeaders("Key", "Value") + for _, meta := range alert.Meta { + var valSlice []string + if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil { + return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err) + } + for _, value := range valSlice { + table.AddRow( + meta.Key, + value, + ) } - table.Render() } + table.Render() + } - if withDetail { - fmt.Printf("\n - Events :\n") - for _, event := range alert.Events { - alertEventTable(color.Output, event) - } + if withDetail { + 
fmt.Printf("\n - Events :\n") + for _, event := range alert.Events { + alertEventTable(color.Output, event) } } + return nil } -type cliAlerts struct{} +type cliAlerts struct{ + client *apiclient.ApiClient +} func NewCLIAlerts() *cliAlerts { return &cliAlerts{} } -func (cli cliAlerts) NewCommand() *cobra.Command { +func (cli *cliAlerts) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "alerts [action]", Short: "Manage alerts", @@ -228,7 +185,7 @@ func (cli cliAlerts) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("parsing api url %s: %w", apiURL, err) } - Client, err = apiclient.NewClient(&apiclient.Config{ + cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: csConfig.API.Client.Credentials.Login, Password: strfmt.Password(csConfig.API.Client.Credentials.Password), UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), @@ -251,7 +208,7 @@ func (cli cliAlerts) NewCommand() *cobra.Command { return cmd } -func (cli cliAlerts) NewListCmd() *cobra.Command { +func (cli *cliAlerts) NewListCmd() *cobra.Command { var alertListFilter = apiclient.AlertsListOpts{ ScopeEquals: new(string), ValueEquals: new(string), @@ -345,12 +302,12 @@ cscli alerts list --type ban`, alertListFilter.Contains = new(bool) } - alerts, _, err := Client.Alerts.List(context.Background(), alertListFilter) + alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) if err != nil { return fmt.Errorf("unable to list alerts: %v", err) } - err = AlertsToTable(alerts, printMachine) + err = alertsToTable(alerts, printMachine) if err != nil { return fmt.Errorf("unable to list alerts: %v", err) } @@ -376,7 +333,7 @@ cscli alerts list --type ban`, return cmd } -func (cli cliAlerts) NewDeleteCmd() *cobra.Command { +func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { var ActiveDecision *bool var AlertDeleteAll bool var delAlertByID string @@ -451,12 +408,12 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, var alerts *models.DeleteAlertsResponse 
if delAlertByID == "" { - alerts, _, err = Client.Alerts.Delete(context.Background(), alertDeleteFilter) + alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) if err != nil { return fmt.Errorf("unable to delete alerts : %v", err) } } else { - alerts, _, err = Client.Alerts.DeleteOne(context.Background(), delAlertByID) + alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) if err != nil { return fmt.Errorf("unable to delete alert: %v", err) } @@ -478,7 +435,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return cmd } -func (cli cliAlerts) NewInspectCmd() *cobra.Command { +func (cli *cliAlerts) NewInspectCmd() *cobra.Command { var details bool cmd := &cobra.Command{ Use: `inspect "alert_id"`, @@ -495,13 +452,13 @@ func (cli cliAlerts) NewInspectCmd() *cobra.Command { if err != nil { return fmt.Errorf("bad alert id %s", alertID) } - alert, _, err := Client.Alerts.GetByID(context.Background(), id) + alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) if err != nil { return fmt.Errorf("can't find alert with id %s: %s", alertID, err) } switch csConfig.Cscli.Output { case "human": - if err := DisplayOneAlert(alert, details); err != nil { + if err := displayOneAlert(alert, details); err != nil { continue } case "json": @@ -528,7 +485,7 @@ func (cli cliAlerts) NewInspectCmd() *cobra.Command { return cmd } -func (cli cliAlerts) NewFlushCmd() *cobra.Command { +func (cli *cliAlerts) NewFlushCmd() *cobra.Command { var maxItems int var maxAge string cmd := &cobra.Command{ @@ -542,12 +499,12 @@ func (cli cliAlerts) NewFlushCmd() *cobra.Command { if err := require.LAPI(csConfig); err != nil { return err } - dbClient, err = database.NewClient(csConfig.DbConfig) + db, err := database.NewClient(csConfig.DbConfig) if err != nil { return fmt.Errorf("unable to create new database client: %s", err) } log.Info("Flushing alerts. !! 
This may take a long time !!") - err = dbClient.FlushAlerts(maxAge, maxItems) + err = db.FlushAlerts(maxAge, maxItems) if err != nil { return fmt.Errorf("unable to flush alerts: %s", err) } From e6f5d157b8a84ff68393a1446258eea093fc99ad Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:25:29 +0100 Subject: [PATCH 003/581] refact "cscli hub" (#2800) --- cmd/crowdsec-cli/hub.go | 84 ++++++++++++++++++++-------------------- cmd/crowdsec-cli/main.go | 2 +- 2 files changed, 44 insertions(+), 42 deletions(-) diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 3a2913f0513..d3ce380bb6f 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -13,13 +13,17 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliHub struct{} +type cliHub struct{ + cfg configGetter +} -func NewCLIHub() *cliHub { - return &cliHub{} +func NewCLIHub(getconfig configGetter) *cliHub { + return &cliHub{ + cfg: getconfig, + } } -func (cli cliHub) NewCommand() *cobra.Command { +func (cli *cliHub) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "hub [action]", Short: "Manage hub index", @@ -34,23 +38,16 @@ cscli hub upgrade`, DisableAutoGenTag: true, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewUpdateCmd()) - cmd.AddCommand(cli.NewUpgradeCmd()) - cmd.AddCommand(cli.NewTypesCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newUpdateCmd()) + cmd.AddCommand(cli.newUpgradeCmd()) + cmd.AddCommand(cli.newTypesCmd()) return cmd } -func (cli cliHub) list(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - all, err := flags.GetBool("all") - if err != nil { - return err - } - - hub, err := require.Hub(csConfig, nil, log.StandardLogger()) +func (cli *cliHub) list(all bool) error { + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) if err != nil { return err } @@ -80,24 +77,28 @@ func (cli cliHub) list(cmd *cobra.Command, args []string) error { 
return nil } -func (cli cliHub) NewListCmd() *cobra.Command { +func (cli *cliHub) newListCmd() *cobra.Command { + var all bool + cmd := &cobra.Command{ Use: "list [-a]", Short: "List all installed configurations", Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.list, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.list(all) + }, } flags := cmd.Flags() - flags.BoolP("all", "a", false, "List disabled items as well") + flags.BoolVarP(&all, "all", "a", false, "List disabled items as well") return cmd } -func (cli cliHub) update(cmd *cobra.Command, args []string) error { - local := csConfig.Hub - remote := require.RemoteHub(csConfig) +func (cli *cliHub) update() error { + local := cli.cfg().Hub + remote := require.RemoteHub(cli.cfg()) // don't use require.Hub because if there is no index file, it would fail hub, err := cwhub.NewHub(local, remote, true, log.StandardLogger()) @@ -112,7 +113,7 @@ func (cli cliHub) update(cmd *cobra.Command, args []string) error { return nil } -func (cli cliHub) NewUpdateCmd() *cobra.Command { +func (cli *cliHub) newUpdateCmd() *cobra.Command { cmd := &cobra.Command{ Use: "update", Short: "Download the latest index (catalog of available configurations)", @@ -121,21 +122,16 @@ Fetches the .index.json file from the hub, containing the list of available conf `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.update, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.update() + }, } return cmd } -func (cli cliHub) upgrade(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) +func (cli *cliHub) upgrade(force bool) error { + hub, err := require.Hub(cli.cfg(), require.RemoteHub(cli.cfg()), log.StandardLogger()) if err != nil { return err } @@ -167,7 +163,9 @@ func (cli cliHub) upgrade(cmd *cobra.Command, args 
[]string) error { return nil } -func (cli cliHub) NewUpgradeCmd() *cobra.Command { +func (cli *cliHub) newUpgradeCmd() *cobra.Command { + var force bool + cmd := &cobra.Command{ Use: "upgrade", Short: "Upgrade all configurations to their latest version", @@ -176,17 +174,19 @@ Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.upgrade, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.upgrade(force) + }, } flags := cmd.Flags() - flags.Bool("force", false, "Force upgrade: overwrite tainted and outdated files") + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") return cmd } -func (cli cliHub) types(cmd *cobra.Command, args []string) error { - switch csConfig.Cscli.Output { +func (cli *cliHub) types() error { + switch cli.cfg().Cscli.Output { case "human": s, err := yaml.Marshal(cwhub.ItemTypes) if err != nil { @@ -210,7 +210,7 @@ func (cli cliHub) types(cmd *cobra.Command, args []string) error { return nil } -func (cli cliHub) NewTypesCmd() *cobra.Command { +func (cli *cliHub) newTypesCmd() *cobra.Command { cmd := &cobra.Command{ Use: "types", Short: "List supported item types", @@ -219,7 +219,9 @@ List the types of supported hub items. 
`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.types, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.types() + }, } return cmd diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 91e31a9778c..acbced2e5a3 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -194,7 +194,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewConfigCmd()) - cmd.AddCommand(NewCLIHub().NewCommand()) + cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) cmd.AddCommand(NewMetricsCmd()) cmd.AddCommand(NewCLIDashboard().NewCommand()) cmd.AddCommand(NewCLIDecisions().NewCommand()) From af14f1085f1d31c75364e040a9d317cff22a093d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:26:06 +0100 Subject: [PATCH 004/581] refact "cscli " (#2782) --- cmd/crowdsec-cli/itemcli.go | 196 +++++++++++++----------------------- cmd/crowdsec-cli/items.go | 2 +- 2 files changed, 72 insertions(+), 126 deletions(-) diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 5b0ad13ffe6..4f3dc40ae04 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -51,33 +51,16 @@ func (cli cliItem) NewCommand() *cobra.Command { DisableAutoGenTag: true, } - cmd.AddCommand(cli.NewInstallCmd()) - cmd.AddCommand(cli.NewRemoveCmd()) - cmd.AddCommand(cli.NewUpgradeCmd()) - cmd.AddCommand(cli.NewInspectCmd()) - cmd.AddCommand(cli.NewListCmd()) + cmd.AddCommand(cli.newInstallCmd()) + cmd.AddCommand(cli.newRemoveCmd()) + cmd.AddCommand(cli.newUpgradeCmd()) + cmd.AddCommand(cli.newInspectCmd()) + cmd.AddCommand(cli.newListCmd()) return cmd } -func (cli cliItem) Install(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - downloadOnly, err := flags.GetBool("download-only") - if err != nil { - return err - } - - force, err 
:= flags.GetBool("force") - if err != nil { - return err - } - - ignoreError, err := flags.GetBool("ignore") - if err != nil { - return err - } - +func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreError bool) error { hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) if err != nil { return err @@ -110,7 +93,13 @@ func (cli cliItem) Install(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewInstallCmd() *cobra.Command { +func (cli cliItem) newInstallCmd() *cobra.Command { + var ( + downloadOnly bool + force bool + ignoreError bool + ) + cmd := &cobra.Command{ Use: coalesce.String(cli.installHelp.use, "install [item]..."), Short: coalesce.String(cli.installHelp.short, fmt.Sprintf("Install given %s", cli.oneOrMore)), @@ -121,13 +110,15 @@ func (cli cliItem) NewInstallCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compAllItems(cli.name, args, toComplete) }, - RunE: cli.Install, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(args, downloadOnly, force, ignoreError) + }, } flags := cmd.Flags() - flags.BoolP("download-only", "d", false, "Only download packages, don't enable") - flags.Bool("force", false, "Force install: overwrite tainted and outdated files") - flags.Bool("ignore", false, fmt.Sprintf("Ignore errors when installing multiple %s", cli.name)) + flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") + flags.BoolVar(&ignoreError, "ignore", false, fmt.Sprintf("Ignore errors when installing multiple %s", cli.name)) return cmd } @@ -145,24 +136,7 @@ func istalledParentNames(item *cwhub.Item) []string { return ret } -func (cli cliItem) Remove(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - 
purge, err := flags.GetBool("purge") - if err != nil { - return err - } - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - all, err := flags.GetBool("all") - if err != nil { - return err - } - +func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error { hub, err := require.Hub(csConfig, nil, log.StandardLogger()) if err != nil { return err @@ -243,7 +217,13 @@ func (cli cliItem) Remove(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewRemoveCmd() *cobra.Command { +func (cli cliItem) newRemoveCmd() *cobra.Command { + var ( + purge bool + force bool + all bool + ) + cmd := &cobra.Command{ Use: coalesce.String(cli.removeHelp.use, "remove [item]..."), Short: coalesce.String(cli.removeHelp.short, fmt.Sprintf("Remove given %s", cli.oneOrMore)), @@ -254,30 +234,20 @@ func (cli cliItem) NewRemoveCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: cli.Remove, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.remove(args, purge, force, all) + }, } flags := cmd.Flags() - flags.Bool("purge", false, "Delete source file too") - flags.Bool("force", false, "Force remove: remove tainted and outdated files") - flags.Bool("all", false, fmt.Sprintf("Remove all the %s", cli.name)) + flags.BoolVar(&purge, "purge", false, "Delete source file too") + flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") + flags.BoolVar(&all, "all", false, fmt.Sprintf("Remove all the %s", cli.name)) return cmd } -func (cli cliItem) Upgrade(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - force, err := flags.GetBool("force") - if err != nil { - return err - } - - all, err := flags.GetBool("all") - if err != nil { - return err - } - +func (cli cliItem) upgrade(args []string, force bool, all bool) error { 
hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) if err != nil { return err @@ -341,7 +311,12 @@ func (cli cliItem) Upgrade(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewUpgradeCmd() *cobra.Command { +func (cli cliItem) newUpgradeCmd() *cobra.Command { + var ( + all bool + force bool + ) + cmd := &cobra.Command{ Use: coalesce.String(cli.upgradeHelp.use, "upgrade [item]..."), Short: coalesce.String(cli.upgradeHelp.short, fmt.Sprintf("Upgrade given %s", cli.oneOrMore)), @@ -351,43 +326,27 @@ func (cli cliItem) NewUpgradeCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: cli.Upgrade, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.upgrade(args, force, all) + }, } flags := cmd.Flags() - flags.BoolP("all", "a", false, fmt.Sprintf("Upgrade all the %s", cli.name)) - flags.Bool("force", false, "Force upgrade: overwrite tainted and outdated files") + flags.BoolVarP(&all, "all", "a", false, fmt.Sprintf("Upgrade all the %s", cli.name)) + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") return cmd } -func (cli cliItem) Inspect(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - url, err := flags.GetString("url") - if err != nil { - return err +func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMetrics bool) error { + if rev && !diff { + return fmt.Errorf("--rev can only be used with --diff") } if url != "" { csConfig.Cscli.PrometheusUrl = url } - diff, err := flags.GetBool("diff") - if err != nil { - return err - } - - rev, err := flags.GetBool("rev") - if err != nil { - return err - } - - noMetrics, err := flags.GetBool("no-metrics") - if err != nil { - return err - } - remote := (*cwhub.RemoteHubCfg)(nil) if diff { @@ -411,7 +370,7 @@ func (cli 
cliItem) Inspect(cmd *cobra.Command, args []string) error { continue } - if err = InspectItem(item, !noMetrics); err != nil { + if err = inspectItem(item, !noMetrics); err != nil { return err } @@ -425,7 +384,14 @@ func (cli cliItem) Inspect(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewInspectCmd() *cobra.Command { +func (cli cliItem) newInspectCmd() *cobra.Command { + var ( + url string + diff bool + rev bool + noMetrics bool + ) + cmd := &cobra.Command{ Use: coalesce.String(cli.inspectHelp.use, "inspect [item]..."), Short: coalesce.String(cli.inspectHelp.short, fmt.Sprintf("Inspect given %s", cli.oneOrMore)), @@ -436,45 +402,21 @@ func (cli cliItem) NewInspectCmd() *cobra.Command { ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - PreRunE: func(cmd *cobra.Command, _ []string) error { - flags := cmd.Flags() - - diff, err := flags.GetBool("diff") - if err != nil { - return err - } - - rev, err := flags.GetBool("rev") - if err != nil { - return err - } - - if rev && !diff { - return fmt.Errorf("--rev can only be used with --diff") - } - - return nil + RunE: func(cmd *cobra.Command, args []string) error { + return cli.inspect(args, url, diff, rev, noMetrics) }, - RunE: cli.Inspect, } flags := cmd.Flags() - flags.StringP("url", "u", "", "Prometheus url") - flags.Bool("diff", false, "Show diff with latest version (for tainted items)") - flags.Bool("rev", false, "Reverse diff output") - flags.Bool("no-metrics", false, "Don't show metrics (when cscli.output=human)") + flags.StringVarP(&url, "url", "u", "", "Prometheus url") + flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)") + flags.BoolVar(&rev, "rev", false, "Reverse diff output") + flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)") return cmd } -func (cli cliItem) List(cmd 
*cobra.Command, args []string) error { - flags := cmd.Flags() - - all, err := flags.GetBool("all") - if err != nil { - return err - } - +func (cli cliItem) list(args []string, all bool) error { hub, err := require.Hub(csConfig, nil, log.StandardLogger()) if err != nil { return err @@ -494,18 +436,22 @@ func (cli cliItem) List(cmd *cobra.Command, args []string) error { return nil } -func (cli cliItem) NewListCmd() *cobra.Command { +func (cli cliItem) newListCmd() *cobra.Command { + var all bool + cmd := &cobra.Command{ Use: coalesce.String(cli.listHelp.use, "list [item... | -a]"), Short: coalesce.String(cli.listHelp.short, fmt.Sprintf("List %s", cli.oneOrMore)), Long: coalesce.String(cli.listHelp.long, fmt.Sprintf("List of installed/available/specified %s", cli.name)), Example: cli.listHelp.example, DisableAutoGenTag: true, - RunE: cli.List, + RunE: func(_ *cobra.Command, args []string) error { + return cli.list(args, all) + }, } flags := cmd.Flags() - flags.BoolP("all", "a", false, "List disabled items as well") + flags.BoolVarP(&all, "all", "a", false, "List disabled items as well") return cmd } diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index 851be553f15..ea6d8a25631 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -143,7 +143,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item return nil } -func InspectItem(item *cwhub.Item, showMetrics bool) error { +func inspectItem(item *cwhub.Item, showMetrics bool) error { switch csConfig.Cscli.Output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) From 825c08aa9df2a5889872312eb17163e0effbeb90 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:26:46 +0100 Subject: [PATCH 005/581] refact "cscli simulation" (#2801) --- cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/simulation.go | 168 ++++++++++++++++----------------- 2 files changed, 85 insertions(+), 85 deletions(-) diff --git 
a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index acbced2e5a3..98d10a6bfb6 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -199,7 +199,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDashboard().NewCommand()) cmd.AddCommand(NewCLIDecisions().NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) - cmd.AddCommand(NewCLISimulation().NewCommand()) + cmd.AddCommand(NewCLISimulation(getconfig).NewCommand()) cmd.AddCommand(NewCLIBouncers(getconfig).NewCommand()) cmd.AddCommand(NewCLIMachines(getconfig).NewCommand()) cmd.AddCommand(NewCLICapi().NewCommand()) diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 99dac7c17f2..19eb1b99d01 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -13,13 +13,17 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliSimulation struct{} +type cliSimulation struct{ + cfg configGetter +} -func NewCLISimulation() *cliSimulation { - return &cliSimulation{} +func NewCLISimulation(getconfig configGetter) *cliSimulation { + return &cliSimulation{ + cfg: getconfig, + } } -func (cli cliSimulation) NewCommand() *cobra.Command { +func (cli *cliSimulation) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "simulation [command]", Short: "Manage simulation status of scenarios", @@ -27,16 +31,16 @@ func (cli cliSimulation) NewCommand() *cobra.Command { cscli simulation enable crowdsecurity/ssh-bf cscli simulation disable crowdsecurity/ssh-bf`, DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := csConfig.LoadSimulation(); err != nil { - log.Fatal(err) + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if err := cli.cfg().LoadSimulation(); err != nil { + return err } - if csConfig.Cscli.SimulationConfig == nil { + if cli.cfg().Cscli.SimulationConfig == nil { return fmt.Errorf("no simulation configured") } 
return nil }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { + PersistentPostRun: func(cmd *cobra.Command, _ []string) { if cmd.Name() != "status" { log.Infof(ReloadMessage()) } @@ -52,7 +56,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, return cmd } -func (cli cliSimulation) NewEnableCmd() *cobra.Command { +func (cli *cliSimulation) NewEnableCmd() *cobra.Command { var forceGlobalSimulation bool cmd := &cobra.Command{ @@ -60,10 +64,10 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command { Short: "Enable the simulation, globally or on specified scenarios", Example: `cscli simulation enable`, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { - hub, err := require.Hub(csConfig, nil, nil) + RunE: func(cmd *cobra.Command, args []string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { - log.Fatal(err) + return err } if len(args) > 0 { @@ -76,37 +80,35 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command { if !item.State.Installed { log.Warningf("'%s' isn't enabled", scenario) } - isExcluded := slices.Contains(csConfig.Cscli.SimulationConfig.Exclusions, scenario) - if *csConfig.Cscli.SimulationConfig.Simulation && !isExcluded { + isExcluded := slices.Contains(cli.cfg().Cscli.SimulationConfig.Exclusions, scenario) + if *cli.cfg().Cscli.SimulationConfig.Simulation && !isExcluded { log.Warning("global simulation is already enabled") continue } - if !*csConfig.Cscli.SimulationConfig.Simulation && isExcluded { + if !*cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded { log.Warningf("simulation for '%s' already enabled", scenario) continue } - if *csConfig.Cscli.SimulationConfig.Simulation && isExcluded { - if err := removeFromExclusion(scenario); err != nil { - log.Fatal(err) - } + if *cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded { + cli.removeFromExclusion(scenario) log.Printf("simulation enabled for '%s'", scenario) continue } - if err := addToExclusion(scenario); err != nil { 
- log.Fatal(err) - } + cli.addToExclusion(scenario) log.Printf("simulation mode for '%s' enabled", scenario) } - if err := dumpSimulationFile(); err != nil { - log.Fatalf("simulation enable: %s", err) + if err := cli.dumpSimulationFile(); err != nil { + return fmt.Errorf("simulation enable: %s", err) } } else if forceGlobalSimulation { - if err := enableGlobalSimulation(); err != nil { - log.Fatalf("unable to enable global simulation mode : %s", err) + if err := cli.enableGlobalSimulation(); err != nil { + return fmt.Errorf("unable to enable global simulation mode: %s", err) } } else { printHelp(cmd) } + + return nil }, } cmd.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Enable global simulation (reverse mode)") @@ -114,7 +116,7 @@ func (cli cliSimulation) NewEnableCmd() *cobra.Command { return cmd } -func (cli cliSimulation) NewDisableCmd() *cobra.Command { +func (cli *cliSimulation) NewDisableCmd() *cobra.Command { var forceGlobalSimulation bool cmd := &cobra.Command{ @@ -122,18 +124,16 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command { Short: "Disable the simulation mode. 
Disable only specified scenarios", Example: `cscli simulation disable`, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { if len(args) > 0 { for _, scenario := range args { - isExcluded := slices.Contains(csConfig.Cscli.SimulationConfig.Exclusions, scenario) - if !*csConfig.Cscli.SimulationConfig.Simulation && !isExcluded { + isExcluded := slices.Contains(cli.cfg().Cscli.SimulationConfig.Exclusions, scenario) + if !*cli.cfg().Cscli.SimulationConfig.Simulation && !isExcluded { log.Warningf("%s isn't in simulation mode", scenario) continue } - if !*csConfig.Cscli.SimulationConfig.Simulation && isExcluded { - if err := removeFromExclusion(scenario); err != nil { - log.Fatal(err) - } + if !*cli.cfg().Cscli.SimulationConfig.Simulation && isExcluded { + cli.removeFromExclusion(scenario) log.Printf("simulation mode for '%s' disabled", scenario) continue } @@ -141,21 +141,21 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command { log.Warningf("simulation mode is enabled but is already disable for '%s'", scenario) continue } - if err := addToExclusion(scenario); err != nil { - log.Fatal(err) - } + cli.addToExclusion(scenario) log.Printf("simulation mode for '%s' disabled", scenario) } - if err := dumpSimulationFile(); err != nil { - log.Fatalf("simulation disable: %s", err) + if err := cli.dumpSimulationFile(); err != nil { + return fmt.Errorf("simulation disable: %s", err) } } else if forceGlobalSimulation { - if err := disableGlobalSimulation(); err != nil { - log.Fatalf("unable to disable global simulation mode : %s", err) + if err := cli.disableGlobalSimulation(); err != nil { + return fmt.Errorf("unable to disable global simulation mode: %s", err) } } else { printHelp(cmd) } + + return nil }, } cmd.Flags().BoolVarP(&forceGlobalSimulation, "global", "g", false, "Disable global simulation (reverse mode)") @@ -163,16 +163,14 @@ func (cli cliSimulation) NewDisableCmd() *cobra.Command { 
return cmd } -func (cli cliSimulation) NewStatusCmd() *cobra.Command { +func (cli *cliSimulation) NewStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Show simulation mode status", Example: `cscli simulation status`, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { - if err := simulationStatus(); err != nil { - log.Fatal(err) - } + Run: func(_ *cobra.Command, _ []string) { + cli.status() }, PersistentPostRun: func(cmd *cobra.Command, args []string) { }, @@ -181,29 +179,29 @@ func (cli cliSimulation) NewStatusCmd() *cobra.Command { return cmd } -func addToExclusion(name string) error { - csConfig.Cscli.SimulationConfig.Exclusions = append(csConfig.Cscli.SimulationConfig.Exclusions, name) - return nil +func (cli *cliSimulation) addToExclusion(name string) { + cfg := cli.cfg() + cfg.Cscli.SimulationConfig.Exclusions = append(cfg.Cscli.SimulationConfig.Exclusions, name) } -func removeFromExclusion(name string) error { - index := slices.Index(csConfig.Cscli.SimulationConfig.Exclusions, name) +func (cli *cliSimulation) removeFromExclusion(name string) { + cfg := cli.cfg() + index := slices.Index(cfg.Cscli.SimulationConfig.Exclusions, name) // Remove element from the slice - csConfig.Cscli.SimulationConfig.Exclusions[index] = csConfig.Cscli.SimulationConfig.Exclusions[len(csConfig.Cscli.SimulationConfig.Exclusions)-1] - csConfig.Cscli.SimulationConfig.Exclusions[len(csConfig.Cscli.SimulationConfig.Exclusions)-1] = "" - csConfig.Cscli.SimulationConfig.Exclusions = csConfig.Cscli.SimulationConfig.Exclusions[:len(csConfig.Cscli.SimulationConfig.Exclusions)-1] - - return nil + cfg.Cscli.SimulationConfig.Exclusions[index] = cfg.Cscli.SimulationConfig.Exclusions[len(cfg.Cscli.SimulationConfig.Exclusions)-1] + cfg.Cscli.SimulationConfig.Exclusions[len(cfg.Cscli.SimulationConfig.Exclusions)-1] = "" + cfg.Cscli.SimulationConfig.Exclusions = cfg.Cscli.SimulationConfig.Exclusions[:len(cfg.Cscli.SimulationConfig.Exclusions)-1] } -func 
enableGlobalSimulation() error { - csConfig.Cscli.SimulationConfig.Simulation = new(bool) - *csConfig.Cscli.SimulationConfig.Simulation = true - csConfig.Cscli.SimulationConfig.Exclusions = []string{} +func (cli *cliSimulation) enableGlobalSimulation() error { + cfg := cli.cfg() + cfg.Cscli.SimulationConfig.Simulation = new(bool) + *cfg.Cscli.SimulationConfig.Simulation = true + cfg.Cscli.SimulationConfig.Exclusions = []string{} - if err := dumpSimulationFile(); err != nil { - log.Fatalf("unable to dump simulation file: %s", err) + if err := cli.dumpSimulationFile(); err != nil { + return fmt.Errorf("unable to dump simulation file: %s", err) } log.Printf("global simulation: enabled") @@ -211,59 +209,61 @@ func enableGlobalSimulation() error { return nil } -func dumpSimulationFile() error { - newConfigSim, err := yaml.Marshal(csConfig.Cscli.SimulationConfig) +func (cli *cliSimulation) dumpSimulationFile() error { + cfg := cli.cfg() + newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal simulation configuration: %s", err) } - err = os.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) + err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("write simulation config in '%s' failed: %s", csConfig.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("write simulation config in '%s' failed: %s", cfg.ConfigPaths.SimulationFilePath, err) } - log.Debugf("updated simulation file %s", csConfig.ConfigPaths.SimulationFilePath) + log.Debugf("updated simulation file %s", cfg.ConfigPaths.SimulationFilePath) return nil } -func disableGlobalSimulation() error { - csConfig.Cscli.SimulationConfig.Simulation = new(bool) - *csConfig.Cscli.SimulationConfig.Simulation = false +func (cli *cliSimulation) disableGlobalSimulation() error { + cfg := cli.cfg() + cfg.Cscli.SimulationConfig.Simulation = new(bool) + *cfg.Cscli.SimulationConfig.Simulation = 
false - csConfig.Cscli.SimulationConfig.Exclusions = []string{} - newConfigSim, err := yaml.Marshal(csConfig.Cscli.SimulationConfig) + cfg.Cscli.SimulationConfig.Exclusions = []string{} + newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal new simulation configuration: %s", err) } - err = os.WriteFile(csConfig.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) + err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("unable to write new simulation config in '%s' : %s", csConfig.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("unable to write new simulation config in '%s' : %s", cfg.ConfigPaths.SimulationFilePath, err) } log.Printf("global simulation: disabled") return nil } -func simulationStatus() error { - if csConfig.Cscli.SimulationConfig == nil { +func (cli *cliSimulation) status() { + cfg := cli.cfg() + if cfg.Cscli.SimulationConfig == nil { log.Printf("global simulation: disabled (configuration file is missing)") - return nil + return } - if *csConfig.Cscli.SimulationConfig.Simulation { + if *cfg.Cscli.SimulationConfig.Simulation { log.Println("global simulation: enabled") - if len(csConfig.Cscli.SimulationConfig.Exclusions) > 0 { + if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios not in simulation mode :") - for _, scenario := range csConfig.Cscli.SimulationConfig.Exclusions { + for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } } } else { log.Println("global simulation: disabled") - if len(csConfig.Cscli.SimulationConfig.Exclusions) > 0 { + if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios in simulation mode :") - for _, scenario := range csConfig.Cscli.SimulationConfig.Exclusions { + for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } } } - return nil } From 
45c669fb654b289ccd9125c852451551ce07c547 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:27:00 +0100 Subject: [PATCH 006/581] refact "cscli papi" (#2802) --- cmd/crowdsec-cli/main.go | 6 +--- cmd/crowdsec-cli/papi.go | 67 +++++++++++++++++++++++----------------- 2 files changed, 40 insertions(+), 33 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 98d10a6bfb6..a642098768b 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -210,7 +210,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIHubTest().NewCommand()) cmd.AddCommand(NewCLINotifications().NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) - cmd.AddCommand(NewCLIPapi().NewCommand()) + cmd.AddCommand(NewCLIPapi(getconfig).NewCommand()) cmd.AddCommand(NewCLICollection().NewCommand()) cmd.AddCommand(NewCLIParser().NewCommand()) cmd.AddCommand(NewCLIScenario().NewCommand()) @@ -223,10 +223,6 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewSetupCmd()) } - if fflag.PapiClient.IsEnabled() { - cmd.AddCommand(NewCLIPapi().NewCommand()) - } - if err := cmd.Execute(); err != nil { log.Fatal(err) } diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 606d8b415a0..43dcc30e0db 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -1,6 +1,7 @@ package main import ( + "fmt" "time" log "github.com/sirupsen/logrus" @@ -15,26 +16,31 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) -type cliPapi struct {} +type cliPapi struct { + cfg configGetter +} -func NewCLIPapi() *cliPapi { - return &cliPapi{} +func NewCLIPapi(getconfig configGetter) *cliPapi { + return &cliPapi{ + cfg: getconfig, + } } -func (cli cliPapi) NewCommand() *cobra.Command { - var cmd = &cobra.Command{ +func (cli *cliPapi) NewCommand() *cobra.Command { + cmd := &cobra.Command{ 
Use: "papi [action]", Short: "Manage interaction with Polling API (PAPI)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := require.LAPI(csConfig); err != nil { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - if err := require.CAPI(csConfig); err != nil { + if err := require.CAPI(cfg); err != nil { return err } - if err := require.PAPI(csConfig); err != nil { + if err := require.PAPI(cfg); err != nil { return err } return nil @@ -47,35 +53,36 @@ func (cli cliPapi) NewCommand() *cobra.Command { return cmd } -func (cli cliPapi) NewStatusCmd() *cobra.Command { +func (cli *cliPapi) NewStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Get status of the Polling API", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { var err error - dbClient, err = database.NewClient(csConfig.DbConfig) + cfg := cli.cfg() + dbClient, err = database.NewClient(cfg.DbConfig) if err != nil { - log.Fatalf("unable to initialize database client : %s", err) + return fmt.Errorf("unable to initialize database client: %s", err) } - apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { - log.Fatalf("unable to initialize API client : %s", err) + return fmt.Errorf("unable to initialize API client: %s", err) } - papi, err := apiserver.NewPAPI(apic, dbClient, csConfig.API.Server.ConsoleConfig, log.GetLevel()) + papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { - log.Fatalf("unable to initialize PAPI client : %s", err) + return fmt.Errorf("unable to initialize PAPI 
client: %s", err) } perms, err := papi.GetPermissions() if err != nil { - log.Fatalf("unable to get PAPI permissions: %s", err) + return fmt.Errorf("unable to get PAPI permissions: %s", err) } var lastTimestampStr *string lastTimestampStr, err = dbClient.GetConfigItem(apiserver.PapiPullKey) @@ -90,45 +97,48 @@ func (cli cliPapi) NewStatusCmd() *cobra.Command { for _, sub := range perms.Categories { log.Infof(" - %s", sub) } + + return nil }, } return cmd } -func (cli cliPapi) NewSyncCmd() *cobra.Command { +func (cli *cliPapi) NewSyncCmd() *cobra.Command { cmd := &cobra.Command{ Use: "sync", Short: "Sync with the Polling API, pulling all non-expired orders for the instance", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { var err error + cfg := cli.cfg() t := tomb.Tomb{} - dbClient, err = database.NewClient(csConfig.DbConfig) + + dbClient, err = database.NewClient(cfg.DbConfig) if err != nil { - log.Fatalf("unable to initialize database client : %s", err) + return fmt.Errorf("unable to initialize database client: %s", err) } - apic, err := apiserver.NewAPIC(csConfig.API.Server.OnlineClient, dbClient, csConfig.API.Server.ConsoleConfig, csConfig.API.Server.CapiWhitelists) - + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { - log.Fatalf("unable to initialize API client : %s", err) + return fmt.Errorf("unable to initialize API client: %s", err) } t.Go(apic.Push) - papi, err := apiserver.NewPAPI(apic, dbClient, csConfig.API.Server.ConsoleConfig, log.GetLevel()) - + papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { - log.Fatalf("unable to initialize PAPI client : %s", err) + return fmt.Errorf("unable to initialize PAPI client: %s", err) } + t.Go(papi.SyncDecisions) err = papi.PullOnce(time.Time{}, true) if 
err != nil { - log.Fatalf("unable to sync decisions: %s", err) + return fmt.Errorf("unable to sync decisions: %s", err) } log.Infof("Sending acknowledgements to CAPI") @@ -138,6 +148,7 @@ func (cli cliPapi) NewSyncCmd() *cobra.Command { t.Wait() time.Sleep(5 * time.Second) //FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done + return nil }, } From f5fbe4a200105af8b84a47467addc4253a7c0c1b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 17:27:15 +0100 Subject: [PATCH 007/581] refact "cscli dashboard" (#2803) --- cmd/crowdsec-cli/dashboard.go | 100 ++++++++++++---------- cmd/crowdsec-cli/dashboard_unsupported.go | 12 ++- cmd/crowdsec-cli/main.go | 2 +- 3 files changed, 64 insertions(+), 50 deletions(-) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index a3701c4dbbb..f32b9f061f9 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -43,14 +43,17 @@ var ( // information needed to set up a random password on user's behalf ) -type cliDashboard struct{} +type cliDashboard struct{ + cfg configGetter +} -func NewCLIDashboard() *cliDashboard { - return &cliDashboard{} +func NewCLIDashboard(getconfig configGetter) *cliDashboard { + return &cliDashboard{ + cfg: getconfig, + } } -func (cli cliDashboard) NewCommand() *cobra.Command { - /* ---- UPDATE COMMAND */ +func (cli *cliDashboard) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "dashboard [command]", Short: "Manage your metabase dashboard container [requires local API]", @@ -65,8 +68,9 @@ cscli dashboard start cscli dashboard stop cscli dashboard remove `, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := require.LAPI(csConfig); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } @@ -74,13 +78,13 @@ cscli dashboard remove return err } 
- metabaseConfigFolderPath := filepath.Join(csConfig.ConfigPaths.ConfigDir, metabaseConfigFolder) + metabaseConfigFolderPath := filepath.Join(cfg.ConfigPaths.ConfigDir, metabaseConfigFolder) metabaseConfigPath = filepath.Join(metabaseConfigFolderPath, metabaseConfigFile) if err := os.MkdirAll(metabaseConfigFolderPath, os.ModePerm); err != nil { return err } - if err := require.DB(csConfig); err != nil { + if err := require.DB(cfg); err != nil { return err } @@ -99,16 +103,16 @@ cscli dashboard remove }, } - cmd.AddCommand(cli.NewSetupCmd()) - cmd.AddCommand(cli.NewStartCmd()) - cmd.AddCommand(cli.NewStopCmd()) - cmd.AddCommand(cli.NewShowPasswordCmd()) - cmd.AddCommand(cli.NewRemoveCmd()) + cmd.AddCommand(cli.newSetupCmd()) + cmd.AddCommand(cli.newStartCmd()) + cmd.AddCommand(cli.newStopCmd()) + cmd.AddCommand(cli.newShowPasswordCmd()) + cmd.AddCommand(cli.newRemoveCmd()) return cmd } -func (cli cliDashboard) NewSetupCmd() *cobra.Command { +func (cli *cliDashboard) newSetupCmd() *cobra.Command { var force bool cmd := &cobra.Command{ @@ -122,9 +126,9 @@ cscli dashboard setup cscli dashboard setup --listen 0.0.0.0 cscli dashboard setup -l 0.0.0.0 -p 443 --password `, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { if metabaseDbPath == "" { - metabaseDbPath = csConfig.ConfigPaths.DataDir + metabaseDbPath = cli.cfg().ConfigPaths.DataDir } if metabasePassword == "" { @@ -145,10 +149,10 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if err != nil { return err } - if err = chownDatabase(dockerGroup.Gid); err != nil { + if err = cli.chownDatabase(dockerGroup.Gid); err != nil { return err } - mb, err := metabase.SetupMetabase(csConfig.API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage) + mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, 
metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage) if err != nil { return err } @@ -164,26 +168,28 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password return nil }, } - cmd.Flags().BoolVarP(&force, "force", "f", false, "Force setup : override existing files") - cmd.Flags().StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container") - cmd.Flags().StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container") - cmd.Flags().StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use") - cmd.Flags().StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") - cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes") - //cmd.Flags().StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") - cmd.Flags().StringVar(&metabasePassword, "password", "", "metabase password") + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Force setup : override existing files") + flags.StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container") + flags.StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container") + flags.StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use") + flags.StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") + flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") + //flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") + flags.StringVar(&metabasePassword, "password", "", "metabase password") return cmd } -func (cli cliDashboard) NewStartCmd() *cobra.Command { +func (cli *cliDashboard) newStartCmd() *cobra.Command { cmd := &cobra.Command{ Use: "start", Short: "Start the metabase container.", Long: `Stats the metabase container using docker.`, Args: cobra.ExactArgs(0), 
DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID) if err != nil { return err @@ -200,19 +206,20 @@ func (cli cliDashboard) NewStartCmd() *cobra.Command { return nil }, } + cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes") return cmd } -func (cli cliDashboard) NewStopCmd() *cobra.Command { +func (cli *cliDashboard) newStopCmd() *cobra.Command { cmd := &cobra.Command{ Use: "stop", Short: "Stops the metabase container.", Long: `Stops the metabase container using docker.`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { if err := metabase.StopContainer(metabaseContainerID); err != nil { return fmt.Errorf("unable to stop container '%s': %s", metabaseContainerID, err) } @@ -223,12 +230,12 @@ func (cli cliDashboard) NewStopCmd() *cobra.Command { return cmd } -func (cli cliDashboard) NewShowPasswordCmd() *cobra.Command { +func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { cmd := &cobra.Command{Use: "show-password", Short: "displays password of metabase.", Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { m := metabase.Metabase{} if err := m.LoadConfig(metabaseConfigPath); err != nil { return err @@ -241,7 +248,7 @@ func (cli cliDashboard) NewShowPasswordCmd() *cobra.Command { return cmd } -func (cli cliDashboard) NewRemoveCmd() *cobra.Command { +func (cli *cliDashboard) newRemoveCmd() *cobra.Command { var force bool cmd := &cobra.Command{ @@ -254,7 +261,7 @@ func (cli cliDashboard) NewRemoveCmd() *cobra.Command { cscli dashboard remove cscli dashboard remove --force `, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { if 
!forceYes { var answer bool prompt := &survey.Confirm{ @@ -291,8 +298,8 @@ cscli dashboard remove --force } log.Infof("container %s stopped & removed", metabaseContainerID) } - log.Debugf("Removing metabase db %s", csConfig.ConfigPaths.DataDir) - if err := metabase.RemoveDatabase(csConfig.ConfigPaths.DataDir); err != nil { + log.Debugf("Removing metabase db %s", cli.cfg().ConfigPaths.DataDir) + if err := metabase.RemoveDatabase(cli.cfg().ConfigPaths.DataDir); err != nil { log.Warnf("failed to remove metabase internal db : %s", err) } if force { @@ -309,8 +316,10 @@ cscli dashboard remove --force return nil }, } - cmd.Flags().BoolVarP(&force, "force", "f", false, "Remove also the metabase image") - cmd.Flags().BoolVarP(&forceYes, "yes", "y", false, "force yes") + + flags := cmd.Flags() + flags.BoolVarP(&force, "force", "f", false, "Remove also the metabase image") + flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") return cmd } @@ -431,22 +440,23 @@ func checkGroups(forceYes *bool) (*user.Group, error) { return user.LookupGroup(crowdsecGroup) } -func chownDatabase(gid string) error { +func (cli *cliDashboard) chownDatabase(gid string) error { + cfg := cli.cfg() intID, err := strconv.Atoi(gid) if err != nil { return fmt.Errorf("unable to convert group ID to int: %s", err) } - if stat, err := os.Stat(csConfig.DbConfig.DbPath); !os.IsNotExist(err) { + if stat, err := os.Stat(cfg.DbConfig.DbPath); !os.IsNotExist(err) { info := stat.Sys() - if err := os.Chown(csConfig.DbConfig.DbPath, int(info.(*syscall.Stat_t).Uid), intID); err != nil { - return fmt.Errorf("unable to chown sqlite db file '%s': %s", csConfig.DbConfig.DbPath, err) + if err := os.Chown(cfg.DbConfig.DbPath, int(info.(*syscall.Stat_t).Uid), intID); err != nil { + return fmt.Errorf("unable to chown sqlite db file '%s': %s", cfg.DbConfig.DbPath, err) } } - if csConfig.DbConfig.Type == "sqlite" && csConfig.DbConfig.UseWal != nil && *csConfig.DbConfig.UseWal { + if cfg.DbConfig.Type == "sqlite" && 
cfg.DbConfig.UseWal != nil && *cfg.DbConfig.UseWal { for _, ext := range []string{"-wal", "-shm"} { - file := csConfig.DbConfig.DbPath + ext + file := cfg.DbConfig.DbPath + ext if stat, err := os.Stat(file); !os.IsNotExist(err) { info := stat.Sys() if err := os.Chown(file, int(info.(*syscall.Stat_t).Uid), intID); err != nil { diff --git a/cmd/crowdsec-cli/dashboard_unsupported.go b/cmd/crowdsec-cli/dashboard_unsupported.go index 072ff525b19..4cf8e18b503 100644 --- a/cmd/crowdsec-cli/dashboard_unsupported.go +++ b/cmd/crowdsec-cli/dashboard_unsupported.go @@ -9,17 +9,21 @@ import ( "github.com/spf13/cobra" ) -type cliDashboard struct{} +type cliDashboard struct{ + cfg configGetter +} -func NewCLIDashboard() *cliDashboard { - return &cliDashboard{} +func NewCLIDashboard(getconfig configGetter) *cliDashboard { + return &cliDashboard{ + cfg: getconfig, + } } func (cli cliDashboard) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "dashboard", DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, _ []string) { log.Infof("Dashboard command is disabled on %s", runtime.GOOS) }, } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index a642098768b..055c38e2788 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -196,7 +196,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewConfigCmd()) cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) cmd.AddCommand(NewMetricsCmd()) - cmd.AddCommand(NewCLIDashboard().NewCommand()) + cmd.AddCommand(NewCLIDashboard(getconfig).NewCommand()) cmd.AddCommand(NewCLIDecisions().NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) cmd.AddCommand(NewCLISimulation(getconfig).NewCommand()) From 4160bb8102a07f7ea4d96098821f42951280dd22 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Feb 2024 22:36:21 +0100 Subject: [PATCH 008/581] refact "cscli decisions" 
(#2804) * refact "cscli decisions" * CI: relax mysql test timing * lint --- cmd/crowdsec-cli/dashboard.go | 3 +- cmd/crowdsec-cli/decisions.go | 57 ++++++++++++++++------------ cmd/crowdsec-cli/decisions_import.go | 4 +- cmd/crowdsec-cli/decisions_table.go | 6 ++- cmd/crowdsec-cli/flag.go | 1 + cmd/crowdsec-cli/machines.go | 16 ++++++-- cmd/crowdsec-cli/main.go | 4 +- cmd/crowdsec-cli/papi.go | 6 +-- cmd/crowdsec-cli/simulation.go | 11 +++++- cmd/crowdsec-cli/support.go | 30 ++++++++++++--- cmd/crowdsec-cli/utils.go | 4 +- pkg/csconfig/api.go | 3 +- pkg/csconfig/config.go | 4 +- test/bats/97_ipv6_single.bats | 2 +- 14 files changed, 103 insertions(+), 48 deletions(-) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index f32b9f061f9..64cb7577e89 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -176,7 +176,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password flags.StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use") flags.StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") - //flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") + // flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") flags.StringVar(&metabasePassword, "password", "", "metabase password") return cmd @@ -443,6 +443,7 @@ func checkGroups(forceYes *bool) (*user.Group, error) { func (cli *cliDashboard) chownDatabase(gid string) error { cfg := cli.cfg() intID, err := strconv.Atoi(gid) + if err != nil { return fmt.Errorf("unable to convert group ID to int: %s", err) } diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 683f100d4f7..c5839ae0079 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -25,7 +25,7 @@ import ( var Client *apiclient.ApiClient -func DecisionsToTable(alerts 
*models.GetAlertsResponse, printMachine bool) error { +func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { /*here we cheat a bit : to make it more readable for the user, we dedup some entries*/ spamLimit := make(map[string]bool) skipped := 0 @@ -49,7 +49,8 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error alertItem.Decisions = newDecisions } - if csConfig.Cscli.Output == "raw" { + switch cli.cfg().Cscli.Output { + case "raw": csvwriter := csv.NewWriter(os.Stdout) header := []string{"id", "source", "ip", "reason", "action", "country", "as", "events_count", "expiration", "simulated", "alert_id"} @@ -89,21 +90,24 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error } csvwriter.Flush() - } else if csConfig.Cscli.Output == "json" { + case "json": if *alerts == nil { // avoid returning "null" in `json" // could be cleaner if we used slice of alerts directly fmt.Println("[]") return nil } + x, _ := json.MarshalIndent(alerts, "", " ") fmt.Printf("%s", string(x)) - } else if csConfig.Cscli.Output == "human" { + case "human": if len(*alerts) == 0 { fmt.Println("No active decisions") return nil } - decisionsTable(color.Output, alerts, printMachine) + + cli.decisionsTable(color.Output, alerts, printMachine) + if skipped > 0 { fmt.Printf("%d duplicated entries skipped\n", skipped) } @@ -113,13 +117,17 @@ func DecisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error } -type cliDecisions struct {} +type cliDecisions struct { + cfg configGetter +} -func NewCLIDecisions() *cliDecisions { - return &cliDecisions{} +func NewCLIDecisions(getconfig configGetter) *cliDecisions { + return &cliDecisions{ + cfg: getconfig, + } } -func (cli cliDecisions) NewCommand() *cobra.Command { +func (cli *cliDecisions) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "decisions [action]", Short: "Manage decisions", @@ -130,16 +138,17 @@ func (cli cliDecisions) 
NewCommand() *cobra.Command { Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - if err := csConfig.LoadAPIClient(); err != nil { + cfg := cli.cfg() + if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - password := strfmt.Password(csConfig.API.Client.Credentials.Password) - apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL) + password := strfmt.Password(cfg.API.Client.Credentials.Password) + apiurl, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { - return fmt.Errorf("parsing api url %s: %w", csConfig.API.Client.Credentials.URL, err) + return fmt.Errorf("parsing api url %s: %w", cfg.API.Client.Credentials.URL, err) } Client, err = apiclient.NewClient(&apiclient.Config{ - MachineID: csConfig.API.Client.Credentials.Login, + MachineID: cfg.API.Client.Credentials.Login, Password: password, UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), URL: apiurl, @@ -152,15 +161,15 @@ func (cli cliDecisions) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewAddCmd()) - cmd.AddCommand(cli.NewDeleteCmd()) - cmd.AddCommand(cli.NewImportCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newAddCmd()) + cmd.AddCommand(cli.newDeleteCmd()) + cmd.AddCommand(cli.newImportCmd()) return cmd } -func (cli cliDecisions) NewListCmd() *cobra.Command { +func (cli *cliDecisions) newListCmd() *cobra.Command { var filter = apiclient.AlertsListOpts{ ValueEquals: new(string), ScopeEquals: new(string), @@ -262,7 +271,7 @@ cscli decisions list -t ban return fmt.Errorf("unable to retrieve decisions: %w", err) } - err = DecisionsToTable(alerts, printMachine) + err = cli.decisionsToTable(alerts, printMachine) if err != nil { return fmt.Errorf("unable to print decisions: %w", err) } @@ -289,7 +298,7 @@ cscli decisions list -t ban return cmd } -func (cli cliDecisions) NewAddCmd() *cobra.Command { +func (cli 
*cliDecisions) newAddCmd() *cobra.Command { var ( addIP string addRange string @@ -325,7 +334,7 @@ cscli decisions add --scope username --value foobar createdAt := time.Now().UTC().Format(time.RFC3339) /*take care of shorthand options*/ - if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + if err = manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { return err } @@ -341,7 +350,7 @@ cscli decisions add --scope username --value foobar } if addReason == "" { - addReason = fmt.Sprintf("manual '%s' from '%s'", addType, csConfig.API.Client.Credentials.Login) + addReason = fmt.Sprintf("manual '%s' from '%s'", addType, cli.cfg().API.Client.Credentials.Login) } decision := models.Decision{ Duration: &addDuration, @@ -400,7 +409,7 @@ cscli decisions add --scope username --value foobar return cmd } -func (cli cliDecisions) NewDeleteCmd() *cobra.Command { +func (cli *cliDecisions) newDeleteCmd() *cobra.Command { var delFilter = apiclient.DecisionsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/decisions_import.go index 2d7ee485bd1..45d1841a603 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/decisions_import.go @@ -67,7 +67,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { } -func (cli cliDecisions) runImport(cmd *cobra.Command, args []string) error { +func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { flags := cmd.Flags() input, err := flags.GetString("input") @@ -236,7 +236,7 @@ func (cli cliDecisions) runImport(cmd *cobra.Command, args []string) error { } -func (cli cliDecisions) NewImportCmd() *cobra.Command { +func (cli *cliDecisions) newImportCmd() *cobra.Command { cmd := &cobra.Command{ Use: "import [options]", Short: "Import decisions from a file or pipe", diff --git a/cmd/crowdsec-cli/decisions_table.go 
b/cmd/crowdsec-cli/decisions_table.go index d8d5e032594..10021e4dd4b 100644 --- a/cmd/crowdsec-cli/decisions_table.go +++ b/cmd/crowdsec-cli/decisions_table.go @@ -8,13 +8,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { +func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { t := newTable(out) t.SetRowLines(false) + header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"} if printMachine { header = append(header, "Machine") } + t.SetHeaders(header...) for _, alertItem := range *alerts { @@ -22,6 +24,7 @@ func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachin if *alertItem.Simulated { *decisionItem.Type = fmt.Sprintf("(simul)%s", *decisionItem.Type) } + row := []string{ strconv.Itoa(int(decisionItem.ID)), *decisionItem.Origin, @@ -42,5 +45,6 @@ func decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachin t.AddRow(row...) 
} } + t.Render() } diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/flag.go index 402302a1f64..e85f33d4467 100644 --- a/cmd/crowdsec-cli/flag.go +++ b/cmd/crowdsec-cli/flag.go @@ -18,6 +18,7 @@ func (p *MachinePassword) Set(v string) error { if len(v) > 72 { return errors.New("password too long (max 72 characters)") } + *p = MachinePassword(v) return nil diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 0cabccf76f5..1819bdcf5fb 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -45,6 +45,7 @@ func generatePassword(length int) string { if err != nil { log.Fatalf("failed getting data from prng for password generation : %s", err) } + buf[i] = charset[rInt.Int64()] } @@ -59,12 +60,14 @@ func generateIDPrefix() (string, error) { if err == nil { return prefix, nil } + log.Debugf("failed to get machine-id with usual files: %s", err) bID, err := uuid.NewRandom() if err == nil { return bID.String(), nil } + return "", fmt.Errorf("generating machine id: %w", err) } @@ -75,11 +78,14 @@ func generateID(prefix string) (string, error) { if prefix == "" { prefix, err = generateIDPrefix() } + if err != nil { return "", err } + prefix = strings.ReplaceAll(prefix, "-", "")[:32] suffix := generatePassword(16) + return prefix + suffix, nil } @@ -289,6 +295,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri if !autoAdd { return fmt.Errorf("please specify a password with --password or use --auto") } + machinePassword = generatePassword(passwordLength) } else if machinePassword == "" && interactive { qs := &survey.Password{ @@ -328,10 +335,10 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri } if dumpFile != "" && dumpFile != "-" { - err = os.WriteFile(dumpFile, apiConfigDump, 0o600) - if err != nil { + if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil { return fmt.Errorf("write api credentials in '%s' failed: %s", dumpFile, err) } + 
fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) } else { fmt.Print(string(apiConfigDump)) @@ -359,11 +366,11 @@ func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplet func (cli *cliMachines) delete(machines []string) error { for _, machineID := range machines { - err := cli.db.DeleteWatcher(machineID) - if err != nil { + if err := cli.db.DeleteWatcher(machineID); err != nil { log.Errorf("unable to delete machine '%s': %s", machineID, err) return nil } + log.Infof("machine '%s' deleted successfully", machineID) } @@ -473,6 +480,7 @@ func (cli *cliMachines) validate(machineID string) error { if err := cli.db.ValidateMachine(machineID); err != nil { return fmt.Errorf("unable to validate machine '%s': %s", machineID, err) } + log.Infof("machine '%s' validated successfully", machineID) return nil diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 055c38e2788..1a7c84e3c78 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -157,8 +157,8 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.PersistentFlags().BoolVar(&wrn_lvl, "warning", false, "Set logging to warning") cmd.PersistentFlags().BoolVar(&err_lvl, "error", false, "Set logging to error") cmd.PersistentFlags().BoolVar(&trace_lvl, "trace", false, "Set logging to trace") - cmd.PersistentFlags().StringVar(&flagBranch, "branch", "", "Override hub branch on github") + if err := cmd.PersistentFlags().MarkHidden("branch"); err != nil { log.Fatalf("failed to hide flag: %s", err) } @@ -197,7 +197,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) cmd.AddCommand(NewMetricsCmd()) cmd.AddCommand(NewCLIDashboard(getconfig).NewCommand()) - cmd.AddCommand(NewCLIDecisions().NewCommand()) + cmd.AddCommand(NewCLIDecisions(getconfig).NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) 
cmd.AddCommand(NewCLISimulation(getconfig).NewCommand()) cmd.AddCommand(NewCLIBouncers(getconfig).NewCommand()) diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 43dcc30e0db..04223ef93ab 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -32,7 +32,7 @@ func (cli *cliPapi) NewCommand() *cobra.Command { Short: "Manage interaction with Polling API (PAPI)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() if err := require.LAPI(cfg); err != nil { return err @@ -59,7 +59,7 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { Short: "Get status of the Polling API", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { var err error cfg := cli.cfg() dbClient, err = database.NewClient(cfg.DbConfig) @@ -111,7 +111,7 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command { Short: "Sync with the Polling API, pulling all non-expired orders for the instance", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { var err error cfg := cli.cfg() t := tomb.Tomb{} diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 19eb1b99d01..a6e710c5747 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -211,14 +211,17 @@ func (cli *cliSimulation) enableGlobalSimulation() error { func (cli *cliSimulation) dumpSimulationFile() error { cfg := cli.cfg() + newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal simulation configuration: %s", err) } + err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { return fmt.Errorf("write simulation 
config in '%s' failed: %s", cfg.ConfigPaths.SimulationFilePath, err) } + log.Debugf("updated simulation file %s", cfg.ConfigPaths.SimulationFilePath) return nil @@ -230,16 +233,19 @@ func (cli *cliSimulation) disableGlobalSimulation() error { *cfg.Cscli.SimulationConfig.Simulation = false cfg.Cscli.SimulationConfig.Exclusions = []string{} + newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { return fmt.Errorf("unable to marshal new simulation configuration: %s", err) } + err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("unable to write new simulation config in '%s' : %s", cfg.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("unable to write new simulation config in '%s': %s", cfg.ConfigPaths.SimulationFilePath, err) } log.Printf("global simulation: disabled") + return nil } @@ -249,10 +255,13 @@ func (cli *cliSimulation) status() { log.Printf("global simulation: disabled (configuration file is missing)") return } + if *cfg.Cscli.SimulationConfig.Simulation { log.Println("global simulation: enabled") + if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios not in simulation mode :") + for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index ed7f7cf2ffd..6ddfa3056bc 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -76,9 +76,10 @@ func collectMetrics() ([]byte, []byte, error) { if err != nil { return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %s", err) } + client := &http.Client{} - resp, err := client.Do(req) + resp, err := client.Do(req) if err != nil { return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %s", err) } @@ -100,17 +101,20 @@ func collectVersion() []byte { func collectFeatures() []byte { log.Info("Collecting feature flags") + enabledFeatures 
:= fflag.Crowdsec.GetEnabledFeatures() w := bytes.NewBuffer(nil) for _, k := range enabledFeatures { fmt.Fprintf(w, "%s\n", k) } + return w.Bytes() } func collectOSInfo() ([]byte, error) { log.Info("Collecting OS info") + info, err := osinfo.GetOSInfo() if err != nil { @@ -133,6 +137,7 @@ func collectHubItems(hub *cwhub.Hub, itemType string) []byte { var err error out := bytes.NewBuffer(nil) + log.Infof("Collecting %s list", itemType) items := make(map[string][]*cwhub.Item) @@ -144,26 +149,33 @@ func collectHubItems(hub *cwhub.Hub, itemType string) []byte { if err := listItems(out, []string{itemType}, items, false); err != nil { log.Warnf("could not collect %s list: %s", itemType, err) } + return out.Bytes() } func collectBouncers(dbClient *database.Client) ([]byte, error) { out := bytes.NewBuffer(nil) + bouncers, err := dbClient.ListBouncers() if err != nil { return nil, fmt.Errorf("unable to list bouncers: %s", err) } + getBouncersTable(out, bouncers) + return out.Bytes(), nil } func collectAgents(dbClient *database.Client) ([]byte, error) { out := bytes.NewBuffer(nil) + machines, err := dbClient.ListMachines() if err != nil { return nil, fmt.Errorf("unable to list machines: %s", err) } + getAgentsTable(out, machines) + return out.Bytes(), nil } @@ -171,12 +183,14 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil { return []byte("No agent credentials found, are we LAPI ?") } + pwd := strfmt.Password(password) - apiurl, err := url.Parse(endpoint) + apiurl, err := url.Parse(endpoint) if err != nil { return []byte(fmt.Sprintf("cannot parse API URL: %s", err)) } + scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) if err != nil { return []byte(fmt.Sprintf("could not collect scenarios: %s", err)) @@ -189,6 +203,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str if err != nil { return []byte(fmt.Sprintf("could not init 
client: %s", err)) } + t := models.WatcherAuthRequest{ MachineID: &login, Password: &pwd, @@ -205,6 +220,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str func collectCrowdsecConfig() []byte { log.Info("Collecting crowdsec config") + config, err := os.ReadFile(*csConfig.FilePath) if err != nil { return []byte(fmt.Sprintf("could not read config file: %s", err)) @@ -217,15 +233,18 @@ func collectCrowdsecConfig() []byte { func collectCrowdsecProfile() []byte { log.Info("Collecting crowdsec profile") + config, err := os.ReadFile(csConfig.API.Server.ProfilesPath) if err != nil { return []byte(fmt.Sprintf("could not read profile file: %s", err)) } + return config } func collectAcquisitionConfig() map[string][]byte { log.Info("Collecting acquisition config") + ret := make(map[string][]byte) for _, filename := range csConfig.Crowdsec.AcquisitionFiles { @@ -287,7 +306,7 @@ cscli support dump -f /tmp/crowdsec-support.zip `, Args: cobra.NoArgs, DisableAutoGenTag: true, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, _ []string) { var err error var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool infos := map[string][]byte{ @@ -307,13 +326,13 @@ cscli support dump -f /tmp/crowdsec-support.zip infos[SUPPORT_AGENTS_PATH] = []byte(err.Error()) } - if err := csConfig.LoadAPIServer(true); err != nil { + if err = csConfig.LoadAPIServer(true); err != nil { log.Warnf("could not load LAPI, skipping CAPI check") skipLAPI = true infos[SUPPORT_CAPI_STATUS_PATH] = []byte(err.Error()) } - if err := csConfig.LoadCrowdsec(); err != nil { + if err = csConfig.LoadCrowdsec(); err != nil { log.Warnf("could not load agent config, skipping crowdsec config check") skipAgent = true } @@ -399,7 +418,6 @@ cscli support dump -f /tmp/crowdsec-support.zip } if !skipAgent { - acquis := collectAcquisitionConfig() for filename, content := range acquis { diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go index 
b568c6eae3f..1ae8f9da2ed 100644 --- a/cmd/crowdsec-cli/utils.go +++ b/cmd/crowdsec-cli/utils.go @@ -25,6 +25,7 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value * return fmt.Errorf("%s isn't a valid range", *ipRange) } } + if *ip != "" { ipRepr := net.ParseIP(*ip) if ipRepr == nil { @@ -32,7 +33,7 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value * } } - //avoid confusion on scope (ip vs Ip and range vs Range) + // avoid confusion on scope (ip vs Ip and range vs Range) switch strings.ToLower(*scope) { case "ip": *scope = types.Ip @@ -43,6 +44,7 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value * case "as": *scope = types.AS } + return nil } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index cdff39e700f..de8ee4934a7 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -178,6 +178,7 @@ func (l *LocalApiClientCfg) Load() error { func (lapiCfg *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) { trustedIPs := make([]net.IPNet, 0) + for _, ip := range lapiCfg.TrustedIPs { cidr := toValidCIDR(ip) @@ -265,7 +266,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { return fmt.Errorf("no listen_uri specified") } - //inherit log level from common, then api->server + // inherit log level from common, then api->server var logLevel log.Level if c.API.Server.LogLevel != nil { logLevel = *c.API.Server.LogLevel diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index a704414952e..2dc7ecc7d53 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -25,7 +25,7 @@ var globalConfig = Config{} // Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags type Config struct { - //just a path to ourselves :p + // just a path to ourselves :p FilePath *string `yaml:"-"` Self []byte `yaml:"-"` Common *CommonCfg `yaml:"common,omitempty"` @@ -44,10 +44,12 @@ type Config struct { func NewConfig(configFile 
string, disableAgent bool, disableAPI bool, inCli bool) (*Config, string, error) { patcher := yamlpatch.NewPatcher(configFile, ".local") patcher.SetQuiet(inCli) + fcontent, err := patcher.MergedPatchContent() if err != nil { return nil, "", err } + configData := csstring.StrictExpand(string(fcontent), os.LookupEnv) cfg := Config{ FilePath: &configFile, diff --git a/test/bats/97_ipv6_single.bats b/test/bats/97_ipv6_single.bats index ffbfc125b24..982976d70ed 100644 --- a/test/bats/97_ipv6_single.bats +++ b/test/bats/97_ipv6_single.bats @@ -19,7 +19,7 @@ teardown_file() { setup() { load "../lib/setup.sh" - if is_db_mysql; then sleep 0.3; fi + if is_db_mysql; then sleep 0.5; fi } api() { From 5ff8a03195fda2466cb4a18cfd11256467fa064a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 2 Feb 2024 09:45:03 +0100 Subject: [PATCH 009/581] refact "cscli metrics" par 1 (#2805) --- cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/metrics.go | 68 +++++++++++++++++-------------- cmd/crowdsec-cli/metrics_table.go | 22 +++++----- cmd/crowdsec-cli/support.go | 2 +- 4 files changed, 50 insertions(+), 44 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 1a7c84e3c78..db3a164af90 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -195,7 +195,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewConfigCmd()) cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) - cmd.AddCommand(NewMetricsCmd()) + cmd.AddCommand(NewCLIMetrics(getconfig).NewCommand()) cmd.AddCommand(NewCLIDashboard(getconfig).NewCommand()) cmd.AddCommand(NewCLIDecisions(getconfig).NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 5b24dc84c91..902d4f0f445 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -19,8 +19,19 @@ import ( 
"github.com/crowdsecurity/go-cs-lib/trace" ) +type cliMetrics struct { + cfg configGetter +} + +func NewCLIMetrics(getconfig configGetter) *cliMetrics { + return &cliMetrics{ + cfg: getconfig, + } +} + + // FormatPrometheusMetrics is a complete rip from prom2json -func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error { +func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUnit bool) error { mfChan := make(chan *dto.MetricFamily, 1024) errChan := make(chan error, 1) @@ -256,9 +267,9 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error } if formatType == "human" { - acquisStatsTable(out, acquis_stats) - bucketStatsTable(out, buckets_stats) - parserStatsTable(out, parsers_stats) + acquisStatsTable(out, acquis_stats, noUnit) + bucketStatsTable(out, buckets_stats, noUnit) + parserStatsTable(out, parsers_stats, noUnit) lapiStatsTable(out, lapi_stats) lapiMachineStatsTable(out, lapi_machine_stats) lapiBouncerStatsTable(out, lapi_bouncer_stats) @@ -266,8 +277,8 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error decisionStatsTable(out, decisions_stats) alertStatsTable(out, alerts_stats) stashStatsTable(out, stash_stats) - appsecMetricsToTable(out, appsec_engine_stats) - appsecRulesToTable(out, appsec_rule_stats) + appsecMetricsToTable(out, appsec_engine_stats, noUnit) + appsecRulesToTable(out, appsec_rule_stats, noUnit) return nil } @@ -304,52 +315,47 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string) error return nil } -var noUnit bool - -func runMetrics(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - url, err := flags.GetString("url") - if err != nil { - return err - } +func (cli *cliMetrics) run(url string, noUnit bool) error { + cfg := cli.cfg() if url != "" { - csConfig.Cscli.PrometheusUrl = url - } - - noUnit, err = flags.GetBool("no-unit") - if err != nil { - return err + cfg.Cscli.PrometheusUrl = url } - 
if csConfig.Prometheus == nil { + if cfg.Prometheus == nil { return fmt.Errorf("prometheus section missing, can't show metrics") } - if !csConfig.Prometheus.Enabled { + if !cfg.Prometheus.Enabled { return fmt.Errorf("prometheus is not enabled, can't show metrics") } - if err = FormatPrometheusMetrics(color.Output, csConfig.Cscli.PrometheusUrl, csConfig.Cscli.Output); err != nil { + if err := FormatPrometheusMetrics(color.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Output, noUnit); err != nil { return err } return nil } -func NewMetricsCmd() *cobra.Command { - cmdMetrics := &cobra.Command{ +func (cli *cliMetrics) NewCommand() *cobra.Command { + var ( + url string + noUnit bool + ) + + cmd := &cobra.Command{ Use: "metrics", Short: "Display crowdsec prometheus metrics.", Long: `Fetch metrics from the prometheus server and display them in a human-friendly way`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: runMetrics, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.run(url, noUnit) + }, } - flags := cmdMetrics.PersistentFlags() - flags.StringP("url", "u", "", "Prometheus url (http://:/metrics)") - flags.Bool("no-unit", false, "Show the real number instead of formatted with units") + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Prometheus url (http://:/metrics)") + flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") - return cmdMetrics + return cmd } diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 80b9cb6e435..287333b5f3c 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -43,7 +43,7 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i return numRows } -func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string) (int, error) { +func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t 
== nil { return 0, fmt.Errorf("nil table") } @@ -81,7 +81,7 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri return numRows, nil } -func bucketStatsTable(out io.Writer, stats map[string]map[string]int) { +func bucketStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") @@ -89,7 +89,7 @@ func bucketStatsTable(out io.Writer, stats map[string]map[string]int) { keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} - if numRows, err := metricsToTable(t, stats, keys); err != nil { + if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { log.Warningf("while collecting bucket stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nBucket Metrics:") @@ -97,7 +97,7 @@ func bucketStatsTable(out io.Writer, stats map[string]map[string]int) { } } -func acquisStatsTable(out io.Writer, stats map[string]map[string]int) { +func acquisStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") @@ -105,7 +105,7 @@ func acquisStatsTable(out io.Writer, stats map[string]map[string]int) { keys := []string{"reads", "parsed", "unparsed", "pour"} - if numRows, err := metricsToTable(t, stats, keys); err != nil { + if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nAcquisition Metrics:") @@ -113,13 +113,13 @@ func acquisStatsTable(out io.Writer, stats map[string]map[string]int) { } } -func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int) { +func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int, noUnit bool) { t := newTable(out) 
t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") t.SetAlignment(table.AlignLeft, table.AlignLeft) keys := []string{"processed", "blocked"} - if numRows, err := metricsToTable(t, metrics, keys); err != nil { + if numRows, err := metricsToTable(t, metrics, keys, noUnit); err != nil { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nAppsec Metrics:") @@ -127,14 +127,14 @@ func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int) { } } -func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string]int) { +func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string]int, noUnit bool) { for appsecEngine, appsecEngineRulesStats := range metrics { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Rule ID", "Triggered") t.SetAlignment(table.AlignLeft, table.AlignLeft) keys := []string{"triggered"} - if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys); err != nil { + if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 { renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) @@ -144,7 +144,7 @@ func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string] } -func parserStatsTable(out io.Writer, stats map[string]map[string]int) { +func parserStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") @@ -152,7 +152,7 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int) { keys := []string{"hits", "parsed", "unparsed"} - if numRows, err := metricsToTable(t, stats, keys); err != nil { + if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 { 
renderTableTitle(out, "\nParser Metrics:") diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 6ddfa3056bc..e0a2fa9db90 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -66,7 +66,7 @@ func collectMetrics() ([]byte, []byte, error) { } humanMetrics := bytes.NewBuffer(nil) - err := FormatPrometheusMetrics(humanMetrics, csConfig.Cscli.PrometheusUrl, "human") + err := FormatPrometheusMetrics(humanMetrics, csConfig.Cscli.PrometheusUrl, "human", false) if err != nil { return nil, nil, fmt.Errorf("could not fetch promtheus metrics: %s", err) From 81acad0d668fb575fb207e54f300e0dd4e39e05e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 2 Feb 2024 10:40:55 +0100 Subject: [PATCH 010/581] refact "cscli metrics" part 2 (#2806) --- cmd/crowdsec-cli/metrics.go | 243 ++++++++++++++++-------------- cmd/crowdsec-cli/metrics_table.go | 60 ++++---- 2 files changed, 155 insertions(+), 148 deletions(-) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 902d4f0f445..ad255e847db 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -19,6 +19,27 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" ) +type ( + statAcquis map[string]map[string]int + statParser map[string]map[string]int + statBucket map[string]map[string]int + statLapi map[string]map[string]int + statLapiMachine map[string]map[string]map[string]int + statLapiBouncer map[string]map[string]map[string]int + statLapiDecision map[string]struct { + NonEmpty int + Empty int + } + statDecision map[string]map[string]map[string]int + statAppsecEngine map[string]map[string]int + statAppsecRule map[string]map[string]map[string]int + statAlert map[string]int + statStash map[string]struct { + Type string + Count int + } +) + type cliMetrics struct { cfg configGetter } @@ -29,7 +50,6 @@ func NewCLIMetrics(getconfig configGetter) *cliMetrics { } } - // FormatPrometheusMetrics is a complete rip 
from prom2json func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUnit bool) error { mfChan := make(chan *dto.MetricFamily, 1024) @@ -63,24 +83,19 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni log.Debugf("Finished reading prometheus output, %d entries", len(result)) /*walk*/ - lapi_decisions_stats := map[string]struct { - NonEmpty int - Empty int - }{} - acquis_stats := map[string]map[string]int{} - parsers_stats := map[string]map[string]int{} - buckets_stats := map[string]map[string]int{} - lapi_stats := map[string]map[string]int{} - lapi_machine_stats := map[string]map[string]map[string]int{} - lapi_bouncer_stats := map[string]map[string]map[string]int{} - decisions_stats := map[string]map[string]map[string]int{} - appsec_engine_stats := map[string]map[string]int{} - appsec_rule_stats := map[string]map[string]map[string]int{} - alerts_stats := map[string]int{} - stash_stats := map[string]struct { - Type string - Count int - }{} + + mAcquis := statAcquis{} + mParser := statParser{} + mBucket := statBucket{} + mLapi := statLapi{} + mLapiMachine := statLapiMachine{} + mLapiBouncer := statLapiBouncer{} + mLapiDecision := statLapiDecision{} + mDecision := statDecision{} + mAppsecEngine := statAppsecEngine{} + mAppsecRule := statAppsecRule{} + mAlert := statAlert{} + mStash := statStash{} for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { @@ -127,138 +142,138 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni switch fam.Name { /*buckets*/ case "cs_bucket_created_total": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { + mBucket[name] = make(map[string]int) } - buckets_stats[name]["instantiation"] += ival + mBucket[name]["instantiation"] += ival case "cs_buckets": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { 
+ mBucket[name] = make(map[string]int) } - buckets_stats[name]["curr_count"] += ival + mBucket[name]["curr_count"] += ival case "cs_bucket_overflowed_total": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { + mBucket[name] = make(map[string]int) } - buckets_stats[name]["overflow"] += ival + mBucket[name]["overflow"] += ival case "cs_bucket_poured_total": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { + mBucket[name] = make(map[string]int) } - if _, ok := acquis_stats[source]; !ok { - acquis_stats[source] = make(map[string]int) + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) } - buckets_stats[name]["pour"] += ival - acquis_stats[source]["pour"] += ival + mBucket[name]["pour"] += ival + mAcquis[source]["pour"] += ival case "cs_bucket_underflowed_total": - if _, ok := buckets_stats[name]; !ok { - buckets_stats[name] = make(map[string]int) + if _, ok := mBucket[name]; !ok { + mBucket[name] = make(map[string]int) } - buckets_stats[name]["underflow"] += ival + mBucket[name]["underflow"] += ival /*acquis*/ case "cs_parser_hits_total": - if _, ok := acquis_stats[source]; !ok { - acquis_stats[source] = make(map[string]int) + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) } - acquis_stats[source]["reads"] += ival + mAcquis[source]["reads"] += ival case "cs_parser_hits_ok_total": - if _, ok := acquis_stats[source]; !ok { - acquis_stats[source] = make(map[string]int) + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) } - acquis_stats[source]["parsed"] += ival + mAcquis[source]["parsed"] += ival case "cs_parser_hits_ko_total": - if _, ok := acquis_stats[source]; !ok { - acquis_stats[source] = make(map[string]int) + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) } - acquis_stats[source]["unparsed"] += ival + 
mAcquis[source]["unparsed"] += ival case "cs_node_hits_total": - if _, ok := parsers_stats[name]; !ok { - parsers_stats[name] = make(map[string]int) + if _, ok := mParser[name]; !ok { + mParser[name] = make(map[string]int) } - parsers_stats[name]["hits"] += ival + mParser[name]["hits"] += ival case "cs_node_hits_ok_total": - if _, ok := parsers_stats[name]; !ok { - parsers_stats[name] = make(map[string]int) + if _, ok := mParser[name]; !ok { + mParser[name] = make(map[string]int) } - parsers_stats[name]["parsed"] += ival + mParser[name]["parsed"] += ival case "cs_node_hits_ko_total": - if _, ok := parsers_stats[name]; !ok { - parsers_stats[name] = make(map[string]int) + if _, ok := mParser[name]; !ok { + mParser[name] = make(map[string]int) } - parsers_stats[name]["unparsed"] += ival + mParser[name]["unparsed"] += ival case "cs_lapi_route_requests_total": - if _, ok := lapi_stats[route]; !ok { - lapi_stats[route] = make(map[string]int) + if _, ok := mLapi[route]; !ok { + mLapi[route] = make(map[string]int) } - lapi_stats[route][method] += ival + mLapi[route][method] += ival case "cs_lapi_machine_requests_total": - if _, ok := lapi_machine_stats[machine]; !ok { - lapi_machine_stats[machine] = make(map[string]map[string]int) + if _, ok := mLapiMachine[machine]; !ok { + mLapiMachine[machine] = make(map[string]map[string]int) } - if _, ok := lapi_machine_stats[machine][route]; !ok { - lapi_machine_stats[machine][route] = make(map[string]int) + if _, ok := mLapiMachine[machine][route]; !ok { + mLapiMachine[machine][route] = make(map[string]int) } - lapi_machine_stats[machine][route][method] += ival + mLapiMachine[machine][route][method] += ival case "cs_lapi_bouncer_requests_total": - if _, ok := lapi_bouncer_stats[bouncer]; !ok { - lapi_bouncer_stats[bouncer] = make(map[string]map[string]int) + if _, ok := mLapiBouncer[bouncer]; !ok { + mLapiBouncer[bouncer] = make(map[string]map[string]int) } - if _, ok := lapi_bouncer_stats[bouncer][route]; !ok { - 
lapi_bouncer_stats[bouncer][route] = make(map[string]int) + if _, ok := mLapiBouncer[bouncer][route]; !ok { + mLapiBouncer[bouncer][route] = make(map[string]int) } - lapi_bouncer_stats[bouncer][route][method] += ival + mLapiBouncer[bouncer][route][method] += ival case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": - if _, ok := lapi_decisions_stats[bouncer]; !ok { - lapi_decisions_stats[bouncer] = struct { + if _, ok := mLapiDecision[bouncer]; !ok { + mLapiDecision[bouncer] = struct { NonEmpty int Empty int }{} } - x := lapi_decisions_stats[bouncer] + x := mLapiDecision[bouncer] if fam.Name == "cs_lapi_decisions_ko_total" { x.Empty += ival } else if fam.Name == "cs_lapi_decisions_ok_total" { x.NonEmpty += ival } - lapi_decisions_stats[bouncer] = x + mLapiDecision[bouncer] = x case "cs_active_decisions": - if _, ok := decisions_stats[reason]; !ok { - decisions_stats[reason] = make(map[string]map[string]int) + if _, ok := mDecision[reason]; !ok { + mDecision[reason] = make(map[string]map[string]int) } - if _, ok := decisions_stats[reason][origin]; !ok { - decisions_stats[reason][origin] = make(map[string]int) + if _, ok := mDecision[reason][origin]; !ok { + mDecision[reason][origin] = make(map[string]int) } - decisions_stats[reason][origin][action] += ival + mDecision[reason][origin][action] += ival case "cs_alerts": - /*if _, ok := alerts_stats[scenario]; !ok { - alerts_stats[scenario] = make(map[string]int) + /*if _, ok := mAlert[scenario]; !ok { + mAlert[scenario] = make(map[string]int) }*/ - alerts_stats[reason] += ival + mAlert[reason] += ival case "cs_cache_size": - stash_stats[name] = struct { + mStash[name] = struct { Type string Count int }{Type: mtype, Count: ival} case "cs_appsec_reqs_total": - if _, ok := appsec_engine_stats[metric.Labels["appsec_engine"]]; !ok { - appsec_engine_stats[metric.Labels["appsec_engine"]] = make(map[string]int, 0) + if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { + 
mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) } - appsec_engine_stats[metric.Labels["appsec_engine"]]["processed"] = ival + mAppsecEngine[metric.Labels["appsec_engine"]]["processed"] = ival case "cs_appsec_block_total": - if _, ok := appsec_engine_stats[metric.Labels["appsec_engine"]]; !ok { - appsec_engine_stats[metric.Labels["appsec_engine"]] = make(map[string]int, 0) + if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { + mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) } - appsec_engine_stats[metric.Labels["appsec_engine"]]["blocked"] = ival + mAppsecEngine[metric.Labels["appsec_engine"]]["blocked"] = ival case "cs_appsec_rule_hits": appsecEngine := metric.Labels["appsec_engine"] ruleID := metric.Labels["rule_name"] - if _, ok := appsec_rule_stats[appsecEngine]; !ok { - appsec_rule_stats[appsecEngine] = make(map[string]map[string]int, 0) + if _, ok := mAppsecRule[appsecEngine]; !ok { + mAppsecRule[appsecEngine] = make(map[string]map[string]int, 0) } - if _, ok := appsec_rule_stats[appsecEngine][ruleID]; !ok { - appsec_rule_stats[appsecEngine][ruleID] = make(map[string]int, 0) + if _, ok := mAppsecRule[appsecEngine][ruleID]; !ok { + mAppsecRule[appsecEngine][ruleID] = make(map[string]int, 0) } - appsec_rule_stats[appsecEngine][ruleID]["triggered"] = ival + mAppsecRule[appsecEngine][ruleID]["triggered"] = ival default: log.Debugf("unknown: %+v", fam.Name) continue @@ -267,33 +282,33 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni } if formatType == "human" { - acquisStatsTable(out, acquis_stats, noUnit) - bucketStatsTable(out, buckets_stats, noUnit) - parserStatsTable(out, parsers_stats, noUnit) - lapiStatsTable(out, lapi_stats) - lapiMachineStatsTable(out, lapi_machine_stats) - lapiBouncerStatsTable(out, lapi_bouncer_stats) - lapiDecisionStatsTable(out, lapi_decisions_stats) - decisionStatsTable(out, decisions_stats) - alertStatsTable(out, alerts_stats) - 
stashStatsTable(out, stash_stats) - appsecMetricsToTable(out, appsec_engine_stats, noUnit) - appsecRulesToTable(out, appsec_rule_stats, noUnit) + mAcquis.table(out, noUnit) + mBucket.table(out, noUnit) + mParser.table(out, noUnit) + mLapi.table(out) + mLapiMachine.table(out) + mLapiBouncer.table(out) + mLapiDecision.table(out) + mDecision.table(out) + mAlert.table(out) + mStash.table(out) + mAppsecEngine.table(out, noUnit) + mAppsecRule.table(out, noUnit) return nil } stats := make(map[string]any) - stats["acquisition"] = acquis_stats - stats["buckets"] = buckets_stats - stats["parsers"] = parsers_stats - stats["lapi"] = lapi_stats - stats["lapi_machine"] = lapi_machine_stats - stats["lapi_bouncer"] = lapi_bouncer_stats - stats["lapi_decisions"] = lapi_decisions_stats - stats["decisions"] = decisions_stats - stats["alerts"] = alerts_stats - stats["stash"] = stash_stats + stats["acquisition"] = mAcquis + stats["buckets"] = mBucket + stats["parsers"] = mParser + stats["lapi"] = mLapi + stats["lapi_machine"] = mLapiMachine + stats["lapi_bouncer"] = mLapiBouncer + stats["lapi_decisions"] = mLapiDecision + stats["decisions"] = mDecision + stats["alerts"] = mAlert + stats["stash"] = mStash switch formatType { case "json": diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 287333b5f3c..835277aa4ee 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -81,7 +81,7 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri return numRows, nil } -func bucketStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { +func (s statBucket) table(out io.Writer, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") @@ -89,7 +89,7 @@ func bucketStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo keys := []string{"curr_count", "overflow", "instantiation", "pour", 
"underflow"} - if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting bucket stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nBucket Metrics:") @@ -97,7 +97,7 @@ func bucketStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo } } -func acquisStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { +func (s statAcquis) table(out io.Writer, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") @@ -105,7 +105,7 @@ func acquisStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo keys := []string{"reads", "parsed", "unparsed", "pour"} - if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nAcquisition Metrics:") @@ -113,13 +113,13 @@ func acquisStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo } } -func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int, noUnit bool) { +func (s statAppsecEngine) table(out io.Writer, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") t.SetAlignment(table.AlignLeft, table.AlignLeft) keys := []string{"processed", "blocked"} - if numRows, err := metricsToTable(t, metrics, keys, noUnit); err != nil { + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nAppsec Metrics:") @@ -127,8 +127,8 @@ func appsecMetricsToTable(out io.Writer, metrics map[string]map[string]int, noUn } } -func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string]int, 
noUnit bool) { - for appsecEngine, appsecEngineRulesStats := range metrics { +func (s statAppsecRule) table(out io.Writer, noUnit bool) { + for appsecEngine, appsecEngineRulesStats := range s { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Rule ID", "Triggered") @@ -144,7 +144,7 @@ func appsecRulesToTable(out io.Writer, metrics map[string]map[string]map[string] } -func parserStatsTable(out io.Writer, stats map[string]map[string]int, noUnit bool) { +func (s statParser) table(out io.Writer, noUnit bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") @@ -152,7 +152,7 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo keys := []string{"hits", "parsed", "unparsed"} - if numRows, err := metricsToTable(t, stats, keys, noUnit); err != nil { + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 { renderTableTitle(out, "\nParser Metrics:") @@ -160,11 +160,7 @@ func parserStatsTable(out io.Writer, stats map[string]map[string]int, noUnit boo } } -func stashStatsTable(out io.Writer, stats map[string]struct { - Type string - Count int -}) { - +func (s statStash) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Name", "Type", "Items") @@ -172,14 +168,14 @@ func stashStatsTable(out io.Writer, stats map[string]struct { // unfortunately, we can't reuse metricsToTable as the structure is too different :/ sortedKeys := []string{} - for k := range stats { + for k := range s { sortedKeys = append(sortedKeys, k) } sort.Strings(sortedKeys) numRows := 0 for _, alabel := range sortedKeys { - astats := stats[alabel] + astats := s[alabel] row := []string{ alabel, @@ -195,7 +191,7 @@ func stashStatsTable(out io.Writer, stats map[string]struct { } } -func lapiStatsTable(out io.Writer, stats map[string]map[string]int) { +func (s statLapi) table(out io.Writer) { t := newTable(out) 
t.SetRowLines(false) t.SetHeaders("Route", "Method", "Hits") @@ -203,14 +199,14 @@ func lapiStatsTable(out io.Writer, stats map[string]map[string]int) { // unfortunately, we can't reuse metricsToTable as the structure is too different :/ sortedKeys := []string{} - for k := range stats { + for k := range s { sortedKeys = append(sortedKeys, k) } sort.Strings(sortedKeys) numRows := 0 for _, alabel := range sortedKeys { - astats := stats[alabel] + astats := s[alabel] subKeys := []string{} for skey := range astats { @@ -235,13 +231,13 @@ func lapiStatsTable(out io.Writer, stats map[string]map[string]int) { } } -func lapiMachineStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { +func (s statLapiMachine) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Machine", "Route", "Method", "Hits") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - numRows := lapiMetricsToTable(t, stats) + numRows := lapiMetricsToTable(t, s) if numRows > 0 { renderTableTitle(out, "\nLocal API Machines Metrics:") @@ -249,13 +245,13 @@ func lapiMachineStatsTable(out io.Writer, stats map[string]map[string]map[string } } -func lapiBouncerStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { +func (s statLapiBouncer) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bouncer", "Route", "Method", "Hits") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - numRows := lapiMetricsToTable(t, stats) + numRows := lapiMetricsToTable(t, s) if numRows > 0 { renderTableTitle(out, "\nLocal API Bouncers Metrics:") @@ -263,18 +259,14 @@ func lapiBouncerStatsTable(out io.Writer, stats map[string]map[string]map[string } } -func lapiDecisionStatsTable(out io.Writer, stats map[string]struct { - NonEmpty int - Empty int -}, -) { +func (s statLapiDecision) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bouncer", "Empty answers", 
"Non-empty answers") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) numRows := 0 - for bouncer, hits := range stats { + for bouncer, hits := range s { t.AddRow( bouncer, fmt.Sprintf("%d", hits.Empty), @@ -289,14 +281,14 @@ func lapiDecisionStatsTable(out io.Writer, stats map[string]struct { } } -func decisionStatsTable(out io.Writer, stats map[string]map[string]map[string]int) { +func (s statDecision) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Reason", "Origin", "Action", "Count") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) numRows := 0 - for reason, origins := range stats { + for reason, origins := range s { for origin, actions := range origins { for action, hits := range actions { t.AddRow( @@ -316,14 +308,14 @@ func decisionStatsTable(out io.Writer, stats map[string]map[string]map[string]in } } -func alertStatsTable(out io.Writer, stats map[string]int) { +func (s statAlert) table(out io.Writer) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Reason", "Count") t.SetAlignment(table.AlignLeft, table.AlignLeft) numRows := 0 - for scenario, hits := range stats { + for scenario, hits := range s { t.AddRow( scenario, fmt.Sprintf("%d", hits), From fdc525164a675f7038ccd828967471edd6076eb1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 6 Feb 2024 10:07:05 +0100 Subject: [PATCH 011/581] refact "cscli metrics" part 3 (#2807) --- cmd/crowdsec-cli/main.go | 2 + cmd/crowdsec-cli/metrics.go | 267 ++++++++++++++++++++++++------ cmd/crowdsec-cli/metrics_table.go | 170 ++++++++++++------- cmd/crowdsec-cli/support.go | 11 +- test/bats/01_cscli.bats | 9 - test/bats/08_metrics.bats | 56 ++++++- 6 files changed, 394 insertions(+), 121 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index db3a164af90..3b20cf112c0 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -146,6 +146,8 @@ It is meant to 
allow you to manage bans, parsers/scenarios/etc, api and generall FlagsDataType: cc.White, Flags: cc.Green, FlagsDescr: cc.Cyan, + NoExtraNewlines: true, + NoBottomNewline: true, }) cmd.SetOut(color.Output) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index ad255e847db..c883c809291 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -16,6 +16,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/trace" ) @@ -40,18 +41,31 @@ type ( } ) -type cliMetrics struct { - cfg configGetter +type metricSection interface { + Table(io.Writer, bool, bool) + Description() (string, string) } -func NewCLIMetrics(getconfig configGetter) *cliMetrics { - return &cliMetrics{ - cfg: getconfig, +type metricStore map[string]metricSection + +func NewMetricStore() metricStore { + return metricStore{ + "acquisition": statAcquis{}, + "buckets": statBucket{}, + "parsers": statParser{}, + "lapi": statLapi{}, + "lapi-machine": statLapiMachine{}, + "lapi-bouncer": statLapiBouncer{}, + "lapi-decisions": statLapiDecision{}, + "decisions": statDecision{}, + "alerts": statAlert{}, + "stash": statStash{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, } } -// FormatPrometheusMetrics is a complete rip from prom2json -func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUnit bool) error { +func (ms metricStore) Fetch(url string) error { mfChan := make(chan *dto.MetricFamily, 1024) errChan := make(chan error, 1) @@ -64,9 +78,10 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni transport.ResponseHeaderTimeout = time.Minute go func() { defer trace.CatchPanic("crowdsec/ShowPrometheus") + err := prom2json.FetchMetricFamilies(url, mfChan, transport) if err != nil { - errChan <- fmt.Errorf("failed to fetch prometheus metrics: %w", err) + errChan <- fmt.Errorf("failed to fetch metrics: %w", 
err) return } errChan <- nil @@ -81,21 +96,21 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni return err } - log.Debugf("Finished reading prometheus output, %d entries", len(result)) + log.Debugf("Finished reading metrics output, %d entries", len(result)) /*walk*/ - mAcquis := statAcquis{} - mParser := statParser{} - mBucket := statBucket{} - mLapi := statLapi{} - mLapiMachine := statLapiMachine{} - mLapiBouncer := statLapiBouncer{} - mLapiDecision := statLapiDecision{} - mDecision := statDecision{} - mAppsecEngine := statAppsecEngine{} - mAppsecRule := statAppsecRule{} - mAlert := statAlert{} - mStash := statStash{} + mAcquis := ms["acquisition"].(statAcquis) + mParser := ms["parsers"].(statParser) + mBucket := ms["buckets"].(statBucket) + mLapi := ms["lapi"].(statLapi) + mLapiMachine := ms["lapi-machine"].(statLapiMachine) + mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) + mLapiDecision := ms["lapi-decisions"].(statLapiDecision) + mDecision := ms["decisions"].(statDecision) + mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) + mAppsecRule := ms["appsec-rule"].(statAppsecRule) + mAlert := ms["alerts"].(statAlert) + mStash := ms["stash"].(statStash) for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { @@ -281,44 +296,50 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni } } - if formatType == "human" { - mAcquis.table(out, noUnit) - mBucket.table(out, noUnit) - mParser.table(out, noUnit) - mLapi.table(out) - mLapiMachine.table(out) - mLapiBouncer.table(out) - mLapiDecision.table(out) - mDecision.table(out) - mAlert.table(out) - mStash.table(out) - mAppsecEngine.table(out, noUnit) - mAppsecRule.table(out, noUnit) - return nil + return nil +} + +type cliMetrics struct { + cfg configGetter +} + +func NewCLIMetrics(getconfig configGetter) *cliMetrics { + return &cliMetrics{ + cfg: getconfig, } +} - stats := make(map[string]any) +func (ms metricStore) Format(out 
io.Writer, sections []string, formatType string, noUnit bool) error { + // copy only the sections we want + want := map[string]metricSection{} - stats["acquisition"] = mAcquis - stats["buckets"] = mBucket - stats["parsers"] = mParser - stats["lapi"] = mLapi - stats["lapi_machine"] = mLapiMachine - stats["lapi_bouncer"] = mLapiBouncer - stats["lapi_decisions"] = mLapiDecision - stats["decisions"] = mDecision - stats["alerts"] = mAlert - stats["stash"] = mStash + // if explicitly asking for sections, we want to show empty tables + showEmpty := len(sections) > 0 + + // if no sections are specified, we want all of them + if len(sections) == 0 { + for section := range ms { + sections = append(sections, section) + } + } + + for _, section := range sections { + want[section] = ms[section] + } switch formatType { + case "human": + for section := range want { + want[section].Table(out, noUnit, showEmpty) + } case "json": - x, err := json.MarshalIndent(stats, "", " ") + x, err := json.MarshalIndent(want, "", " ") if err != nil { return fmt.Errorf("failed to unmarshal metrics : %v", err) } out.Write(x) case "raw": - x, err := yaml.Marshal(stats) + x, err := yaml.Marshal(want) if err != nil { return fmt.Errorf("failed to unmarshal metrics : %v", err) } @@ -330,7 +351,7 @@ func FormatPrometheusMetrics(out io.Writer, url string, formatType string, noUni return nil } -func (cli *cliMetrics) run(url string, noUnit bool) error { +func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { cfg := cli.cfg() if url != "" { @@ -345,7 +366,20 @@ func (cli *cliMetrics) run(url string, noUnit bool) error { return fmt.Errorf("prometheus is not enabled, can't show metrics") } - if err := FormatPrometheusMetrics(color.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Output, noUnit); err != nil { + ms := NewMetricStore() + + if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + return err + } + + // any section that we don't have in the store is an error + for _, section 
:= range sections { + if _, ok := ms[section]; !ok { + return fmt.Errorf("unknown metrics type: %s", section) + } + } + + if err := ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit); err != nil { return err } return nil @@ -360,11 +394,19 @@ func (cli *cliMetrics) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "metrics", Short: "Display crowdsec prometheus metrics.", - Long: `Fetch metrics from the prometheus server and display them in a human-friendly way`, + Long: `Fetch metrics from a Local API server and display them`, + Example: `# Show all Metrics, skip empty tables (same as "cscli metrics show") +cscli metrics + +# Show only some metrics, connect to a different url +cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers + +# List available metric types +cscli metrics list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return cli.run(url, noUnit) + return cli.show(nil, url, noUnit) }, } @@ -372,5 +414,126 @@ func (cli *cliMetrics) NewCommand() *cobra.Command { flags.StringVarP(&url, "url", "u", "", "Prometheus url (http://:/metrics)") flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") + cmd.AddCommand(cli.newShowCmd()) + cmd.AddCommand(cli.newListCmd()) + + return cmd +} + +// expandSectionGroups returns a list of sections. The input can be a list of sections or aliases. 
+func (cli *cliMetrics) expandSectionGroups(args []string) []string { + ret := []string{} + for _, section := range args { + switch section { + case "engine": + ret = append(ret, "acquisition", "parsers", "buckets", "stash") + case "lapi": + ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") + case "appsec": + ret = append(ret, "appsec-engine", "appsec-rule") + default: + ret = append(ret, section) + } + } + + return ret +} + +func (cli *cliMetrics) newShowCmd() *cobra.Command { + var ( + url string + noUnit bool + ) + + cmd := &cobra.Command{ + Use: "show [type]...", + Short: "Display all or part of the available metrics.", + Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, + Example: `# Show all Metrics, skip empty tables +cscli metrics show + +# Use an alias: "engine", "lapi" or "appsec" to show a group of metrics +cscli metrics show engine + +# Show some specific metrics, show empty tables, connect to a different url +cscli metrics show acquisition parsers buckets stash --url http://lapi.local:6060/metrics + +# Show metrics in json format +cscli metrics show acquisition parsers buckets stash -o json`, + // Positional args are optional + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + args = cli.expandSectionGroups(args) + return cli.show(args, url, noUnit) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Metrics url (http://:/metrics)") + flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") + + return cmd +} + +func (cli *cliMetrics) list() error { + type metricType struct { + Type string `json:"type" yaml:"type"` + Title string `json:"title" yaml:"title"` + Description string `json:"description" yaml:"description"` + } + + var allMetrics []metricType + + ms := NewMetricStore() + for _, section := range maptools.SortedKeys(ms) { + title, description := 
ms[section].Description() + allMetrics = append(allMetrics, metricType{ + Type: section, + Title: title, + Description: description, + }) + } + + switch cli.cfg().Cscli.Output { + case "human": + t := newTable(color.Output) + t.SetRowLines(true) + t.SetHeaders("Type", "Title", "Description") + + for _, metric := range allMetrics { + t.AddRow(metric.Type, metric.Title, metric.Description) + } + + t.Render() + case "json": + x, err := json.MarshalIndent(allMetrics, "", " ") + if err != nil { + return fmt.Errorf("failed to unmarshal metrics: %w", err) + } + fmt.Println(string(x)) + case "raw": + x, err := yaml.Marshal(allMetrics) + if err != nil { + return fmt.Errorf("failed to unmarshal metrics: %w", err) + } + fmt.Println(string(x)) + } + + return nil +} + +func (cli *cliMetrics) newListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "List available types of metrics.", + Long: `List available types of metrics.`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + cli.list() + return nil + }, + } + return cmd } diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 835277aa4ee..72f53f94c49 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -7,6 +7,8 @@ import ( "github.com/aquasecurity/table" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/maptools" ) func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { @@ -47,15 +49,10 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri if t == nil { return 0, fmt.Errorf("nil table") } - // sort keys to keep consistent order when printing - sortedKeys := []string{} - for k := range stats { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) numRows := 0 - for _, alabel := range sortedKeys { + + for _, alabel := range maptools.SortedKeys(stats) { astats, ok := stats[alabel] 
if !ok { continue @@ -81,7 +78,12 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri return numRows, nil } -func (s statBucket) table(out io.Writer, noUnit bool) { +func (s statBucket) Description() (string, string) { + return "Bucket Metrics", + `Measure events in different scenarios. Current count is the number of buckets during metrics collection. Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` +} + +func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") @@ -91,13 +93,19 @@ func (s statBucket) table(out io.Writer, noUnit bool) { if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting bucket stats: %s", err) - } else if numRows > 0 { - renderTableTitle(out, "\nBucket Metrics:") + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statAcquis) table(out io.Writer, noUnit bool) { +func (s statAcquis) Description() (string, string) { + return "Acquisition Metrics", + `Measures the lines read, parsed, and unparsed per datasource. Zero read lines indicate a misconfigured or inactive datasource. Zero parsed lines mean the parser(s) failed. 
Non-zero parsed lines are fine as crowdsec selects relevant lines.` +} + +func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") @@ -107,13 +115,19 @@ func (s statAcquis) table(out io.Writer, noUnit bool) { if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting acquis stats: %s", err) - } else if numRows > 0 { - renderTableTitle(out, "\nAcquisition Metrics:") + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statAppsecEngine) table(out io.Writer, noUnit bool) { +func (s statAppsecEngine) Description() (string, string) { + return "Appsec Metrics", + `Measures the number of parsed and blocked requests by the AppSec Component.` +} + +func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") @@ -121,13 +135,19 @@ func (s statAppsecEngine) table(out io.Writer, noUnit bool) { keys := []string{"processed", "blocked"} if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting appsec stats: %s", err) - } else if numRows > 0 { - renderTableTitle(out, "\nAppsec Metrics:") + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statAppsecRule) table(out io.Writer, noUnit bool) { +func (s statAppsecRule) Description() (string, string) { + return "Appsec Rule Metrics", + `Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.` +} + +func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { for appsecEngine, appsecEngineRulesStats := range s { t := newTable(out) t.SetRowLines(false) @@ -136,7 +156,7 @@ func (s 
statAppsecRule) table(out io.Writer, noUnit bool) { keys := []string{"triggered"} if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) - } else if numRows > 0 { + } else if numRows > 0 || showEmpty{ renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) t.Render() } @@ -144,7 +164,12 @@ func (s statAppsecRule) table(out io.Writer, noUnit bool) { } -func (s statParser) table(out io.Writer, noUnit bool) { +func (s statParser) Description() (string, string) { + return "Parser Metrics", + `Tracks the number of events processed by each parser and indicates success or failure. Zero parsed lines means the parser(s) failed. Non-zero unparsed lines are fine as crowdsec selects relevant lines.` +} + +func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") @@ -154,27 +179,28 @@ func (s statParser) table(out io.Writer, noUnit boo keys := []string{"hits", "parsed", "unparsed"} if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) - } else if numRows > 0 { - renderTableTitle(out, "\nParser Metrics:") + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statStash) table(out io.Writer) { +func (s statStash) Description() (string, string) { + return "Parser Stash Metrics", + `Tracks the status of stashes that might be created by various parsers and scenarios.` +} + +func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Name", "Type", "Items") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ - sortedKeys := []string{} - for k := range s { - sortedKeys = append(sortedKeys, k) - 
} - sort.Strings(sortedKeys) - numRows := 0 - for _, alabel := range sortedKeys { + + for _, alabel := range maptools.SortedKeys(s) { astats := s[alabel] row := []string{ @@ -185,27 +211,28 @@ func (s statStash) table(out io.Writer) { t.AddRow(row...) numRows++ } - if numRows > 0 { - renderTableTitle(out, "\nParser Stash Metrics:") + if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statLapi) table(out io.Writer) { +func (s statLapi) Description() (string, string) { + return "Local API Metrics", + `Monitors the requests made to local API routes.` +} + +func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Route", "Method", "Hits") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ - sortedKeys := []string{} - for k := range s { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - numRows := 0 - for _, alabel := range sortedKeys { + + for _, alabel := range maptools.SortedKeys(s) { astats := s[alabel] subKeys := []string{} @@ -225,13 +252,19 @@ func (s statLapi) table(out io.Writer) { } } - if numRows > 0 { - renderTableTitle(out, "\nLocal API Metrics:") + if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statLapiMachine) table(out io.Writer) { +func (s statLapiMachine) Description() (string, string) { + return "Local API Machines Metrics", + `Tracks the number of calls to the local API from each registered machine.` +} + +func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Machine", "Route", "Method", "Hits") @@ -239,13 +272,19 @@ func (s statLapiMachine) table(out io.Writer) { numRows := lapiMetricsToTable(t, s) - if numRows > 0 { - 
renderTableTitle(out, "\nLocal API Machines Metrics:") + if numRows > 0 || showEmpty{ + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statLapiBouncer) table(out io.Writer) { +func (s statLapiBouncer) Description() (string, string) { + return "Local API Bouncers Metrics", + `Tracks total hits to remediation component related API routes.` +} + +func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bouncer", "Route", "Method", "Hits") @@ -253,13 +292,19 @@ func (s statLapiBouncer) table(out io.Writer) { numRows := lapiMetricsToTable(t, s) - if numRows > 0 { - renderTableTitle(out, "\nLocal API Bouncers Metrics:") + if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statLapiDecision) table(out io.Writer) { +func (s statLapiDecision) Description() (string, string) { + return "Local API Bouncers Decisions", + `Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.` +} + +func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") @@ -275,13 +320,19 @@ func (s statLapiDecision) table(out io.Writer) { numRows++ } - if numRows > 0 { - renderTableTitle(out, "\nLocal API Bouncers Decisions:") + if numRows > 0 || showEmpty{ + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statDecision) table(out io.Writer) { +func (s statDecision) Description() (string, string) { + return "Local API Decisions", + `Provides information about all currently active decisions. 
Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` +} + +func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Reason", "Origin", "Action", "Count") @@ -302,13 +353,19 @@ func (s statDecision) table(out io.Writer) { } } - if numRows > 0 { - renderTableTitle(out, "\nLocal API Decisions:") + if numRows > 0 || showEmpty{ + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } -func (s statAlert) table(out io.Writer) { +func (s statAlert) Description() (string, string) { + return "Local API Alerts", + `Tracks the total number of past and present alerts for the installed scenarios.` +} + +func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Reason", "Count") @@ -323,8 +380,9 @@ func (s statAlert) table(out io.Writer) { numRows++ } - if numRows > 0 { - renderTableTitle(out, "\nLocal API Alerts:") + if numRows > 0 || showEmpty{ + title, _ := s.Description() + renderTableTitle(out, "\n" + title + ":") t.Render() } } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index e0a2fa9db90..661950fa8f6 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -66,10 +66,15 @@ func collectMetrics() ([]byte, []byte, error) { } humanMetrics := bytes.NewBuffer(nil) - err := FormatPrometheusMetrics(humanMetrics, csConfig.Cscli.PrometheusUrl, "human", false) - if err != nil { - return nil, nil, fmt.Errorf("could not fetch promtheus metrics: %s", err) + ms := NewMetricStore() + + if err := ms.Fetch(csConfig.Cscli.PrometheusUrl); err != nil { + return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %s", err) + } + + if err := ms.Format(humanMetrics, nil, "human", false); err != nil { + return nil, nil, err } req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl, nil) diff --git 
a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 3a5b4aad04c..60a65b98d58 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -273,15 +273,6 @@ teardown() { assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' } -@test "cscli metrics" { - rune -0 ./instance-crowdsec start - rune -0 cscli lapi status - rune -0 cscli metrics - assert_output --partial "Route" - assert_output --partial '/v1/watchers/login' - assert_output --partial "Local API Metrics:" -} - @test "'cscli completion' with or without configuration file" { rune -0 cscli completion bash assert_output --partial "# bash completion for cscli" diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index 0275d7fd4a0..8bf30812cff 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -25,7 +25,7 @@ teardown() { @test "cscli metrics (crowdsec not running)" { rune -1 cscli metrics # crowdsec is down - assert_stderr --partial 'failed to fetch prometheus metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' + assert_stderr --partial 'failed to fetch metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' } @test "cscli metrics (bad configuration)" { @@ -59,3 +59,57 @@ teardown() { rune -1 cscli metrics assert_stderr --partial "prometheus is not enabled, can't show metrics" } + +@test "cscli metrics" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi status + rune -0 cscli metrics + assert_output --partial "Route" + assert_output --partial '/v1/watchers/login' + assert_output --partial "Local API Metrics:" + + rune -0 cscli metrics -o json + rune -0 jq 'keys' <(output) + assert_output --partial '"alerts",' + assert_output --partial '"parsers",' + + rune -0 cscli metrics 
-o raw + assert_output --partial 'alerts: {}' + assert_output --partial 'parsers: {}' +} + +@test "cscli metrics list" { + rune -0 cscli metrics list + assert_output --regexp "Type.*Title.*Description" + + rune -0 cscli metrics list -o json + rune -0 jq -c '.[] | [.type,.title]' <(output) + assert_line '["acquisition","Acquisition Metrics"]' + + rune -0 cscli metrics list -o raw + assert_line "- type: acquisition" + assert_line " title: Acquisition Metrics" +} + +@test "cscli metrics show" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi status + + assert_equal "$(cscli metrics)" "$(cscli metrics show)" + + rune -1 cscli metrics show foobar + assert_stderr --partial "unknown metrics type: foobar" + + rune -0 cscli metrics show lapi + assert_output --partial "Local API Metrics:" + assert_output --regexp "Route.*Method.*Hits" + assert_output --regexp "/v1/watchers/login.*POST" + + rune -0 cscli metrics show lapi -o json + rune -0 jq -c '.lapi."/v1/watchers/login" | keys' <(output) + assert_json '["POST"]' + + rune -0 cscli metrics show lapi -o raw + assert_line 'lapi:' + assert_line ' /v1/watchers/login:' +} From 4e724f6c0a54ad1c67eeab6ca3be62f00ee0cf20 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 6 Feb 2024 10:50:28 +0100 Subject: [PATCH 012/581] refact "cscli" root cmd (#2811) * refact "cscli" root cmd * lint (naming, imports, whitespace) --- .golangci.yml | 2 +- cmd/crowdsec-cli/bouncers.go | 10 +- cmd/crowdsec-cli/config_restore.go | 9 +- cmd/crowdsec-cli/dashboard.go | 24 +-- cmd/crowdsec-cli/dashboard_unsupported.go | 4 +- cmd/crowdsec-cli/decisions.go | 8 +- cmd/crowdsec-cli/hub.go | 6 +- cmd/crowdsec-cli/machines.go | 21 +-- cmd/crowdsec-cli/main.go | 207 +++++++++++++--------- cmd/crowdsec-cli/metrics.go | 31 ++-- cmd/crowdsec-cli/papi.go | 8 +- cmd/crowdsec-cli/simulation.go | 9 +- 12 files changed, 199 insertions(+), 140 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 
e1f2fc09a84..dbeb96875c5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -53,7 +53,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 27 + min-complexity: 28 nlreturn: block-size: 4 diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index d2685901ebb..717e9aef5fe 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -36,13 +36,13 @@ func askYesNo(message string, defaultAnswer bool) (bool, error) { } type cliBouncers struct { - db *database.Client + db *database.Client cfg configGetter } -func NewCLIBouncers(getconfig configGetter) *cliBouncers { +func NewCLIBouncers(cfg configGetter) *cliBouncers { return &cliBouncers{ - cfg: getconfig, + cfg: cfg, } } @@ -197,13 +197,13 @@ cscli bouncers add MyBouncerName --key `, return cmd } -func (cli *cliBouncers) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +func (cli *cliBouncers) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { bouncers, err := cli.db.ListBouncers() if err != nil { cobra.CompError("unable to list bouncers " + err.Error()) } - ret :=[]string{} + ret := []string{} for _, bouncer := range bouncers { if strings.Contains(bouncer.Name, toComplete) && !slices.Contains(args, bouncer.Name) { diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index e9c2fa9aa23..17d7494c60f 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -146,7 +146,12 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { // Now we have config.yaml, we should regenerate config struct to have rights paths etc ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) - initConfig() + log.Debug("Reloading configuration") + + csConfig, _, err = loadConfigFor("config") + if err != nil { + return fmt.Errorf("failed to reload configuration: %s", err) 
+ } backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) if _, err = os.Stat(backupCAPICreds); err == nil { @@ -227,7 +232,7 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { } } - // if there is files in the acquis backup dir, restore them + // if there are files in the acquis backup dir, restore them acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml") if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil { for _, acquisFile := range acquisFiles { diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 64cb7577e89..59b9e67cd94 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -19,15 +19,14 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/crowdsecurity/crowdsec/pkg/metabase" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/metabase" ) var ( metabaseUser = "crowdsec@crowdsec.net" metabasePassword string - metabaseDbPath string + metabaseDBPath string metabaseConfigPath string metabaseConfigFolder = "metabase/" metabaseConfigFile = "metabase.yaml" @@ -43,13 +42,13 @@ var ( // information needed to set up a random password on user's behalf ) -type cliDashboard struct{ +type cliDashboard struct { cfg configGetter } -func NewCLIDashboard(getconfig configGetter) *cliDashboard { +func NewCLIDashboard(cfg configGetter) *cliDashboard { return &cliDashboard{ - cfg: getconfig, + cfg: cfg, } } @@ -99,6 +98,7 @@ cscli dashboard remove metabaseContainerID = oldContainerID } } + return nil }, } @@ -127,8 +127,8 @@ cscli dashboard setup --listen 0.0.0.0 cscli dashboard setup -l 0.0.0.0 -p 443 --password `, RunE: func(_ *cobra.Command, _ []string) error { - if metabaseDbPath == "" { - metabaseDbPath = cli.cfg().ConfigPaths.DataDir + if metabaseDBPath == "" { + metabaseDBPath = cli.cfg().ConfigPaths.DataDir } if metabasePassword == "" { @@ -152,7 +152,7 @@ cscli dashboard 
setup -l 0.0.0.0 -p 443 --password if err = cli.chownDatabase(dockerGroup.Gid); err != nil { return err } - mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDbPath, dockerGroup.Gid, metabaseContainerID, metabaseImage) + mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, metabaseContainerID, metabaseImage) if err != nil { return err } @@ -165,13 +165,14 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password fmt.Printf("\tURL : '%s'\n", mb.Config.ListenURL) fmt.Printf("\tusername : '%s'\n", mb.Config.Username) fmt.Printf("\tpassword : '%s'\n", mb.Config.Password) + return nil }, } flags := cmd.Flags() flags.BoolVarP(&force, "force", "f", false, "Force setup : override existing files") - flags.StringVarP(&metabaseDbPath, "dir", "d", "", "Shared directory with metabase container") + flags.StringVarP(&metabaseDBPath, "dir", "d", "", "Shared directory with metabase container") flags.StringVarP(&metabaseListenAddress, "listen", "l", metabaseListenAddress, "Listen address of container") flags.StringVar(&metabaseImage, "metabase-image", metabaseImage, "Metabase image to use") flags.StringVarP(&metabaseListenPort, "port", "p", metabaseListenPort, "Listen port of container") @@ -203,6 +204,7 @@ func (cli *cliDashboard) newStartCmd() *cobra.Command { } log.Infof("Started metabase") log.Infof("url : http://%s:%s", mb.Config.ListenAddr, mb.Config.ListenPort) + return nil }, } @@ -241,6 +243,7 @@ func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { return err } log.Printf("'%s'", m.Config.Password) + return nil }, } @@ -313,6 +316,7 @@ cscli dashboard remove --force } } } + return nil }, } diff --git a/cmd/crowdsec-cli/dashboard_unsupported.go b/cmd/crowdsec-cli/dashboard_unsupported.go index 4cf8e18b503..cc80abd2528 100644 --- 
a/cmd/crowdsec-cli/dashboard_unsupported.go +++ b/cmd/crowdsec-cli/dashboard_unsupported.go @@ -13,9 +13,9 @@ type cliDashboard struct{ cfg configGetter } -func NewCLIDashboard(getconfig configGetter) *cliDashboard { +func NewCLIDashboard(cfg configGetter) *cliDashboard { return &cliDashboard{ - cfg: getconfig, + cfg: cfg, } } diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index c5839ae0079..d7165367898 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -116,14 +116,13 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin return nil } - type cliDecisions struct { cfg configGetter } -func NewCLIDecisions(getconfig configGetter) *cliDecisions { +func NewCLIDecisions(cfg configGetter) *cliDecisions { return &cliDecisions{ - cfg: getconfig, + cfg: cfg, } } @@ -157,6 +156,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("creating api client: %w", err) } + return nil }, } @@ -393,6 +393,7 @@ cscli decisions add --scope username --value foobar } log.Info("Decision successfully added") + return nil }, } @@ -499,6 +500,7 @@ cscli decisions delete --type captcha } } log.Infof("%s decision(s) deleted", decisions.NbDeleted) + return nil }, } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index d3ce380bb6f..600e56889f7 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -13,13 +13,13 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliHub struct{ +type cliHub struct { cfg configGetter } -func NewCLIHub(getconfig configGetter) *cliHub { +func NewCLIHub(cfg configGetter) *cliHub { return &cliHub{ - cfg: getconfig, + cfg: cfg, } } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 1819bdcf5fb..7c9b9708c92 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -7,6 +7,7 @@ import ( "fmt" "math/big" "os" + "slices" "strings" "time" @@ -17,7 +18,6 @@ 
import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "slices" "github.com/crowdsecurity/machineid" @@ -106,14 +106,14 @@ func getLastHeartbeat(m *ent.Machine) (string, bool) { return hb, true } -type cliMachines struct{ - db *database.Client +type cliMachines struct { + db *database.Client cfg configGetter } -func NewCLIMachines(getconfig configGetter) *cliMachines { +func NewCLIMachines(cfg configGetter) *cliMachines { return &cliMachines{ - cfg: getconfig, + cfg: cfg, } } @@ -136,6 +136,7 @@ Note: This command requires database direct access, so is intended to be run on if err != nil { return fmt.Errorf("unable to create new database client: %s", err) } + return nil }, } @@ -249,7 +250,7 @@ cscli machines add -f- --auto > /tmp/mycreds.yaml`, func (cli *cliMachines) add(args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { var ( - err error + err error machineID string ) @@ -347,7 +348,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return nil } -func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +func (cli *cliMachines) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { machines, err := cli.db.ListMachines() if err != nil { cobra.CompError("unable to list machines " + err.Error()) @@ -447,9 +448,9 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b func (cli *cliMachines) newPruneCmd() *cobra.Command { var ( - duration time.Duration - notValidOnly bool - force bool + duration time.Duration + notValidOnly bool + force bool ) const defaultDuration = 10 * time.Minute diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 3b20cf112c0..62b85e63047 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -15,45 +15,88 @@ import ( 
"github.com/crowdsecurity/crowdsec/pkg/fflag" ) -var trace_lvl, dbg_lvl, nfo_lvl, wrn_lvl, err_lvl bool - var ConfigFilePath string var csConfig *csconfig.Config var dbClient *database.Client -var outputFormat string -var OutputColor string +type configGetter func() *csconfig.Config var mergedConfig string -// flagBranch overrides the value in csConfig.Cscli.HubBranch -var flagBranch = "" +type cliRoot struct { + logTrace bool + logDebug bool + logInfo bool + logWarn bool + logErr bool + outputColor string + outputFormat string + // flagBranch overrides the value in csConfig.Cscli.HubBranch + flagBranch string +} -type configGetter func() *csconfig.Config +func newCliRoot() *cliRoot { + return &cliRoot{} +} -func initConfig() { - var err error +// cfg() is a helper function to get the configuration loaded from config.yaml, +// we pass it to subcommands because the file is not read until the Execute() call +func (cli *cliRoot) cfg() *csconfig.Config { + return csConfig +} - if trace_lvl { - log.SetLevel(log.TraceLevel) - } else if dbg_lvl { - log.SetLevel(log.DebugLevel) - } else if nfo_lvl { - log.SetLevel(log.InfoLevel) - } else if wrn_lvl { - log.SetLevel(log.WarnLevel) - } else if err_lvl { - log.SetLevel(log.ErrorLevel) +// wantedLogLevel returns the log level requested in the command line flags. +func (cli *cliRoot) wantedLogLevel() log.Level { + switch { + case cli.logTrace: + return log.TraceLevel + case cli.logDebug: + return log.DebugLevel + case cli.logInfo: + return log.InfoLevel + case cli.logWarn: + return log.WarnLevel + case cli.logErr: + return log.ErrorLevel + default: + return log.InfoLevel + } +} + +// loadConfigFor loads the configuration file for the given sub-command. +// If the sub-command does not need it, it returns a default configuration. 
+func loadConfigFor(command string) (*csconfig.Config, string, error) { + noNeedConfig := []string{ + "doc", + "help", + "completion", + "version", + "hubtest", } - if !slices.Contains(NoNeedConfig, os.Args[1]) { + if !slices.Contains(noNeedConfig, command) { log.Debugf("Using %s as configuration file", ConfigFilePath) - csConfig, mergedConfig, err = csconfig.NewConfig(ConfigFilePath, false, false, true) + + config, merged, err := csconfig.NewConfig(ConfigFilePath, false, false, true) if err != nil { - log.Fatal(err) + return nil, "", err } - } else { - csConfig = csconfig.NewDefaultConfig() + + return config, merged, nil + } + + return csconfig.NewDefaultConfig(), "", nil +} + +// initialize is called before the subcommand is executed. +func (cli *cliRoot) initialize() { + var err error + + log.SetLevel(cli.wantedLogLevel()) + + csConfig, mergedConfig, err = loadConfigFor(os.Args[1]) + if err != nil { + log.Fatal(err) } // recap of the enabled feature flags, because logging @@ -62,12 +105,12 @@ func initConfig() { log.Debugf("Enabled feature flags: %s", fflist) } - if flagBranch != "" { - csConfig.Cscli.HubBranch = flagBranch + if cli.flagBranch != "" { + csConfig.Cscli.HubBranch = cli.flagBranch } - if outputFormat != "" { - csConfig.Cscli.Output = outputFormat + if cli.outputFormat != "" { + csConfig.Cscli.Output = cli.outputFormat } if csConfig.Cscli.Output == "" { @@ -85,11 +128,11 @@ func initConfig() { log.SetLevel(log.ErrorLevel) } - if OutputColor != "" { - csConfig.Cscli.Color = OutputColor + if cli.outputColor != "" { + csConfig.Cscli.Color = cli.outputColor - if OutputColor != "yes" && OutputColor != "no" && OutputColor != "auto" { - log.Fatalf("output color %s unknown", OutputColor) + if cli.outputColor != "yes" && cli.outputColor != "no" && cli.outputColor != "auto" { + log.Fatalf("output color %s unknown", cli.outputColor) } } } @@ -102,15 +145,25 @@ var validArgs = []string{ "postoverflows", "scenarios", "simulation", "support", "version", } -var 
NoNeedConfig = []string{ - "doc", - "help", - "completion", - "version", - "hubtest", +func (cli *cliRoot) colorize(cmd *cobra.Command) { + cc.Init(&cc.Config{ + RootCmd: cmd, + Headings: cc.Yellow, + Commands: cc.Green + cc.Bold, + CmdShortDescr: cc.Cyan, + Example: cc.Italic, + ExecName: cc.Bold, + Aliases: cc.Bold + cc.Italic, + FlagsDataType: cc.White, + Flags: cc.Green, + FlagsDescr: cc.Cyan, + NoExtraNewlines: true, + NoBottomNewline: true, + }) + cmd.SetOut(color.Output) } -func main() { +func (cli *cliRoot) NewCommand() *cobra.Command { // set the formatter asap and worry about level later logFormatter := &log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true} log.SetFormatter(logFormatter) @@ -135,33 +188,25 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall /*TBD examples*/ } - cc.Init(&cc.Config{ - RootCmd: cmd, - Headings: cc.Yellow, - Commands: cc.Green + cc.Bold, - CmdShortDescr: cc.Cyan, - Example: cc.Italic, - ExecName: cc.Bold, - Aliases: cc.Bold + cc.Italic, - FlagsDataType: cc.White, - Flags: cc.Green, - FlagsDescr: cc.Cyan, - NoExtraNewlines: true, - NoBottomNewline: true, - }) - cmd.SetOut(color.Output) + cli.colorize(cmd) + + /*don't sort flags so we can enforce order*/ + cmd.Flags().SortFlags = false + + pflags := cmd.PersistentFlags() + pflags.SortFlags = false + + pflags.StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file") + pflags.StringVarP(&cli.outputFormat, "output", "o", "", "Output format: human, json, raw") + pflags.StringVarP(&cli.outputColor, "color", "", "auto", "Output color: yes, no, auto") + pflags.BoolVar(&cli.logDebug, "debug", false, "Set logging to debug") + pflags.BoolVar(&cli.logInfo, "info", false, "Set logging to info") + pflags.BoolVar(&cli.logWarn, "warning", false, "Set logging to warning") + pflags.BoolVar(&cli.logErr, "error", false, "Set logging to error") + pflags.BoolVar(&cli.logTrace, "trace", false, 
"Set logging to trace") + pflags.StringVar(&cli.flagBranch, "branch", "", "Override hub branch on github") - cmd.PersistentFlags().StringVarP(&ConfigFilePath, "config", "c", csconfig.DefaultConfigPath("config.yaml"), "path to crowdsec config file") - cmd.PersistentFlags().StringVarP(&outputFormat, "output", "o", "", "Output format: human, json, raw") - cmd.PersistentFlags().StringVarP(&OutputColor, "color", "", "auto", "Output color: yes, no, auto") - cmd.PersistentFlags().BoolVar(&dbg_lvl, "debug", false, "Set logging to debug") - cmd.PersistentFlags().BoolVar(&nfo_lvl, "info", false, "Set logging to info") - cmd.PersistentFlags().BoolVar(&wrn_lvl, "warning", false, "Set logging to warning") - cmd.PersistentFlags().BoolVar(&err_lvl, "error", false, "Set logging to error") - cmd.PersistentFlags().BoolVar(&trace_lvl, "trace", false, "Set logging to trace") - cmd.PersistentFlags().StringVar(&flagBranch, "branch", "", "Override hub branch on github") - - if err := cmd.PersistentFlags().MarkHidden("branch"); err != nil { + if err := pflags.MarkHidden("branch"); err != nil { log.Fatalf("failed to hide flag: %s", err) } @@ -181,29 +226,20 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall } if len(os.Args) > 1 { - cobra.OnInitialize(initConfig) - } - - /*don't sort flags so we can enforce order*/ - cmd.Flags().SortFlags = false - cmd.PersistentFlags().SortFlags = false - - // we use a getter because the config is not initialized until the Execute() call - getconfig := func() *csconfig.Config { - return csConfig + cobra.OnInitialize(cli.initialize) } cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewConfigCmd()) - cmd.AddCommand(NewCLIHub(getconfig).NewCommand()) - cmd.AddCommand(NewCLIMetrics(getconfig).NewCommand()) - cmd.AddCommand(NewCLIDashboard(getconfig).NewCommand()) - cmd.AddCommand(NewCLIDecisions(getconfig).NewCommand()) + cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) + 
cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts().NewCommand()) - cmd.AddCommand(NewCLISimulation(getconfig).NewCommand()) - cmd.AddCommand(NewCLIBouncers(getconfig).NewCommand()) - cmd.AddCommand(NewCLIMachines(getconfig).NewCommand()) + cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICapi().NewCommand()) cmd.AddCommand(NewLapiCmd()) cmd.AddCommand(NewCompletionCmd()) @@ -212,7 +248,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIHubTest().NewCommand()) cmd.AddCommand(NewCLINotifications().NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) - cmd.AddCommand(NewCLIPapi(getconfig).NewCommand()) + cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection().NewCommand()) cmd.AddCommand(NewCLIParser().NewCommand()) cmd.AddCommand(NewCLIScenario().NewCommand()) @@ -225,6 +261,11 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewSetupCmd()) } + return cmd +} + +func main() { + cmd := newCliRoot().NewCommand() if err := cmd.Execute(); err != nil { log.Fatal(err) } diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index c883c809291..7df52841ecb 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -50,18 +50,18 @@ type metricStore map[string]metricSection func NewMetricStore() metricStore { return metricStore{ - "acquisition": statAcquis{}, - "buckets": statBucket{}, - "parsers": statParser{}, - "lapi": statLapi{}, - "lapi-machine": statLapiMachine{}, - "lapi-bouncer": statLapiBouncer{}, + "acquisition": statAcquis{}, + "buckets": statBucket{}, + "parsers": statParser{}, + "lapi": statLapi{}, + "lapi-machine": 
statLapiMachine{}, + "lapi-bouncer": statLapiBouncer{}, "lapi-decisions": statLapiDecision{}, - "decisions": statDecision{}, - "alerts": statAlert{}, - "stash": statStash{}, - "appsec-engine": statAppsecEngine{}, - "appsec-rule": statAppsecRule{}, + "decisions": statDecision{}, + "alerts": statAlert{}, + "stash": statStash{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, } } @@ -116,17 +116,21 @@ func (ms metricStore) Fetch(url string) error { if !strings.HasPrefix(fam.Name, "cs_") { continue } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { metric, ok := m.(prom2json.Metric) if !ok { log.Debugf("failed to convert metric to prom2json.Metric") continue } + name, ok := metric.Labels["name"] if !ok { log.Debugf("no name in Metric %v", metric.Labels) } + source, ok := metric.Labels["source"] if !ok { log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name) @@ -153,6 +157,7 @@ func (ms metricStore) Fetch(url string) error { if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) } + ival := int(fval) switch fam.Name { /*buckets*/ @@ -303,9 +308,9 @@ type cliMetrics struct { cfg configGetter } -func NewCLIMetrics(getconfig configGetter) *cliMetrics { +func NewCLIMetrics(cfg configGetter) *cliMetrics { return &cliMetrics{ - cfg: getconfig, + cfg: cfg, } } diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 04223ef93ab..e18af94d4bb 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -10,19 +10,18 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiserver" "github.com/crowdsecurity/crowdsec/pkg/database" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) type cliPapi struct { cfg configGetter } -func NewCLIPapi(getconfig configGetter) *cliPapi { +func NewCLIPapi(cfg configGetter) *cliPapi { return &cliPapi{ - cfg: getconfig, + cfg: cfg, } } @@ 
-43,6 +42,7 @@ func (cli *cliPapi) NewCommand() *cobra.Command { if err := require.PAPI(cfg); err != nil { return err } + return nil }, } diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index a6e710c5747..6ccac761727 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -3,23 +3,23 @@ package main import ( "fmt" "os" + "slices" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" - "slices" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliSimulation struct{ +type cliSimulation struct { cfg configGetter } -func NewCLISimulation(getconfig configGetter) *cliSimulation { +func NewCLISimulation(cfg configGetter) *cliSimulation { return &cliSimulation{ - cfg: getconfig, + cfg: cfg, } } @@ -38,6 +38,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, if cli.cfg().Cscli.SimulationConfig == nil { return fmt.Errorf("no simulation configured") } + return nil }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { From 3208a40ef337a50a815d67f5337f9c31b6926cbf Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Tue, 6 Feb 2024 18:04:17 +0100 Subject: [PATCH 013/581] Dedicated whitelist metrics (#2813) * add proper whitelist metrics : both its own table and an extension to acquis metrics to track discarded/whitelisted lines --- .golangci.yml | 6 +- cmd/crowdsec-cli/metrics.go | 98 ++++++++++++++++++++++--------- cmd/crowdsec-cli/metrics_table.go | 88 +++++++++++++++++++++------ cmd/crowdsec/metrics.go | 4 +- pkg/parser/node.go | 4 +- pkg/parser/runtime.go | 18 ++++++ pkg/parser/whitelist.go | 14 ++++- pkg/parser/whitelist_test.go | 4 +- 8 files changed, 178 insertions(+), 58 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index dbeb96875c5..a3aacccb817 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -11,7 +11,7 @@ run: linters-settings: cyclop: # lower this after refactoring - max-complexity: 66 + 
max-complexity: 70 gci: sections: @@ -22,11 +22,11 @@ linters-settings: gocognit: # lower this after refactoring - min-complexity: 145 + min-complexity: 150 gocyclo: # lower this after refactoring - min-complexity: 64 + min-complexity: 70 funlen: # Checks the number of lines in a function. diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 7df52841ecb..6b3155e5549 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -21,21 +21,22 @@ import ( ) type ( - statAcquis map[string]map[string]int - statParser map[string]map[string]int - statBucket map[string]map[string]int - statLapi map[string]map[string]int - statLapiMachine map[string]map[string]map[string]int - statLapiBouncer map[string]map[string]map[string]int + statAcquis map[string]map[string]int + statParser map[string]map[string]int + statBucket map[string]map[string]int + statWhitelist map[string]map[string]map[string]int + statLapi map[string]map[string]int + statLapiMachine map[string]map[string]map[string]int + statLapiBouncer map[string]map[string]map[string]int statLapiDecision map[string]struct { NonEmpty int Empty int } - statDecision map[string]map[string]map[string]int + statDecision map[string]map[string]map[string]int statAppsecEngine map[string]map[string]int - statAppsecRule map[string]map[string]map[string]int - statAlert map[string]int - statStash map[string]struct { + statAppsecRule map[string]map[string]map[string]int + statAlert map[string]int + statStash map[string]struct { Type string Count int } @@ -62,6 +63,7 @@ func NewMetricStore() metricStore { "stash": statStash{}, "appsec-engine": statAppsecEngine{}, "appsec-rule": statAppsecRule{}, + "whitelists": statWhitelist{}, } } @@ -111,6 +113,7 @@ func (ms metricStore) Fetch(url string) error { mAppsecRule := ms["appsec-rule"].(statAppsecRule) mAlert := ms["alerts"].(statAlert) mStash := ms["stash"].(statStash) + mWhitelist := ms["whitelists"].(statWhitelist) for idx, fam := range result { if 
!strings.HasPrefix(fam.Name, "cs_") { @@ -160,7 +163,9 @@ func (ms metricStore) Fetch(url string) error { ival := int(fval) switch fam.Name { - /*buckets*/ + // + // buckets + // case "cs_bucket_created_total": if _, ok := mBucket[name]; !ok { mBucket[name] = make(map[string]int) @@ -190,7 +195,9 @@ func (ms metricStore) Fetch(url string) error { mBucket[name] = make(map[string]int) } mBucket[name]["underflow"] += ival - /*acquis*/ + // + // parsers + // case "cs_parser_hits_total": if _, ok := mAcquis[source]; !ok { mAcquis[source] = make(map[string]int) @@ -221,6 +228,33 @@ func (ms metricStore) Fetch(url string) error { mParser[name] = make(map[string]int) } mParser[name]["unparsed"] += ival + // + // whitelists + // + case "cs_node_wl_hits_total": + if _, ok := mWhitelist[name]; !ok { + mWhitelist[name] = make(map[string]map[string]int) + } + if _, ok := mWhitelist[name][reason]; !ok { + mWhitelist[name][reason] = make(map[string]int) + } + mWhitelist[name][reason]["hits"] += ival + case "cs_node_wl_hits_ok_total": + if _, ok := mWhitelist[name]; !ok { + mWhitelist[name] = make(map[string]map[string]int) + } + if _, ok := mWhitelist[name][reason]; !ok { + mWhitelist[name][reason] = make(map[string]int) + } + mWhitelist[name][reason]["whitelisted"] += ival + // track as well whitelisted lines at acquis level + if _, ok := mAcquis[source]; !ok { + mAcquis[source] = make(map[string]int) + } + mAcquis[source]["whitelisted"] += ival + // + // lapi + // case "cs_lapi_route_requests_total": if _, ok := mLapi[route]; !ok { mLapi[route] = make(map[string]int) @@ -256,6 +290,9 @@ func (ms metricStore) Fetch(url string) error { x.NonEmpty += ival } mLapiDecision[bouncer] = x + // + // decisions + // case "cs_active_decisions": if _, ok := mDecision[reason]; !ok { mDecision[reason] = make(map[string]map[string]int) @@ -265,15 +302,18 @@ func (ms metricStore) Fetch(url string) error { } mDecision[reason][origin][action] += ival case "cs_alerts": - /*if _, ok := 
mAlert[scenario]; !ok { - mAlert[scenario] = make(map[string]int) - }*/ mAlert[reason] += ival + // + // stash + // case "cs_cache_size": mStash[name] = struct { Type string Count int }{Type: mtype, Count: ival} + // + // appsec + // case "cs_appsec_reqs_total": if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) @@ -392,15 +432,15 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { func (cli *cliMetrics) NewCommand() *cobra.Command { var ( - url string + url string noUnit bool ) cmd := &cobra.Command{ - Use: "metrics", - Short: "Display crowdsec prometheus metrics.", - Long: `Fetch metrics from a Local API server and display them`, - Example: `# Show all Metrics, skip empty tables (same as "cecli metrics show") + Use: "metrics", + Short: "Display crowdsec prometheus metrics.", + Long: `Fetch metrics from a Local API server and display them`, + Example: `# Show all Metrics, skip empty tables (same as "cecli metrics show") cscli metrics # Show only some metrics, connect to a different url @@ -431,7 +471,7 @@ func (cli *cliMetrics) expandSectionGroups(args []string) []string { for _, section := range args { switch section { case "engine": - ret = append(ret, "acquisition", "parsers", "buckets", "stash") + ret = append(ret, "acquisition", "parsers", "buckets", "stash", "whitelists") case "lapi": ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") case "appsec": @@ -446,15 +486,15 @@ func (cli *cliMetrics) expandSectionGroups(args []string) []string { func (cli *cliMetrics) newShowCmd() *cobra.Command { var ( - url string + url string noUnit bool ) cmd := &cobra.Command{ - Use: "show [type]...", - Short: "Display all or part of the available metrics.", - Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, - Example: `# Show all Metrics, skip empty tables + 
Use: "show [type]...", + Short: "Display all or part of the available metrics.", + Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, + Example: `# Show all Metrics, skip empty tables cscli metrics show # Use an alias: "engine", "lapi" or "appsec" to show a group of metrics @@ -482,9 +522,9 @@ cscli metrics show acquisition parsers buckets stash -o json`, func (cli *cliMetrics) list() error { type metricType struct { - Type string `json:"type" yaml:"type"` - Title string `json:"title" yaml:"title"` - Description string `json:"description" yaml:"description"` + Type string `json:"type" yaml:"type"` + Title string `json:"title" yaml:"title"` + Description string `json:"description" yaml:"description"` } var allMetrics []metricType diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 72f53f94c49..f11ee11bf88 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -45,6 +45,38 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i return numRows } +func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { + if t == nil { + return 0, fmt.Errorf("nil table") + } + + numRows := 0 + + for _, name := range maptools.SortedKeys(stats) { + for _, reason := range maptools.SortedKeys(stats[name]) { + row := make([]string, 4) + row[0] = name + row[1] = reason + row[2] = "-" + row[3] = "-" + + for _, action := range maptools.SortedKeys(stats[name][reason]) { + value := stats[name][reason][action] + if action == "whitelisted" { + row[3] = fmt.Sprintf("%d", value) + } else if action == "hits" { + row[2] = fmt.Sprintf("%d", value) + } else { + log.Debugf("unexpected counter '%s' for whitelists = %d", action, value) + } + } + t.AddRow(row...) 
+ numRows++ + } + } + return numRows, nil +} + func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { return 0, fmt.Errorf("nil table") @@ -95,7 +127,7 @@ func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting bucket stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -108,16 +140,16 @@ func (s statAcquis) Description() (string, string) { func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) - t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket") + t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - keys := []string{"reads", "parsed", "unparsed", "pour"} + keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -137,7 +169,7 @@ func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -156,7 +188,7 @@ func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { keys := []string{"triggered"} if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while 
collecting appsec rules stats: %s", err) - } else if numRows > 0 || showEmpty{ + } else if numRows > 0 || showEmpty { renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) t.Render() } @@ -164,6 +196,26 @@ func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { } +func (s statWhitelist) Description() (string, string) { + return "Whitelist Metrics", + `Tracks the number of events processed and possibly whitelisted by each parser whitelist.` +} + +func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) { + t := newTable(out) + t.SetRowLines(false) + t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") + t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + + if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { + log.Warningf("while collecting parsers stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + renderTableTitle(out, "\n"+title+":") + t.Render() + } +} + func (s statParser) Description() (string, string) { return "Parser Metrics", `Tracks the number of events processed by each parser and indicates success of failure. Zero parsed lines means the parer(s) failed. 
Non-zero unparsed lines are fine as crowdsec select relevant lines.` @@ -181,7 +233,7 @@ func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -213,7 +265,7 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { } if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -254,7 +306,7 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -272,9 +324,9 @@ func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) { numRows := lapiMetricsToTable(t, s) - if numRows > 0 || showEmpty{ + if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -294,7 +346,7 @@ func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -320,9 +372,9 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { numRows++ } - if numRows > 0 || showEmpty{ + if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -353,9 +405,9 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { } } - if numRows > 0 || showEmpty{ + if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + 
renderTableTitle(out, "\n"+title+":") t.Render() } } @@ -380,9 +432,9 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { numRows++ } - if numRows > 0 || showEmpty{ + if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n" + title + ":") + renderTableTitle(out, "\n"+title+":") t.Render() } } diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index ca893872edb..fa2d8d5de32 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -161,7 +161,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) { leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, v1.LapiRouteHits, leaky.BucketsCurrentCount, - cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, + cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, parser.NodesWlHitsOk, parser.NodesWlHits, ) } else { log.Infof("Loading prometheus collectors") @@ -170,7 +170,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) { globalCsInfo, globalParsingHistogram, globalPourHistogram, v1.LapiRouteHits, v1.LapiMachineHits, v1.LapiBouncerHits, v1.LapiNilDecisions, v1.LapiNonNilDecisions, v1.LapiResponseTime, leaky.BucketsPour, leaky.BucketsUnderflow, leaky.BucketsCanceled, leaky.BucketsInstantiation, leaky.BucketsOverflow, leaky.BucketsCurrentCount, - globalActiveDecisions, globalAlerts, + globalActiveDecisions, globalAlerts, parser.NodesWlHitsOk, parser.NodesWlHits, cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, ) diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 23ed20511c3..fe5432ce938 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -168,9 +168,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() } exprErr := error(nil) - isWhitelisted := n.CheckIPsWL(p.ParseIPSources()) + isWhitelisted := n.CheckIPsWL(p) if !isWhitelisted { - 
isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv) + isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv, p) } if exprErr != nil { // Previous code returned nil if there was an error, so we keep this behavior diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 4f4f6a0f3d0..afdf88dc873 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -221,6 +221,24 @@ var NodesHitsKo = prometheus.NewCounterVec( []string{"source", "type", "name"}, ) +// + +var NodesWlHitsOk = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_wl_hits_ok_total", + Help: "Total events successfully whitelisted by node.", + }, + []string{"source", "type", "name", "reason"}, +) + +var NodesWlHits = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_node_wl_hits_total", + Help: "Total events processed by whitelist node.", + }, + []string{"source", "type", "name", "reason"}, +) + func stageidx(stage string, stages []string) int { for i, v := range stages { if stage == v { diff --git a/pkg/parser/whitelist.go b/pkg/parser/whitelist.go index 027a9a2858a..f3739a49438 100644 --- a/pkg/parser/whitelist.go +++ b/pkg/parser/whitelist.go @@ -8,6 +8,7 @@ import ( "github.com/antonmedv/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/prometheus/client_golang/prometheus" ) type Whitelist struct { @@ -36,11 +37,13 @@ func (n *Node) ContainsIPLists() bool { return len(n.Whitelist.B_Ips) > 0 || len(n.Whitelist.B_Cidrs) > 0 } -func (n *Node) CheckIPsWL(srcs []net.IP) bool { +func (n *Node) CheckIPsWL(p *types.Event) bool { + srcs := p.ParseIPSources() isWhitelisted := false if !n.ContainsIPLists() { return isWhitelisted } + NodesWlHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc() for _, src := range srcs { if isWhitelisted { break @@ -62,15 +65,19 @@ func (n *Node) CheckIPsWL(srcs []net.IP) bool { 
n.Logger.Tracef("whitelist: %s not in [%s]", src, v) } } + if isWhitelisted { + NodesWlHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc() + } return isWhitelisted } -func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}) (bool, error) { +func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}, p *types.Event) (bool, error) { isWhitelisted := false if !n.ContainsExprLists() { return false, nil } + NodesWlHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc() /* run whitelist expression tests anyway */ for eidx, e := range n.Whitelist.B_Exprs { //if we already know the event is whitelisted, skip the rest of the expressions @@ -94,6 +101,9 @@ func (n *Node) CheckExprWL(cachedExprEnv map[string]interface{}) (bool, error) { n.Logger.Errorf("unexpected type %t (%v) while running '%s'", output, output, n.Whitelist.Exprs[eidx]) } } + if isWhitelisted { + NodesWlHitsOk.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name, "reason": n.Whitelist.Reason}).Inc() + } return isWhitelisted, nil } diff --git a/pkg/parser/whitelist_test.go b/pkg/parser/whitelist_test.go index 8796aaedafe..501c655243d 100644 --- a/pkg/parser/whitelist_test.go +++ b/pkg/parser/whitelist_test.go @@ -289,9 +289,9 @@ func TestWhitelistCheck(t *testing.T) { var err error node.Whitelist = tt.whitelist node.CompileWLs() - isWhitelisted := node.CheckIPsWL(tt.event.ParseIPSources()) + isWhitelisted := node.CheckIPsWL(tt.event) if !isWhitelisted { - isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event}) + isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event}, tt.event) } require.NoError(t, err) require.Equal(t, tt.expected, isWhitelisted) From af1df0696b084997482729ba5ee3f7769270c7fe Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: 
Wed, 7 Feb 2024 11:10:25 +0100 Subject: [PATCH 014/581] refact cscli metric processing (#2816) * typos * refact cscli metric processing * lint --- .golangci.yml | 2 +- cmd/crowdsec-cli/metrics.go | 185 +++++++------------------ cmd/crowdsec-cli/metrics_table.go | 217 ++++++++++++++++++++++++++---- 3 files changed, 240 insertions(+), 164 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a3aacccb817..f69bf66eaa5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -22,7 +22,7 @@ linters-settings: gocognit: # lower this after refactoring - min-complexity: 150 + min-complexity: 145 gocyclo: # lower this after refactoring diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 6b3155e5549..6e23bcf12e4 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -2,6 +2,7 @@ package main import ( "encoding/json" + "errors" "fmt" "io" "net/http" @@ -42,8 +43,14 @@ type ( } ) +var ( + ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") + ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") + +) + type metricSection interface { - Table(io.Writer, bool, bool) + Table(out io.Writer, noUnit bool, showEmpty bool) Description() (string, string) } @@ -154,6 +161,9 @@ func (ms metricStore) Fetch(url string) error { origin := metric.Labels["origin"] action := metric.Labels["action"] + appsecEngine := metric.Labels["appsec_engine"] + appsecRule := metric.Labels["rule_name"] + mtype := metric.Labels["type"] fval, err := strconv.ParseFloat(value, 32) @@ -162,178 +172,78 @@ func (ms metricStore) Fetch(url string) error { } ival := int(fval) + switch fam.Name { // // buckets // case "cs_bucket_created_total": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - mBucket[name]["instantiation"] += ival + mBucket.Process(name, "instantiation", ival) case "cs_buckets": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - 
mBucket[name]["curr_count"] += ival + mBucket.Process(name, "curr_count", ival) case "cs_bucket_overflowed_total": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - mBucket[name]["overflow"] += ival + mBucket.Process(name, "overflow", ival) case "cs_bucket_poured_total": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mBucket[name]["pour"] += ival - mAcquis[source]["pour"] += ival + mBucket.Process(name, "pour", ival) + mAcquis.Process(source, "pour", ival) case "cs_bucket_underflowed_total": - if _, ok := mBucket[name]; !ok { - mBucket[name] = make(map[string]int) - } - mBucket[name]["underflow"] += ival + mBucket.Process(name, "underflow", ival) // // parsers // case "cs_parser_hits_total": - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mAcquis[source]["reads"] += ival + mAcquis.Process(source, "reads", ival) case "cs_parser_hits_ok_total": - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mAcquis[source]["parsed"] += ival + mAcquis.Process(source, "parsed", ival) case "cs_parser_hits_ko_total": - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mAcquis[source]["unparsed"] += ival + mAcquis.Process(source, "unparsed", ival) case "cs_node_hits_total": - if _, ok := mParser[name]; !ok { - mParser[name] = make(map[string]int) - } - mParser[name]["hits"] += ival + mParser.Process(name, "hits", ival) case "cs_node_hits_ok_total": - if _, ok := mParser[name]; !ok { - mParser[name] = make(map[string]int) - } - mParser[name]["parsed"] += ival + mParser.Process(name, "parsed", ival) case "cs_node_hits_ko_total": - if _, ok := mParser[name]; !ok { - mParser[name] = make(map[string]int) - } - mParser[name]["unparsed"] += ival + mParser.Process(name, "unparsed", ival) // // whitelists // case "cs_node_wl_hits_total": - if _, ok := 
mWhitelist[name]; !ok { - mWhitelist[name] = make(map[string]map[string]int) - } - if _, ok := mWhitelist[name][reason]; !ok { - mWhitelist[name][reason] = make(map[string]int) - } - mWhitelist[name][reason]["hits"] += ival + mWhitelist.Process(name, reason, "hits", ival) case "cs_node_wl_hits_ok_total": - if _, ok := mWhitelist[name]; !ok { - mWhitelist[name] = make(map[string]map[string]int) - } - if _, ok := mWhitelist[name][reason]; !ok { - mWhitelist[name][reason] = make(map[string]int) - } - mWhitelist[name][reason]["whitelisted"] += ival + mWhitelist.Process(name, reason, "whitelisted", ival) // track as well whitelisted lines at acquis level - if _, ok := mAcquis[source]; !ok { - mAcquis[source] = make(map[string]int) - } - mAcquis[source]["whitelisted"] += ival + mAcquis.Process(source, "whitelisted", ival) // // lapi // case "cs_lapi_route_requests_total": - if _, ok := mLapi[route]; !ok { - mLapi[route] = make(map[string]int) - } - mLapi[route][method] += ival + mLapi.Process(route, method, ival) case "cs_lapi_machine_requests_total": - if _, ok := mLapiMachine[machine]; !ok { - mLapiMachine[machine] = make(map[string]map[string]int) - } - if _, ok := mLapiMachine[machine][route]; !ok { - mLapiMachine[machine][route] = make(map[string]int) - } - mLapiMachine[machine][route][method] += ival + mLapiMachine.Process(machine, route, method, ival) case "cs_lapi_bouncer_requests_total": - if _, ok := mLapiBouncer[bouncer]; !ok { - mLapiBouncer[bouncer] = make(map[string]map[string]int) - } - if _, ok := mLapiBouncer[bouncer][route]; !ok { - mLapiBouncer[bouncer][route] = make(map[string]int) - } - mLapiBouncer[bouncer][route][method] += ival + mLapiBouncer.Process(bouncer, route, method, ival) case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": - if _, ok := mLapiDecision[bouncer]; !ok { - mLapiDecision[bouncer] = struct { - NonEmpty int - Empty int - }{} - } - x := mLapiDecision[bouncer] - if fam.Name == "cs_lapi_decisions_ko_total" { - x.Empty += 
ival - } else if fam.Name == "cs_lapi_decisions_ok_total" { - x.NonEmpty += ival - } - mLapiDecision[bouncer] = x + mLapiDecision.Process(bouncer, fam.Name, ival) // // decisions // case "cs_active_decisions": - if _, ok := mDecision[reason]; !ok { - mDecision[reason] = make(map[string]map[string]int) - } - if _, ok := mDecision[reason][origin]; !ok { - mDecision[reason][origin] = make(map[string]int) - } - mDecision[reason][origin][action] += ival + mDecision.Process(reason, origin, action, ival) case "cs_alerts": - mAlert[reason] += ival + mAlert.Process(reason, ival) // // stash // case "cs_cache_size": - mStash[name] = struct { - Type string - Count int - }{Type: mtype, Count: ival} + mStash.Process(name, mtype, ival) // // appsec // case "cs_appsec_reqs_total": - if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { - mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) - } - mAppsecEngine[metric.Labels["appsec_engine"]]["processed"] = ival + mAppsecEngine.Process(appsecEngine, "processed", ival) case "cs_appsec_block_total": - if _, ok := mAppsecEngine[metric.Labels["appsec_engine"]]; !ok { - mAppsecEngine[metric.Labels["appsec_engine"]] = make(map[string]int, 0) - } - mAppsecEngine[metric.Labels["appsec_engine"]]["blocked"] = ival + mAppsecEngine.Process(appsecEngine, "blocked", ival) case "cs_appsec_rule_hits": - appsecEngine := metric.Labels["appsec_engine"] - ruleID := metric.Labels["rule_name"] - if _, ok := mAppsecRule[appsecEngine]; !ok { - mAppsecRule[appsecEngine] = make(map[string]map[string]int, 0) - } - if _, ok := mAppsecRule[appsecEngine][ruleID]; !ok { - mAppsecRule[appsecEngine][ruleID] = make(map[string]int, 0) - } - mAppsecRule[appsecEngine][ruleID]["triggered"] = ival + mAppsecRule.Process(appsecEngine, appsecRule, "triggered", ival) default: log.Debugf("unknown: %+v", fam.Name) continue @@ -380,13 +290,13 @@ func (ms metricStore) Format(out io.Writer, sections []string, formatType string case "json": x, err := 
json.MarshalIndent(want, "", " ") if err != nil { - return fmt.Errorf("failed to unmarshal metrics : %v", err) + return fmt.Errorf("failed to marshal metrics: %w", err) } out.Write(x) case "raw": x, err := yaml.Marshal(want) if err != nil { - return fmt.Errorf("failed to unmarshal metrics : %v", err) + return fmt.Errorf("failed to marshal metrics: %w", err) } out.Write(x) default: @@ -404,11 +314,11 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { } if cfg.Prometheus == nil { - return fmt.Errorf("prometheus section missing, can't show metrics") + return ErrMissingConfig } if !cfg.Prometheus.Enabled { - return fmt.Errorf("prometheus is not enabled, can't show metrics") + return ErrMetricsDisabled } ms := NewMetricStore() @@ -427,6 +337,7 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { if err := ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit); err != nil { return err } + return nil } @@ -468,6 +379,7 @@ cscli metrics list`, // expandAlias returns a list of sections. The input can be a list of sections or alias. 
func (cli *cliMetrics) expandSectionGroups(args []string) []string { ret := []string{} + for _, section := range args { switch section { case "engine": @@ -522,8 +434,8 @@ cscli metrics show acquisition parsers buckets stash -o json`, func (cli *cliMetrics) list() error { type metricType struct { - Type string `json:"type" yaml:"type"` - Title string `json:"title" yaml:"title"` + Type string `json:"type" yaml:"type"` + Title string `json:"title" yaml:"title"` Description string `json:"description" yaml:"description"` } @@ -553,13 +465,13 @@ func (cli *cliMetrics) list() error { case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { - return fmt.Errorf("failed to unmarshal metrics: %w", err) + return fmt.Errorf("failed to marshal metric types: %w", err) } fmt.Println(string(x)) case "raw": x, err := yaml.Marshal(allMetrics) if err != nil { - return fmt.Errorf("failed to unmarshal metrics: %w", err) + return fmt.Errorf("failed to marshal metric types: %w", err) } fmt.Println(string(x)) } @@ -575,8 +487,7 @@ func (cli *cliMetrics) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - cli.list() - return nil + return cli.list() }, } diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index f11ee11bf88..da6ea3d9f1d 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "sort" + "strconv" "github.com/aquasecurity/table" log "github.com/sirupsen/logrus" @@ -11,17 +12,21 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" ) +// ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. 
+var ErrNilTable = fmt.Errorf("nil table") + func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count - // sort keys to keep consistent order when printing machineKeys := []string{} for k := range stats { machineKeys = append(machineKeys, k) } + sort.Strings(machineKeys) numRows := 0 + for _, machine := range machineKeys { // oneRow: route -> method -> count machineRow := stats[machine] @@ -33,53 +38,60 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i methodName, } if count != 0 { - row = append(row, fmt.Sprintf("%d", count)) + row = append(row, strconv.Itoa(count)) } else { row = append(row, "-") } + t.AddRow(row...) numRows++ } } } + return numRows } func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { if t == nil { - return 0, fmt.Errorf("nil table") + return 0, ErrNilTable } numRows := 0 for _, name := range maptools.SortedKeys(stats) { for _, reason := range maptools.SortedKeys(stats[name]) { - row := make([]string, 4) - row[0] = name - row[1] = reason - row[2] = "-" - row[3] = "-" + row := []string{ + name, + reason, + "-", + "-", + } for _, action := range maptools.SortedKeys(stats[name][reason]) { value := stats[name][reason][action] - if action == "whitelisted" { - row[3] = fmt.Sprintf("%d", value) - } else if action == "hits" { - row[2] = fmt.Sprintf("%d", value) - } else { + + switch action { + case "whitelisted": + row[3] = strconv.Itoa(value) + case "hits": + row[2] = strconv.Itoa(value) + default: log.Debugf("unexpected counter '%s' for whitelists = %d", action, value) } } + t.AddRow(row...) 
numRows++ } } + return numRows, nil } func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { - return 0, fmt.Errorf("nil table") + return 0, ErrNilTable } numRows := 0 @@ -89,12 +101,14 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri if !ok { continue } + row := []string{ alabel, } + for _, sl := range keys { if v, ok := astats[sl]; ok && v != 0 { - numberToShow := fmt.Sprintf("%d", v) + numberToShow := strconv.Itoa(v) if !noUnit { numberToShow = formatNumber(v) } @@ -104,15 +118,26 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri row = append(row, "-") } } + t.AddRow(row...) numRows++ } + return numRows, nil } func (s statBucket) Description() (string, string) { return "Bucket Metrics", - `Measure events in different scenarios. Current count is the number of buckets during metrics collection. Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` + `Measure events in different scenarios. Current count is the number of buckets during metrics collection. ` + + `Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` +} + +func (s statBucket) Process(bucket, metric string, val int) { + if _, ok := s[bucket]; !ok { + s[bucket] = make(map[string]int) + } + + s[bucket][metric] += val } func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { @@ -134,7 +159,18 @@ func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { func (s statAcquis) Description() (string, string) { return "Acquisition Metrics", - `Measures the lines read, parsed, and unparsed per datasource. Zero read lines indicate a misconfigured or inactive datasource. Zero parsed lines mean the parser(s) failed. 
Non-zero parsed lines are fine as crowdsec selects relevant lines.` + `Measures the lines read, parsed, and unparsed per datasource. ` + + `Zero read lines indicate a misconfigured or inactive datasource. ` + + `Zero parsed lines mean the parser(s) failed. ` + + `Non-zero parsed lines are fine as crowdsec selects relevant lines.` +} + +func (s statAcquis) Process(source, metric string, val int) { + if _, ok := s[source]; !ok { + s[source] = make(map[string]int) + } + + s[source][metric] += val } func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { @@ -159,12 +195,22 @@ func (s statAppsecEngine) Description() (string, string) { `Measures the number of parsed and blocked requests by the AppSec Component.` } +func (s statAppsecEngine) Process(appsecEngine, metric string, val int) { + if _, ok := s[appsecEngine]; !ok { + s[appsecEngine] = make(map[string]int) + } + + s[appsecEngine][metric] += val +} + func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") t.SetAlignment(table.AlignLeft, table.AlignLeft) + keys := []string{"processed", "blocked"} + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { @@ -179,13 +225,27 @@ func (s statAppsecRule) Description() (string, string) { `Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.` } +func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, val int) { + if _, ok := s[appsecEngine]; !ok { + s[appsecEngine] = make(map[string]map[string]int) + } + + if _, ok := s[appsecEngine][appsecRule]; !ok { + s[appsecEngine][appsecRule] = make(map[string]int) + } + + s[appsecEngine][appsecRule][metric] += val +} + func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { for appsecEngine, appsecEngineRulesStats 
:= range s { t := newTable(out) t.SetRowLines(false) t.SetHeaders("Rule ID", "Triggered") t.SetAlignment(table.AlignLeft, table.AlignLeft) + keys := []string{"triggered"} + if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { @@ -193,7 +253,6 @@ func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { t.Render() } } - } func (s statWhitelist) Description() (string, string) { @@ -201,6 +260,18 @@ func (s statWhitelist) Description() (string, string) { `Tracks the number of events processed and possibly whitelisted by each parser whitelist.` } +func (s statWhitelist) Process(whitelist, reason, metric string, val int) { + if _, ok := s[whitelist]; !ok { + s[whitelist] = make(map[string]map[string]int) + } + + if _, ok := s[whitelist][reason]; !ok { + s[whitelist][reason] = make(map[string]int) + } + + s[whitelist][reason][metric] += val +} + func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -218,7 +289,17 @@ func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) { func (s statParser) Description() (string, string) { return "Parser Metrics", - `Tracks the number of events processed by each parser and indicates success of failure. Zero parsed lines means the parer(s) failed. Non-zero unparsed lines are fine as crowdsec select relevant lines.` + `Tracks the number of events processed by each parser and indicates success of failure. ` + + `Zero parsed lines means the parer(s) failed. 
` + + `Non-zero unparsed lines are fine as crowdsec select relevant lines.` +} + +func (s statParser) Process(parser, metric string, val int) { + if _, ok := s[parser]; !ok { + s[parser] = make(map[string]int) + } + + s[parser][metric] += val } func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { @@ -243,6 +324,16 @@ func (s statStash) Description() (string, string) { `Tracks the status of stashes that might be created by various parsers and scenarios.` } +func (s statStash) Process(name, mtype string, val int) { + s[name] = struct { + Type string + Count int + }{ + Type: mtype, + Count: val, + } +} + func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -258,11 +349,12 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { row := []string{ alabel, astats.Type, - fmt.Sprintf("%d", astats.Count), + strconv.Itoa(astats.Count), } t.AddRow(row...) numRows++ } + if numRows > 0 || showEmpty { title, _ := s.Description() renderTableTitle(out, "\n"+title+":") @@ -275,6 +367,14 @@ func (s statLapi) Description() (string, string) { `Monitors the requests made to local API routes.` } +func (s statLapi) Process(route, method string, val int) { + if _, ok := s[route]; !ok { + s[route] = make(map[string]int) + } + + s[route][method] += val +} + func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -291,13 +391,14 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { for skey := range astats { subKeys = append(subKeys, skey) } + sort.Strings(subKeys) for _, sl := range subKeys { row := []string{ alabel, sl, - fmt.Sprintf("%d", astats[sl]), + strconv.Itoa(astats[sl]), } t.AddRow(row...) 
numRows++ @@ -316,6 +417,18 @@ func (s statLapiMachine) Description() (string, string) { `Tracks the number of calls to the local API from each registered machine.` } +func (s statLapiMachine) Process(machine, route, method string, val int) { + if _, ok := s[machine]; !ok { + s[machine] = make(map[string]map[string]int) + } + + if _, ok := s[machine][route]; !ok { + s[machine][route] = make(map[string]int) + } + + s[machine][route][method] += val +} + func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -336,6 +449,18 @@ func (s statLapiBouncer) Description() (string, string) { `Tracks total hits to remediation component related API routes.` } +func (s statLapiBouncer) Process(bouncer, route, method string, val int) { + if _, ok := s[bouncer]; !ok { + s[bouncer] = make(map[string]map[string]int) + } + + if _, ok := s[bouncer][route]; !ok { + s[bouncer][route] = make(map[string]int) + } + + s[bouncer][route][method] += val +} + func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -356,6 +481,26 @@ func (s statLapiDecision) Description() (string, string) { `Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.` } +func (s statLapiDecision) Process(bouncer, fam string, val int) { + if _, ok := s[bouncer]; !ok { + s[bouncer] = struct { + NonEmpty int + Empty int + }{} + } + + x := s[bouncer] + + switch fam { + case "cs_lapi_decisions_ko_total": + x.Empty += val + case "cs_lapi_decisions_ok_total": + x.NonEmpty += val + } + + s[bouncer] = x +} + func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -363,11 +508,12 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) numRows := 0 + for bouncer, hits := range s { t.AddRow( bouncer, - 
fmt.Sprintf("%d", hits.Empty), - fmt.Sprintf("%d", hits.NonEmpty), + strconv.Itoa(hits.Empty), + strconv.Itoa(hits.NonEmpty), ) numRows++ } @@ -381,7 +527,20 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { func (s statDecision) Description() (string, string) { return "Local API Decisions", - `Provides information about all currently active decisions. Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` + `Provides information about all currently active decisions. ` + + `Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` +} + +func (s statDecision) Process(reason, origin, action string, val int) { + if _, ok := s[reason]; !ok { + s[reason] = make(map[string]map[string]int) + } + + if _, ok := s[reason][origin]; !ok { + s[reason][origin] = make(map[string]int) + } + + s[reason][origin][action] += val } func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { @@ -391,6 +550,7 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) numRows := 0 + for reason, origins := range s { for origin, actions := range origins { for action, hits := range actions { @@ -398,7 +558,7 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { reason, origin, action, - fmt.Sprintf("%d", hits), + strconv.Itoa(hits), ) numRows++ } @@ -417,6 +577,10 @@ func (s statAlert) Description() (string, string) { `Tracks the total number of past and present alerts for the installed scenarios.` } +func (s statAlert) Process(reason string, val int) { + s[reason] += val +} + func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) @@ -424,10 +588,11 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { t.SetAlignment(table.AlignLeft, table.AlignLeft) numRows := 0 + for 
scenario, hits := range s { t.AddRow( scenario, - fmt.Sprintf("%d", hits), + strconv.Itoa(hits), ) numRows++ } From df159b016705aa78ac64020634d66152efd0e76b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 Feb 2024 13:55:24 +0100 Subject: [PATCH 015/581] update calls to deprecated x509 methods (#2824) --- .github/workflows/docker-tests.yml | 4 +- .golangci.yml | 4 - pkg/apiserver/middlewares/v1/api_key.go | 2 +- pkg/apiserver/middlewares/v1/tls_auth.go | 104 ++++++++++++----------- test/bats/11_bouncers_tls.bats | 3 + test/bats/30_machines_tls.bats | 7 +- 6 files changed, 65 insertions(+), 59 deletions(-) diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 7bc63de0178..d3ae4f90d79 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -50,7 +50,7 @@ jobs: cache-to: type=gha,mode=min - name: "Setup Python" - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.x" @@ -61,7 +61,7 @@ jobs: - name: "Cache virtualenvs" id: cache-pipenv - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: ~/.local/share/virtualenvs key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} diff --git a/.golangci.yml b/.golangci.yml index f69bf66eaa5..3161b2c0aaf 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -310,10 +310,6 @@ issues: # Will fix, might be trickier # - - linters: - - staticcheck - text: "x509.ParseCRL has been deprecated since Go 1.19: Use ParseRevocationList instead" - # https://github.com/pkg/errors/issues/245 - linters: - depguard diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index ae7645e1b85..41ee15b4417 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -66,7 +66,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { validCert, extractedCN, err := a.TlsAuth.ValidateCert(c) if 
!validCert { - logger.Errorf("invalid client certificate: %s", err) + logger.Error(err) return nil } diff --git a/pkg/apiserver/middlewares/v1/tls_auth.go b/pkg/apiserver/middlewares/v1/tls_auth.go index 904f6cd445a..bd2c4bb30e7 100644 --- a/pkg/apiserver/middlewares/v1/tls_auth.go +++ b/pkg/apiserver/middlewares/v1/tls_auth.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto" "crypto/x509" + "encoding/pem" "fmt" "io" "net/http" @@ -19,14 +20,13 @@ import ( type TLSAuth struct { AllowedOUs []string CrlPath string - revokationCache map[string]cacheEntry + revocationCache map[string]cacheEntry cacheExpiration time.Duration logger *log.Entry } type cacheEntry struct { revoked bool - err error timestamp time.Time } @@ -89,10 +89,12 @@ func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool { return false } -func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { - if cert.OCSPServer == nil || (cert.OCSPServer != nil && len(cert.OCSPServer) == 0) { +// isOCSPRevoked checks if the client certificate is revoked by any of the OCSP servers present in the certificate. +// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the OCSP check was successful and could be cached. 
+func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { + if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { ta.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") - return false, nil + return false, true } for _, server := range cert.OCSPServer { @@ -104,9 +106,10 @@ func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificat switch ocspResponse.Status { case ocsp.Good: - return false, nil + return false, true case ocsp.Revoked: - return true, fmt.Errorf("client certificate is revoked by server %s", server) + ta.logger.Errorf("TLSAuth: client certificate is revoked by server %s", server) + return true, true case ocsp.Unknown: log.Debugf("unknow OCSP status for server %s", server) continue @@ -115,83 +118,82 @@ func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificat log.Infof("Could not get any valid OCSP response, assuming the cert is revoked") - return true, nil + return true, false } -func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, error) { +// isCRLRevoked checks if the client certificate is revoked by the CRL present in the CrlPath. +// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the CRL check was successful and could be cached. 
+func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, bool) { if ta.CrlPath == "" { - ta.logger.Warn("no crl_path, skipping CRL check") - return false, nil + ta.logger.Info("no crl_path, skipping CRL check") + return false, true } crlContent, err := os.ReadFile(ta.CrlPath) if err != nil { - ta.logger.Warnf("could not read CRL file, skipping check: %s", err) - return false, nil + ta.logger.Errorf("could not read CRL file, skipping check: %s", err) + return false, false } - crl, err := x509.ParseCRL(crlContent) + crlBinary, rest := pem.Decode(crlContent) + if len(rest) > 0 { + ta.logger.Warn("CRL file contains more than one PEM block, ignoring the rest") + } + + crl, err := x509.ParseRevocationList(crlBinary.Bytes) if err != nil { - ta.logger.Warnf("could not parse CRL file, skipping check: %s", err) - return false, nil + ta.logger.Errorf("could not parse CRL file, skipping check: %s", err) + return false, false } - if crl.HasExpired(time.Now().UTC()) { + now := time.Now().UTC() + + if now.After(crl.NextUpdate) { ta.logger.Warn("CRL has expired, will still validate the cert against it.") } - for _, revoked := range crl.TBSCertList.RevokedCertificates { + if now.Before(crl.ThisUpdate) { + ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.") + } + + for _, revoked := range crl.RevokedCertificateEntries { if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { - return true, fmt.Errorf("client certificate is revoked by CRL") + ta.logger.Warn("client certificate is revoked by CRL") + return true, true } } - return false, nil + return false, true } func (ta *TLSAuth) isRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { sn := cert.SerialNumber.String() - if cacheValue, ok := ta.revokationCache[sn]; ok { + if cacheValue, ok := ta.revocationCache[sn]; ok { if time.Now().UTC().Sub(cacheValue.timestamp) < ta.cacheExpiration { - ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t | %s", sn, cacheValue.revoked, 
cacheValue.err) - return cacheValue.revoked, cacheValue.err - } else { - ta.logger.Debugf("TLSAuth: cached value expired, removing from cache") - delete(ta.revokationCache, sn) + ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t", sn, cacheValue.revoked) + return cacheValue.revoked, nil } + + ta.logger.Debugf("TLSAuth: cached value expired, removing from cache") + delete(ta.revocationCache, sn) } else { ta.logger.Tracef("TLSAuth: no cached value for cert %s", sn) } - revoked, err := ta.isOCSPRevoked(cert, issuer) - if err != nil { - ta.revokationCache[sn] = cacheEntry{ - revoked: revoked, - err: err, - timestamp: time.Now().UTC(), - } + revokedByOCSP, cacheOCSP := ta.isOCSPRevoked(cert, issuer) - return true, err - } + revokedByCRL, cacheCRL := ta.isCRLRevoked(cert) - if revoked { - ta.revokationCache[sn] = cacheEntry{ + revoked := revokedByOCSP || revokedByCRL + + if cacheOCSP && cacheCRL { + ta.revocationCache[sn] = cacheEntry{ revoked: revoked, - err: err, timestamp: time.Now().UTC(), } - - return true, nil - } - - revoked, err = ta.isCRLRevoked(cert) - ta.revokationCache[sn] = cacheEntry{ - revoked: revoked, - err: err, - timestamp: time.Now().UTC(), } - return revoked, err + return revoked, nil } func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { @@ -265,11 +267,11 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) { revoked, err := ta.isInvalid(clientCert, c.Request.TLS.VerifiedChains[0][1]) if err != nil { ta.logger.Errorf("TLSAuth: error checking if client certificate is revoked: %s", err) - return false, "", fmt.Errorf("could not check for client certification revokation status: %w", err) + return false, "", fmt.Errorf("could not check for client certification revocation status: %w", err) } if revoked { - return false, "", fmt.Errorf("client certificate is revoked") + return false, "", fmt.Errorf("client certificate for CN=%s OU=%s is revoked", clientCert.Subject.CommonName, 
clientCert.Subject.OrganizationalUnit) } ta.logger.Debugf("client OU %v is allowed vs required OU %v", clientCert.Subject.OrganizationalUnit, ta.AllowedOUs) @@ -282,7 +284,7 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) { func NewTLSAuth(allowedOus []string, crlPath string, cacheExpiration time.Duration, logger *log.Entry) (*TLSAuth, error) { ta := &TLSAuth{ - revokationCache: map[string]cacheEntry{}, + revocationCache: map[string]cacheEntry{}, cacheExpiration: cacheExpiration, CrlPath: crlPath, logger: logger, diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 8fb4579259d..2c39aae3079 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -90,7 +90,10 @@ teardown() { } @test "simulate one bouncer request with a revoked certificate" { + truncate_log rune -0 curl -i -s --cert "${tmpdir}/bouncer_revoked.pem" --key "${tmpdir}/bouncer_revoked-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_log --partial "client certificate is revoked by CRL" + assert_log --partial "client certificate for CN=localhost OU=[bouncer-ou] is revoked" assert_output --partial "access forbidden" rune -0 cscli bouncers list -o json assert_output "[]" diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 535435336ba..311293ca70c 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -132,13 +132,15 @@ teardown() { ' config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start + rune -1 cscli lapi status rune -0 cscli machines list -o json assert_output '[]' } @test "revoked cert for agent" { + truncate_log config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' - .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | .key_path=strenv(tmpdir) + "/agent_revoked-key.pem" | .cert_path=strenv(tmpdir) + "/agent_revoked.pem" 
| .url="https://127.0.0.1:8080" @@ -146,6 +148,9 @@ teardown() { config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start + rune -1 cscli lapi status + assert_log --partial "client certificate is revoked by CRL" + assert_log --partial "client certificate for CN=localhost OU=[agent-ou] is revoked" rune -0 cscli machines list -o json assert_output '[]' } From fa56d35a483f89b0fb1bd74a9c3ee4ae4ca31623 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 9 Feb 2024 13:37:49 +0000 Subject: [PATCH 016/581] [Loki] Set headers/basic auth if set for queryRange (#2815) --- .../loki/internal/lokiclient/loki_client.go | 41 ++++++++++++------- pkg/acquisition/modules/loki/loki_test.go | 33 ++++++++++----- 2 files changed, 48 insertions(+), 26 deletions(-) diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 8451a86fcdf..d2af4e8af28 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -25,6 +25,7 @@ type LokiClient struct { t *tomb.Tomb fail_start time.Time currentTickerInterval time.Duration + requestHeaders map[string]string } type Config struct { @@ -116,7 +117,7 @@ func (lc *LokiClient) queryRange(uri string, ctx context.Context, c chan *LokiQu case <-lc.t.Dying(): return lc.t.Err() case <-ticker.C: - resp, err := http.Get(uri) + resp, err := lc.Get(uri) if err != nil { if ok := lc.shouldRetry(); !ok { return errors.Wrapf(err, "error querying range") @@ -127,6 +128,7 @@ func (lc *LokiClient) queryRange(uri string, ctx context.Context, c chan *LokiQu } if resp.StatusCode != http.StatusOK { + lc.Logger.Warnf("bad HTTP response code for query range: %d", resp.StatusCode) body, _ := io.ReadAll(resp.Body) resp.Body.Close() if ok := lc.shouldRetry(); !ok { @@ -215,7 +217,7 @@ func (lc *LokiClient) Ready(ctx context.Context) error { return 
lc.t.Err() case <-tick.C: lc.Logger.Debug("Checking if Loki is ready") - resp, err := http.Get(url) + resp, err := lc.Get(url) if err != nil { lc.Logger.Warnf("Error checking if Loki is ready: %s", err) continue @@ -251,10 +253,9 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) { } requestHeader := http.Header{} - for k, v := range lc.config.Headers { + for k, v := range lc.requestHeaders { requestHeader.Add(k, v) } - requestHeader.Set("User-Agent", "Crowdsec "+cwversion.VersionStr()) lc.Logger.Infof("Connecting to %s", u) conn, _, err := dialer.Dial(u, requestHeader) @@ -293,16 +294,6 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, time.Now().Add(-lc.config.Since)) - requestHeader := http.Header{} - for k, v := range lc.config.Headers { - requestHeader.Add(k, v) - } - - if lc.config.Username != "" || lc.config.Password != "" { - requestHeader.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(lc.config.Username+":"+lc.config.Password))) - } - - requestHeader.Set("User-Agent", "Crowdsec "+cwversion.VersionStr()) lc.Logger.Infof("Connecting to %s", url) lc.t.Go(func() error { return lc.queryRange(url, ctx, c, infinite) @@ -310,6 +301,26 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ return c } +// Create a wrapper for http.Get to be able to set headers and auth +func (lc *LokiClient) Get(url string) (*http.Response, error) { + request, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + for k, v := range lc.requestHeaders { + request.Header.Add(k, v) + } + return http.DefaultClient.Do(request) +} + func NewLokiClient(config Config) *LokiClient { - return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config} + headers := make(map[string]string) + for k, v := range config.Headers { + headers[k] = v + } + if config.Username != "" || 
config.Password != "" { + headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) + } + headers["User-Agent"] = "Crowdsec " + cwversion.VersionStr() + return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config, requestHeaders: headers} } diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index fae2e3aa98f..6cac1c0fec3 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -276,10 +276,17 @@ func feedLoki(logger *log.Entry, n int, title string) error { if err != nil { return err } - resp, err := http.Post("http://127.0.0.1:3100/loki/api/v1/push", "application/json", bytes.NewBuffer(buff)) + req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) if err != nil { return err } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("X-Scope-OrgID", "1234") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() if resp.StatusCode != http.StatusNoContent { b, _ := io.ReadAll(resp.Body) logger.Error(string(b)) @@ -306,6 +313,8 @@ mode: cat source: loki url: http://127.0.0.1:3100 query: '{server="demo",key="%s"}' +headers: + x-scope-orgid: "1234" since: 1h `, title), }, @@ -362,26 +371,26 @@ func TestStreamingAcquisition(t *testing.T) { }{ { name: "Bad port", - config: ` -mode: tail + config: `mode: tail source: loki -url: http://127.0.0.1:3101 +url: "http://127.0.0.1:3101" +headers: + x-scope-orgid: "1234" query: > - {server="demo"} -`, // No Loki server here + {server="demo"}`, // No Loki server here expectedErr: "", streamErr: `loki is not ready: context deadline exceeded`, expectedLines: 0, }, { name: "ok", - config: ` -mode: tail + config: `mode: tail source: loki -url: http://127.0.0.1:3100 +url: "http://127.0.0.1:3100" +headers: + x-scope-orgid: "1234" query: > - 
{server="demo"} -`, + {server="demo"}`, expectedErr: "", streamErr: "", expectedLines: 20, @@ -456,6 +465,8 @@ func TestStopStreaming(t *testing.T) { mode: tail source: loki url: http://127.0.0.1:3100 +headers: + x-scope-orgid: "1234" query: > {server="demo"} ` From 332af5dd8dd7a546c94758390e404f3ecf428fda Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 9 Feb 2024 14:39:34 +0100 Subject: [PATCH 017/581] appsec: split return code for bouncer and user (#2821) --- pkg/acquisition/modules/appsec/appsec.go | 8 +- .../modules/appsec/appsec_runner.go | 7 +- pkg/acquisition/modules/appsec/appsec_test.go | 735 ++++++++++++++++-- pkg/appsec/appsec.go | 146 ++-- 4 files changed, 777 insertions(+), 119 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 030724fc3e9..4e2ff0bd22b 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -354,15 +354,17 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { w.InChan <- parsedRequest + /* + response is a copy of w.AppSecRuntime.Response that is safe to use. 
+ As OutOfBand might still be running, the original one can be modified + */ response := <-parsedRequest.ResponseChannel - statusCode := http.StatusOK if response.InBandInterrupt { - statusCode = http.StatusForbidden AppsecBlockCounter.With(prometheus.Labels{"source": parsedRequest.RemoteAddrNormalized, "appsec_engine": parsedRequest.AppsecEngine}).Inc() } - appsecResponse := w.AppsecRuntime.GenerateResponse(response, logger) + statusCode, appsecResponse := w.AppsecRuntime.GenerateResponse(response, logger) logger.Debugf("Response: %+v", appsecResponse) rw.WriteHeader(statusCode) diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index a9d74aa8f63..cc7264aa2c8 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -226,7 +226,8 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { if in := request.Tx.Interruption(); in != nil { r.logger.Debugf("inband rules matched : %d", in.RuleID) r.AppsecRuntime.Response.InBandInterrupt = true - r.AppsecRuntime.Response.HTTPResponseCode = r.AppsecRuntime.Config.BlockedHTTPCode + r.AppsecRuntime.Response.BouncerHTTPResponseCode = r.AppsecRuntime.Config.BouncerBlockedHTTPCode + r.AppsecRuntime.Response.UserHTTPResponseCode = r.AppsecRuntime.Config.UserBlockedHTTPCode r.AppsecRuntime.Response.Action = r.AppsecRuntime.DefaultRemediation if _, ok := r.AppsecRuntime.RemediationById[in.RuleID]; ok { @@ -252,7 +253,9 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { r.logger.Errorf("unable to generate appsec event : %s", err) return } - r.outChan <- *appsecOvlfw + if appsecOvlfw != nil { + r.outChan <- *appsecOvlfw + } } // Should the in band match trigger an event ? 
diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 2a58580137d..25aea0c78ea 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -1,6 +1,7 @@ package appsecacquisition import ( + "net/http" "net/url" "testing" "time" @@ -21,16 +22,21 @@ Missing tests (wip): */ type appsecRuleTest struct { - name string - expected_load_ok bool - inband_rules []appsec_rule.CustomRule - outofband_rules []appsec_rule.CustomRule - on_load []appsec.Hook - pre_eval []appsec.Hook - post_eval []appsec.Hook - on_match []appsec.Hook - input_request appsec.ParsedRequest - output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse) + name string + expected_load_ok bool + inband_rules []appsec_rule.CustomRule + outofband_rules []appsec_rule.CustomRule + on_load []appsec.Hook + pre_eval []appsec.Hook + post_eval []appsec.Hook + on_match []appsec.Hook + BouncerBlockedHTTPCode int + UserBlockedHTTPCode int + UserPassedHTTPCode int + DefaultRemediation string + DefaultPassAction string + input_request appsec.ParsedRequest + output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) } func TestAppsecOnMatchHooks(t *testing.T) { @@ -53,13 +59,14 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) require.Equal(t, types.LOG, events[1].Type) require.Len(t, responses, 1) - require.Equal(t, 403, responses[0].HTTPResponseCode) - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + 
require.Equal(t, 403, responses[0].UserHTTPResponseCode) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, @@ -84,17 +91,18 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) require.Equal(t, types.LOG, events[1].Type) require.Len(t, responses, 1) - require.Equal(t, 413, responses[0].HTTPResponseCode) - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + require.Equal(t, 413, responses[0].UserHTTPResponseCode) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, { - name: "on_match: change action to another standard one (log)", + name: "on_match: change action to a non standard one (log)", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -114,7 +122,7 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) require.Equal(t, types.LOG, events[1].Type) @@ -143,16 +151,16 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, 
events[0].Type) require.Equal(t, types.LOG, events[1].Type) require.Len(t, responses, 1) - require.Equal(t, "allow", responses[0].Action) + require.Equal(t, appsec.AllowRemediation, responses[0].Action) }, }, { - name: "on_match: change action to another standard one (deny/ban/block)", + name: "on_match: change action to another standard one (ban)", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -164,7 +172,7 @@ func TestAppsecOnMatchHooks(t *testing.T) { }, }, on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('deny')"}}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, }, input_request: appsec.ParsedRequest{ RemoteAddr: "1.2.3.4", @@ -172,10 +180,10 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, responses, 1) //note: SetAction normalizes deny, ban and block to ban - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, { @@ -199,10 +207,10 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, responses, 1) //note: SetAction normalizes deny, ban and block to ban - require.Equal(t, "captcha", responses[0].Action) + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) }, }, { @@ -226,7 +234,7 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events 
[]types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) require.Equal(t, types.LOG, events[1].Type) @@ -255,11 +263,11 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 1) require.Equal(t, types.LOG, events[0].Type) require.Len(t, responses, 1) - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, { @@ -283,11 +291,11 @@ func TestAppsecOnMatchHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 1) require.Equal(t, types.APPSEC, events[0].Type) require.Len(t, responses, 1) - require.Equal(t, "ban", responses[0].Action) + require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, } @@ -328,7 +336,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) @@ -356,7 +364,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: 
url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) @@ -391,7 +399,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) @@ -419,7 +427,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) @@ -447,7 +455,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) @@ -472,7 +480,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, 
appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 1) require.Equal(t, types.LOG, events[0].Type) require.True(t, events[0].Appsec.HasOutBandMatches) @@ -506,7 +514,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Len(t, responses, 1) require.Equal(t, "foobar", responses[0].Action) @@ -533,7 +541,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Len(t, responses, 1) require.Equal(t, "foobar", responses[0].Action) @@ -560,10 +568,12 @@ func TestAppsecPreEvalHooks(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Len(t, events, 2) require.Len(t, responses, 1) require.Equal(t, "foobar", responses[0].Action) + require.Equal(t, "foobar", appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) }, }, } @@ -574,6 +584,473 @@ func TestAppsecPreEvalHooks(t *testing.T) { }) } } + +func TestAppsecRemediationConfigHooks(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: 
[]string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "SetRemediation", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{{Apply: []string{"SetRemediation('captcha')"}}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: + + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "SetRemediation", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{{Apply: []string{"SetReturnCode(418)"}}}, //rule ID is generated at runtime. If you change rule, it will break the test (: + + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} +func TestOnMatchRemediationHooks(t *testing.T) { + tests := []appsecRuleTest{ + { + name: "set remediation to allow with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", 
+ Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "set remediation to captcha + custom user code with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusForbidden, statusCode) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecDefaultPassRemediation(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic non-matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: 
"^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassAction: pass", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + DefaultPassAction: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassAction: captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + DefaultPassAction: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, 
appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) //@tko: body is captcha, but as it's 200, captcha won't be showed to user + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassHTTPCode: 200", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + UserPassedHTTPCode: 200, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassHTTPCode: 200", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + UserPassedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, 
appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecDefaultRemediation(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to ban (default)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "ban", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + 
}, + }, + { + name: "default remediation to allow", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "custom user HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, 
+ Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + { + name: "custom remediation + HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + DefaultRemediation: "foobar", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, "foobar", responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, "foobar", appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + func TestAppsecRuleMatches(t *testing.T) { /* @@ -601,7 +1078,7 @@ func TestAppsecRuleMatches(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode 
int) { require.Len(t, events, 2) require.Equal(t, types.APPSEC, events[0].Type) @@ -632,13 +1109,172 @@ func TestAppsecRuleMatches(t *testing.T) { URI: "/urllll", Args: url.Values{"foo": []string{"tutu"}}, }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse) { + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { require.Empty(t, events) require.Len(t, responses, 1) require.False(t, responses[0].InBandInterrupt) require.False(t, responses[0].OutOfBandInterrupt) }, }, + { + name: "default remediation to allow", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "captcha", + output_asserts: func(events []types.Event, responses 
[]appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "no default remediation / custom user HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + { + name: "no match but try to set remediation to captcha with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + 
require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + { + name: "no match but try to set user HTTP code with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + { + name: "no match but try to set remediation with pre_eval hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, } for _, test := range tests { @@ -678,7 +1314,16 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { outofbandRules = 
append(outofbandRules, strRule) } - appsecCfg := appsec.AppsecConfig{Logger: logger, OnLoad: test.on_load, PreEval: test.pre_eval, PostEval: test.post_eval, OnMatch: test.on_match} + appsecCfg := appsec.AppsecConfig{Logger: logger, + OnLoad: test.on_load, + PreEval: test.pre_eval, + PostEval: test.post_eval, + OnMatch: test.on_match, + BouncerBlockedHTTPCode: test.BouncerBlockedHTTPCode, + UserBlockedHTTPCode: test.UserBlockedHTTPCode, + UserPassedHTTPCode: test.UserPassedHTTPCode, + DefaultRemediation: test.DefaultRemediation, + DefaultPassAction: test.DefaultPassAction} AppsecRuntime, err := appsecCfg.Build() if err != nil { t.Fatalf("unable to build appsec runtime : %s", err) @@ -724,8 +1369,10 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { runner.handleRequest(&input) time.Sleep(50 * time.Millisecond) + + http_status, appsecResponse := AppsecRuntime.GenerateResponse(OutputResponses[0], logger) log.Infof("events : %s", spew.Sdump(OutputEvents)) log.Infof("responses : %s", spew.Sdump(OutputResponses)) - test.output_asserts(OutputEvents, OutputResponses) + test.output_asserts(OutputEvents, OutputResponses, appsecResponse, http_status) } diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index ec7e7bef3b6..554fc3b7123 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -2,6 +2,7 @@ package appsec import ( "fmt" + "net/http" "os" "regexp" @@ -30,6 +31,12 @@ const ( hookOnMatch ) +const ( + BanRemediation = "ban" + CaptchaRemediation = "captcha" + AllowRemediation = "allow" +) + func (h *Hook) Build(hookStage int) error { ctx := map[string]interface{}{} @@ -62,12 +69,13 @@ func (h *Hook) Build(hookStage int) error { } type AppsecTempResponse struct { - InBandInterrupt bool - OutOfBandInterrupt bool - Action string //allow, deny, captcha, log - HTTPResponseCode int - SendEvent bool //do we send an internal event on rule match - SendAlert bool //do we send an alert on rule match + InBandInterrupt bool + OutOfBandInterrupt bool + Action 
string //allow, deny, captcha, log + UserHTTPResponseCode int //The response code to send to the user + BouncerHTTPResponseCode int //The response code to send to the remediation component + SendEvent bool //do we send an internal event on rule match + SendAlert bool //do we send an alert on rule match } type AppsecSubEngineOpts struct { @@ -110,31 +118,33 @@ type AppsecRuntimeConfig struct { } type AppsecConfig struct { - Name string `yaml:"name"` - OutOfBandRules []string `yaml:"outofband_rules"` - InBandRules []string `yaml:"inband_rules"` - DefaultRemediation string `yaml:"default_remediation"` - DefaultPassAction string `yaml:"default_pass_action"` - BlockedHTTPCode int `yaml:"blocked_http_code"` - PassedHTTPCode int `yaml:"passed_http_code"` - OnLoad []Hook `yaml:"on_load"` - PreEval []Hook `yaml:"pre_eval"` - PostEval []Hook `yaml:"post_eval"` - OnMatch []Hook `yaml:"on_match"` - VariablesTracking []string `yaml:"variables_tracking"` - InbandOptions AppsecSubEngineOpts `yaml:"inband_options"` - OutOfBandOptions AppsecSubEngineOpts `yaml:"outofband_options"` + Name string `yaml:"name"` + OutOfBandRules []string `yaml:"outofband_rules"` + InBandRules []string `yaml:"inband_rules"` + DefaultRemediation string `yaml:"default_remediation"` + DefaultPassAction string `yaml:"default_pass_action"` + BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` //returned to the bouncer + BouncerPassedHTTPCode int `yaml:"passed_http_code"` //returned to the bouncer + UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` //returned to the user + UserPassedHTTPCode int `yaml:"user_passed_http_code"` //returned to the user + + OnLoad []Hook `yaml:"on_load"` + PreEval []Hook `yaml:"pre_eval"` + PostEval []Hook `yaml:"post_eval"` + OnMatch []Hook `yaml:"on_match"` + VariablesTracking []string `yaml:"variables_tracking"` + InbandOptions AppsecSubEngineOpts `yaml:"inband_options"` + OutOfBandOptions AppsecSubEngineOpts `yaml:"outofband_options"` LogLevel *log.Level 
`yaml:"log_level"` Logger *log.Entry `yaml:"-"` } func (w *AppsecRuntimeConfig) ClearResponse() { - w.Logger.Debugf("#-> %p", w) w.Response = AppsecTempResponse{} - w.Logger.Debugf("-> %p", w.Config) w.Response.Action = w.Config.DefaultPassAction - w.Response.HTTPResponseCode = w.Config.PassedHTTPCode + w.Response.BouncerHTTPResponseCode = w.Config.BouncerPassedHTTPCode + w.Response.UserHTTPResponseCode = w.Config.UserPassedHTTPCode w.Response.SendEvent = true w.Response.SendAlert = true } @@ -191,24 +201,35 @@ func (wc *AppsecConfig) GetDataDir() string { func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { ret := &AppsecRuntimeConfig{Logger: wc.Logger.WithField("component", "appsec_runtime_config")} - //set the defaults - switch wc.DefaultRemediation { - case "": - wc.DefaultRemediation = "ban" - case "ban", "captcha", "log": - //those are the officially supported remediation(s) - default: - wc.Logger.Warningf("default '%s' remediation of %s is none of [ban,captcha,log] ensure bouncer compatbility!", wc.DefaultRemediation, wc.Name) + + if wc.BouncerBlockedHTTPCode == 0 { + wc.BouncerBlockedHTTPCode = http.StatusForbidden + } + if wc.BouncerPassedHTTPCode == 0 { + wc.BouncerPassedHTTPCode = http.StatusOK } - if wc.BlockedHTTPCode == 0 { - wc.BlockedHTTPCode = 403 + + if wc.UserBlockedHTTPCode == 0 { + wc.UserBlockedHTTPCode = http.StatusForbidden } - if wc.PassedHTTPCode == 0 { - wc.PassedHTTPCode = 200 + if wc.UserPassedHTTPCode == 0 { + wc.UserPassedHTTPCode = http.StatusOK } if wc.DefaultPassAction == "" { - wc.DefaultPassAction = "allow" + wc.DefaultPassAction = AllowRemediation } + if wc.DefaultRemediation == "" { + wc.DefaultRemediation = BanRemediation + } + + //set the defaults + switch wc.DefaultRemediation { + case BanRemediation, CaptchaRemediation, AllowRemediation: + //those are the officially supported remediation(s) + default: + wc.Logger.Warningf("default '%s' remediation of %s is none of [%s,%s,%s] ensure bouncer compatbility!", 
wc.DefaultRemediation, wc.Name, BanRemediation, CaptchaRemediation, AllowRemediation) + } + ret.Name = wc.Name ret.Config = wc ret.DefaultRemediation = wc.DefaultRemediation @@ -553,27 +574,13 @@ func (w *AppsecRuntimeConfig) SetActionByName(name string, action string) error func (w *AppsecRuntimeConfig) SetAction(action string) error { //log.Infof("setting to %s", action) w.Logger.Debugf("setting action to %s", action) - switch action { - case "allow": - w.Response.Action = action - w.Response.HTTPResponseCode = w.Config.PassedHTTPCode - //@tko how should we handle this ? it seems bouncer only understand bans, but it might be misleading ? - case "deny", "ban", "block": - w.Response.Action = "ban" - case "log": - w.Response.Action = action - w.Response.HTTPResponseCode = w.Config.PassedHTTPCode - case "captcha": - w.Response.Action = action - default: - w.Response.Action = action - } + w.Response.Action = action return nil } func (w *AppsecRuntimeConfig) SetHTTPCode(code int) error { w.Logger.Debugf("setting http code to %d", code) - w.Response.HTTPResponseCode = code + w.Response.UserHTTPResponseCode = code return nil } @@ -582,24 +589,23 @@ type BodyResponse struct { HTTPStatus int `json:"http_status"` } -func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logger *log.Entry) BodyResponse { - resp := BodyResponse{} - //if there is no interrupt, we should allow with default code - if !response.InBandInterrupt { - resp.Action = w.Config.DefaultPassAction - resp.HTTPStatus = w.Config.PassedHTTPCode - return resp - } - resp.Action = response.Action - if resp.Action == "" { - resp.Action = w.Config.DefaultRemediation - } - logger.Debugf("action is %s", resp.Action) +func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logger *log.Entry) (int, BodyResponse) { + var bouncerStatusCode int - resp.HTTPStatus = response.HTTPResponseCode - if resp.HTTPStatus == 0 { - resp.HTTPStatus = w.Config.BlockedHTTPCode + resp := 
BodyResponse{Action: response.Action} + if response.Action == AllowRemediation { + resp.HTTPStatus = w.Config.UserPassedHTTPCode + bouncerStatusCode = w.Config.BouncerPassedHTTPCode + } else { //ban, captcha and anything else + resp.HTTPStatus = response.UserHTTPResponseCode + if resp.HTTPStatus == 0 { + resp.HTTPStatus = w.Config.UserBlockedHTTPCode + } + bouncerStatusCode = response.BouncerHTTPResponseCode + if bouncerStatusCode == 0 { + bouncerStatusCode = w.Config.BouncerBlockedHTTPCode + } } - logger.Debugf("http status is %d", resp.HTTPStatus) - return resp + + return bouncerStatusCode, resp } From 58a1d7164f99bacbbe27c0d32b14bf63e27b4274 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 Feb 2024 17:39:50 +0100 Subject: [PATCH 018/581] refact "cscli lapi" (#2825) --- cmd/crowdsec-cli/lapi.go | 271 ++++++++++++++++++++++++--------------- cmd/crowdsec-cli/main.go | 2 +- 2 files changed, 167 insertions(+), 106 deletions(-) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index ce59ac370cd..0bb4a31b72a 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -6,6 +6,7 @@ import ( "fmt" "net/url" "os" + "slices" "sort" "strings" @@ -13,7 +14,6 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" - "slices" "github.com/crowdsecurity/go-cs-lib/version" @@ -29,15 +29,27 @@ import ( const LAPIURLPrefix = "v1" -func runLapiStatus(cmd *cobra.Command, args []string) error { - password := strfmt.Password(csConfig.API.Client.Credentials.Password) - apiurl, err := url.Parse(csConfig.API.Client.Credentials.URL) - login := csConfig.API.Client.Credentials.Login +type cliLapi struct { + cfg configGetter +} + +func NewCLILapi(cfg configGetter) *cliLapi { + return &cliLapi{ + cfg: cfg, + } +} + +func (cli *cliLapi) status() error { + cfg := cli.cfg() + password := strfmt.Password(cfg.API.Client.Credentials.Password) + login := cfg.API.Client.Credentials.Login + + apiurl, 
err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } - hub, err := require.Hub(csConfig, nil, nil) + hub, err := require.Hub(cfg, nil, nil) if err != nil { return err } @@ -54,13 +66,14 @@ func runLapiStatus(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("init default client: %w", err) } + t := models.WatcherAuthRequest{ MachineID: &login, Password: &password, Scenarios: scenarios, } - log.Infof("Loaded credentials from %s", csConfig.API.Client.CredentialsFilePath) + log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) log.Infof("Trying to authenticate with username %s on %s", login, apiurl) _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) @@ -69,26 +82,15 @@ func runLapiStatus(cmd *cobra.Command, args []string) error { } log.Infof("You can successfully interact with Local API (LAPI)") + return nil } -func runLapiRegister(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() +func (cli *cliLapi) register(apiURL string, outputFile string, machine string) error { + var err error - apiURL, err := flags.GetString("url") - if err != nil { - return err - } - - outputFile, err := flags.GetString("file") - if err != nil { - return err - } - - lapiUser, err := flags.GetString("machine") - if err != nil { - return err - } + lapiUser := machine + cfg := cli.cfg() if lapiUser == "" { lapiUser, err = generateID("") @@ -96,12 +98,15 @@ func runLapiRegister(cmd *cobra.Command, args []string) error { return fmt.Errorf("unable to generate machine id: %w", err) } } + password := strfmt.Password(generatePassword(passwordLength)) + if apiURL == "" { - if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil || csConfig.API.Client.Credentials.URL == "" { + if cfg.API.Client == nil || cfg.API.Client.Credentials == nil || cfg.API.Client.Credentials.URL == "" { return fmt.Errorf("no Local API URL. 
Please provide it in your configuration or with the -u parameter") } - apiURL = csConfig.API.Client.Credentials.URL + + apiURL = cfg.API.Client.Credentials.URL } /*URL needs to end with /, but user doesn't care*/ if !strings.HasSuffix(apiURL, "/") { @@ -111,10 +116,12 @@ func runLapiRegister(cmd *cobra.Command, args []string) error { if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") { apiURL = "http://" + apiURL } + apiurl, err := url.Parse(apiURL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } + _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: lapiUser, Password: password, @@ -130,138 +137,142 @@ func runLapiRegister(cmd *cobra.Command, args []string) error { log.Printf("Successfully registered to Local API (LAPI)") var dumpFile string + if outputFile != "" { dumpFile = outputFile - } else if csConfig.API.Client.CredentialsFilePath != "" { - dumpFile = csConfig.API.Client.CredentialsFilePath + } else if cfg.API.Client.CredentialsFilePath != "" { + dumpFile = cfg.API.Client.CredentialsFilePath } else { dumpFile = "" } + apiCfg := csconfig.ApiCredentialsCfg{ Login: lapiUser, Password: password.String(), URL: apiURL, } + apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { return fmt.Errorf("unable to marshal api credentials: %w", err) } + if dumpFile != "" { err = os.WriteFile(dumpFile, apiConfigDump, 0o600) if err != nil { return fmt.Errorf("write api credentials to '%s' failed: %w", dumpFile, err) } + log.Printf("Local API credentials written to '%s'", dumpFile) } else { fmt.Printf("%s\n", string(apiConfigDump)) } + log.Warning(ReloadMessage()) return nil } -func NewLapiStatusCmd() *cobra.Command { +func (cli *cliLapi) newStatusCmd() *cobra.Command { cmdLapiStatus := &cobra.Command{ Use: "status", Short: "Check authentication to Local API (LAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: runLapiStatus, + RunE: func(cmd *cobra.Command, args []string) error { + return 
cli.status() + }, } return cmdLapiStatus } -func NewLapiRegisterCmd() *cobra.Command { - cmdLapiRegister := &cobra.Command{ +func (cli *cliLapi) newRegisterCmd() *cobra.Command { + var ( + apiURL string + outputFile string + machine string + ) + + cmd := &cobra.Command{ Use: "register", Short: "Register a machine to Local API (LAPI)", Long: `Register your machine to the Local API (LAPI). Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: runLapiRegister, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.register(apiURL, outputFile, machine) + }, } - flags := cmdLapiRegister.Flags() - flags.StringP("url", "u", "", "URL of the API (ie. http://127.0.0.1)") - flags.StringP("file", "f", "", "output file destination") - flags.String("machine", "", "Name of the machine to register with") + flags := cmd.Flags() + flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. 
http://127.0.0.1)") + flags.StringVarP(&outputFile, "file", "f", "", "output file destination") + flags.StringVar(&machine, "machine", "", "Name of the machine to register with") - return cmdLapiRegister + return cmd } -func NewLapiCmd() *cobra.Command { - cmdLapi := &cobra.Command{ +func (cli *cliLapi) NewCommand() *cobra.Command { + cmd := &cobra.Command{ Use: "lapi [action]", Short: "Manage interaction with Local API (LAPI)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := csConfig.LoadAPIClient(); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if err := cli.cfg().LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } return nil }, } - cmdLapi.AddCommand(NewLapiRegisterCmd()) - cmdLapi.AddCommand(NewLapiStatusCmd()) - cmdLapi.AddCommand(NewLapiContextCmd()) + cmd.AddCommand(cli.newRegisterCmd()) + cmd.AddCommand(cli.newStatusCmd()) + cmd.AddCommand(cli.newContextCmd()) - return cmdLapi + return cmd } -func AddContext(key string, values []string) error { +func (cli *cliLapi) addContext(key string, values []string) error { + cfg := cli.cfg() + if err := alertcontext.ValidateContextExpr(key, values); err != nil { - return fmt.Errorf("invalid context configuration :%s", err) + return fmt.Errorf("invalid context configuration: %w", err) } - if _, ok := csConfig.Crowdsec.ContextToSend[key]; !ok { - csConfig.Crowdsec.ContextToSend[key] = make([]string, 0) + + if _, ok := cfg.Crowdsec.ContextToSend[key]; !ok { + cfg.Crowdsec.ContextToSend[key] = make([]string, 0) log.Infof("key '%s' added", key) } - data := csConfig.Crowdsec.ContextToSend[key] + + data := cfg.Crowdsec.ContextToSend[key] + for _, val := range values { if !slices.Contains(data, val) { log.Infof("value '%s' added to key '%s'", val, key) data = append(data, val) } - csConfig.Crowdsec.ContextToSend[key] = data + + cfg.Crowdsec.ContextToSend[key] = data } - if err := 
csConfig.Crowdsec.DumpContextConfigFile(); err != nil { + + if err := cfg.Crowdsec.DumpContextConfigFile(); err != nil { return err } return nil } -func NewLapiContextCmd() *cobra.Command { - cmdContext := &cobra.Command{ - Use: "context [command]", - Short: "Manage context to send with alerts", - DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := csConfig.LoadCrowdsec(); err != nil { - fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", csConfig.Crowdsec.ConsoleContextPath) - if err.Error() != fileNotFoundMessage { - return fmt.Errorf("unable to load CrowdSec agent configuration: %w", err) - } - } - if csConfig.DisableAgent { - return errors.New("agent is disabled and lapi context can only be used on the agent") - } - - return nil - }, - Run: func(cmd *cobra.Command, args []string) { - printHelp(cmd) - }, - } +func (cli *cliLapi) newContextAddCmd() *cobra.Command { + var ( + keyToAdd string + valuesToAdd []string + ) - var keyToAdd string - var valuesToAdd []string - cmdContextAdd := &cobra.Command{ + cmd := &cobra.Command{ Use: "add", Short: "Add context to send with alerts. 
You must specify the output key with the expr value you want", Example: `cscli lapi context add --key source_ip --value evt.Meta.source_ip @@ -269,18 +280,18 @@ cscli lapi context add --key file_source --value evt.Line.Src cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user `, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - hub, err := require.Hub(csConfig, nil, nil) + RunE: func(_ *cobra.Command, _ []string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { return err } - if err = alertcontext.LoadConsoleContext(csConfig, hub); err != nil { + if err = alertcontext.LoadConsoleContext(cli.cfg(), hub); err != nil { return fmt.Errorf("while loading context: %w", err) } if keyToAdd != "" { - if err := AddContext(keyToAdd, valuesToAdd); err != nil { + if err := cli.addContext(keyToAdd, valuesToAdd); err != nil { return err } return nil @@ -290,7 +301,7 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user keySlice := strings.Split(v, ".") key := keySlice[len(keySlice)-1] value := []string{v} - if err := AddContext(key, value); err != nil { + if err := cli.addContext(key, value); err != nil { return err } } @@ -298,31 +309,37 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user return nil }, } - cmdContextAdd.Flags().StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send") - cmdContextAdd.Flags().StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key") - cmdContextAdd.MarkFlagRequired("value") - cmdContext.AddCommand(cmdContextAdd) - cmdContextStatus := &cobra.Command{ + flags := cmd.Flags() + flags.StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send") + flags.StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key") + cmd.MarkFlagRequired("value") + + return cmd +} + +func (cli *cliLapi) newContextStatusCmd() 
*cobra.Command { + cmd := &cobra.Command{ Use: "status", Short: "List context to send with alerts", DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - hub, err := require.Hub(csConfig, nil, nil) + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + hub, err := require.Hub(cfg, nil, nil) if err != nil { return err } - if err = alertcontext.LoadConsoleContext(csConfig, hub); err != nil { + if err = alertcontext.LoadConsoleContext(cfg, hub); err != nil { return fmt.Errorf("while loading context: %w", err) } - if len(csConfig.Crowdsec.ContextToSend) == 0 { + if len(cfg.Crowdsec.ContextToSend) == 0 { fmt.Println("No context found on this agent. You can use 'cscli lapi context add' to add context to your alerts.") return nil } - dump, err := yaml.Marshal(csConfig.Crowdsec.ContextToSend) + dump, err := yaml.Marshal(cfg.Crowdsec.ContextToSend) if err != nil { return fmt.Errorf("unable to show context status: %w", err) } @@ -332,10 +349,14 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user return nil }, } - cmdContext.AddCommand(cmdContextStatus) + return cmd +} + +func (cli *cliLapi) newContextDetectCmd() *cobra.Command { var detectAll bool - cmdContextDetect := &cobra.Command{ + + cmd := &cobra.Command{ Use: "detect", Short: "Detect available fields from the installed parsers", Example: `cscli lapi context detect --all @@ -343,6 +364,7 @@ cscli lapi context detect crowdsecurity/sshd-logs `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { + cfg := cli.cfg() if !detectAll && len(args) == 0 { log.Infof("Please provide parsers to detect or --all flag.") printHelp(cmd) @@ -355,13 +377,13 @@ cscli lapi context detect crowdsecurity/sshd-logs return fmt.Errorf("failed to init expr helpers: %w", err) } - hub, err := require.Hub(csConfig, nil, nil) + hub, err := require.Hub(cfg, nil, nil) if err != nil { return err } csParsers := parser.NewParsers(hub) - if csParsers, err 
= parser.LoadParsers(csConfig, csParsers); err != nil { + if csParsers, err = parser.LoadParsers(cfg, csParsers); err != nil { return fmt.Errorf("unable to load parsers: %w", err) } @@ -418,47 +440,85 @@ cscli lapi context detect crowdsecurity/sshd-logs return nil }, } - cmdContextDetect.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser") - cmdContext.AddCommand(cmdContextDetect) + cmd.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser") + + return cmd +} - cmdContextDelete := &cobra.Command{ +func (cli *cliLapi) newContextDeleteCmd() *cobra.Command { + cmd := &cobra.Command{ Use: "delete", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - filePath := csConfig.Crowdsec.ConsoleContextPath + filePath := cli.cfg().Crowdsec.ConsoleContextPath if filePath == "" { filePath = "the context file" } - fmt.Printf("Command \"delete\" is deprecated, please manually edit %s.", filePath) + fmt.Printf("Command 'delete' is deprecated, please manually edit %s.", filePath) + + return nil + }, + } + + return cmd +} + +func (cli *cliLapi) newContextCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "context [command]", + Short: "Manage context to send with alerts", + DisableAutoGenTag: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := cfg.LoadCrowdsec(); err != nil { + fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", cfg.Crowdsec.ConsoleContextPath) + if err.Error() != fileNotFoundMessage { + return fmt.Errorf("unable to load CrowdSec agent configuration: %w", err) + } + } + if cfg.DisableAgent { + return errors.New("agent is disabled and lapi context can only be used on the agent") + } + return nil }, + Run: func(cmd *cobra.Command, _ []string) { + printHelp(cmd) + }, } - cmdContext.AddCommand(cmdContextDelete) - return cmdContext + cmd.AddCommand(cli.newContextAddCmd()) + 
cmd.AddCommand(cli.newContextStatusCmd()) + cmd.AddCommand(cli.newContextDetectCmd()) + cmd.AddCommand(cli.newContextDeleteCmd()) + + return cmd } -func detectStaticField(GrokStatics []parser.ExtraField) []string { +func detectStaticField(grokStatics []parser.ExtraField) []string { ret := make([]string, 0) - for _, static := range GrokStatics { + for _, static := range grokStatics { if static.Parsed != "" { fieldName := fmt.Sprintf("evt.Parsed.%s", static.Parsed) if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } } + if static.Meta != "" { fieldName := fmt.Sprintf("evt.Meta.%s", static.Meta) if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } } + if static.TargetByName != "" { fieldName := static.TargetByName if !strings.HasPrefix(fieldName, "evt.") { fieldName = "evt." + fieldName } + if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -526,6 +586,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { } } } + if subnode.Grok.RegexpName != "" { grokCompiled, err := parserCTX.Grok.Get(subnode.Grok.RegexpName) if err == nil { diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 62b85e63047..b0855fb047e 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -241,7 +241,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICapi().NewCommand()) - cmd.AddCommand(NewLapiCmd()) + cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(NewConsoleCmd()) cmd.AddCommand(NewCLIExplain().NewCommand()) From 2853410576456471b3f0efef223c4f7bb04600ab Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 Feb 2024 17:51:29 +0100 Subject: [PATCH 019/581] refact "cscli alerts" (#2827) --- cmd/crowdsec-cli/alerts.go | 181 
++++++++++++++++++++++--------------- cmd/crowdsec-cli/main.go | 2 +- 2 files changed, 109 insertions(+), 74 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 4ab71be5bbf..ce304bcc777 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -29,39 +29,46 @@ import ( func DecisionsFromAlert(alert *models.Alert) string { ret := "" - var decMap = make(map[string]int) + decMap := make(map[string]int) + for _, decision := range alert.Decisions { k := *decision.Type if *decision.Simulated { k = fmt.Sprintf("(simul)%s", k) } + v := decMap[k] decMap[k] = v + 1 } + for k, v := range decMap { if len(ret) > 0 { ret += " " } + ret += fmt.Sprintf("%s:%d", k, v) } + return ret } -func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { - switch csConfig.Cscli.Output { +func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { + switch cli.cfg().Cscli.Output { case "raw": csvwriter := csv.NewWriter(os.Stdout) header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"} + if printMachine { header = append(header, "machine") } - err := csvwriter.Write(header) - if err != nil { + + if err := csvwriter.Write(header); err != nil { return err } + for _, alertItem := range *alerts { row := []string{ - fmt.Sprintf("%d", alertItem.ID), + strconv.FormatInt(alertItem.ID, 10), *alertItem.Source.Scope, *alertItem.Source.Value, *alertItem.Scenario, @@ -73,11 +80,12 @@ func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { if printMachine { row = append(row, alertItem.MachineID) } - err := csvwriter.Write(row) - if err != nil { + + if err := csvwriter.Write(row); err != nil { return err } } + csvwriter.Flush() case "json": if *alerts == nil { @@ -86,6 +94,7 @@ func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { fmt.Println("[]") return nil } + x, _ := json.MarshalIndent(alerts, "", " ") 
fmt.Print(string(x)) case "human": @@ -93,8 +102,10 @@ func alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { fmt.Println("No active alerts") return nil } + alertsTable(color.Output, alerts, printMachine) } + return nil } @@ -116,13 +127,13 @@ var alertTemplate = ` ` -func displayOneAlert(alert *models.Alert, withDetail bool) error { +func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) error { tmpl, err := template.New("alert").Parse(alertTemplate) if err != nil { return err } - err = tmpl.Execute(os.Stdout, alert) - if err != nil { + + if err = tmpl.Execute(os.Stdout, alert); err != nil { return err } @@ -133,14 +144,17 @@ func displayOneAlert(alert *models.Alert, withDetail bool) error { sort.Slice(alert.Meta, func(i, j int) bool { return alert.Meta[i].Key < alert.Meta[j].Key }) + table := newTable(color.Output) table.SetRowLines(false) table.SetHeaders("Key", "Value") + for _, meta := range alert.Meta { var valSlice []string if err := json.Unmarshal([]byte(meta.Value), &valSlice); err != nil { - return fmt.Errorf("unknown context value type '%s' : %s", meta.Value, err) + return fmt.Errorf("unknown context value type '%s': %w", meta.Value, err) } + for _, value := range valSlice { table.AddRow( meta.Key, @@ -148,11 +162,13 @@ func displayOneAlert(alert *models.Alert, withDetail bool) error { ) } } + table.Render() } if withDetail { fmt.Printf("\n - Events :\n") + for _, event := range alert.Events { alertEventTable(color.Output, event) } @@ -163,10 +179,13 @@ func displayOneAlert(alert *models.Alert, withDetail bool) error { type cliAlerts struct{ client *apiclient.ApiClient + cfg configGetter } -func NewCLIAlerts() *cliAlerts { - return &cliAlerts{} +func NewCLIAlerts(getconfig configGetter) *cliAlerts { + return &cliAlerts{ + cfg: getconfig, + } } func (cli *cliAlerts) NewCommand() *cobra.Command { @@ -176,18 +195,18 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { Args: cobra.MinimumNArgs(1), 
DisableAutoGenTag: true, Aliases: []string{"alert"}, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - var err error - if err := csConfig.LoadAPIClient(); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL) + apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { return fmt.Errorf("parsing api url %s: %w", apiURL, err) } cli.client, err = apiclient.NewClient(&apiclient.Config{ - MachineID: csConfig.API.Client.Credentials.Login, - Password: strfmt.Password(csConfig.API.Client.Credentials.Password), + MachineID: cfg.API.Client.Credentials.Login, + Password: strfmt.Password(cfg.API.Client.Credentials.Password), UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), URL: apiURL, VersionPrefix: "v1", @@ -196,6 +215,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("new api client: %w", err) } + return nil }, } @@ -221,8 +241,10 @@ func (cli *cliAlerts) NewListCmd() *cobra.Command { IncludeCAPI: new(bool), OriginEquals: new(string), } + limit := new(int) contained := new(bool) + var printMachine bool cmd := &cobra.Command{ @@ -234,9 +256,7 @@ cscli alerts list --range 1.2.3.0/24 cscli alerts list -s crowdsecurity/ssh-bf cscli alerts list --type ban`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - var err error - + RunE: func(cmd *cobra.Command, _ []string) error { if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { printHelp(cmd) @@ -304,40 +324,43 @@ cscli alerts list --type ban`, alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) if err != nil { - return fmt.Errorf("unable to list alerts: %v", err) + return 
fmt.Errorf("unable to list alerts: %w", err) } - err = alertsToTable(alerts, printMachine) - if err != nil { - return fmt.Errorf("unable to list alerts: %v", err) + if err = cli.alertsToTable(alerts, printMachine); err != nil { + return fmt.Errorf("unable to list alerts: %w", err) } return nil }, } - cmd.Flags().SortFlags = false - cmd.Flags().BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") - cmd.Flags().StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") - cmd.Flags().StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") - cmd.Flags().StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") - cmd.Flags().StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value )") - cmd.Flags().StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)") - cmd.Flags().StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. 
ip,range)") - cmd.Flags().StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - cmd.Flags().StringVar(alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) - cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") - cmd.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts") - cmd.Flags().IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)") + + flags := cmd.Flags() + flags.SortFlags = false + flags.BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") + flags.StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") + flags.StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") + flags.StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") + flags.StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") + flags.StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value )") + flags.StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)") + flags.StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. 
ip,range)") + flags.StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVar(alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + flags.BoolVar(contained, "contained", false, "query decisions contained by range") + flags.BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts") + flags.IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)") return cmd } func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { - var ActiveDecision *bool - var AlertDeleteAll bool - var delAlertByID string - contained := new(bool) + var ( + ActiveDecision *bool + AlertDeleteAll bool + delAlertByID string + ) + var alertDeleteFilter = apiclient.AlertsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), @@ -345,6 +368,9 @@ func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { IPEquals: new(string), RangeEquals: new(string), } + + contained := new(bool) + cmd := &cobra.Command{ Use: "delete [filters] [--all]", Short: `Delete alerts @@ -355,7 +381,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, DisableAutoGenTag: true, Aliases: []string{"remove"}, Args: cobra.ExactArgs(0), - PreRunE: func(cmd *cobra.Command, args []string) error { + PreRunE: func(cmd *cobra.Command, _ []string) error { if AlertDeleteAll { return nil } @@ -368,11 +394,11 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var err error if !AlertDeleteAll { - if err := manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, + if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { printHelp(cmd) return err @@ -410,12 +436,12 @@ cscli 
alerts delete -s crowdsecurity/ssh-bf"`, if delAlertByID == "" { alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) if err != nil { - return fmt.Errorf("unable to delete alerts : %v", err) + return fmt.Errorf("unable to delete alerts: %w", err) } } else { alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) if err != nil { - return fmt.Errorf("unable to delete alert: %v", err) + return fmt.Errorf("unable to delete alert: %w", err) } } log.Infof("%s alert(s) deleted", alerts.NbDeleted) @@ -423,26 +449,31 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, } - cmd.Flags().SortFlags = false - cmd.Flags().StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") - cmd.Flags().StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - cmd.Flags().StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") - cmd.Flags().StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") - cmd.Flags().StringVar(&delAlertByID, "id", "", "alert ID") - cmd.Flags().BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts") - cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + + flags := cmd.Flags() + flags.SortFlags = false + flags.StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") + flags.StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. 
crowdsecurity/ssh-bf)") + flags.StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVar(&delAlertByID, "id", "", "alert ID") + flags.BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts") + flags.BoolVar(contained, "contained", false, "query decisions contained by range") + return cmd } func (cli *cliAlerts) NewInspectCmd() *cobra.Command { var details bool + cmd := &cobra.Command{ Use: `inspect "alert_id"`, Short: `Show info about an alert`, Example: `cscli alerts inspect 123`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { + cfg := cli.cfg() if len(args) == 0 { printHelp(cmd) return fmt.Errorf("missing alert_id") @@ -454,31 +485,32 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { } alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) if err != nil { - return fmt.Errorf("can't find alert with id %s: %s", alertID, err) + return fmt.Errorf("can't find alert with id %s: %w", alertID, err) } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": - if err := displayOneAlert(alert, details); err != nil { + if err := cli.displayOneAlert(alert, details); err != nil { continue } case "json": data, err := json.MarshalIndent(alert, "", " ") if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %s", alertID, err) + return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) } fmt.Printf("%s\n", string(data)) case "raw": data, err := yaml.Marshal(alert) if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %s", alertID, err) + return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) } - fmt.Printf("%s\n", string(data)) + fmt.Println(string(data)) } } return nil }, } + cmd.Flags().SortFlags = false cmd.Flags().BoolVarP(&details, 
"details", "d", false, "show alerts with events") @@ -486,27 +518,30 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { } func (cli *cliAlerts) NewFlushCmd() *cobra.Command { - var maxItems int - var maxAge string + var ( + maxItems int + maxAge string + ) + cmd := &cobra.Command{ Use: `flush`, Short: `Flush alerts /!\ This command can be used only on the same machine than the local API`, Example: `cscli alerts flush --max-items 1000 --max-age 7d`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - var err error - if err := require.LAPI(csConfig); err != nil { + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - db, err := database.NewClient(csConfig.DbConfig) + db, err := database.NewClient(cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to create new database client: %s", err) + return fmt.Errorf("unable to create new database client: %w", err) } log.Info("Flushing alerts. !! 
This may take a long time !!") err = db.FlushAlerts(maxAge, maxItems) if err != nil { - return fmt.Errorf("unable to flush alerts: %s", err) + return fmt.Errorf("unable to flush alerts: %w", err) } log.Info("Alerts flushed") diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index b0855fb047e..55fcacee39c 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -236,7 +236,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAlerts().NewCommand()) + cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) From 5c83695177cd4044a8cc953978103377b63607f0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Feb 2024 11:23:17 +0100 Subject: [PATCH 020/581] refact "cscli explain" (#2835) --- cmd/crowdsec-cli/explain.go | 184 +++++++++++++++--------------------- cmd/crowdsec-cli/main.go | 2 +- go.mod | 4 +- go.sum | 14 +-- 4 files changed, 82 insertions(+), 122 deletions(-) diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/explain.go index d21c1704930..ce323fd0ce1 100644 --- a/cmd/crowdsec-cli/explain.go +++ b/cmd/crowdsec-cli/explain.go @@ -16,33 +16,53 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func GetLineCountForFile(filepath string) (int, error) { +func getLineCountForFile(filepath string) (int, error) { f, err := os.Open(filepath) if err != nil { return 0, err } defer f.Close() + lc := 0 fs := bufio.NewReader(f) + for { input, err := fs.ReadBytes('\n') if len(input) > 1 { lc++ } + if err != nil && err == io.EOF { break } } + return lc, nil } -type cliExplain struct{} +type cliExplain struct { + cfg 
configGetter + flags struct { + logFile string + dsn string + logLine string + logType string + details bool + skipOk bool + onlySuccessfulParsers bool + noClean bool + crowdsec string + labels string + } +} -func NewCLIExplain() *cliExplain { - return &cliExplain{} +func NewCLIExplain(cfg configGetter) *cliExplain { + return &cliExplain{ + cfg: cfg, + } } -func (cli cliExplain) NewCommand() *cobra.Command { +func (cli *cliExplain) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "explain", Short: "Explain log pipeline", @@ -57,118 +77,50 @@ tail -n 5 myfile.log | cscli explain --type nginx -f - `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: cli.run, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - logFile, err := flags.GetString("file") - if err != nil { - return err - } - - dsn, err := flags.GetString("dsn") - if err != nil { - return err - } - - logLine, err := flags.GetString("log") - if err != nil { - return err - } - - logType, err := flags.GetString("type") - if err != nil { - return err - } - - if logLine == "" && logFile == "" && dsn == "" { - printHelp(cmd) - fmt.Println() - return fmt.Errorf("please provide --log, --file or --dsn flag") - } - if logType == "" { - printHelp(cmd) - fmt.Println() - return fmt.Errorf("please provide --type flag") - } + RunE: func(_ *cobra.Command, _ []string) error { + return cli.run() + }, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { fileInfo, _ := os.Stdin.Stat() - if logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) { + if cli.flags.logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) { return fmt.Errorf("the option -f - is intended to work with pipes") } + return nil }, } flags := cmd.Flags() - flags.StringP("file", "f", "", "Log file to test") - flags.StringP("dsn", "d", "", "DSN to test") - flags.StringP("log", "l", "", "Log line to test") - flags.StringP("type", "t", "", 
"Type of the acquisition to test") - flags.String("labels", "", "Additional labels to add to the acquisition format (key:value,key2:value2)") - flags.BoolP("verbose", "v", false, "Display individual changes") - flags.Bool("failures", false, "Only show failed lines") - flags.Bool("only-successful-parsers", false, "Only show successful parsers") - flags.String("crowdsec", "crowdsec", "Path to crowdsec") - flags.Bool("no-clean", false, "Don't clean runtime environment after tests") + flags.StringVarP(&cli.flags.logFile, "file", "f", "", "Log file to test") + flags.StringVarP(&cli.flags.dsn, "dsn", "d", "", "DSN to test") + flags.StringVarP(&cli.flags.logLine, "log", "l", "", "Log line to test") + flags.StringVarP(&cli.flags.logType, "type", "t", "", "Type of the acquisition to test") + flags.StringVar(&cli.flags.labels, "labels", "", "Additional labels to add to the acquisition format (key:value,key2:value2)") + flags.BoolVarP(&cli.flags.details, "verbose", "v", false, "Display individual changes") + flags.BoolVar(&cli.flags.skipOk, "failures", false, "Only show failed lines") + flags.BoolVar(&cli.flags.onlySuccessfulParsers, "only-successful-parsers", false, "Only show successful parsers") + flags.StringVar(&cli.flags.crowdsec, "crowdsec", "crowdsec", "Path to crowdsec") + flags.BoolVar(&cli.flags.noClean, "no-clean", false, "Don't clean runtime environment after tests") + + cmd.MarkFlagRequired("type") + cmd.MarkFlagsOneRequired("log", "file", "dsn") return cmd } -func (cli cliExplain) run(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - logFile, err := flags.GetString("file") - if err != nil { - return err - } +func (cli *cliExplain) run() error { + logFile := cli.flags.logFile + logLine := cli.flags.logLine + logType := cli.flags.logType + dsn := cli.flags.dsn + labels := cli.flags.labels + crowdsec := cli.flags.crowdsec - dsn, err := flags.GetString("dsn") - if err != nil { - return err - } - - logLine, err := flags.GetString("log") - if err 
!= nil { - return err - } - - logType, err := flags.GetString("type") - if err != nil { - return err - } - - opts := dumps.DumpOpts{} - - opts.Details, err = flags.GetBool("verbose") - if err != nil { - return err - } - - no_clean, err := flags.GetBool("no-clean") - if err != nil { - return err - } - - opts.SkipOk, err = flags.GetBool("failures") - if err != nil { - return err - } - - opts.ShowNotOkParsers, err = flags.GetBool("only-successful-parsers") - opts.ShowNotOkParsers = !opts.ShowNotOkParsers - if err != nil { - return err - } - - crowdsec, err := flags.GetString("crowdsec") - if err != nil { - return err - } - - labels, err := flags.GetString("labels") - if err != nil { - return err + opts := dumps.DumpOpts{ + Details: cli.flags.details, + SkipOk: cli.flags.skipOk, + ShowNotOkParsers: !cli.flags.onlySuccessfulParsers, } var f *os.File @@ -176,21 +128,25 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error { // using empty string fallback to /tmp dir, err := os.MkdirTemp("", "cscli_explain") if err != nil { - return fmt.Errorf("couldn't create a temporary directory to store cscli explain result: %s", err) + return fmt.Errorf("couldn't create a temporary directory to store cscli explain result: %w", err) } + defer func() { - if no_clean { + if cli.flags.noClean { return } + if _, err := os.Stat(dir); !os.IsNotExist(err) { if err := os.RemoveAll(dir); err != nil { log.Errorf("unable to delete temporary directory '%s': %s", dir, err) } } }() + // we create a temporary log file if a log line/stdin has been provided if logLine != "" || logFile == "-" { tmpFile := filepath.Join(dir, "cscli_test_tmp.log") + f, err = os.Create(tmpFile) if err != nil { return err @@ -220,6 +176,7 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error { log.Warnf("Failed to write %d lines to %s", errCount, tmpFile) } } + f.Close() // this is the file that was going to be read by crowdsec anyway logFile = tmpFile @@ -230,15 +187,20 @@ func (cli 
cliExplain) run(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("unable to get absolute path of '%s', exiting", logFile) } + dsn = fmt.Sprintf("file://%s", absolutePath) - lineCount, err := GetLineCountForFile(absolutePath) + + lineCount, err := getLineCountForFile(absolutePath) if err != nil { return err } + log.Debugf("file %s has %d lines", absolutePath, lineCount) + if lineCount == 0 { return fmt.Errorf("the log file is empty: %s", absolutePath) } + if lineCount > 100 { log.Warnf("%s contains %d lines. This may take a lot of resources.", absolutePath, lineCount) } @@ -249,15 +211,19 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error { } cmdArgs := []string{"-c", ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} + if labels != "" { log.Debugf("adding labels %s", labels) cmdArgs = append(cmdArgs, "-label", labels) } + crowdsecCmd := exec.Command(crowdsec, cmdArgs...) + output, err := crowdsecCmd.CombinedOutput() if err != nil { fmt.Println(string(output)) - return fmt.Errorf("fail to run crowdsec for test: %v", err) + + return fmt.Errorf("fail to run crowdsec for test: %w", err) } parserDumpFile := filepath.Join(dir, hubtest.ParserResultFileName) @@ -265,12 +231,12 @@ func (cli cliExplain) run(cmd *cobra.Command, args []string) error { parserDump, err := dumps.LoadParserDump(parserDumpFile) if err != nil { - return fmt.Errorf("unable to load parser dump result: %s", err) + return fmt.Errorf("unable to load parser dump result: %w", err) } bucketStateDump, err := dumps.LoadBucketPourDump(bucketStateDumpFile) if err != nil { - return fmt.Errorf("unable to load bucket dump result: %s", err) + return fmt.Errorf("unable to load bucket dump result: %w", err) } dumps.DumpTree(*parserDump, *bucketStateDump, opts) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 55fcacee39c..43998623566 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -244,7 +244,7 @@ It 
is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(NewConsoleCmd()) - cmd.AddCommand(NewCLIExplain().NewCommand()) + cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHubTest().NewCommand()) cmd.AddCommand(NewCLINotifications().NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) diff --git a/go.mod b/go.mod index d61c191c14f..e1da18387a5 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,7 @@ require ( github.com/shirou/gopsutil/v3 v3.23.5 github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.12.2 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.3.0 @@ -108,7 +108,7 @@ require ( github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect github.com/corazawaf/libinjection-go v0.1.2 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect diff --git a/go.sum b/go.sum index f5f61594ecd..8fa2021316b 100644 --- a/go.sum +++ b/go.sum @@ -91,21 +91,17 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
+github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crowdsecurity/coraza/v3 v3.0.0-20231213144607-41d5358da94f h1:FkOB9aDw0xzDd14pTarGRLsUNAymONq3dc7zhvsXElg= -github.com/crowdsecurity/coraza/v3 v3.0.0-20231213144607-41d5358da94f/go.mod h1:TrU7Li+z2RHNrPy0TKJ6R65V6Yzpan2sTIRryJJyJso= github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h8clMcRL2u5ooZ3tmwnmJftmhb9Ws1MKmavvI= github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.5 h1:eVLW+BRj3ZYn0xt5/xmgzfbbB8EBo32gM4+WpQQk2e8= -github.com/crowdsecurity/go-cs-lib v0.0.5/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/go-cs-lib v0.0.6 h1:Ef6MylXe0GaJE9vrfvxEdbHb31+JUP1os+murPz7Pos= github.com/crowdsecurity/go-cs-lib v0.0.6/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= @@ -640,8 +636,8 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra 
v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -809,8 +805,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From bdecf38616723dddf30a7c776694cd020f8a6944 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 12 Feb 2024 11:33:44 +0100 Subject: [PATCH 021/581] update codeql action to v3 (#2822) --- .github/workflows/codeql-analysis.yml | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 0904769dd60..4b262f13d09 100644 --- 
a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -48,10 +48,15 @@ jobs: with: # required to pick up tags for BUILD_VERSION fetch-depth: 0 + - name: "Set up Go" + uses: actions/setup-go@v5 + with: + go-version: "1.21.6" + cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -71,14 +76,8 @@ jobs: # and modify them (or add more) to build your code if your project # uses a compiled language - - name: "Set up Go" - uses: actions/setup-go@v5 - with: - go-version: "1.21.6" - cache-dependency-path: "**/go.sum" - - run: | make clean build BUILD_RE2_WASM=1 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 From eada3739e6849cf6da085dfa4862dcbfad4deb10 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Feb 2024 11:40:59 +0100 Subject: [PATCH 022/581] refact "cscli notifications" (#2833) --- cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/notifications.go | 155 ++++++++++++++++++------------ 2 files changed, 95 insertions(+), 62 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 43998623566..63b7211b39b 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -246,7 +246,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewConsoleCmd()) cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHubTest().NewCommand()) - cmd.AddCommand(NewCLINotifications().NewCommand()) + cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection().NewCommand()) diff --git 
a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index da436420d12..f12333a3942 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -23,14 +23,13 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csprofiles" - "github.com/crowdsecurity/crowdsec/pkg/types" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" ) type NotificationsCfg struct { @@ -39,13 +38,17 @@ type NotificationsCfg struct { ids []uint } -type cliNotifications struct{} +type cliNotifications struct { + cfg configGetter +} -func NewCLINotifications() *cliNotifications { - return &cliNotifications{} +func NewCLINotifications(cfg configGetter) *cliNotifications { + return &cliNotifications{ + cfg: cfg, + } } -func (cli cliNotifications) NewCommand() *cobra.Command { +func (cli *cliNotifications) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "notifications [action]", Short: "Helper for notification plugin configuration", @@ -53,14 +56,15 @@ func (cli cliNotifications) NewCommand() *cobra.Command { Args: cobra.MinimumNArgs(1), Aliases: []string{"notifications", "notification"}, DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := require.LAPI(csConfig); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - if err := csConfig.LoadAPIClient(); err != nil { + if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - if err := 
require.Notifications(csConfig); err != nil { + if err := require.Notifications(cfg); err != nil { return err } @@ -76,67 +80,79 @@ func (cli cliNotifications) NewCommand() *cobra.Command { return cmd } -func getPluginConfigs() (map[string]csplugin.PluginConfig, error) { +func (cli *cliNotifications) getPluginConfigs() (map[string]csplugin.PluginConfig, error) { + cfg := cli.cfg() pcfgs := map[string]csplugin.PluginConfig{} wf := func(path string, info fs.FileInfo, err error) error { if info == nil { return fmt.Errorf("error while traversing directory %s: %w", path, err) } - name := filepath.Join(csConfig.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice + + name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice if (strings.HasSuffix(name, "yaml") || strings.HasSuffix(name, "yml")) && !(info.IsDir()) { ts, err := csplugin.ParsePluginConfigFile(name) if err != nil { return fmt.Errorf("loading notifification plugin configuration with %s: %w", name, err) } + for _, t := range ts { csplugin.SetRequiredFields(&t) pcfgs[t.Name] = t } } + return nil } - if err := filepath.Walk(csConfig.ConfigPaths.NotificationDir, wf); err != nil { + if err := filepath.Walk(cfg.ConfigPaths.NotificationDir, wf); err != nil { return nil, fmt.Errorf("while loading notifification plugin configuration: %w", err) } + return pcfgs, nil } -func getProfilesConfigs() (map[string]NotificationsCfg, error) { +func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, error) { + cfg := cli.cfg() // A bit of a tricky stuf now: reconcile profiles and notification plugins - pcfgs, err := getPluginConfigs() + pcfgs, err := cli.getPluginConfigs() if err != nil { return nil, err } + ncfgs := map[string]NotificationsCfg{} for _, pc := range pcfgs { ncfgs[pc.Name] = NotificationsCfg{ Config: pc, } } - profiles, err := csprofiles.NewProfile(csConfig.API.Server.Profiles) + + profiles, err := 
csprofiles.NewProfile(cfg.API.Server.Profiles) if err != nil { return nil, fmt.Errorf("while extracting profiles from configuration: %w", err) } + for profileID, profile := range profiles { for _, notif := range profile.Cfg.Notifications { pc, ok := pcfgs[notif] if !ok { return nil, fmt.Errorf("notification plugin '%s' does not exist", notif) } + tmp, ok := ncfgs[pc.Name] if !ok { return nil, fmt.Errorf("notification plugin '%s' does not exist", pc.Name) } + tmp.Profiles = append(tmp.Profiles, profile.Cfg) tmp.ids = append(tmp.ids, uint(profileID)) ncfgs[pc.Name] = tmp } } + return ncfgs, nil } -func (cli cliNotifications) NewListCmd() *cobra.Command { +func (cli *cliNotifications) NewListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list active notifications plugins", @@ -144,21 +160,22 @@ func (cli cliNotifications) NewListCmd() *cobra.Command { Example: `cscli notifications list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, arg []string) error { - ncfgs, err := getProfilesConfigs() + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + ncfgs, err := cli.getProfilesConfigs() if err != nil { return fmt.Errorf("can't build profiles configuration: %w", err) } - if csConfig.Cscli.Output == "human" { + if cfg.Cscli.Output == "human" { notificationListTable(color.Output, ncfgs) - } else if csConfig.Cscli.Output == "json" { + } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(ncfgs, "", " ") if err != nil { return fmt.Errorf("failed to marshal notification configuration: %w", err) } fmt.Printf("%s", string(x)) - } else if csConfig.Cscli.Output == "raw" { + } else if cfg.Cscli.Output == "raw" { csvwriter := csv.NewWriter(os.Stdout) err := csvwriter.Write([]string{"Name", "Type", "Profile name"}) if err != nil { @@ -176,6 +193,7 @@ func (cli cliNotifications) NewListCmd() *cobra.Command { } csvwriter.Flush() } + return nil }, } @@ -183,7 +201,7 @@ func (cli cliNotifications) 
NewListCmd() *cobra.Command { return cmd } -func (cli cliNotifications) NewInspectCmd() *cobra.Command { +func (cli *cliNotifications) NewInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "inspect", Short: "Inspect active notifications plugin configuration", @@ -191,36 +209,32 @@ func (cli cliNotifications) NewInspectCmd() *cobra.Command { Example: `cscli notifications inspect `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(cmd *cobra.Command, args []string) error { - if args[0] == "" { - return fmt.Errorf("please provide a plugin name to inspect") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - ncfgs, err := getProfilesConfigs() + RunE: func(_ *cobra.Command, args []string) error { + cfg := cli.cfg() + ncfgs, err := cli.getProfilesConfigs() if err != nil { return fmt.Errorf("can't build profiles configuration: %w", err) } - cfg, ok := ncfgs[args[0]] + ncfg, ok := ncfgs[args[0]] if !ok { return fmt.Errorf("plugin '%s' does not exist or is not active", args[0]) } - if csConfig.Cscli.Output == "human" || csConfig.Cscli.Output == "raw" { - fmt.Printf(" - %15s: %15s\n", "Type", cfg.Config.Type) - fmt.Printf(" - %15s: %15s\n", "Name", cfg.Config.Name) - fmt.Printf(" - %15s: %15s\n", "Timeout", cfg.Config.TimeOut) - fmt.Printf(" - %15s: %15s\n", "Format", cfg.Config.Format) - for k, v := range cfg.Config.Config { + if cfg.Cscli.Output == "human" || cfg.Cscli.Output == "raw" { + fmt.Printf(" - %15s: %15s\n", "Type", ncfg.Config.Type) + fmt.Printf(" - %15s: %15s\n", "Name", ncfg.Config.Name) + fmt.Printf(" - %15s: %15s\n", "Timeout", ncfg.Config.TimeOut) + fmt.Printf(" - %15s: %15s\n", "Format", ncfg.Config.Format) + for k, v := range ncfg.Config.Config { fmt.Printf(" - %15s: %15v\n", k, v) } - } else if csConfig.Cscli.Output == "json" { + } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(cfg, "", " ") if err != nil { return fmt.Errorf("failed to marshal notification configuration: %w", err) } 
fmt.Printf("%s", string(x)) } + return nil }, } @@ -228,12 +242,13 @@ func (cli cliNotifications) NewInspectCmd() *cobra.Command { return cmd } -func (cli cliNotifications) NewTestCmd() *cobra.Command { +func (cli *cliNotifications) NewTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb alertOverride string ) + cmd := &cobra.Command{ Use: "test [plugin name]", Short: "send a generic test alert to notification plugin", @@ -241,25 +256,26 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { Example: `cscli notifications test [plugin_name]`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(cmd *cobra.Command, args []string) error { - pconfigs, err := getPluginConfigs() + PreRunE: func(_ *cobra.Command, args []string) error { + cfg := cli.cfg() + pconfigs, err := cli.getPluginConfigs() if err != nil { return fmt.Errorf("can't build profiles configuration: %w", err) } - cfg, ok := pconfigs[args[0]] + pcfg, ok := pconfigs[args[0]] if !ok { return fmt.Errorf("plugin name: '%s' does not exist", args[0]) } //Create a single profile with plugin name as notification name - return pluginBroker.Init(csConfig.PluginConfig, []*csconfig.ProfileCfg{ + return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{ { Notifications: []string{ - cfg.Name, + pcfg.Name, }, }, - }, csConfig.ConfigPaths) + }, cfg.ConfigPaths) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { pluginTomb.Go(func() error { pluginBroker.Run(&pluginTomb) return nil @@ -298,13 +314,16 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { if err := yaml.Unmarshal([]byte(alertOverride), alert); err != nil { return fmt.Errorf("failed to unmarshal alert override: %w", err) } + pluginBroker.PluginChannel <- csplugin.ProfileAlert{ ProfileID: uint(0), Alert: alert, } + //time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent 
pluginTomb.Kill(fmt.Errorf("terminating")) pluginTomb.Wait() + return nil }, } @@ -313,9 +332,11 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { return cmd } -func (cli cliNotifications) NewReinjectCmd() *cobra.Command { - var alertOverride string - var alert *models.Alert +func (cli *cliNotifications) NewReinjectCmd() *cobra.Command { + var ( + alertOverride string + alert *models.Alert + ) cmd := &cobra.Command{ Use: "reinject", @@ -328,25 +349,30 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(cmd *cobra.Command, args []string) error { + PreRunE: func(_ *cobra.Command, args []string) error { var err error - alert, err = FetchAlertFromArgString(args[0]) + alert, err = cli.fetchAlertFromArgString(args[0]) if err != nil { return err } + return nil }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb ) + + cfg := cli.cfg() + if alertOverride != "" { if err := json.Unmarshal([]byte(alertOverride), alert); err != nil { return fmt.Errorf("can't unmarshal data in the alert flag: %w", err) } } - err := pluginBroker.Init(csConfig.PluginConfig, csConfig.API.Server.Profiles, csConfig.ConfigPaths) + + err := pluginBroker.Init(cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) if err != nil { return fmt.Errorf("can't initialize plugins: %w", err) } @@ -356,7 +382,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return nil }) - profiles, err := csprofiles.NewProfile(csConfig.API.Server.Profiles) + profiles, err := csprofiles.NewProfile(cfg.API.Server.Profiles) if err != nil { return fmt.Errorf("cannot extract profiles from configuration: %w", err) } @@ -382,9 +408,9 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not default: time.Sleep(50 * time.Millisecond) log.Info("sleeping\n") - } } + if 
profile.Cfg.OnSuccess == "break" { log.Infof("The profile %s contains a 'on_success: break' so bailing out", profile.Cfg.Name) break @@ -393,6 +419,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not //time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent pluginTomb.Kill(fmt.Errorf("terminating")) pluginTomb.Wait() + return nil }, } @@ -401,18 +428,22 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return cmd } -func FetchAlertFromArgString(toParse string) (*models.Alert, error) { +func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Alert, error) { + cfg := cli.cfg() + id, err := strconv.Atoi(toParse) if err != nil { return nil, fmt.Errorf("bad alert id %s", toParse) } - apiURL, err := url.Parse(csConfig.API.Client.Credentials.URL) + + apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { return nil, fmt.Errorf("error parsing the URL of the API: %w", err) } + client, err := apiclient.NewClient(&apiclient.Config{ - MachineID: csConfig.API.Client.Credentials.Login, - Password: strfmt.Password(csConfig.API.Client.Credentials.Password), + MachineID: cfg.API.Client.Credentials.Login, + Password: strfmt.Password(cfg.API.Client.Credentials.Password), UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), URL: apiURL, VersionPrefix: "v1", @@ -420,9 +451,11 @@ func FetchAlertFromArgString(toParse string) (*models.Alert, error) { if err != nil { return nil, fmt.Errorf("error creating the client for the API: %w", err) } + alert, _, err := client.Alerts.GetByID(context.Background(), id) if err != nil { return nil, fmt.Errorf("can't find alert with id %d: %w", id, err) } + return alert, nil } From a6a4d460d7069a67369906fbe4447eed601b4942 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Feb 2024 11:45:58 +0100 Subject: [PATCH 023/581] refact "cscli console" (#2834) --- cmd/crowdsec-cli/console.go | 253 
++++++++++++++++++------------ cmd/crowdsec-cli/console_table.go | 14 +- cmd/crowdsec-cli/main.go | 2 +- 3 files changed, 160 insertions(+), 109 deletions(-) diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index dcd6fb37f62..b1912825c06 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -25,32 +25,53 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func NewConsoleCmd() *cobra.Command { - var cmdConsole = &cobra.Command{ +type cliConsole struct { + cfg configGetter +} + +func NewCLIConsole(cfg configGetter) *cliConsole { + return &cliConsole{ + cfg: cfg, + } +} + +func (cli *cliConsole) NewCommand() *cobra.Command { + var cmd = &cobra.Command{ Use: "console [action]", Short: "Manage interaction with Crowdsec console (https://app.crowdsec.net)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := require.LAPI(csConfig); err != nil { + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - if err := require.CAPI(csConfig); err != nil { + if err := require.CAPI(cfg); err != nil { return err } - if err := require.CAPIRegistered(csConfig); err != nil { + if err := require.CAPIRegistered(cfg); err != nil { return err } + return nil }, } + cmd.AddCommand(cli.newEnrollCmd()) + cmd.AddCommand(cli.newEnableCmd()) + cmd.AddCommand(cli.newDisableCmd()) + cmd.AddCommand(cli.newStatusCmd()) + + return cmd +} + +func (cli *cliConsole) newEnrollCmd() *cobra.Command { name := "" overwrite := false tags := []string{} opts := []string{} - cmdEnroll := &cobra.Command{ + cmd := &cobra.Command{ Use: "enroll [enroll-key]", Short: "Enroll this instance to https://app.crowdsec.net [requires local API]", Long: ` @@ -66,96 +87,107 @@ After running this command your will need to validate the enrollment in the weba valid options are : %s,all (see 'cscli console status' 
for details)`, strings.Join(csconfig.CONSOLE_CONFIGS, ",")), Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password) - apiURL, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL) + RunE: func(_ *cobra.Command, args []string) error { + cfg := cli.cfg() + password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) + + apiURL, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) if err != nil { - return fmt.Errorf("could not parse CAPI URL: %s", err) + return fmt.Errorf("could not parse CAPI URL: %w", err) } - hub, err := require.Hub(csConfig, nil, nil) + hub, err := require.Hub(cfg, nil, nil) if err != nil { return err } scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) if err != nil { - return fmt.Errorf("failed to get installed scenarios: %s", err) + return fmt.Errorf("failed to get installed scenarios: %w", err) } if len(scenarios) == 0 { scenarios = make([]string, 0) } - enable_opts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} + enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} if len(opts) != 0 { for _, opt := range opts { valid := false if opt == "all" { - enable_opts = csconfig.CONSOLE_CONFIGS + enableOpts = csconfig.CONSOLE_CONFIGS break } - for _, available_opt := range csconfig.CONSOLE_CONFIGS { - if opt == available_opt { + for _, availableOpt := range csconfig.CONSOLE_CONFIGS { + if opt == availableOpt { valid = true enable := true - for _, enabled_opt := range enable_opts { - if opt == enabled_opt { + for _, enabledOpt := range enableOpts { + if opt == enabledOpt { enable = false continue } } if enable { - enable_opts = append(enable_opts, opt) + enableOpts = append(enableOpts, opt) } + break } } if !valid { return fmt.Errorf("option %s doesn't exist", opt) - } } } c, _ := 
apiclient.NewClient(&apiclient.Config{ - MachineID: csConfig.API.Server.OnlineClient.Credentials.Login, + MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, Scenarios: scenarios, UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), URL: apiURL, VersionPrefix: "v3", }) + resp, err := c.Auth.EnrollWatcher(context.Background(), args[0], name, tags, overwrite) if err != nil { - return fmt.Errorf("could not enroll instance: %s", err) + return fmt.Errorf("could not enroll instance: %w", err) } + if resp.Response.StatusCode == 200 && !overwrite { log.Warning("Instance already enrolled. You can use '--overwrite' to force enroll") return nil } - if err := SetConsoleOpts(enable_opts, true); err != nil { + if err := cli.setConsoleOpts(enableOpts, true); err != nil { return err } - for _, opt := range enable_opts { + for _, opt := range enableOpts { log.Infof("Enabled %s : %s", opt, csconfig.CONSOLE_CONFIGS_HELP[opt]) } + log.Info("Watcher successfully enrolled. 
Visit https://app.crowdsec.net to accept it.") log.Info("Please restart crowdsec after accepting the enrollment.") + return nil }, } - cmdEnroll.Flags().StringVarP(&name, "name", "n", "", "Name to display in the console") - cmdEnroll.Flags().BoolVarP(&overwrite, "overwrite", "", false, "Force enroll the instance") - cmdEnroll.Flags().StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console") - cmdEnroll.Flags().StringSliceVarP(&opts, "enable", "e", opts, "Enable console options") - cmdConsole.AddCommand(cmdEnroll) - var enableAll, disableAll bool + flags := cmd.Flags() + flags.StringVarP(&name, "name", "n", "", "Name to display in the console") + flags.BoolVarP(&overwrite, "overwrite", "", false, "Force enroll the instance") + flags.StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console") + flags.StringSliceVarP(&opts, "enable", "e", opts, "Enable console options") + + return cmd +} + +func (cli *cliConsole) newEnableCmd() *cobra.Command { + var enableAll bool - cmdEnable := &cobra.Command{ + cmd := &cobra.Command{ Use: "enable [option]", Short: "Enable a console option", Example: "sudo cscli console enable tainted", @@ -163,9 +195,9 @@ After running this command your will need to validate the enrollment in the weba Enable given information push to the central API. Allows to empower the console`, ValidArgs: csconfig.CONSOLE_CONFIGS, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { if enableAll { - if err := SetConsoleOpts(csconfig.CONSOLE_CONFIGS, true); err != nil { + if err := cli.setConsoleOpts(csconfig.CONSOLE_CONFIGS, true); err != nil { return err } log.Infof("All features have been enabled successfully") @@ -173,19 +205,26 @@ Enable given information push to the central API. 
Allows to empower the console` if len(args) == 0 { return fmt.Errorf("you must specify at least one feature to enable") } - if err := SetConsoleOpts(args, true); err != nil { + if err := cli.setConsoleOpts(args, true); err != nil { return err } log.Infof("%v have been enabled", args) } + log.Infof(ReloadMessage()) + return nil }, } - cmdEnable.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all console options") - cmdConsole.AddCommand(cmdEnable) + cmd.Flags().BoolVarP(&enableAll, "all", "a", false, "Enable all console options") + + return cmd +} - cmdDisable := &cobra.Command{ +func (cli *cliConsole) newDisableCmd() *cobra.Command { + var disableAll bool + + cmd := &cobra.Command{ Use: "disable [option]", Short: "Disable a console option", Example: "sudo cscli console disable tainted", @@ -193,47 +232,52 @@ Enable given information push to the central API. Allows to empower the console` Disable given information push to the central API.`, ValidArgs: csconfig.CONSOLE_CONFIGS, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { if disableAll { - if err := SetConsoleOpts(csconfig.CONSOLE_CONFIGS, false); err != nil { + if err := cli.setConsoleOpts(csconfig.CONSOLE_CONFIGS, false); err != nil { return err } log.Infof("All features have been disabled") } else { - if err := SetConsoleOpts(args, false); err != nil { + if err := cli.setConsoleOpts(args, false); err != nil { return err } log.Infof("%v have been disabled", args) } log.Infof(ReloadMessage()) + return nil }, } - cmdDisable.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all console options") - cmdConsole.AddCommand(cmdDisable) + cmd.Flags().BoolVarP(&disableAll, "all", "a", false, "Disable all console options") + + return cmd +} - cmdConsoleStatus := &cobra.Command{ +func (cli *cliConsole) newStatusCmd() *cobra.Command { + cmd := &cobra.Command{ Use: "status", Short: "Shows status of the console options", 
Example: `sudo cscli console status`, DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - switch csConfig.Cscli.Output { + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + consoleCfg := cfg.API.Server.ConsoleConfig + switch cfg.Cscli.Output { case "human": - cmdConsoleStatusTable(color.Output, *csConfig) + cmdConsoleStatusTable(color.Output, *consoleCfg) case "json": - c := csConfig.API.Server.ConsoleConfig out := map[string](*bool){ - csconfig.SEND_MANUAL_SCENARIOS: c.ShareManualDecisions, - csconfig.SEND_CUSTOM_SCENARIOS: c.ShareCustomScenarios, - csconfig.SEND_TAINTED_SCENARIOS: c.ShareTaintedScenarios, - csconfig.SEND_CONTEXT: c.ShareContext, - csconfig.CONSOLE_MANAGEMENT: c.ConsoleManagement, + csconfig.SEND_MANUAL_SCENARIOS: consoleCfg.ShareManualDecisions, + csconfig.SEND_CUSTOM_SCENARIOS: consoleCfg.ShareCustomScenarios, + csconfig.SEND_TAINTED_SCENARIOS: consoleCfg.ShareTaintedScenarios, + csconfig.SEND_CONTEXT: consoleCfg.ShareContext, + csconfig.CONSOLE_MANAGEMENT: consoleCfg.ConsoleManagement, } data, err := json.MarshalIndent(out, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %s", err) + return fmt.Errorf("failed to marshal configuration: %w", err) } fmt.Println(string(data)) case "raw": @@ -244,11 +288,11 @@ Disable given information push to the central API.`, } rows := [][]string{ - {csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareManualDecisions)}, - {csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios)}, - {csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios)}, - {csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ShareContext)}, - {csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *csConfig.API.Server.ConsoleConfig.ConsoleManagement)}, + {csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", 
*consoleCfg.ShareManualDecisions)}, + {csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareCustomScenarios)}, + {csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareTaintedScenarios)}, + {csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *consoleCfg.ShareContext)}, + {csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *consoleCfg.ConsoleManagement)}, } for _, row := range rows { err = csvwriter.Write(row) @@ -258,132 +302,137 @@ Disable given information push to the central API.`, } csvwriter.Flush() } + return nil }, } - cmdConsole.AddCommand(cmdConsoleStatus) - return cmdConsole + return cmd } -func dumpConsoleConfig(c *csconfig.LocalApiServerCfg) error { - out, err := yaml.Marshal(c.ConsoleConfig) +func (cli *cliConsole) dumpConfig() error { + serverCfg := cli.cfg().API.Server + + out, err := yaml.Marshal(serverCfg.ConsoleConfig) if err != nil { - return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", c.ConsoleConfigPath, err) + return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err) } - if c.ConsoleConfigPath == "" { - c.ConsoleConfigPath = csconfig.DefaultConsoleConfigFilePath - log.Debugf("Empty console_path, defaulting to %s", c.ConsoleConfigPath) + if serverCfg.ConsoleConfigPath == "" { + serverCfg.ConsoleConfigPath = csconfig.DefaultConsoleConfigFilePath + log.Debugf("Empty console_path, defaulting to %s", serverCfg.ConsoleConfigPath) } - if err := os.WriteFile(c.ConsoleConfigPath, out, 0o600); err != nil { - return fmt.Errorf("while dumping console config to %s: %w", c.ConsoleConfigPath, err) + if err := os.WriteFile(serverCfg.ConsoleConfigPath, out, 0o600); err != nil { + return fmt.Errorf("while dumping console config to %s: %w", serverCfg.ConsoleConfigPath, err) } return nil } -func SetConsoleOpts(args []string, wanted bool) error { +func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { + cfg := cli.cfg() + consoleCfg := cfg.API.Server.ConsoleConfig + for _, arg 
:= range args { switch arg { case csconfig.CONSOLE_MANAGEMENT: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ConsoleManagement != nil { - if *csConfig.API.Server.ConsoleConfig.ConsoleManagement == wanted { + if consoleCfg.ConsoleManagement != nil { + if *consoleCfg.ConsoleManagement == wanted { log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) } else { log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - *csConfig.API.Server.ConsoleConfig.ConsoleManagement = wanted + *consoleCfg.ConsoleManagement = wanted } } else { log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - csConfig.API.Server.ConsoleConfig.ConsoleManagement = ptr.Of(wanted) + consoleCfg.ConsoleManagement = ptr.Of(wanted) } - if csConfig.API.Server.OnlineClient.Credentials != nil { + if cfg.API.Server.OnlineClient.Credentials != nil { changed := false - if wanted && csConfig.API.Server.OnlineClient.Credentials.PapiURL == "" { + if wanted && cfg.API.Server.OnlineClient.Credentials.PapiURL == "" { changed = true - csConfig.API.Server.OnlineClient.Credentials.PapiURL = types.PAPIBaseURL - } else if !wanted && csConfig.API.Server.OnlineClient.Credentials.PapiURL != "" { + cfg.API.Server.OnlineClient.Credentials.PapiURL = types.PAPIBaseURL + } else if !wanted && cfg.API.Server.OnlineClient.Credentials.PapiURL != "" { changed = true - csConfig.API.Server.OnlineClient.Credentials.PapiURL = "" + cfg.API.Server.OnlineClient.Credentials.PapiURL = "" } if changed { - fileContent, err := yaml.Marshal(csConfig.API.Server.OnlineClient.Credentials) + fileContent, err := yaml.Marshal(cfg.API.Server.OnlineClient.Credentials) if err != nil { - return fmt.Errorf("cannot marshal credentials: %s", err) + return fmt.Errorf("cannot marshal credentials: %w", err) } - log.Infof("Updating credentials file: %s", csConfig.API.Server.OnlineClient.CredentialsFilePath) + log.Infof("Updating credentials file: %s", 
cfg.API.Server.OnlineClient.CredentialsFilePath) - err = os.WriteFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, fileContent, 0o600) + err = os.WriteFile(cfg.API.Server.OnlineClient.CredentialsFilePath, fileContent, 0o600) if err != nil { - return fmt.Errorf("cannot write credentials file: %s", err) + return fmt.Errorf("cannot write credentials file: %w", err) } } } case csconfig.SEND_CUSTOM_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ShareCustomScenarios != nil { - if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios == wanted { + if consoleCfg.ShareCustomScenarios != nil { + if *consoleCfg.ShareCustomScenarios == wanted { log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = wanted + *consoleCfg.ShareCustomScenarios = wanted } } else { log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - csConfig.API.Server.ConsoleConfig.ShareCustomScenarios = ptr.Of(wanted) + consoleCfg.ShareCustomScenarios = ptr.Of(wanted) } case csconfig.SEND_TAINTED_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios != nil { - if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios == wanted { + if consoleCfg.ShareTaintedScenarios != nil { + if *consoleCfg.ShareTaintedScenarios == wanted { log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = wanted + *consoleCfg.ShareTaintedScenarios = wanted } } else { log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios = ptr.Of(wanted) + consoleCfg.ShareTaintedScenarios = ptr.Of(wanted) } 
case csconfig.SEND_MANUAL_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ShareManualDecisions != nil { - if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions == wanted { + if consoleCfg.ShareManualDecisions != nil { + if *consoleCfg.ShareManualDecisions == wanted { log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - *csConfig.API.Server.ConsoleConfig.ShareManualDecisions = wanted + *consoleCfg.ShareManualDecisions = wanted } } else { log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - csConfig.API.Server.ConsoleConfig.ShareManualDecisions = ptr.Of(wanted) + consoleCfg.ShareManualDecisions = ptr.Of(wanted) } case csconfig.SEND_CONTEXT: /*for each flag check if it's already set before setting it*/ - if csConfig.API.Server.ConsoleConfig.ShareContext != nil { - if *csConfig.API.Server.ConsoleConfig.ShareContext == wanted { + if consoleCfg.ShareContext != nil { + if *consoleCfg.ShareContext == wanted { log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) - *csConfig.API.Server.ConsoleConfig.ShareContext = wanted + *consoleCfg.ShareContext = wanted } } else { log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) - csConfig.API.Server.ConsoleConfig.ShareContext = ptr.Of(wanted) + consoleCfg.ShareContext = ptr.Of(wanted) } default: return fmt.Errorf("unknown flag %s", arg) } } - if err := dumpConsoleConfig(csConfig.API.Server); err != nil { - return fmt.Errorf("failed writing console config: %s", err) + if err := cli.dumpConfig(); err != nil { + return fmt.Errorf("failed writing console config: %w", err) } return nil diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go index 2a221e36f07..e71ea8113fb 100644 --- a/cmd/crowdsec-cli/console_table.go +++ 
b/cmd/crowdsec-cli/console_table.go @@ -9,7 +9,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) { +func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) { t := newTable(out) t.SetRowLines(false) @@ -18,28 +18,30 @@ func cmdConsoleStatusTable(out io.Writer, csConfig csconfig.Config) { for _, option := range csconfig.CONSOLE_CONFIGS { activated := string(emoji.CrossMark) + switch option { case csconfig.SEND_CUSTOM_SCENARIOS: - if *csConfig.API.Server.ConsoleConfig.ShareCustomScenarios { + if *consoleCfg.ShareCustomScenarios { activated = string(emoji.CheckMarkButton) } case csconfig.SEND_MANUAL_SCENARIOS: - if *csConfig.API.Server.ConsoleConfig.ShareManualDecisions { + if *consoleCfg.ShareManualDecisions { activated = string(emoji.CheckMarkButton) } case csconfig.SEND_TAINTED_SCENARIOS: - if *csConfig.API.Server.ConsoleConfig.ShareTaintedScenarios { + if *consoleCfg.ShareTaintedScenarios { activated = string(emoji.CheckMarkButton) } case csconfig.SEND_CONTEXT: - if *csConfig.API.Server.ConsoleConfig.ShareContext { + if *consoleCfg.ShareContext { activated = string(emoji.CheckMarkButton) } case csconfig.CONSOLE_MANAGEMENT: - if *csConfig.API.Server.ConsoleConfig.ConsoleManagement { + if *consoleCfg.ConsoleManagement { activated = string(emoji.CheckMarkButton) } } + t.AddRow(option, activated, csconfig.CONSOLE_CONFIGS_HELP[option]) } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 63b7211b39b..27ac17d554f 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -243,7 +243,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLICapi().NewCommand()) cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) - cmd.AddCommand(NewConsoleCmd()) + cmd.AddCommand(NewCLIConsole(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) 
cmd.AddCommand(NewCLIHubTest().NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) From 4561eb787be6e27693195807ba61181018aa6755 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Feb 2024 20:15:16 +0100 Subject: [PATCH 024/581] bats: color formatter in CI (#2838) --- .github/workflows/bats-hub.yml | 5 +- .github/workflows/bats-mysql.yml | 5 +- .github/workflows/bats-postgres.yml | 5 +- .github/workflows/bats-sqlite-coverage.yml | 3 +- test/lib/color-formatter | 355 +++++++++++++++++++++ 5 files changed, 359 insertions(+), 14 deletions(-) create mode 100755 test/lib/color-formatter diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index fe45210ae96..7764da84812 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -8,9 +8,6 @@ on: GIST_BADGES_ID: required: true -env: - PREFIX_TEST_NAMES_WITH_FILE: true - jobs: build: strategy: @@ -50,7 +47,7 @@ jobs: - name: "Run hub tests" run: | ./test/bin/generate-hub-tests - ./test/run-tests test/dyn-bats/${{ matrix.test-file }} + ./test/run-tests ./test/dyn-bats/${{ matrix.test-file }} --formatter $(pwd)/test/lib/color-formatter - name: "Collect hub coverage" run: ./test/bin/collect-hub-coverage >> $GITHUB_ENV diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 902c25ba329..243da6eb25d 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -7,9 +7,6 @@ on: required: true type: string -env: - PREFIX_TEST_NAMES_WITH_FILE: true - jobs: build: name: "Functional tests" @@ -58,7 +55,7 @@ jobs: MYSQL_USER: root - name: "Run tests" - run: make bats-test + run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter env: DB_BACKEND: mysql MYSQL_HOST: 127.0.0.1 diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index e15f1e410c1..07d3cd8d2f1 100644 --- a/.github/workflows/bats-postgres.yml +++ 
b/.github/workflows/bats-postgres.yml @@ -3,9 +3,6 @@ name: (sub) Bats / Postgres on: workflow_call: -env: - PREFIX_TEST_NAMES_WITH_FILE: true - jobs: build: name: "Functional tests" @@ -67,7 +64,7 @@ PGUSER: postgres - name: "Run tests (DB_BACKEND: pgx)" - run: make bats-test + run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter env: DB_BACKEND: pgx PGHOST: 127.0.0.1 diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 36194555e1d..46a5dd8bc86 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -4,7 +4,6 @@ on: workflow_call: env: - PREFIX_TEST_NAMES_WITH_FILE: true TEST_COVERAGE: true jobs: @@ -42,7 +41,7 @@ make clean bats-build bats-fixture BUILD_STATIC=1 - name: "Run tests" - run: make bats-test + run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter - name: "Collect coverage data" run: | diff --git a/test/lib/color-formatter b/test/lib/color-formatter new file mode 100755 index 00000000000..aee8d750698 --- /dev/null +++ b/test/lib/color-formatter @@ -0,0 +1,355 @@ +#!/usr/bin/env bash + +# +# Taken from pretty formatter, minus the cursor movements. +# Used in github workflows CI where color is allowed. +# + +set -e + +# shellcheck source=lib/bats-core/formatter.bash
source "$BATS_ROOT/lib/bats-core/formatter.bash" + +BASE_PATH=. 
+BATS_ENABLE_TIMING= + +while [[ "$#" -ne 0 ]]; do + case "$1" in + -T) + BATS_ENABLE_TIMING="-T" + ;; + --base-path) + shift + normalize_base_path BASE_PATH "$1" + ;; + esac + shift +done + +update_count_column_width() { + count_column_width=$((${#count} * 2 + 2)) + if [[ -n "$BATS_ENABLE_TIMING" ]]; then + # additional space for ' in %s sec' + count_column_width=$((count_column_width + ${#SECONDS} + 8)) + fi + # also update dependent value + update_count_column_left +} + +update_screen_width() { + screen_width="$(tput cols)" + # also update dependent value + update_count_column_left +} + +update_count_column_left() { + count_column_left=$((screen_width - count_column_width)) +} + +# avoid unset variables +count=0 +screen_width=80 +update_count_column_width +#update_screen_width +test_result= + +#trap update_screen_width WINCH + +begin() { + test_result= # reset to avoid carrying over result state from previous test + line_backoff_count=0 + #go_to_column 0 + #update_count_column_width + #buffer_with_truncation $((count_column_left - 1)) ' %s' "$name" + #clear_to_end_of_line + #go_to_column $count_column_left + #if [[ -n "$BATS_ENABLE_TIMING" ]]; then + # buffer "%${#count}s/${count} in %s sec" "$index" "$SECONDS" + #else + # buffer "%${#count}s/${count}" "$index" + #fi + #go_to_column 1 + buffer "%${#count}s" "$index" +} + +finish_test() { + #move_up $line_backoff_count + #go_to_column 0 + buffer "$@" + if [[ -n "${TIMEOUT-}" ]]; then + set_color 2 + if [[ -n "$BATS_ENABLE_TIMING" ]]; then + buffer ' [%s (timeout: %s)]' "$TIMING" "$TIMEOUT" + else + buffer ' [timeout: %s]' "$TIMEOUT" + fi + else + if [[ -n "$BATS_ENABLE_TIMING" ]]; then + set_color 2 + buffer ' [%s]' "$TIMING" + fi + fi + advance + move_down $((line_backoff_count - 1)) +} + +pass() { + local TIMING="${1:-}" + finish_test ' ✓ %s' "$name" + test_result=pass +} + +skip() { + local reason="$1" TIMING="${2:-}" + if [[ -n "$reason" ]]; then + reason=": $reason" + fi + finish_test ' - %s (skipped%s)' 
"$name" "$reason" + test_result=skip +} + +fail() { + local TIMING="${1:-}" + set_color 1 bold + finish_test ' ✗ %s' "$name" + test_result=fail +} + +timeout() { + local TIMING="${1:-}" + set_color 3 bold + TIMEOUT="${2:-}" finish_test ' ✗ %s' "$name" + test_result=timeout +} + +log() { + case ${test_result} in + pass) + clear_color + ;; + fail) + set_color 1 + ;; + timeout) + set_color 3 + ;; + esac + buffer ' %s\n' "$1" + clear_color +} + +summary() { + if [ "$failures" -eq 0 ]; then + set_color 2 bold + else + set_color 1 bold + fi + + buffer '\n%d test' "$count" + if [[ "$count" -ne 1 ]]; then + buffer 's' + fi + + buffer ', %d failure' "$failures" + if [[ "$failures" -ne 1 ]]; then + buffer 's' + fi + + if [[ "$skipped" -gt 0 ]]; then + buffer ', %d skipped' "$skipped" + fi + + if ((timed_out > 0)); then + buffer ', %d timed out' "$timed_out" + fi + + not_run=$((count - passed - failures - skipped - timed_out)) + if [[ "$not_run" -gt 0 ]]; then + buffer ', %d not run' "$not_run" + fi + + if [[ -n "$BATS_ENABLE_TIMING" ]]; then + buffer " in $SECONDS seconds" + fi + + buffer '\n' + clear_color +} + +buffer_with_truncation() { + local width="$1" + shift + local string + + # shellcheck disable=SC2059 + printf -v 'string' -- "$@" + + if [[ "${#string}" -gt "$width" ]]; then + buffer '%s...' 
"${string:0:$((width - 4))}" + else + buffer '%s' "$string" + fi +} + +move_up() { + if [[ $1 -gt 0 ]]; then # avoid moving if we got 0 + buffer '\x1B[%dA' "$1" + fi +} + +move_down() { + if [[ $1 -gt 0 ]]; then # avoid moving if we got 0 + buffer '\x1B[%dB' "$1" + fi +} + +go_to_column() { + local column="$1" + buffer '\x1B[%dG' $((column + 1)) +} + +clear_to_end_of_line() { + buffer '\x1B[K' +} + +advance() { + clear_to_end_of_line + buffer '\n' + clear_color +} + +set_color() { + local color="$1" + local weight=22 + + if [[ "${2:-}" == 'bold' ]]; then + weight=1 + fi + buffer '\x1B[%d;%dm' "$((30 + color))" "$weight" +} + +clear_color() { + buffer '\x1B[0m' +} + +_buffer= + +buffer() { + local content + # shellcheck disable=SC2059 + printf -v content -- "$@" + _buffer+="$content" +} + +prefix_buffer_with() { + local old_buffer="$_buffer" + _buffer='' + "$@" + _buffer="$_buffer$old_buffer" +} + +flush() { + printf '%s' "$_buffer" + _buffer= +} + +finish() { + flush + printf '\n' +} + +trap finish EXIT +trap '' INT + +bats_tap_stream_plan() { + count="$1" + index=0 + passed=0 + failures=0 + skipped=0 + timed_out=0 + name= + update_count_column_width +} + +bats_tap_stream_begin() { + index="$1" + name="$2" + begin + flush +} + +bats_tap_stream_ok() { + index="$1" + name="$2" + ((++passed)) + + pass "${BATS_FORMATTER_TEST_DURATION:-}" +} + +bats_tap_stream_skipped() { + index="$1" + name="$2" + ((++skipped)) + skip "$3" "${BATS_FORMATTER_TEST_DURATION:-}" +} + +bats_tap_stream_not_ok() { + index="$1" + name="$2" + + if [[ ${BATS_FORMATTER_TEST_TIMEOUT-x} != x ]]; then + timeout "${BATS_FORMATTER_TEST_DURATION:-}" "${BATS_FORMATTER_TEST_TIMEOUT}s" + ((++timed_out)) + else + fail "${BATS_FORMATTER_TEST_DURATION:-}" + ((++failures)) + fi + +} + +bats_tap_stream_comment() { # + local scope=$2 + # count the lines we printed after the begin text, + if [[ $line_backoff_count -eq 0 && $scope == begin ]]; then + # if this is the first line after begin, go down one line + 
buffer "\n" + ((++line_backoff_count)) # prefix-increment to avoid "error" due to returning 0 + fi + + ((++line_backoff_count)) + ((line_backoff_count += ${#1} / screen_width)) # account for linebreaks due to length + log "$1" +} + +bats_tap_stream_suite() { + #test_file="$1" + line_backoff_count=0 + index= + # indicate filename for failures + local file_name="${1#"$BASE_PATH"}" + name="File $file_name" + set_color 4 bold + buffer "%s\n" "$file_name" + clear_color +} + +line_backoff_count=0 +bats_tap_stream_unknown() { # + local scope=$2 + # count the lines we printed after the begin text, (or after suite, in case of syntax errors) + if [[ $line_backoff_count -eq 0 && ($scope == begin || $scope == suite) ]]; then + # if this is the first line after begin, go down one line + buffer "\n" + ((++line_backoff_count)) # prefix-increment to avoid "error" due to returning 0 + fi + + ((++line_backoff_count)) + ((line_backoff_count += ${#1} / screen_width)) # account for linebreaks due to length + buffer "%s\n" "$1" + flush +} + +bats_parse_internal_extended_tap + +summary From d34fb7e8a85deaa31697290cf583824911fa6913 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 13 Feb 2024 14:22:19 +0100 Subject: [PATCH 025/581] log processor: share apiclient in output goroutines (#2836) --- .golangci.yml | 10 ++- cmd/crowdsec/api.go | 4 +- cmd/crowdsec/crowdsec.go | 56 ++++++++++++--- cmd/crowdsec/lapiclient.go | 92 +++++++++++++++++++++++++ cmd/crowdsec/metrics.go | 13 ++-- cmd/crowdsec/output.go | 105 +++++------------------------ cmd/crowdsec/run_in_svc.go | 12 ++-- cmd/crowdsec/run_in_svc_windows.go | 7 +- cmd/crowdsec/serve.go | 27 ++++++-- test/bats/01_crowdsec.bats | 3 + test/bats/40_live-ban.bats | 21 ++++-- 11 files changed, 229 insertions(+), 121 deletions(-) create mode 100644 cmd/crowdsec/lapiclient.go diff --git a/.golangci.yml b/.golangci.yml index 3161b2c0aaf..e605ac079d4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -11,7 
+11,7 @@ run: linters-settings: cyclop: # lower this after refactoring - max-complexity: 70 + max-complexity: 53 gci: sections: @@ -26,7 +26,7 @@ linters-settings: gocyclo: # lower this after refactoring - min-complexity: 70 + min-complexity: 49 funlen: # Checks the number of lines in a function. @@ -46,7 +46,7 @@ linters-settings: maintidx: # raise this after refactoring - under: 9 + under: 11 misspell: locale: US @@ -263,6 +263,10 @@ issues: - perfsprint text: "fmt.Sprintf can be replaced .*" + - linters: + - perfsprint + text: "fmt.Errorf can be replaced with errors.New" + # # Will fix, easy but some neurons required # diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index a1e933cba89..4ac5c3ce96f 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -56,7 +56,8 @@ func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { return apiServer, nil } -func serveAPIServer(apiServer *apiserver.APIServer, apiReady chan bool) { +func serveAPIServer(apiServer *apiserver.APIServer) { + apiReady := make(chan bool, 1) apiTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveAPIServer") go func() { @@ -80,6 +81,7 @@ func serveAPIServer(apiServer *apiserver.APIServer, apiReady chan bool) { } return nil }) + <-apiReady } func hasPlugins(profiles []*csconfig.ProfileCfg) bool { diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 774b9d381cf..d4cd2d3cf74 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "os" "path/filepath" @@ -13,8 +14,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition" - "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" + "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" @@ 
-56,63 +57,86 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H //start go-routines for parsing, buckets pour and outputs. parserWg := &sync.WaitGroup{} + parsersTomb.Go(func() error { parserWg.Add(1) + for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ { parsersTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runParse") + if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { //this error will never happen as parser.Parse is not able to return errors log.Fatalf("starting parse error : %s", err) return err } + return nil }) } parserWg.Done() + return nil }) parserWg.Wait() bucketWg := &sync.WaitGroup{} + bucketsTomb.Go(func() error { bucketWg.Add(1) /*restore previous state as well if present*/ if cConfig.Crowdsec.BucketStateFile != "" { log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile) + if err := leaky.LoadBucketsState(cConfig.Crowdsec.BucketStateFile, buckets, holders); err != nil { - return fmt.Errorf("unable to restore buckets : %s", err) + return fmt.Errorf("unable to restore buckets: %w", err) } } for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ { bucketsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runPour") + if err := runPour(inputEventChan, holders, buckets, cConfig); err != nil { log.Fatalf("starting pour error : %s", err) return err } + return nil }) } bucketWg.Done() + return nil }) bucketWg.Wait() + apiClient, err := AuthenticatedLAPIClient(*cConfig.API.Client.Credentials, hub) + if err != nil { + return err + } + + log.Debugf("Starting HeartBeat service") + apiClient.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb) + outputWg := &sync.WaitGroup{} + outputsTomb.Go(func() error { outputWg.Add(1) + for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ { outputsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runOutput") - if err := runOutput(inputEventChan, outputEventChan, buckets, 
*parsers.Povfwctx, parsers.Povfwnodes, *cConfig.API.Client.Credentials, hub); err != nil { + + if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient); err != nil { log.Fatalf("starting outputs error : %s", err) return err } + return nil }) } outputWg.Done() + return nil }) outputWg.Wait() @@ -122,16 +146,16 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H if cConfig.Prometheus.Level == "aggregated" { aggregated = true } + if err := acquisition.GetMetrics(dataSources, aggregated); err != nil { return fmt.Errorf("while fetching prometheus metrics for datasources: %w", err) } - } + log.Info("Starting processing data") if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil { - log.Fatalf("starting acquisition error : %s", err) - return err + return fmt.Errorf("starting acquisition error: %w", err) } return nil @@ -140,11 +164,13 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub.Hub, agentReady chan bool) { crowdsecTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveCrowdsec") + go func() { defer trace.CatchPanic("crowdsec/runCrowdsec") // this logs every time, even at config reload log.Debugf("running agent after %s ms", time.Since(crowdsecT0)) agentReady <- true + if err := runCrowdsec(cConfig, parsers, hub); err != nil { log.Fatalf("unable to start crowdsec routines: %s", err) } @@ -156,16 +182,20 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub */ waitOnTomb() log.Debugf("Shutting down crowdsec routines") + if err := ShutdownCrowdsecRoutines(); err != nil { log.Fatalf("unable to shutdown crowdsec routines: %s", err) } + log.Debugf("everything is dead, return crowdsecTomb") + if dumpStates { dumpParserState() dumpOverflowState() dumpBucketsPour() os.Exit(0) } + return nil }) } @@ 
-175,55 +205,65 @@ func dumpBucketsPour() { if err != nil { log.Fatalf("open: %s", err) } + out, err := yaml.Marshal(leaky.BucketPourCache) if err != nil { log.Fatalf("marshal: %s", err) } + b, err := fd.Write(out) if err != nil { log.Fatalf("write: %s", err) } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { log.Fatalf(" close: %s", err) } } func dumpParserState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) if err != nil { log.Fatalf("open: %s", err) } + out, err := yaml.Marshal(parser.StageParseCache) if err != nil { log.Fatalf("marshal: %s", err) } + b, err := fd.Write(out) if err != nil { log.Fatalf("write: %s", err) } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { log.Fatalf(" close: %s", err) } } func dumpOverflowState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) if err != nil { log.Fatalf("open: %s", err) } + out, err := yaml.Marshal(bucketOverflows) if err != nil { log.Fatalf("marshal: %s", err) } + b, err := fd.Write(out) if err != nil { log.Fatalf("write: %s", err) } + log.Tracef("wrote %d bytes", b) + if err := fd.Close(); err != nil { log.Fatalf(" close: %s", err) } diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go new file mode 100644 index 00000000000..fd29aa9d99b --- /dev/null +++ b/cmd/crowdsec/lapiclient.go @@ -0,0 +1,92 @@ +package main + +import ( + "context" + "fmt" + "net/url" + "time" + + "github.com/go-openapi/strfmt" + + "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { + scenarios, err := 
hub.GetInstalledItemNames(cwhub.SCENARIOS) + if err != nil { + return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err) + } + + appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) + if err != nil { + return nil, fmt.Errorf("loading list of installed hub appsec rules: %w", err) + } + + installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules)) + installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...) + installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...) + + apiURL, err := url.Parse(credentials.URL) + if err != nil { + return nil, fmt.Errorf("parsing api url ('%s'): %w", credentials.URL, err) + } + + papiURL, err := url.Parse(credentials.PapiURL) + if err != nil { + return nil, fmt.Errorf("parsing polling api url ('%s'): %w", credentials.PapiURL, err) + } + + password := strfmt.Password(credentials.Password) + + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: credentials.Login, + Password: password, + Scenarios: installedScenariosAndAppsecRules, + UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + URL: apiURL, + PapiURL: papiURL, + VersionPrefix: "v1", + UpdateScenario: func() ([]string, error) { + scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + if err != nil { + return nil, err + } + appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) + if err != nil { + return nil, err + } + ret := make([]string, 0, len(scenarios)+len(appsecRules)) + ret = append(ret, scenarios...) + ret = append(ret, appsecRules...) 
+ + return ret, nil + }, + }) + if err != nil { + return nil, fmt.Errorf("new client api: %w", err) + } + + authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + MachineID: &credentials.Login, + Password: &password, + Scenarios: installedScenariosAndAppsecRules, + }) + if err != nil { + return nil, fmt.Errorf("authenticate watcher (%s): %w", credentials.Login, err) + } + + var expiration time.Time + if err := expiration.UnmarshalText([]byte(authResp.Expire)); err != nil { + return nil, fmt.Errorf("unable to parse jwt expiration: %w", err) + } + + client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token + client.GetClient().Transport.(*apiclient.JWTTransport).Expiration = expiration + + return client, nil +} diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index fa2d8d5de32..1199af0fe16 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -114,13 +114,17 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha } decisionsFilters := make(map[string][]string, 0) + decisions, err := dbClient.QueryDecisionCountByScenario(decisionsFilters) if err != nil { log.Errorf("Error querying decisions for metrics: %v", err) next.ServeHTTP(w, r) + return } + globalActiveDecisions.Reset() + for _, d := range decisions { globalActiveDecisions.With(prometheus.Labels{"reason": d.Scenario, "origin": d.Origin, "action": d.Type}).Set(float64(d.Count)) } @@ -136,6 +140,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha if err != nil { log.Errorf("Error querying alerts for metrics: %v", err) next.ServeHTTP(w, r) + return } @@ -173,11 +178,12 @@ func registerPrometheus(config *csconfig.PrometheusCfg) { globalActiveDecisions, globalAlerts, parser.NodesWlHitsOk, parser.NodesWlHits, cache.CacheMetrics, exprhelpers.RegexpCacheMetrics, ) - } } -func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, apiReady chan 
bool, agentReady chan bool) { +func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, agentReady chan bool) { + <-agentReady + if !config.Enabled { return } @@ -185,9 +191,8 @@ func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, defer trace.CatchPanic("crowdsec/servePrometheus") http.Handle("/metrics", computeDynamicMetrics(promhttp.Handler(), dbClient)) - <-apiReady - <-agentReady log.Debugf("serving metrics after %s ms", time.Since(crowdsecT0)) + if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil { log.Warningf("prometheus: %s", err) } diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go index ad53ce4c827..c4a2c0b6ac1 100644 --- a/cmd/crowdsec/output.go +++ b/cmd/crowdsec/output.go @@ -3,18 +3,12 @@ package main import ( "context" "fmt" - "net/url" "sync" "time" - "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/parser" @@ -22,7 +16,6 @@ import ( ) func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { - var dedupCache []*models.Alert for idx, alert := range alerts { @@ -32,16 +25,21 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { dedupCache = append(dedupCache, alert.Alert) continue } + for k, src := range alert.Sources { refsrc := *alert.Alert //copy + log.Tracef("source[%s]", k) + refsrc.Source = &src dedupCache = append(dedupCache, &refsrc) } } + if len(dedupCache) != len(alerts) { log.Tracef("went from %d to %d alerts", len(alerts), len(dedupCache)) } + return dedupCache, nil } @@ -52,93 +50,25 @@ func PushAlerts(alerts []types.RuntimeAlert, client 
*apiclient.ApiClient) error if err != nil { return fmt.Errorf("failed to transform alerts for api: %w", err) } + _, _, err = client.Alerts.Add(ctx, alertsToPush) if err != nil { return fmt.Errorf("failed sending alert to LAPI: %w", err) } + return nil } var bucketOverflows []types.Event -func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky.Buckets, - postOverflowCTX parser.UnixParserCtx, postOverflowNodes []parser.Node, - apiConfig csconfig.ApiCredentialsCfg, hub *cwhub.Hub) error { +func runOutput(input chan types.Event, overflow chan types.Event, buckets *leaky.Buckets, postOverflowCTX parser.UnixParserCtx, + postOverflowNodes []parser.Node, client *apiclient.ApiClient) error { + var ( + cache []types.RuntimeAlert + cacheMutex sync.Mutex + ) - var err error ticker := time.NewTicker(1 * time.Second) - - var cache []types.RuntimeAlert - var cacheMutex sync.Mutex - - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("loading list of installed hub scenarios: %w", err) - } - - appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) - if err != nil { - return fmt.Errorf("loading list of installed hub appsec rules: %w", err) - } - - installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules)) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...) 
- - apiURL, err := url.Parse(apiConfig.URL) - if err != nil { - return fmt.Errorf("parsing api url ('%s'): %w", apiConfig.URL, err) - } - papiURL, err := url.Parse(apiConfig.PapiURL) - if err != nil { - return fmt.Errorf("parsing polling api url ('%s'): %w", apiConfig.PapiURL, err) - } - password := strfmt.Password(apiConfig.Password) - - Client, err := apiclient.NewClient(&apiclient.Config{ - MachineID: apiConfig.Login, - Password: password, - Scenarios: installedScenariosAndAppsecRules, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), - URL: apiURL, - PapiURL: papiURL, - VersionPrefix: "v1", - UpdateScenario: func() ([]string, error) { - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) - if err != nil { - return nil, err - } - appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) - if err != nil { - return nil, err - } - ret := make([]string, 0, len(scenarios)+len(appsecRules)) - ret = append(ret, scenarios...) - ret = append(ret, appsecRules...) - return ret, nil - }, - }) - if err != nil { - return fmt.Errorf("new client api: %w", err) - } - authResp, _, err := Client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ - MachineID: &apiConfig.Login, - Password: &password, - Scenarios: installedScenariosAndAppsecRules, - }) - if err != nil { - return fmt.Errorf("authenticate watcher (%s): %w", apiConfig.Login, err) - } - - if err := Client.GetClient().Transport.(*apiclient.JWTTransport).Expiration.UnmarshalText([]byte(authResp.Expire)); err != nil { - return fmt.Errorf("unable to parse jwt expiration: %w", err) - } - - Client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token - - //start the heartbeat service - log.Debugf("Starting HeartBeat service") - Client.HeartBeat.StartHeartBeat(context.Background(), &outputsTomb) LOOP: for { select { @@ -149,7 +79,7 @@ LOOP: newcache := make([]types.RuntimeAlert, 0) cache = newcache cacheMutex.Unlock() - if err := PushAlerts(cachecopy, Client); 
err != nil { + if err := PushAlerts(cachecopy, client); err != nil { log.Errorf("while pushing to api : %s", err) //just push back the events to the queue cacheMutex.Lock() @@ -162,10 +92,11 @@ LOOP: cacheMutex.Lock() cachecopy := cache cacheMutex.Unlock() - if err := PushAlerts(cachecopy, Client); err != nil { + if err := PushAlerts(cachecopy, client); err != nil { log.Errorf("while pushing leftovers to api : %s", err) } } + break LOOP case event := <-overflow: /*if alert is empty and mapKey is present, the overflow is just to cleanup bucket*/ @@ -176,7 +107,7 @@ LOOP: /* process post overflow parser nodes */ event, err := parser.Parse(postOverflowCTX, event, postOverflowNodes) if err != nil { - return fmt.Errorf("postoverflow failed : %s", err) + return fmt.Errorf("postoverflow failed: %w", err) } log.Printf("%s", *event.Overflow.Alert.Message) //if the Alert is nil, it's to signal bucket is ready for GC, don't track this @@ -206,6 +137,6 @@ LOOP: } ticker.Stop() - return nil + return nil } diff --git a/cmd/crowdsec/run_in_svc.go b/cmd/crowdsec/run_in_svc.go index 2020537908d..5a8bc9a6cd3 100644 --- a/cmd/crowdsec/run_in_svc.go +++ b/cmd/crowdsec/run_in_svc.go @@ -33,7 +33,6 @@ func StartRunSvc() error { log.Infof("Crowdsec %s", version.String()) - apiReady := make(chan bool, 1) agentReady := make(chan bool, 1) // Enable profiling early @@ -46,14 +45,19 @@ func StartRunSvc() error { dbClient, err = database.NewClient(cConfig.DbConfig) if err != nil { - return fmt.Errorf("unable to create database client: %s", err) + return fmt.Errorf("unable to create database client: %w", err) } } registerPrometheus(cConfig.Prometheus) - go servePrometheus(cConfig.Prometheus, dbClient, apiReady, agentReady) + go servePrometheus(cConfig.Prometheus, dbClient, agentReady) + } else { + // avoid leaking the channel + go func() { + <-agentReady + }() } - return Serve(cConfig, apiReady, agentReady) + return Serve(cConfig, agentReady) } diff --git a/cmd/crowdsec/run_in_svc_windows.go 
b/cmd/crowdsec/run_in_svc_windows.go index 991f7ae4491..7845e9c58b5 100644 --- a/cmd/crowdsec/run_in_svc_windows.go +++ b/cmd/crowdsec/run_in_svc_windows.go @@ -73,7 +73,6 @@ func WindowsRun() error { log.Infof("Crowdsec %s", version.String()) - apiReady := make(chan bool, 1) agentReady := make(chan bool, 1) // Enable profiling early @@ -85,11 +84,11 @@ func WindowsRun() error { dbClient, err = database.NewClient(cConfig.DbConfig) if err != nil { - return fmt.Errorf("unable to create database client: %s", err) + return fmt.Errorf("unable to create database client: %w", err) } } registerPrometheus(cConfig.Prometheus) - go servePrometheus(cConfig.Prometheus, dbClient, apiReady, agentReady) + go servePrometheus(cConfig.Prometheus, dbClient, agentReady) } - return Serve(cConfig, apiReady, agentReady) + return Serve(cConfig, agentReady) } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index a5c8e24cf3f..22f65b927a0 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -42,7 +42,9 @@ func debugHandler(sig os.Signal, cConfig *csconfig.Config) error { if err := leaky.ShutdownAllBuckets(buckets); err != nil { log.Warningf("Failed to shut down routines : %s", err) } + log.Printf("Shutdown is finished, buckets are in %s", tmpFile) + return nil } @@ -66,15 +68,16 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { if !cConfig.DisableAPI { if flags.DisableCAPI { log.Warningf("Communication with CrowdSec Central API disabled from args") + cConfig.API.Server.OnlineClient = nil } + apiServer, err := initAPIServer(cConfig) if err != nil { return nil, fmt.Errorf("unable to init api server: %w", err) } - apiReady := make(chan bool, 1) - serveAPIServer(apiServer, apiReady) + serveAPIServer(apiServer) } if !cConfig.DisableAgent { @@ -110,6 +113,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { log.Warningf("Failed to delete temp file (%s) : %s", tmpFile, err) } } + return cConfig, nil } @@ -117,10 +121,12 @@ func 
ShutdownCrowdsecRoutines() error { var reterr error log.Debugf("Shutting down crowdsec sub-routines") + if len(dataSources) > 0 { acquisTomb.Kill(nil) log.Debugf("waiting for acquisition to finish") drainChan(inputLineChan) + if err := acquisTomb.Wait(); err != nil { log.Warningf("Acquisition returned error : %s", err) reterr = err @@ -130,6 +136,7 @@ func ShutdownCrowdsecRoutines() error { log.Debugf("acquisition is finished, wait for parser/bucket/ouputs.") parsersTomb.Kill(nil) drainChan(inputEventChan) + if err := parsersTomb.Wait(); err != nil { log.Warningf("Parsers returned error : %s", err) reterr = err @@ -160,6 +167,7 @@ func ShutdownCrowdsecRoutines() error { log.Warningf("Outputs returned error : %s", err) reterr = err } + log.Debugf("outputs are done") case <-time.After(3 * time.Second): // this can happen if outputs are stuck in a http retry loop @@ -181,6 +189,7 @@ func shutdownAPI() error { } log.Debugf("done") + return nil } @@ -193,6 +202,7 @@ func shutdownCrowdsec() error { } log.Debugf("done") + return nil } @@ -292,10 +302,11 @@ func HandleSignals(cConfig *csconfig.Config) error { if err == nil { log.Warning("Crowdsec service shutting down") } + return err } -func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) error { +func Serve(cConfig *csconfig.Config, agentReady chan bool) error { acquisTomb = tomb.Tomb{} parsersTomb = tomb.Tomb{} bucketsTomb = tomb.Tomb{} @@ -325,6 +336,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e if cConfig.API.CTI != nil && *cConfig.API.CTI.Enabled { log.Infof("Crowdsec CTI helper enabled") + if err := exprhelpers.InitCrowdsecCTI(cConfig.API.CTI.Key, cConfig.API.CTI.CacheTimeout, cConfig.API.CTI.CacheSize, cConfig.API.CTI.LogLevel); err != nil { return fmt.Errorf("failed to init crowdsec cti: %w", err) } @@ -337,6 +349,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e if flags.DisableCAPI { log.Warningf("Communication 
with CrowdSec Central API disabled from args") + cConfig.API.Server.OnlineClient = nil } @@ -346,10 +359,8 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e } if !flags.TestMode { - serveAPIServer(apiServer, apiReady) + serveAPIServer(apiServer) } - } else { - apiReady <- true } if !cConfig.DisableAgent { @@ -366,6 +377,8 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e // if it's just linting, we're done if !flags.TestMode { serveCrowdsec(csParsers, cConfig, hub, agentReady) + } else { + agentReady <- true } } else { agentReady <- true @@ -395,6 +408,7 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e for _, ch := range waitChans { <-ch + switch ch { case apiTomb.Dead(): log.Infof("api shutdown") @@ -402,5 +416,6 @@ func Serve(cConfig *csconfig.Config, apiReady chan bool, agentReady chan bool) e log.Infof("crowdsec shutdown") } } + return nil } diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index be06ac9261a..a585930e34c 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -75,6 +75,9 @@ teardown() { rune -0 ./instance-crowdsec start-pid PID="$output" + + sleep .5 + assert_file_exists "$log_old" assert_file_contains "$log_old" "Starting processing data" diff --git a/test/bats/40_live-ban.bats b/test/bats/40_live-ban.bats index c6b8ddf1563..a544f67be18 100644 --- a/test/bats/40_live-ban.bats +++ b/test/bats/40_live-ban.bats @@ -41,10 +41,23 @@ teardown() { echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"${ACQUIS_YAML}" ./instance-crowdsec start + + sleep 0.2 + fake_log >>"${tmpfile}" - sleep 2 + + sleep 0.2 + rm -f -- "${tmpfile}" - rune -0 cscli decisions list -o json - rune -0 jq -r '.[].decisions[0].value' <(output) - assert_output '1.1.1.172' + + found=0 + # this may take some time in CI + for _ in $(seq 1 10); do + if cscli decisions list -o json | jq -r '.[].decisions[0].value' | grep -q 
'1.1.1.172'; then + found=1 + break + fi + sleep 0.2 + done + assert_equal 1 "${found}" } From 45571cea08591962b515ed903b0b00488a4f7c13 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 14 Feb 2024 09:47:12 +0100 Subject: [PATCH 026/581] use go 1.21.7 (#2830) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 3 ++- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 7764da84812..075480485ff 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 243da6eb25d..5c019933304 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 07d3cd8d2f1..0f3c69ccefa 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml 
b/.github/workflows/bats-sqlite-coverage.yml index 46a5dd8bc86..436eb0f04a4 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 26c981143ad..7c6a6621de4 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4b262f13d09..bdc16e650f6 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -48,10 +48,11 @@ jobs: with: # required to pick up tags for BUILD_VERSION fetch-depth: 0 + - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 63781a7b25e..efe16ed66d9 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index e8840c07f4e..865b2782a63 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 202882791e7..d251677fd46 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.6" + go-version: "1.21.7" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index 2369c09dfa6..420c521fa58 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.6-alpine3.18 AS build +FROM golang:1.21.7-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index ba0cd20fb43..48753e7acdb 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.6-bookworm AS build +FROM golang:1.21.7-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 82caba42bae..791f41f50ba 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,7 +27,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.21.6' + version: '1.21.7' - pwsh: | choco install -y make From 2bbf0b4762ad58f5e50858132085ac4586502008 Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 14 Feb 2024 11:19:13 +0100 Subject: [PATCH 027/581] re-generate ent code (#2844) --- go.sum | 2 + pkg/database/ent/alert.go | 28 +- pkg/database/ent/alert/alert.go | 203 +++ pkg/database/ent/alert/where.go | 1525 +++++---------------- pkg/database/ent/alert_create.go | 221 +-- pkg/database/ent/alert_delete.go | 51 +- pkg/database/ent/alert_query.go | 279 ++-- pkg/database/ent/alert_update.go | 738 ++-------- pkg/database/ent/bouncer.go | 22 +- pkg/database/ent/bouncer/bouncer.go | 65 + pkg/database/ent/bouncer/where.go | 687 ++-------- pkg/database/ent/bouncer_create.go | 129 +- pkg/database/ent/bouncer_delete.go | 51 +- pkg/database/ent/bouncer_query.go | 239 ++-- pkg/database/ent/bouncer_update.go | 286 +--- pkg/database/ent/client.go | 466 ++++++- pkg/database/ent/config.go | 65 - pkg/database/ent/configitem.go | 22 +- pkg/database/ent/configitem/configitem.go | 30 + pkg/database/ent/configitem/where.go | 299 +--- pkg/database/ent/configitem_create.go | 87 +- pkg/database/ent/configitem_delete.go | 51 +- pkg/database/ent/configitem_query.go | 239 ++-- pkg/database/ent/configitem_update.go | 162 +-- pkg/database/ent/context.go | 33 - pkg/database/ent/decision.go | 24 +- pkg/database/ent/decision/decision.go | 105 ++ pkg/database/ent/decision/where.go | 930 +++---------- pkg/database/ent/decision_create.go | 158 +-- pkg/database/ent/decision_delete.go | 51 +- pkg/database/ent/decision_query.go | 249 ++-- pkg/database/ent/decision_update.go | 444 +----- pkg/database/ent/ent.go | 233 +++- pkg/database/ent/event.go | 24 +- pkg/database/ent/event/event.go | 50 + pkg/database/ent/event/where.go | 322 +---- pkg/database/ent/event_create.go | 92 +- pkg/database/ent/event_delete.go | 51 +- pkg/database/ent/event_query.go | 249 ++-- pkg/database/ent/event_update.go | 196 +-- pkg/database/ent/hook/hook.go | 49 +- pkg/database/ent/machine.go | 24 +- pkg/database/ent/machine/machine.go | 92 ++ 
pkg/database/ent/machine/where.go | 766 +++-------- pkg/database/ent/machine_create.go | 140 +- pkg/database/ent/machine_delete.go | 51 +- pkg/database/ent/machine_query.go | 247 ++-- pkg/database/ent/machine_update.go | 352 +---- pkg/database/ent/meta.go | 24 +- pkg/database/ent/meta/meta.go | 50 + pkg/database/ent/meta/where.go | 342 +---- pkg/database/ent/meta_create.go | 92 +- pkg/database/ent/meta_delete.go | 51 +- pkg/database/ent/meta_query.go | 249 ++-- pkg/database/ent/meta_update.go | 196 +-- pkg/database/ent/mutation.go | 112 +- pkg/database/ent/runtime/runtime.go | 4 +- pkg/database/ent/tx.go | 36 +- 58 files changed, 4026 insertions(+), 8009 deletions(-) delete mode 100644 pkg/database/ent/config.go delete mode 100644 pkg/database/ent/context.go diff --git a/go.sum b/go.sum index 8fa2021316b..2daf22cc99c 100644 --- a/go.sum +++ b/go.sum @@ -542,6 +542,8 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go index 2649923bf5e..5cb4d1a352c 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" 
"github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" @@ -67,6 +68,7 @@ type Alert struct { // The values are being populated by the AlertQuery when eager-loading is set. Edges AlertEdges `json:"edges"` machine_alerts *int + selectValues sql.SelectValues } // AlertEdges holds the relations/edges for other nodes in the graph. @@ -142,7 +144,7 @@ func (*Alert) scanValues(columns []string) ([]any, error) { case alert.ForeignKeys[0]: // machine_alerts values[i] = new(sql.NullInt64) default: - return nil, fmt.Errorf("unexpected column %q for type Alert", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -309,36 +311,44 @@ func (a *Alert) assignValues(columns []string, values []any) error { a.machine_alerts = new(int) *a.machine_alerts = int(value.Int64) } + default: + a.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Alert. +// This includes values selected through modifiers, order, etc. +func (a *Alert) Value(name string) (ent.Value, error) { + return a.selectValues.Get(name) +} + // QueryOwner queries the "owner" edge of the Alert entity. func (a *Alert) QueryOwner() *MachineQuery { - return (&AlertClient{config: a.config}).QueryOwner(a) + return NewAlertClient(a.config).QueryOwner(a) } // QueryDecisions queries the "decisions" edge of the Alert entity. func (a *Alert) QueryDecisions() *DecisionQuery { - return (&AlertClient{config: a.config}).QueryDecisions(a) + return NewAlertClient(a.config).QueryDecisions(a) } // QueryEvents queries the "events" edge of the Alert entity. func (a *Alert) QueryEvents() *EventQuery { - return (&AlertClient{config: a.config}).QueryEvents(a) + return NewAlertClient(a.config).QueryEvents(a) } // QueryMetas queries the "metas" edge of the Alert entity. 
func (a *Alert) QueryMetas() *MetaQuery { - return (&AlertClient{config: a.config}).QueryMetas(a) + return NewAlertClient(a.config).QueryMetas(a) } // Update returns a builder for updating this Alert. // Note that you need to call Alert.Unwrap() before calling this method if this Alert // was returned from a transaction, and the transaction was committed or rolled back. func (a *Alert) Update() *AlertUpdateOne { - return (&AlertClient{config: a.config}).UpdateOne(a) + return NewAlertClient(a.config).UpdateOne(a) } // Unwrap unwraps the Alert entity that was returned from a transaction after it was closed, @@ -435,9 +445,3 @@ func (a *Alert) String() string { // Alerts is a parsable slice of Alert. type Alerts []*Alert - -func (a Alerts) config(cfg config) { - for _i := range a { - a[_i].config = cfg - } -} diff --git a/pkg/database/ent/alert/alert.go b/pkg/database/ent/alert/alert.go index abee13fb97a..eb9f1d10788 100644 --- a/pkg/database/ent/alert/alert.go +++ b/pkg/database/ent/alert/alert.go @@ -4,6 +4,9 @@ package alert import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -168,3 +171,203 @@ var ( // DefaultSimulated holds the default value on creation for the "simulated" field. DefaultSimulated bool ) + +// OrderOption defines the ordering options for the Alert queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByScenario orders the results by the scenario field. 
+func ByScenario(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenario, opts...).ToFunc() +} + +// ByBucketId orders the results by the bucketId field. +func ByBucketId(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBucketId, opts...).ToFunc() +} + +// ByMessage orders the results by the message field. +func ByMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMessage, opts...).ToFunc() +} + +// ByEventsCountField orders the results by the eventsCount field. +func ByEventsCountField(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEventsCount, opts...).ToFunc() +} + +// ByStartedAt orders the results by the startedAt field. +func ByStartedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartedAt, opts...).ToFunc() +} + +// ByStoppedAt orders the results by the stoppedAt field. +func ByStoppedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStoppedAt, opts...).ToFunc() +} + +// BySourceIp orders the results by the sourceIp field. +func BySourceIp(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceIp, opts...).ToFunc() +} + +// BySourceRange orders the results by the sourceRange field. +func BySourceRange(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceRange, opts...).ToFunc() +} + +// BySourceAsNumber orders the results by the sourceAsNumber field. +func BySourceAsNumber(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceAsNumber, opts...).ToFunc() +} + +// BySourceAsName orders the results by the sourceAsName field. +func BySourceAsName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceAsName, opts...).ToFunc() +} + +// BySourceCountry orders the results by the sourceCountry field. 
+func BySourceCountry(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceCountry, opts...).ToFunc() +} + +// BySourceLatitude orders the results by the sourceLatitude field. +func BySourceLatitude(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceLatitude, opts...).ToFunc() +} + +// BySourceLongitude orders the results by the sourceLongitude field. +func BySourceLongitude(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceLongitude, opts...).ToFunc() +} + +// BySourceScope orders the results by the sourceScope field. +func BySourceScope(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceScope, opts...).ToFunc() +} + +// BySourceValue orders the results by the sourceValue field. +func BySourceValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceValue, opts...).ToFunc() +} + +// ByCapacity orders the results by the capacity field. +func ByCapacity(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCapacity, opts...).ToFunc() +} + +// ByLeakSpeed orders the results by the leakSpeed field. +func ByLeakSpeed(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLeakSpeed, opts...).ToFunc() +} + +// ByScenarioVersion orders the results by the scenarioVersion field. +func ByScenarioVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenarioVersion, opts...).ToFunc() +} + +// ByScenarioHash orders the results by the scenarioHash field. +func ByScenarioHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenarioHash, opts...).ToFunc() +} + +// BySimulated orders the results by the simulated field. +func BySimulated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSimulated, opts...).ToFunc() +} + +// ByUUID orders the results by the uuid field. 
+func ByUUID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUUID, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. +func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} + +// ByDecisionsCount orders the results by decisions count. +func ByDecisionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newDecisionsStep(), opts...) + } +} + +// ByDecisions orders the results by decisions terms. +func ByDecisions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDecisionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByEventsCount orders the results by events count. +func ByEventsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEventsStep(), opts...) + } +} + +// ByEvents orders the results by events terms. +func ByEvents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEventsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByMetasCount orders the results by metas count. +func ByMetasCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newMetasStep(), opts...) + } +} + +// ByMetas orders the results by metas terms. +func ByMetas(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newMetasStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} +func newDecisionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DecisionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn), + ) +} +func newEventsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EventsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn), + ) +} +func newMetasStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MetasInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn), + ) +} diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go index ef5b89b615f..516ead50636 100644 --- a/pkg/database/ent/alert/where.go +++ b/pkg/database/ent/alert/where.go @@ -12,2440 +12,1612 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. 
func IDIn(ids ...int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Alert(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. 
func UpdatedAt(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldUpdatedAt, v)) } // Scenario applies equality check predicate on the "scenario" field. It's identical to ScenarioEQ. func Scenario(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenario, v)) } // BucketId applies equality check predicate on the "bucketId" field. It's identical to BucketIdEQ. func BucketId(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldBucketId, v)) } // Message applies equality check predicate on the "message" field. It's identical to MessageEQ. func Message(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldMessage, v)) } // EventsCount applies equality check predicate on the "eventsCount" field. It's identical to EventsCountEQ. func EventsCount(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldEventsCount, v)) } // StartedAt applies equality check predicate on the "startedAt" field. It's identical to StartedAtEQ. func StartedAt(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldStartedAt, v)) } // StoppedAt applies equality check predicate on the "stoppedAt" field. It's identical to StoppedAtEQ. 
func StoppedAt(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldStoppedAt, v)) } // SourceIp applies equality check predicate on the "sourceIp" field. It's identical to SourceIpEQ. func SourceIp(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceIp, v)) } // SourceRange applies equality check predicate on the "sourceRange" field. It's identical to SourceRangeEQ. func SourceRange(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceRange, v)) } // SourceAsNumber applies equality check predicate on the "sourceAsNumber" field. It's identical to SourceAsNumberEQ. func SourceAsNumber(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceAsNumber, v)) } // SourceAsName applies equality check predicate on the "sourceAsName" field. It's identical to SourceAsNameEQ. func SourceAsName(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceAsName, v)) } // SourceCountry applies equality check predicate on the "sourceCountry" field. It's identical to SourceCountryEQ. func SourceCountry(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceCountry, v)) } // SourceLatitude applies equality check predicate on the "sourceLatitude" field. It's identical to SourceLatitudeEQ. 
func SourceLatitude(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceLatitude, v)) } // SourceLongitude applies equality check predicate on the "sourceLongitude" field. It's identical to SourceLongitudeEQ. func SourceLongitude(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceLongitude, v)) } // SourceScope applies equality check predicate on the "sourceScope" field. It's identical to SourceScopeEQ. func SourceScope(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceScope, v)) } // SourceValue applies equality check predicate on the "sourceValue" field. It's identical to SourceValueEQ. func SourceValue(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceValue, v)) } // Capacity applies equality check predicate on the "capacity" field. It's identical to CapacityEQ. func Capacity(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldCapacity, v)) } // LeakSpeed applies equality check predicate on the "leakSpeed" field. It's identical to LeakSpeedEQ. func LeakSpeed(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldLeakSpeed, v)) } // ScenarioVersion applies equality check predicate on the "scenarioVersion" field. It's identical to ScenarioVersionEQ. 
func ScenarioVersion(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenarioVersion, v)) } // ScenarioHash applies equality check predicate on the "scenarioHash" field. It's identical to ScenarioHashEQ. func ScenarioHash(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenarioHash, v)) } // Simulated applies equality check predicate on the "simulated" field. It's identical to SimulatedEQ. func Simulated(v bool) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSimulated), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSimulated, v)) } // UUID applies equality check predicate on the "uuid" field. It's identical to UUIDEQ. func UUID(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldUUID, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. 
func CreatedAtIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. 
func CreatedAtIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Alert(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Alert(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. 
func UpdatedAtGT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Alert(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Alert(sql.FieldNotNull(FieldUpdatedAt)) } // ScenarioEQ applies the EQ predicate on the "scenario" field. func ScenarioEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenario, v)) } // ScenarioNEQ applies the NEQ predicate on the "scenario" field. 
func ScenarioNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldScenario, v)) } // ScenarioIn applies the In predicate on the "scenario" field. func ScenarioIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenario), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldScenario, vs...)) } // ScenarioNotIn applies the NotIn predicate on the "scenario" field. func ScenarioNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenario), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldScenario, vs...)) } // ScenarioGT applies the GT predicate on the "scenario" field. func ScenarioGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldGT(FieldScenario, v)) } // ScenarioGTE applies the GTE predicate on the "scenario" field. func ScenarioGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldScenario, v)) } // ScenarioLT applies the LT predicate on the "scenario" field. func ScenarioLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldLT(FieldScenario, v)) } // ScenarioLTE applies the LTE predicate on the "scenario" field. 
func ScenarioLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldScenario, v)) } // ScenarioContains applies the Contains predicate on the "scenario" field. func ScenarioContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldContains(FieldScenario, v)) } // ScenarioHasPrefix applies the HasPrefix predicate on the "scenario" field. func ScenarioHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldScenario, v)) } // ScenarioHasSuffix applies the HasSuffix predicate on the "scenario" field. func ScenarioHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldScenario, v)) } // ScenarioEqualFold applies the EqualFold predicate on the "scenario" field. func ScenarioEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldScenario, v)) } // ScenarioContainsFold applies the ContainsFold predicate on the "scenario" field. func ScenarioContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenario), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldScenario, v)) } // BucketIdEQ applies the EQ predicate on the "bucketId" field. 
func BucketIdEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldBucketId, v)) } // BucketIdNEQ applies the NEQ predicate on the "bucketId" field. func BucketIdNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldBucketId, v)) } // BucketIdIn applies the In predicate on the "bucketId" field. func BucketIdIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldBucketId), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldBucketId, vs...)) } // BucketIdNotIn applies the NotIn predicate on the "bucketId" field. func BucketIdNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldBucketId), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldBucketId, vs...)) } // BucketIdGT applies the GT predicate on the "bucketId" field. func BucketIdGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldGT(FieldBucketId, v)) } // BucketIdGTE applies the GTE predicate on the "bucketId" field. func BucketIdGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldBucketId, v)) } // BucketIdLT applies the LT predicate on the "bucketId" field. 
func BucketIdLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldLT(FieldBucketId, v)) } // BucketIdLTE applies the LTE predicate on the "bucketId" field. func BucketIdLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldBucketId, v)) } // BucketIdContains applies the Contains predicate on the "bucketId" field. func BucketIdContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldContains(FieldBucketId, v)) } // BucketIdHasPrefix applies the HasPrefix predicate on the "bucketId" field. func BucketIdHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldBucketId, v)) } // BucketIdHasSuffix applies the HasSuffix predicate on the "bucketId" field. func BucketIdHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldBucketId, v)) } // BucketIdIsNil applies the IsNil predicate on the "bucketId" field. func BucketIdIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldBucketId))) - }) + return predicate.Alert(sql.FieldIsNull(FieldBucketId)) } // BucketIdNotNil applies the NotNil predicate on the "bucketId" field. func BucketIdNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldBucketId))) - }) + return predicate.Alert(sql.FieldNotNull(FieldBucketId)) } // BucketIdEqualFold applies the EqualFold predicate on the "bucketId" field. 
func BucketIdEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldBucketId, v)) } // BucketIdContainsFold applies the ContainsFold predicate on the "bucketId" field. func BucketIdContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldBucketId), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldBucketId, v)) } // MessageEQ applies the EQ predicate on the "message" field. func MessageEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldMessage, v)) } // MessageNEQ applies the NEQ predicate on the "message" field. func MessageNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldMessage, v)) } // MessageIn applies the In predicate on the "message" field. func MessageIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldMessage), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldMessage, vs...)) } // MessageNotIn applies the NotIn predicate on the "message" field. func MessageNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldMessage), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldMessage, vs...)) } // MessageGT applies the GT predicate on the "message" field. 
func MessageGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldGT(FieldMessage, v)) } // MessageGTE applies the GTE predicate on the "message" field. func MessageGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldMessage, v)) } // MessageLT applies the LT predicate on the "message" field. func MessageLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldLT(FieldMessage, v)) } // MessageLTE applies the LTE predicate on the "message" field. func MessageLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldMessage, v)) } // MessageContains applies the Contains predicate on the "message" field. func MessageContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldContains(FieldMessage, v)) } // MessageHasPrefix applies the HasPrefix predicate on the "message" field. func MessageHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldMessage, v)) } // MessageHasSuffix applies the HasSuffix predicate on the "message" field. func MessageHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldMessage, v)) } // MessageIsNil applies the IsNil predicate on the "message" field. 
func MessageIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldMessage))) - }) + return predicate.Alert(sql.FieldIsNull(FieldMessage)) } // MessageNotNil applies the NotNil predicate on the "message" field. func MessageNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldMessage))) - }) + return predicate.Alert(sql.FieldNotNull(FieldMessage)) } // MessageEqualFold applies the EqualFold predicate on the "message" field. func MessageEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldMessage, v)) } // MessageContainsFold applies the ContainsFold predicate on the "message" field. func MessageContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldMessage), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldMessage, v)) } // EventsCountEQ applies the EQ predicate on the "eventsCount" field. func EventsCountEQ(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldEventsCount, v)) } // EventsCountNEQ applies the NEQ predicate on the "eventsCount" field. func EventsCountNEQ(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldEventsCount, v)) } // EventsCountIn applies the In predicate on the "eventsCount" field. 
func EventsCountIn(vs ...int32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldEventsCount), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldEventsCount, vs...)) } // EventsCountNotIn applies the NotIn predicate on the "eventsCount" field. func EventsCountNotIn(vs ...int32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldEventsCount), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldEventsCount, vs...)) } // EventsCountGT applies the GT predicate on the "eventsCount" field. func EventsCountGT(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldGT(FieldEventsCount, v)) } // EventsCountGTE applies the GTE predicate on the "eventsCount" field. func EventsCountGTE(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldEventsCount, v)) } // EventsCountLT applies the LT predicate on the "eventsCount" field. func EventsCountLT(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldLT(FieldEventsCount, v)) } // EventsCountLTE applies the LTE predicate on the "eventsCount" field. func EventsCountLTE(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEventsCount), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldEventsCount, v)) } // EventsCountIsNil applies the IsNil predicate on the "eventsCount" field. 
func EventsCountIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldEventsCount))) - }) + return predicate.Alert(sql.FieldIsNull(FieldEventsCount)) } // EventsCountNotNil applies the NotNil predicate on the "eventsCount" field. func EventsCountNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldEventsCount))) - }) + return predicate.Alert(sql.FieldNotNull(FieldEventsCount)) } // StartedAtEQ applies the EQ predicate on the "startedAt" field. func StartedAtEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldStartedAt, v)) } // StartedAtNEQ applies the NEQ predicate on the "startedAt" field. func StartedAtNEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldStartedAt, v)) } // StartedAtIn applies the In predicate on the "startedAt" field. func StartedAtIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStartedAt), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldStartedAt, vs...)) } // StartedAtNotIn applies the NotIn predicate on the "startedAt" field. func StartedAtNotIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStartedAt), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldStartedAt, vs...)) } // StartedAtGT applies the GT predicate on the "startedAt" field. 
func StartedAtGT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldGT(FieldStartedAt, v)) } // StartedAtGTE applies the GTE predicate on the "startedAt" field. func StartedAtGTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldStartedAt, v)) } // StartedAtLT applies the LT predicate on the "startedAt" field. func StartedAtLT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldLT(FieldStartedAt, v)) } // StartedAtLTE applies the LTE predicate on the "startedAt" field. func StartedAtLTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStartedAt), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldStartedAt, v)) } // StartedAtIsNil applies the IsNil predicate on the "startedAt" field. func StartedAtIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStartedAt))) - }) + return predicate.Alert(sql.FieldIsNull(FieldStartedAt)) } // StartedAtNotNil applies the NotNil predicate on the "startedAt" field. func StartedAtNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStartedAt))) - }) + return predicate.Alert(sql.FieldNotNull(FieldStartedAt)) } // StoppedAtEQ applies the EQ predicate on the "stoppedAt" field. func StoppedAtEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldStoppedAt, v)) } // StoppedAtNEQ applies the NEQ predicate on the "stoppedAt" field. 
func StoppedAtNEQ(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldStoppedAt, v)) } // StoppedAtIn applies the In predicate on the "stoppedAt" field. func StoppedAtIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStoppedAt), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldStoppedAt, vs...)) } // StoppedAtNotIn applies the NotIn predicate on the "stoppedAt" field. func StoppedAtNotIn(vs ...time.Time) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStoppedAt), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldStoppedAt, vs...)) } // StoppedAtGT applies the GT predicate on the "stoppedAt" field. func StoppedAtGT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldGT(FieldStoppedAt, v)) } // StoppedAtGTE applies the GTE predicate on the "stoppedAt" field. func StoppedAtGTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldStoppedAt, v)) } // StoppedAtLT applies the LT predicate on the "stoppedAt" field. func StoppedAtLT(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldLT(FieldStoppedAt, v)) } // StoppedAtLTE applies the LTE predicate on the "stoppedAt" field. 
func StoppedAtLTE(v time.Time) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStoppedAt), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldStoppedAt, v)) } // StoppedAtIsNil applies the IsNil predicate on the "stoppedAt" field. func StoppedAtIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStoppedAt))) - }) + return predicate.Alert(sql.FieldIsNull(FieldStoppedAt)) } // StoppedAtNotNil applies the NotNil predicate on the "stoppedAt" field. func StoppedAtNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStoppedAt))) - }) + return predicate.Alert(sql.FieldNotNull(FieldStoppedAt)) } // SourceIpEQ applies the EQ predicate on the "sourceIp" field. func SourceIpEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceIp, v)) } // SourceIpNEQ applies the NEQ predicate on the "sourceIp" field. func SourceIpNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceIp, v)) } // SourceIpIn applies the In predicate on the "sourceIp" field. func SourceIpIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceIp), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceIp, vs...)) } // SourceIpNotIn applies the NotIn predicate on the "sourceIp" field. 
func SourceIpNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceIp), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceIp, vs...)) } // SourceIpGT applies the GT predicate on the "sourceIp" field. func SourceIpGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceIp, v)) } // SourceIpGTE applies the GTE predicate on the "sourceIp" field. func SourceIpGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceIp, v)) } // SourceIpLT applies the LT predicate on the "sourceIp" field. func SourceIpLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceIp, v)) } // SourceIpLTE applies the LTE predicate on the "sourceIp" field. func SourceIpLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceIp, v)) } // SourceIpContains applies the Contains predicate on the "sourceIp" field. func SourceIpContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceIp, v)) } // SourceIpHasPrefix applies the HasPrefix predicate on the "sourceIp" field. 
func SourceIpHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceIp, v)) } // SourceIpHasSuffix applies the HasSuffix predicate on the "sourceIp" field. func SourceIpHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceIp, v)) } // SourceIpIsNil applies the IsNil predicate on the "sourceIp" field. func SourceIpIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceIp))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceIp)) } // SourceIpNotNil applies the NotNil predicate on the "sourceIp" field. func SourceIpNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceIp))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceIp)) } // SourceIpEqualFold applies the EqualFold predicate on the "sourceIp" field. func SourceIpEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceIp, v)) } // SourceIpContainsFold applies the ContainsFold predicate on the "sourceIp" field. func SourceIpContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceIp), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceIp, v)) } // SourceRangeEQ applies the EQ predicate on the "sourceRange" field. 
func SourceRangeEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceRange, v)) } // SourceRangeNEQ applies the NEQ predicate on the "sourceRange" field. func SourceRangeNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceRange, v)) } // SourceRangeIn applies the In predicate on the "sourceRange" field. func SourceRangeIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceRange), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceRange, vs...)) } // SourceRangeNotIn applies the NotIn predicate on the "sourceRange" field. func SourceRangeNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceRange), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceRange, vs...)) } // SourceRangeGT applies the GT predicate on the "sourceRange" field. func SourceRangeGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceRange, v)) } // SourceRangeGTE applies the GTE predicate on the "sourceRange" field. func SourceRangeGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceRange, v)) } // SourceRangeLT applies the LT predicate on the "sourceRange" field. 
func SourceRangeLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceRange, v)) } // SourceRangeLTE applies the LTE predicate on the "sourceRange" field. func SourceRangeLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceRange, v)) } // SourceRangeContains applies the Contains predicate on the "sourceRange" field. func SourceRangeContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceRange, v)) } // SourceRangeHasPrefix applies the HasPrefix predicate on the "sourceRange" field. func SourceRangeHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceRange, v)) } // SourceRangeHasSuffix applies the HasSuffix predicate on the "sourceRange" field. func SourceRangeHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceRange, v)) } // SourceRangeIsNil applies the IsNil predicate on the "sourceRange" field. func SourceRangeIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceRange))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceRange)) } // SourceRangeNotNil applies the NotNil predicate on the "sourceRange" field. 
func SourceRangeNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceRange))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceRange)) } // SourceRangeEqualFold applies the EqualFold predicate on the "sourceRange" field. func SourceRangeEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceRange, v)) } // SourceRangeContainsFold applies the ContainsFold predicate on the "sourceRange" field. func SourceRangeContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceRange), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceRange, v)) } // SourceAsNumberEQ applies the EQ predicate on the "sourceAsNumber" field. func SourceAsNumberEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceAsNumber, v)) } // SourceAsNumberNEQ applies the NEQ predicate on the "sourceAsNumber" field. func SourceAsNumberNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceAsNumber, v)) } // SourceAsNumberIn applies the In predicate on the "sourceAsNumber" field. func SourceAsNumberIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceAsNumber), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceAsNumber, vs...)) } // SourceAsNumberNotIn applies the NotIn predicate on the "sourceAsNumber" field. 
func SourceAsNumberNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceAsNumber), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceAsNumber, vs...)) } // SourceAsNumberGT applies the GT predicate on the "sourceAsNumber" field. func SourceAsNumberGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceAsNumber, v)) } // SourceAsNumberGTE applies the GTE predicate on the "sourceAsNumber" field. func SourceAsNumberGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceAsNumber, v)) } // SourceAsNumberLT applies the LT predicate on the "sourceAsNumber" field. func SourceAsNumberLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceAsNumber, v)) } // SourceAsNumberLTE applies the LTE predicate on the "sourceAsNumber" field. func SourceAsNumberLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceAsNumber, v)) } // SourceAsNumberContains applies the Contains predicate on the "sourceAsNumber" field. func SourceAsNumberContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceAsNumber, v)) } // SourceAsNumberHasPrefix applies the HasPrefix predicate on the "sourceAsNumber" field. 
func SourceAsNumberHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceAsNumber, v)) } // SourceAsNumberHasSuffix applies the HasSuffix predicate on the "sourceAsNumber" field. func SourceAsNumberHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceAsNumber, v)) } // SourceAsNumberIsNil applies the IsNil predicate on the "sourceAsNumber" field. func SourceAsNumberIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceAsNumber))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceAsNumber)) } // SourceAsNumberNotNil applies the NotNil predicate on the "sourceAsNumber" field. func SourceAsNumberNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceAsNumber))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceAsNumber)) } // SourceAsNumberEqualFold applies the EqualFold predicate on the "sourceAsNumber" field. func SourceAsNumberEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceAsNumber, v)) } // SourceAsNumberContainsFold applies the ContainsFold predicate on the "sourceAsNumber" field. func SourceAsNumberContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceAsNumber), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceAsNumber, v)) } // SourceAsNameEQ applies the EQ predicate on the "sourceAsName" field. 
func SourceAsNameEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceAsName, v)) } // SourceAsNameNEQ applies the NEQ predicate on the "sourceAsName" field. func SourceAsNameNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceAsName, v)) } // SourceAsNameIn applies the In predicate on the "sourceAsName" field. func SourceAsNameIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceAsName), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceAsName, vs...)) } // SourceAsNameNotIn applies the NotIn predicate on the "sourceAsName" field. func SourceAsNameNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceAsName), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceAsName, vs...)) } // SourceAsNameGT applies the GT predicate on the "sourceAsName" field. func SourceAsNameGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceAsName, v)) } // SourceAsNameGTE applies the GTE predicate on the "sourceAsName" field. func SourceAsNameGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceAsName, v)) } // SourceAsNameLT applies the LT predicate on the "sourceAsName" field. 
func SourceAsNameLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceAsName, v)) } // SourceAsNameLTE applies the LTE predicate on the "sourceAsName" field. func SourceAsNameLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceAsName, v)) } // SourceAsNameContains applies the Contains predicate on the "sourceAsName" field. func SourceAsNameContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceAsName, v)) } // SourceAsNameHasPrefix applies the HasPrefix predicate on the "sourceAsName" field. func SourceAsNameHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceAsName, v)) } // SourceAsNameHasSuffix applies the HasSuffix predicate on the "sourceAsName" field. func SourceAsNameHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceAsName, v)) } // SourceAsNameIsNil applies the IsNil predicate on the "sourceAsName" field. func SourceAsNameIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceAsName))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceAsName)) } // SourceAsNameNotNil applies the NotNil predicate on the "sourceAsName" field. 
func SourceAsNameNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceAsName))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceAsName)) } // SourceAsNameEqualFold applies the EqualFold predicate on the "sourceAsName" field. func SourceAsNameEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceAsName, v)) } // SourceAsNameContainsFold applies the ContainsFold predicate on the "sourceAsName" field. func SourceAsNameContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceAsName), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceAsName, v)) } // SourceCountryEQ applies the EQ predicate on the "sourceCountry" field. func SourceCountryEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceCountry, v)) } // SourceCountryNEQ applies the NEQ predicate on the "sourceCountry" field. func SourceCountryNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceCountry, v)) } // SourceCountryIn applies the In predicate on the "sourceCountry" field. func SourceCountryIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceCountry), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceCountry, vs...)) } // SourceCountryNotIn applies the NotIn predicate on the "sourceCountry" field. 
func SourceCountryNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceCountry), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceCountry, vs...)) } // SourceCountryGT applies the GT predicate on the "sourceCountry" field. func SourceCountryGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceCountry, v)) } // SourceCountryGTE applies the GTE predicate on the "sourceCountry" field. func SourceCountryGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceCountry, v)) } // SourceCountryLT applies the LT predicate on the "sourceCountry" field. func SourceCountryLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceCountry, v)) } // SourceCountryLTE applies the LTE predicate on the "sourceCountry" field. func SourceCountryLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceCountry, v)) } // SourceCountryContains applies the Contains predicate on the "sourceCountry" field. func SourceCountryContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceCountry, v)) } // SourceCountryHasPrefix applies the HasPrefix predicate on the "sourceCountry" field. 
func SourceCountryHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceCountry, v)) } // SourceCountryHasSuffix applies the HasSuffix predicate on the "sourceCountry" field. func SourceCountryHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceCountry, v)) } // SourceCountryIsNil applies the IsNil predicate on the "sourceCountry" field. func SourceCountryIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceCountry))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceCountry)) } // SourceCountryNotNil applies the NotNil predicate on the "sourceCountry" field. func SourceCountryNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceCountry))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceCountry)) } // SourceCountryEqualFold applies the EqualFold predicate on the "sourceCountry" field. func SourceCountryEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceCountry, v)) } // SourceCountryContainsFold applies the ContainsFold predicate on the "sourceCountry" field. func SourceCountryContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceCountry), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceCountry, v)) } // SourceLatitudeEQ applies the EQ predicate on the "sourceLatitude" field. 
func SourceLatitudeEQ(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceLatitude, v)) } // SourceLatitudeNEQ applies the NEQ predicate on the "sourceLatitude" field. func SourceLatitudeNEQ(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceLatitude, v)) } // SourceLatitudeIn applies the In predicate on the "sourceLatitude" field. func SourceLatitudeIn(vs ...float32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceLatitude), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceLatitude, vs...)) } // SourceLatitudeNotIn applies the NotIn predicate on the "sourceLatitude" field. func SourceLatitudeNotIn(vs ...float32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceLatitude), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceLatitude, vs...)) } // SourceLatitudeGT applies the GT predicate on the "sourceLatitude" field. func SourceLatitudeGT(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceLatitude, v)) } // SourceLatitudeGTE applies the GTE predicate on the "sourceLatitude" field. -func SourceLatitudeGTE(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceLatitude), v)) - }) +func SourceLatitudeGTE(v float32) predicate.Alert { + return predicate.Alert(sql.FieldGTE(FieldSourceLatitude, v)) } // SourceLatitudeLT applies the LT predicate on the "sourceLatitude" field. 
func SourceLatitudeLT(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceLatitude, v)) } // SourceLatitudeLTE applies the LTE predicate on the "sourceLatitude" field. func SourceLatitudeLTE(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceLatitude), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceLatitude, v)) } // SourceLatitudeIsNil applies the IsNil predicate on the "sourceLatitude" field. func SourceLatitudeIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceLatitude))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceLatitude)) } // SourceLatitudeNotNil applies the NotNil predicate on the "sourceLatitude" field. func SourceLatitudeNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceLatitude))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceLatitude)) } // SourceLongitudeEQ applies the EQ predicate on the "sourceLongitude" field. func SourceLongitudeEQ(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceLongitude, v)) } // SourceLongitudeNEQ applies the NEQ predicate on the "sourceLongitude" field. func SourceLongitudeNEQ(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceLongitude, v)) } // SourceLongitudeIn applies the In predicate on the "sourceLongitude" field. 
func SourceLongitudeIn(vs ...float32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceLongitude), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceLongitude, vs...)) } // SourceLongitudeNotIn applies the NotIn predicate on the "sourceLongitude" field. func SourceLongitudeNotIn(vs ...float32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceLongitude), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceLongitude, vs...)) } // SourceLongitudeGT applies the GT predicate on the "sourceLongitude" field. func SourceLongitudeGT(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceLongitude, v)) } // SourceLongitudeGTE applies the GTE predicate on the "sourceLongitude" field. func SourceLongitudeGTE(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceLongitude, v)) } // SourceLongitudeLT applies the LT predicate on the "sourceLongitude" field. func SourceLongitudeLT(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceLongitude, v)) } // SourceLongitudeLTE applies the LTE predicate on the "sourceLongitude" field. func SourceLongitudeLTE(v float32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceLongitude), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceLongitude, v)) } // SourceLongitudeIsNil applies the IsNil predicate on the "sourceLongitude" field. 
func SourceLongitudeIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceLongitude))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceLongitude)) } // SourceLongitudeNotNil applies the NotNil predicate on the "sourceLongitude" field. func SourceLongitudeNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceLongitude))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceLongitude)) } // SourceScopeEQ applies the EQ predicate on the "sourceScope" field. func SourceScopeEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceScope, v)) } // SourceScopeNEQ applies the NEQ predicate on the "sourceScope" field. func SourceScopeNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceScope, v)) } // SourceScopeIn applies the In predicate on the "sourceScope" field. func SourceScopeIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceScope), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceScope, vs...)) } // SourceScopeNotIn applies the NotIn predicate on the "sourceScope" field. func SourceScopeNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceScope), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceScope, vs...)) } // SourceScopeGT applies the GT predicate on the "sourceScope" field. 
func SourceScopeGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceScope, v)) } // SourceScopeGTE applies the GTE predicate on the "sourceScope" field. func SourceScopeGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceScope, v)) } // SourceScopeLT applies the LT predicate on the "sourceScope" field. func SourceScopeLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceScope, v)) } // SourceScopeLTE applies the LTE predicate on the "sourceScope" field. func SourceScopeLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceScope, v)) } // SourceScopeContains applies the Contains predicate on the "sourceScope" field. func SourceScopeContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceScope, v)) } // SourceScopeHasPrefix applies the HasPrefix predicate on the "sourceScope" field. func SourceScopeHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceScope, v)) } // SourceScopeHasSuffix applies the HasSuffix predicate on the "sourceScope" field. 
func SourceScopeHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceScope, v)) } // SourceScopeIsNil applies the IsNil predicate on the "sourceScope" field. func SourceScopeIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceScope))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceScope)) } // SourceScopeNotNil applies the NotNil predicate on the "sourceScope" field. func SourceScopeNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceScope))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceScope)) } // SourceScopeEqualFold applies the EqualFold predicate on the "sourceScope" field. func SourceScopeEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceScope, v)) } // SourceScopeContainsFold applies the ContainsFold predicate on the "sourceScope" field. func SourceScopeContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceScope), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceScope, v)) } // SourceValueEQ applies the EQ predicate on the "sourceValue" field. func SourceValueEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSourceValue, v)) } // SourceValueNEQ applies the NEQ predicate on the "sourceValue" field. 
func SourceValueNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSourceValue, v)) } // SourceValueIn applies the In predicate on the "sourceValue" field. func SourceValueIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSourceValue), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldSourceValue, vs...)) } // SourceValueNotIn applies the NotIn predicate on the "sourceValue" field. func SourceValueNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSourceValue), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldSourceValue, vs...)) } // SourceValueGT applies the GT predicate on the "sourceValue" field. func SourceValueGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldGT(FieldSourceValue, v)) } // SourceValueGTE applies the GTE predicate on the "sourceValue" field. func SourceValueGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldSourceValue, v)) } // SourceValueLT applies the LT predicate on the "sourceValue" field. func SourceValueLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldLT(FieldSourceValue, v)) } // SourceValueLTE applies the LTE predicate on the "sourceValue" field. 
func SourceValueLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldSourceValue, v)) } // SourceValueContains applies the Contains predicate on the "sourceValue" field. func SourceValueContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldContains(FieldSourceValue, v)) } // SourceValueHasPrefix applies the HasPrefix predicate on the "sourceValue" field. func SourceValueHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldSourceValue, v)) } // SourceValueHasSuffix applies the HasSuffix predicate on the "sourceValue" field. func SourceValueHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldSourceValue, v)) } // SourceValueIsNil applies the IsNil predicate on the "sourceValue" field. func SourceValueIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldSourceValue))) - }) + return predicate.Alert(sql.FieldIsNull(FieldSourceValue)) } // SourceValueNotNil applies the NotNil predicate on the "sourceValue" field. func SourceValueNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldSourceValue))) - }) + return predicate.Alert(sql.FieldNotNull(FieldSourceValue)) } // SourceValueEqualFold applies the EqualFold predicate on the "sourceValue" field. 
func SourceValueEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldSourceValue, v)) } // SourceValueContainsFold applies the ContainsFold predicate on the "sourceValue" field. func SourceValueContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSourceValue), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldSourceValue, v)) } // CapacityEQ applies the EQ predicate on the "capacity" field. func CapacityEQ(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldCapacity, v)) } // CapacityNEQ applies the NEQ predicate on the "capacity" field. func CapacityNEQ(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldCapacity, v)) } // CapacityIn applies the In predicate on the "capacity" field. func CapacityIn(vs ...int32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCapacity), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldCapacity, vs...)) } // CapacityNotIn applies the NotIn predicate on the "capacity" field. func CapacityNotIn(vs ...int32) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCapacity), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldCapacity, vs...)) } // CapacityGT applies the GT predicate on the "capacity" field. 
func CapacityGT(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldGT(FieldCapacity, v)) } // CapacityGTE applies the GTE predicate on the "capacity" field. func CapacityGTE(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldCapacity, v)) } // CapacityLT applies the LT predicate on the "capacity" field. func CapacityLT(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldLT(FieldCapacity, v)) } // CapacityLTE applies the LTE predicate on the "capacity" field. func CapacityLTE(v int32) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCapacity), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldCapacity, v)) } // CapacityIsNil applies the IsNil predicate on the "capacity" field. func CapacityIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCapacity))) - }) + return predicate.Alert(sql.FieldIsNull(FieldCapacity)) } // CapacityNotNil applies the NotNil predicate on the "capacity" field. func CapacityNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCapacity))) - }) + return predicate.Alert(sql.FieldNotNull(FieldCapacity)) } // LeakSpeedEQ applies the EQ predicate on the "leakSpeed" field. func LeakSpeedEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldLeakSpeed, v)) } // LeakSpeedNEQ applies the NEQ predicate on the "leakSpeed" field. 
func LeakSpeedNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldLeakSpeed, v)) } // LeakSpeedIn applies the In predicate on the "leakSpeed" field. func LeakSpeedIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldLeakSpeed), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldLeakSpeed, vs...)) } // LeakSpeedNotIn applies the NotIn predicate on the "leakSpeed" field. func LeakSpeedNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldLeakSpeed), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldLeakSpeed, vs...)) } // LeakSpeedGT applies the GT predicate on the "leakSpeed" field. func LeakSpeedGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldGT(FieldLeakSpeed, v)) } // LeakSpeedGTE applies the GTE predicate on the "leakSpeed" field. func LeakSpeedGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldLeakSpeed, v)) } // LeakSpeedLT applies the LT predicate on the "leakSpeed" field. func LeakSpeedLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldLT(FieldLeakSpeed, v)) } // LeakSpeedLTE applies the LTE predicate on the "leakSpeed" field. 
func LeakSpeedLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldLeakSpeed, v)) } // LeakSpeedContains applies the Contains predicate on the "leakSpeed" field. func LeakSpeedContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldContains(FieldLeakSpeed, v)) } // LeakSpeedHasPrefix applies the HasPrefix predicate on the "leakSpeed" field. func LeakSpeedHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldLeakSpeed, v)) } // LeakSpeedHasSuffix applies the HasSuffix predicate on the "leakSpeed" field. func LeakSpeedHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldLeakSpeed, v)) } // LeakSpeedIsNil applies the IsNil predicate on the "leakSpeed" field. func LeakSpeedIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldLeakSpeed))) - }) + return predicate.Alert(sql.FieldIsNull(FieldLeakSpeed)) } // LeakSpeedNotNil applies the NotNil predicate on the "leakSpeed" field. func LeakSpeedNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldLeakSpeed))) - }) + return predicate.Alert(sql.FieldNotNull(FieldLeakSpeed)) } // LeakSpeedEqualFold applies the EqualFold predicate on the "leakSpeed" field. 
func LeakSpeedEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldLeakSpeed, v)) } // LeakSpeedContainsFold applies the ContainsFold predicate on the "leakSpeed" field. func LeakSpeedContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldLeakSpeed), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldLeakSpeed, v)) } // ScenarioVersionEQ applies the EQ predicate on the "scenarioVersion" field. func ScenarioVersionEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenarioVersion, v)) } // ScenarioVersionNEQ applies the NEQ predicate on the "scenarioVersion" field. func ScenarioVersionNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldScenarioVersion, v)) } // ScenarioVersionIn applies the In predicate on the "scenarioVersion" field. func ScenarioVersionIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenarioVersion), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldScenarioVersion, vs...)) } // ScenarioVersionNotIn applies the NotIn predicate on the "scenarioVersion" field. func ScenarioVersionNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenarioVersion), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldScenarioVersion, vs...)) } // ScenarioVersionGT applies the GT predicate on the "scenarioVersion" field. 
func ScenarioVersionGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldGT(FieldScenarioVersion, v)) } // ScenarioVersionGTE applies the GTE predicate on the "scenarioVersion" field. func ScenarioVersionGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldScenarioVersion, v)) } // ScenarioVersionLT applies the LT predicate on the "scenarioVersion" field. func ScenarioVersionLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldLT(FieldScenarioVersion, v)) } // ScenarioVersionLTE applies the LTE predicate on the "scenarioVersion" field. func ScenarioVersionLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldScenarioVersion, v)) } // ScenarioVersionContains applies the Contains predicate on the "scenarioVersion" field. func ScenarioVersionContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldContains(FieldScenarioVersion, v)) } // ScenarioVersionHasPrefix applies the HasPrefix predicate on the "scenarioVersion" field. func ScenarioVersionHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldScenarioVersion, v)) } // ScenarioVersionHasSuffix applies the HasSuffix predicate on the "scenarioVersion" field. 
func ScenarioVersionHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldScenarioVersion, v)) } // ScenarioVersionIsNil applies the IsNil predicate on the "scenarioVersion" field. func ScenarioVersionIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldScenarioVersion))) - }) + return predicate.Alert(sql.FieldIsNull(FieldScenarioVersion)) } // ScenarioVersionNotNil applies the NotNil predicate on the "scenarioVersion" field. func ScenarioVersionNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldScenarioVersion))) - }) + return predicate.Alert(sql.FieldNotNull(FieldScenarioVersion)) } // ScenarioVersionEqualFold applies the EqualFold predicate on the "scenarioVersion" field. func ScenarioVersionEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldScenarioVersion, v)) } // ScenarioVersionContainsFold applies the ContainsFold predicate on the "scenarioVersion" field. func ScenarioVersionContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenarioVersion), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldScenarioVersion, v)) } // ScenarioHashEQ applies the EQ predicate on the "scenarioHash" field. func ScenarioHashEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldScenarioHash, v)) } // ScenarioHashNEQ applies the NEQ predicate on the "scenarioHash" field. 
func ScenarioHashNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldScenarioHash, v)) } // ScenarioHashIn applies the In predicate on the "scenarioHash" field. func ScenarioHashIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenarioHash), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldScenarioHash, vs...)) } // ScenarioHashNotIn applies the NotIn predicate on the "scenarioHash" field. func ScenarioHashNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenarioHash), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldScenarioHash, vs...)) } // ScenarioHashGT applies the GT predicate on the "scenarioHash" field. func ScenarioHashGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldGT(FieldScenarioHash, v)) } // ScenarioHashGTE applies the GTE predicate on the "scenarioHash" field. func ScenarioHashGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldScenarioHash, v)) } // ScenarioHashLT applies the LT predicate on the "scenarioHash" field. func ScenarioHashLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldLT(FieldScenarioHash, v)) } // ScenarioHashLTE applies the LTE predicate on the "scenarioHash" field. 
func ScenarioHashLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldScenarioHash, v)) } // ScenarioHashContains applies the Contains predicate on the "scenarioHash" field. func ScenarioHashContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldContains(FieldScenarioHash, v)) } // ScenarioHashHasPrefix applies the HasPrefix predicate on the "scenarioHash" field. func ScenarioHashHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldScenarioHash, v)) } // ScenarioHashHasSuffix applies the HasSuffix predicate on the "scenarioHash" field. func ScenarioHashHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldScenarioHash, v)) } // ScenarioHashIsNil applies the IsNil predicate on the "scenarioHash" field. func ScenarioHashIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldScenarioHash))) - }) + return predicate.Alert(sql.FieldIsNull(FieldScenarioHash)) } // ScenarioHashNotNil applies the NotNil predicate on the "scenarioHash" field. func ScenarioHashNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldScenarioHash))) - }) + return predicate.Alert(sql.FieldNotNull(FieldScenarioHash)) } // ScenarioHashEqualFold applies the EqualFold predicate on the "scenarioHash" field. 
func ScenarioHashEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldScenarioHash, v)) } // ScenarioHashContainsFold applies the ContainsFold predicate on the "scenarioHash" field. func ScenarioHashContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenarioHash), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldScenarioHash, v)) } // SimulatedEQ applies the EQ predicate on the "simulated" field. func SimulatedEQ(v bool) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSimulated), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldSimulated, v)) } // SimulatedNEQ applies the NEQ predicate on the "simulated" field. func SimulatedNEQ(v bool) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSimulated), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldSimulated, v)) } // UUIDEQ applies the EQ predicate on the "uuid" field. func UUIDEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldEQ(FieldUUID, v)) } // UUIDNEQ applies the NEQ predicate on the "uuid" field. func UUIDNEQ(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldNEQ(FieldUUID, v)) } // UUIDIn applies the In predicate on the "uuid" field. func UUIDIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUUID), v...)) - }) + return predicate.Alert(sql.FieldIn(FieldUUID, vs...)) } // UUIDNotIn applies the NotIn predicate on the "uuid" field. 
func UUIDNotIn(vs ...string) predicate.Alert { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUUID), v...)) - }) + return predicate.Alert(sql.FieldNotIn(FieldUUID, vs...)) } // UUIDGT applies the GT predicate on the "uuid" field. func UUIDGT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldGT(FieldUUID, v)) } // UUIDGTE applies the GTE predicate on the "uuid" field. func UUIDGTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldGTE(FieldUUID, v)) } // UUIDLT applies the LT predicate on the "uuid" field. func UUIDLT(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldLT(FieldUUID, v)) } // UUIDLTE applies the LTE predicate on the "uuid" field. func UUIDLTE(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldLTE(FieldUUID, v)) } // UUIDContains applies the Contains predicate on the "uuid" field. func UUIDContains(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldContains(FieldUUID, v)) } // UUIDHasPrefix applies the HasPrefix predicate on the "uuid" field. func UUIDHasPrefix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldHasPrefix(FieldUUID, v)) } // UUIDHasSuffix applies the HasSuffix predicate on the "uuid" field. 
func UUIDHasSuffix(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldHasSuffix(FieldUUID, v)) } // UUIDIsNil applies the IsNil predicate on the "uuid" field. func UUIDIsNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUUID))) - }) + return predicate.Alert(sql.FieldIsNull(FieldUUID)) } // UUIDNotNil applies the NotNil predicate on the "uuid" field. func UUIDNotNil() predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUUID))) - }) + return predicate.Alert(sql.FieldNotNull(FieldUUID)) } // UUIDEqualFold applies the EqualFold predicate on the "uuid" field. func UUIDEqualFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldEqualFold(FieldUUID, v)) } // UUIDContainsFold applies the ContainsFold predicate on the "uuid" field. func UUIDContainsFold(v string) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldUUID), v)) - }) + return predicate.Alert(sql.FieldContainsFold(FieldUUID, v)) } // HasOwner applies the HasEdge predicate on the "owner" edge. @@ -2453,7 +1625,6 @@ func HasOwner() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), ) sqlgraph.HasNeighbors(s, step) @@ -2463,11 +1634,7 @@ func HasOwner() predicate.Alert { // HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). 
func HasOwnerWith(preds ...predicate.Machine) predicate.Alert { return predicate.Alert(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), - ) + step := newOwnerStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -2481,7 +1648,6 @@ func HasDecisions() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(DecisionsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -2491,11 +1657,7 @@ func HasDecisions() predicate.Alert { // HasDecisionsWith applies the HasEdge predicate on the "decisions" edge with a given conditions (other predicates). func HasDecisionsWith(preds ...predicate.Decision) predicate.Alert { return predicate.Alert(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(DecisionsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, DecisionsTable, DecisionsColumn), - ) + step := newDecisionsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -2509,7 +1671,6 @@ func HasEvents() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(EventsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -2519,11 +1680,7 @@ func HasEvents() predicate.Alert { // HasEventsWith applies the HasEdge predicate on the "events" edge with a given conditions (other predicates). 
func HasEventsWith(preds ...predicate.Event) predicate.Alert { return predicate.Alert(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EventsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, EventsTable, EventsColumn), - ) + step := newEventsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -2537,7 +1694,6 @@ func HasMetas() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(MetasTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn), ) sqlgraph.HasNeighbors(s, step) @@ -2547,11 +1703,7 @@ func HasMetas() predicate.Alert { // HasMetasWith applies the HasEdge predicate on the "metas" edge with a given conditions (other predicates). func HasMetasWith(preds ...predicate.Meta) predicate.Alert { return predicate.Alert(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(MetasInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, MetasTable, MetasColumn), - ) + step := newMetasStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -2562,32 +1714,15 @@ func HasMetasWith(preds ...predicate.Meta) predicate.Alert { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Alert) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Alert(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.Alert) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Alert(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Alert) predicate.Alert { - return predicate.Alert(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Alert(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go index 42da5b137ba..c7498442c06 100644 --- a/pkg/database/ent/alert_create.go +++ b/pkg/database/ent/alert_create.go @@ -409,50 +409,8 @@ func (ac *AlertCreate) Mutation() *AlertMutation { // Save creates the Alert in the database. func (ac *AlertCreate) Save(ctx context.Context) (*Alert, error) { - var ( - err error - node *Alert - ) ac.defaults() - if len(ac.hooks) == 0 { - if err = ac.check(); err != nil { - return nil, err - } - node, err = ac.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = ac.check(); err != nil { - return nil, err - } - ac.mutation = mutation - if node, err = ac.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(ac.hooks) - 1; i >= 0; i-- { - if ac.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ac.hooks[i](mut) - } - v, err := mut.Mutate(ctx, ac.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Alert) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from AlertMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks) } // SaveX calls Save and panics 
if Save returns an error. @@ -525,6 +483,9 @@ func (ac *AlertCreate) check() error { } func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) { + if err := ac.check(); err != nil { + return nil, err + } _node, _spec := ac.createSpec() if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -534,202 +495,106 @@ func (ac *AlertCreate) sqlSave(ctx context.Context) (*Alert, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + ac.mutation.id = &_node.ID + ac.mutation.done = true return _node, nil } func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { var ( _node = &Alert{config: ac.config} - _spec = &sqlgraph.CreateSpec{ - Table: alert.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(alert.Table, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) ) if value, ok := ac.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldCreatedAt, - }) + _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := ac.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldUpdatedAt, - }) + _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := ac.mutation.Scenario(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenario, - }) + _spec.SetField(alert.FieldScenario, field.TypeString, value) _node.Scenario = value } if value, ok := ac.mutation.BucketId(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldBucketId, - }) + _spec.SetField(alert.FieldBucketId, field.TypeString, value) _node.BucketId = value 
} if value, ok := ac.mutation.Message(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldMessage, - }) + _spec.SetField(alert.FieldMessage, field.TypeString, value) _node.Message = value } if value, ok := ac.mutation.EventsCount(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) _node.EventsCount = value } if value, ok := ac.mutation.StartedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStartedAt, - }) + _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) _node.StartedAt = value } if value, ok := ac.mutation.StoppedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStoppedAt, - }) + _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) _node.StoppedAt = value } if value, ok := ac.mutation.SourceIp(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceIp, - }) + _spec.SetField(alert.FieldSourceIp, field.TypeString, value) _node.SourceIp = value } if value, ok := ac.mutation.SourceRange(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceRange, - }) + _spec.SetField(alert.FieldSourceRange, field.TypeString, value) _node.SourceRange = value } if value, ok := ac.mutation.SourceAsNumber(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsNumber, - }) + _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) _node.SourceAsNumber = value } if value, ok := ac.mutation.SourceAsName(); ok { - _spec.Fields = 
append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsName, - }) + _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) _node.SourceAsName = value } if value, ok := ac.mutation.SourceCountry(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceCountry, - }) + _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) _node.SourceCountry = value } if value, ok := ac.mutation.SourceLatitude(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) _node.SourceLatitude = value } if value, ok := ac.mutation.SourceLongitude(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) _node.SourceLongitude = value } if value, ok := ac.mutation.SourceScope(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceScope, - }) + _spec.SetField(alert.FieldSourceScope, field.TypeString, value) _node.SourceScope = value } if value, ok := ac.mutation.SourceValue(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceValue, - }) + _spec.SetField(alert.FieldSourceValue, field.TypeString, value) _node.SourceValue = value } if value, ok := ac.mutation.Capacity(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldCapacity, - }) + _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) _node.Capacity = value } if value, ok := ac.mutation.LeakSpeed(); ok { - _spec.Fields 
= append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldLeakSpeed, - }) + _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) _node.LeakSpeed = value } if value, ok := ac.mutation.ScenarioVersion(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioVersion, - }) + _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) _node.ScenarioVersion = value } if value, ok := ac.mutation.ScenarioHash(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioHash, - }) + _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) _node.ScenarioHash = value } if value, ok := ac.mutation.Simulated(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: alert.FieldSimulated, - }) + _spec.SetField(alert.FieldSimulated, field.TypeBool, value) _node.Simulated = value } if value, ok := ac.mutation.UUID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldUUID, - }) + _spec.SetField(alert.FieldUUID, field.TypeString, value) _node.UUID = value } if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 { @@ -740,10 +605,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -760,10 +622,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -779,10 +638,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -798,10 +654,7 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -815,11 +668,15 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { // AlertCreateBulk is the builder for creating many Alert entities in bulk. type AlertCreateBulk struct { config + err error builders []*AlertCreate } // Save creates the Alert entities in the database. 
func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) { + if acb.err != nil { + return nil, acb.err + } specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) nodes := make([]*Alert, len(acb.builders)) mutators := make([]Mutator, len(acb.builders)) @@ -836,8 +693,8 @@ func (acb *AlertCreateBulk) Save(ctx context.Context) ([]*Alert, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/alert_delete.go b/pkg/database/ent/alert_delete.go index 014bcc2e0c6..15b3a4c822a 100644 --- a/pkg/database/ent/alert_delete.go +++ b/pkg/database/ent/alert_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (ad *AlertDelete) Where(ps ...predicate.Alert) *AlertDelete { // Exec executes the deletion query and returns how many vertices were deleted. func (ad *AlertDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ad.hooks) == 0 { - affected, err = ad.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ad.mutation = mutation - affected, err = ad.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(ad.hooks) - 1; i >= 0; i-- { - if ad.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ad.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ad.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks) } // ExecX is like Exec, but panics if an error occurs. 
@@ -68,15 +40,7 @@ func (ad *AlertDelete) ExecX(ctx context.Context) int { } func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: alert.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(alert.Table, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) if ps := ad.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (ad *AlertDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + ad.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type AlertDeleteOne struct { ad *AlertDelete } +// Where appends a list predicates to the AlertDelete builder. +func (ado *AlertDeleteOne) Where(ps ...predicate.Alert) *AlertDeleteOne { + ado.ad.mutation.Where(ps...) + return ado +} + // Exec executes the deletion query. func (ado *AlertDeleteOne) Exec(ctx context.Context) error { n, err := ado.ad.Exec(ctx) @@ -111,5 +82,7 @@ func (ado *AlertDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ado *AlertDeleteOne) ExecX(ctx context.Context) { - ado.ad.ExecX(ctx) + if err := ado.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/alert_query.go b/pkg/database/ent/alert_query.go index 68789196d24..7eddb6ce024 100644 --- a/pkg/database/ent/alert_query.go +++ b/pkg/database/ent/alert_query.go @@ -22,11 +22,9 @@ import ( // AlertQuery is the builder for querying Alert entities. 
type AlertQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []alert.OrderOption + inters []Interceptor predicates []predicate.Alert withOwner *MachineQuery withDecisions *DecisionQuery @@ -44,34 +42,34 @@ func (aq *AlertQuery) Where(ps ...predicate.Alert) *AlertQuery { return aq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (aq *AlertQuery) Limit(limit int) *AlertQuery { - aq.limit = &limit + aq.ctx.Limit = &limit return aq } -// Offset adds an offset step to the query. +// Offset to start from. func (aq *AlertQuery) Offset(offset int) *AlertQuery { - aq.offset = &offset + aq.ctx.Offset = &offset return aq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (aq *AlertQuery) Unique(unique bool) *AlertQuery { - aq.unique = &unique + aq.ctx.Unique = &unique return aq } -// Order adds an order step to the query. -func (aq *AlertQuery) Order(o ...OrderFunc) *AlertQuery { +// Order specifies how the records should be ordered. +func (aq *AlertQuery) Order(o ...alert.OrderOption) *AlertQuery { aq.order = append(aq.order, o...) return aq } // QueryOwner chains the current query on the "owner" edge. func (aq *AlertQuery) QueryOwner() *MachineQuery { - query := &MachineQuery{config: aq.config} + query := (&MachineClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -93,7 +91,7 @@ func (aq *AlertQuery) QueryOwner() *MachineQuery { // QueryDecisions chains the current query on the "decisions" edge. 
func (aq *AlertQuery) QueryDecisions() *DecisionQuery { - query := &DecisionQuery{config: aq.config} + query := (&DecisionClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -115,7 +113,7 @@ func (aq *AlertQuery) QueryDecisions() *DecisionQuery { // QueryEvents chains the current query on the "events" edge. func (aq *AlertQuery) QueryEvents() *EventQuery { - query := &EventQuery{config: aq.config} + query := (&EventClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -137,7 +135,7 @@ func (aq *AlertQuery) QueryEvents() *EventQuery { // QueryMetas chains the current query on the "metas" edge. func (aq *AlertQuery) QueryMetas() *MetaQuery { - query := &MetaQuery{config: aq.config} + query := (&MetaClient{config: aq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := aq.prepareQuery(ctx); err != nil { return nil, err @@ -160,7 +158,7 @@ func (aq *AlertQuery) QueryMetas() *MetaQuery { // First returns the first Alert entity from the query. // Returns a *NotFoundError when no Alert was found. func (aq *AlertQuery) First(ctx context.Context) (*Alert, error) { - nodes, err := aq.Limit(1).All(ctx) + nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First")) if err != nil { return nil, err } @@ -183,7 +181,7 @@ func (aq *AlertQuery) FirstX(ctx context.Context) *Alert { // Returns a *NotFoundError when no Alert ID was found. 
func (aq *AlertQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = aq.Limit(1).IDs(ctx); err != nil { + if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -206,7 +204,7 @@ func (aq *AlertQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Alert entity is found. // Returns a *NotFoundError when no Alert entities are found. func (aq *AlertQuery) Only(ctx context.Context) (*Alert, error) { - nodes, err := aq.Limit(2).All(ctx) + nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only")) if err != nil { return nil, err } @@ -234,7 +232,7 @@ func (aq *AlertQuery) OnlyX(ctx context.Context) *Alert { // Returns a *NotFoundError when no entities are found. func (aq *AlertQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = aq.Limit(2).IDs(ctx); err != nil { + if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -259,10 +257,12 @@ func (aq *AlertQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Alerts. func (aq *AlertQuery) All(ctx context.Context) ([]*Alert, error) { + ctx = setContextOp(ctx, aq.ctx, "All") if err := aq.prepareQuery(ctx); err != nil { return nil, err } - return aq.sqlAll(ctx) + qr := querierAll[[]*Alert, *AlertQuery]() + return withInterceptors[[]*Alert](ctx, aq, qr, aq.inters) } // AllX is like All, but panics if an error occurs. @@ -275,9 +275,12 @@ func (aq *AlertQuery) AllX(ctx context.Context) []*Alert { } // IDs executes the query and returns a list of Alert IDs. 
-func (aq *AlertQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil { +func (aq *AlertQuery) IDs(ctx context.Context) (ids []int, err error) { + if aq.ctx.Unique == nil && aq.path != nil { + aq.Unique(true) + } + ctx = setContextOp(ctx, aq.ctx, "IDs") + if err = aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -294,10 +297,11 @@ func (aq *AlertQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (aq *AlertQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, aq.ctx, "Count") if err := aq.prepareQuery(ctx); err != nil { return 0, err } - return aq.sqlCount(ctx) + return withInterceptors[int](ctx, aq, querierCount[*AlertQuery](), aq.inters) } // CountX is like Count, but panics if an error occurs. @@ -311,10 +315,15 @@ func (aq *AlertQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (aq *AlertQuery) Exist(ctx context.Context) (bool, error) { - if err := aq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, aq.ctx, "Exist") + switch _, err := aq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return aq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -334,25 +343,24 @@ func (aq *AlertQuery) Clone() *AlertQuery { } return &AlertQuery{ config: aq.config, - limit: aq.limit, - offset: aq.offset, - order: append([]OrderFunc{}, aq.order...), + ctx: aq.ctx.Clone(), + order: append([]alert.OrderOption{}, aq.order...), + inters: append([]Interceptor{}, aq.inters...), predicates: append([]predicate.Alert{}, aq.predicates...), withOwner: aq.withOwner.Clone(), withDecisions: aq.withDecisions.Clone(), withEvents: aq.withEvents.Clone(), withMetas: aq.withMetas.Clone(), // clone intermediate query. - sql: aq.sql.Clone(), - path: aq.path, - unique: aq.unique, + sql: aq.sql.Clone(), + path: aq.path, } } // WithOwner tells the query-builder to eager-load the nodes that are connected to // the "owner" edge. The optional arguments are used to configure the query builder of the edge. func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery { - query := &MachineQuery{config: aq.config} + query := (&MachineClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -363,7 +371,7 @@ func (aq *AlertQuery) WithOwner(opts ...func(*MachineQuery)) *AlertQuery { // WithDecisions tells the query-builder to eager-load the nodes that are connected to // the "decisions" edge. The optional arguments are used to configure the query builder of the edge. func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery { - query := &DecisionQuery{config: aq.config} + query := (&DecisionClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -374,7 +382,7 @@ func (aq *AlertQuery) WithDecisions(opts ...func(*DecisionQuery)) *AlertQuery { // WithEvents tells the query-builder to eager-load the nodes that are connected to // the "events" edge. The optional arguments are used to configure the query builder of the edge. 
func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery { - query := &EventQuery{config: aq.config} + query := (&EventClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -385,7 +393,7 @@ func (aq *AlertQuery) WithEvents(opts ...func(*EventQuery)) *AlertQuery { // WithMetas tells the query-builder to eager-load the nodes that are connected to // the "metas" edge. The optional arguments are used to configure the query builder of the edge. func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery { - query := &MetaQuery{config: aq.config} + query := (&MetaClient{config: aq.config}).Query() for _, opt := range opts { opt(query) } @@ -408,16 +416,11 @@ func (aq *AlertQuery) WithMetas(opts ...func(*MetaQuery)) *AlertQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy { - grbuild := &AlertGroupBy{config: aq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := aq.prepareQuery(ctx); err != nil { - return nil, err - } - return aq.sqlQuery(ctx), nil - } + aq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AlertGroupBy{build: aq} + grbuild.flds = &aq.ctx.Fields grbuild.label = alert.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -434,15 +437,30 @@ func (aq *AlertQuery) GroupBy(field string, fields ...string) *AlertGroupBy { // Select(alert.FieldCreatedAt). // Scan(ctx, &v) func (aq *AlertQuery) Select(fields ...string) *AlertSelect { - aq.fields = append(aq.fields, fields...) - selbuild := &AlertSelect{AlertQuery: aq} - selbuild.label = alert.Label - selbuild.flds, selbuild.scan = &aq.fields, selbuild.Scan - return selbuild + aq.ctx.Fields = append(aq.ctx.Fields, fields...) 
+ sbuild := &AlertSelect{AlertQuery: aq} + sbuild.label = alert.Label + sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AlertSelect configured with the given aggregations. +func (aq *AlertQuery) Aggregate(fns ...AggregateFunc) *AlertSelect { + return aq.Select().Aggregate(fns...) } func (aq *AlertQuery) prepareQuery(ctx context.Context) error { - for _, f := range aq.fields { + for _, inter := range aq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, aq); err != nil { + return err + } + } + } + for _, f := range aq.ctx.Fields { if !alert.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -536,6 +554,9 @@ func (aq *AlertQuery) loadOwner(ctx context.Context, query *MachineQuery, nodes } nodeids[fk] = append(nodeids[fk], nodes[i]) } + if len(ids) == 0 { + return nil + } query.Where(machine.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { @@ -562,8 +583,11 @@ func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, n init(nodes[i]) } } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(decision.FieldAlertDecisions) + } query.Where(predicate.Decision(func(s *sql.Selector) { - s.Where(sql.InValues(alert.DecisionsColumn, fks...)) + s.Where(sql.InValues(s.C(alert.DecisionsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -573,7 +597,7 @@ func (aq *AlertQuery) loadDecisions(ctx context.Context, query *DecisionQuery, n fk := n.AlertDecisions node, ok := nodeids[fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "alert_decisions" returned %v for node %v`, fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "alert_decisions" returned %v for node %v`, fk, n.ID) } assign(node, n) } @@ -589,8 +613,11 @@ func (aq *AlertQuery) loadEvents(ctx 
context.Context, query *EventQuery, nodes [ init(nodes[i]) } } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(event.FieldAlertEvents) + } query.Where(predicate.Event(func(s *sql.Selector) { - s.Where(sql.InValues(alert.EventsColumn, fks...)) + s.Where(sql.InValues(s.C(alert.EventsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -600,7 +627,7 @@ func (aq *AlertQuery) loadEvents(ctx context.Context, query *EventQuery, nodes [ fk := n.AlertEvents node, ok := nodeids[fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "alert_events" returned %v for node %v`, fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "alert_events" returned %v for node %v`, fk, n.ID) } assign(node, n) } @@ -616,8 +643,11 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []* init(nodes[i]) } } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(meta.FieldAlertMetas) + } query.Where(predicate.Meta(func(s *sql.Selector) { - s.Where(sql.InValues(alert.MetasColumn, fks...)) + s.Where(sql.InValues(s.C(alert.MetasColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -627,7 +657,7 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []* fk := n.AlertMetas node, ok := nodeids[fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "alert_metas" returned %v for node %v`, fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "alert_metas" returned %v for node %v`, fk, n.ID) } assign(node, n) } @@ -636,41 +666,22 @@ func (aq *AlertQuery) loadMetas(ctx context.Context, query *MetaQuery, nodes []* func (aq *AlertQuery) sqlCount(ctx context.Context) (int, error) { _spec := aq.querySpec() - _spec.Node.Columns = aq.fields - if len(aq.fields) > 0 { - _spec.Unique = aq.unique != nil && *aq.unique + _spec.Node.Columns = aq.ctx.Fields + if len(aq.ctx.Fields) > 0 { + _spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique } return sqlgraph.CountNodes(ctx, 
aq.driver, _spec) } -func (aq *AlertQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := aq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: alert.Table, - Columns: alert.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - }, - From: aq.sql, - Unique: true, - } - if unique := aq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(alert.Table, alert.Columns, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) + _spec.From = aq.sql + if unique := aq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if aq.path != nil { + _spec.Unique = true } - if fields := aq.fields; len(fields) > 0 { + if fields := aq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, alert.FieldID) for i := range fields { @@ -686,10 +697,10 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := aq.limit; limit != nil { + if limit := aq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := aq.offset; offset != nil { + if offset := aq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := aq.order; len(ps) > 0 { @@ -705,7 +716,7 @@ func (aq *AlertQuery) querySpec() *sqlgraph.QuerySpec { func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(aq.driver.Dialect()) t1 := builder.Table(alert.Table) - columns := aq.fields + columns := aq.ctx.Fields if len(columns) == 0 { columns = alert.Columns } @@ -714,7 +725,7 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = aq.sql selector.Select(selector.Columns(columns...)...) 
} - if aq.unique != nil && *aq.unique { + if aq.ctx.Unique != nil && *aq.ctx.Unique { selector.Distinct() } for _, p := range aq.predicates { @@ -723,12 +734,12 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range aq.order { p(selector) } - if offset := aq.offset; offset != nil { + if offset := aq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := aq.limit; limit != nil { + if limit := aq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -736,13 +747,8 @@ func (aq *AlertQuery) sqlQuery(ctx context.Context) *sql.Selector { // AlertGroupBy is the group-by builder for Alert entities. type AlertGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *AlertQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -751,74 +757,77 @@ func (agb *AlertGroupBy) Aggregate(fns ...AggregateFunc) *AlertGroupBy { return agb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. 
func (agb *AlertGroupBy) Scan(ctx context.Context, v any) error { - query, err := agb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, agb.build.ctx, "GroupBy") + if err := agb.build.prepareQuery(ctx); err != nil { return err } - agb.sql = query - return agb.sqlScan(ctx, v) + return scanWithInterceptors[*AlertQuery, *AlertGroupBy](ctx, agb.build, agb, agb.build.inters, v) } -func (agb *AlertGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range agb.fields { - if !alert.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (agb *AlertGroupBy) sqlScan(ctx context.Context, root *AlertQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(agb.fns)) + for _, fn := range agb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*agb.flds)+len(agb.fns)) + for _, f := range *agb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := agb.sqlQuery() + selector.GroupBy(selector.Columns(*agb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := agb.driver.Query(ctx, query, args, rows); err != nil { + if err := agb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (agb *AlertGroupBy) sqlQuery() *sql.Selector { - selector := agb.sql.Select() - aggregation := make([]string, 0, len(agb.fns)) - for _, fn := range agb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(agb.fields)+len(agb.fns)) - for _, f := range agb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(agb.fields...)...) -} - // AlertSelect is the builder for selecting fields of Alert entities. type AlertSelect struct { *AlertQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (as *AlertSelect) Aggregate(fns ...AggregateFunc) *AlertSelect { + as.fns = append(as.fns, fns...) + return as } // Scan applies the selector query and scans the result into the given value. func (as *AlertSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, as.ctx, "Select") if err := as.prepareQuery(ctx); err != nil { return err } - as.sql = as.AlertQuery.sqlQuery(ctx) - return as.sqlScan(ctx, v) + return scanWithInterceptors[*AlertQuery, *AlertSelect](ctx, as.AlertQuery, as, as.inters, v) } -func (as *AlertSelect) sqlScan(ctx context.Context, v any) error { +func (as *AlertSelect) sqlScan(ctx context.Context, root *AlertQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(as.fns)) + for _, fn := range as.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*as.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } rows := &sql.Rows{} - query, args := as.sql.Query() + query, args := selector.Query() if err := as.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index aaa12ef20a3..0e41ba18109 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -624,35 +624,8 @@ func (au *AlertUpdate) RemoveMetas(m ...*Meta) *AlertUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (au *AlertUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) au.defaults() - if len(au.hooks) == 0 { - affected, err = au.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - au.mutation = mutation - affected, err = au.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(au.hooks) - 1; i >= 0; i-- { - if au.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = au.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, au.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, au.sqlSave, au.mutation, au.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -690,16 +663,7 @@ func (au *AlertUpdate) defaults() { } func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: alert.Table, - Columns: alert.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(alert.Table, alert.Columns, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) if ps := au.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -708,319 +672,148 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := au.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldCreatedAt, - }) + _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) } if au.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldCreatedAt, - }) + _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) } if value, ok := au.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldUpdatedAt, - }) + _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } if au.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldUpdatedAt, - }) + _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) } if value, ok := au.mutation.Scenario(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenario, - }) + _spec.SetField(alert.FieldScenario, field.TypeString, value) } if value, ok := au.mutation.BucketId(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: 
field.TypeString, - Value: value, - Column: alert.FieldBucketId, - }) + _spec.SetField(alert.FieldBucketId, field.TypeString, value) } if au.mutation.BucketIdCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldBucketId, - }) + _spec.ClearField(alert.FieldBucketId, field.TypeString) } if value, ok := au.mutation.Message(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldMessage, - }) + _spec.SetField(alert.FieldMessage, field.TypeString, value) } if au.mutation.MessageCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldMessage, - }) + _spec.ClearField(alert.FieldMessage, field.TypeString) } if value, ok := au.mutation.EventsCount(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) } if value, ok := au.mutation.AddedEventsCount(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.AddField(alert.FieldEventsCount, field.TypeInt32, value) } if au.mutation.EventsCountCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Column: alert.FieldEventsCount, - }) + _spec.ClearField(alert.FieldEventsCount, field.TypeInt32) } if value, ok := au.mutation.StartedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStartedAt, - }) + _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) } if au.mutation.StartedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: 
alert.FieldStartedAt, - }) + _spec.ClearField(alert.FieldStartedAt, field.TypeTime) } if value, ok := au.mutation.StoppedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStoppedAt, - }) + _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) } if au.mutation.StoppedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldStoppedAt, - }) + _spec.ClearField(alert.FieldStoppedAt, field.TypeTime) } if value, ok := au.mutation.SourceIp(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceIp, - }) + _spec.SetField(alert.FieldSourceIp, field.TypeString, value) } if au.mutation.SourceIpCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceIp, - }) + _spec.ClearField(alert.FieldSourceIp, field.TypeString) } if value, ok := au.mutation.SourceRange(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceRange, - }) + _spec.SetField(alert.FieldSourceRange, field.TypeString, value) } if au.mutation.SourceRangeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceRange, - }) + _spec.ClearField(alert.FieldSourceRange, field.TypeString) } if value, ok := au.mutation.SourceAsNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsNumber, - }) + _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) } if au.mutation.SourceAsNumberCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceAsNumber, 
- }) + _spec.ClearField(alert.FieldSourceAsNumber, field.TypeString) } if value, ok := au.mutation.SourceAsName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsName, - }) + _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) } if au.mutation.SourceAsNameCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceAsName, - }) + _spec.ClearField(alert.FieldSourceAsName, field.TypeString) } if value, ok := au.mutation.SourceCountry(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceCountry, - }) + _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) } if au.mutation.SourceCountryCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceCountry, - }) + _spec.ClearField(alert.FieldSourceCountry, field.TypeString) } if value, ok := au.mutation.SourceLatitude(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) } if value, ok := au.mutation.AddedSourceLatitude(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.AddField(alert.FieldSourceLatitude, field.TypeFloat32, value) } if au.mutation.SourceLatitudeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Column: alert.FieldSourceLatitude, - }) + _spec.ClearField(alert.FieldSourceLatitude, field.TypeFloat32) } if value, ok := au.mutation.SourceLongitude(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, 
&sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) } if value, ok := au.mutation.AddedSourceLongitude(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.AddField(alert.FieldSourceLongitude, field.TypeFloat32, value) } if au.mutation.SourceLongitudeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Column: alert.FieldSourceLongitude, - }) + _spec.ClearField(alert.FieldSourceLongitude, field.TypeFloat32) } if value, ok := au.mutation.SourceScope(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceScope, - }) + _spec.SetField(alert.FieldSourceScope, field.TypeString, value) } if au.mutation.SourceScopeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceScope, - }) + _spec.ClearField(alert.FieldSourceScope, field.TypeString) } if value, ok := au.mutation.SourceValue(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceValue, - }) + _spec.SetField(alert.FieldSourceValue, field.TypeString, value) } if au.mutation.SourceValueCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceValue, - }) + _spec.ClearField(alert.FieldSourceValue, field.TypeString) } if value, ok := au.mutation.Capacity(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldCapacity, - }) + _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) } if value, ok := 
au.mutation.AddedCapacity(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldCapacity, - }) + _spec.AddField(alert.FieldCapacity, field.TypeInt32, value) } if au.mutation.CapacityCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Column: alert.FieldCapacity, - }) + _spec.ClearField(alert.FieldCapacity, field.TypeInt32) } if value, ok := au.mutation.LeakSpeed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldLeakSpeed, - }) + _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) } if au.mutation.LeakSpeedCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldLeakSpeed, - }) + _spec.ClearField(alert.FieldLeakSpeed, field.TypeString) } if value, ok := au.mutation.ScenarioVersion(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioVersion, - }) + _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) } if au.mutation.ScenarioVersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldScenarioVersion, - }) + _spec.ClearField(alert.FieldScenarioVersion, field.TypeString) } if value, ok := au.mutation.ScenarioHash(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioHash, - }) + _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) } if au.mutation.ScenarioHashCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldScenarioHash, - }) + _spec.ClearField(alert.FieldScenarioHash, field.TypeString) } if value, 
ok := au.mutation.Simulated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: alert.FieldSimulated, - }) + _spec.SetField(alert.FieldSimulated, field.TypeBool, value) } if value, ok := au.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldUUID, - }) + _spec.SetField(alert.FieldUUID, field.TypeString, value) } if au.mutation.UUIDCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldUUID, - }) + _spec.ClearField(alert.FieldUUID, field.TypeString) } if au.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1030,10 +823,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1046,10 +836,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1065,10 +852,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1081,10 +865,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: 
[]string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1100,10 +881,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1119,10 +897,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1135,10 +910,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1154,10 +926,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1173,10 +942,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: 
sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1189,10 +955,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1208,10 +971,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1227,6 +987,7 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + au.mutation.done = true return n, nil } @@ -1828,6 +1589,12 @@ func (auo *AlertUpdateOne) RemoveMetas(m ...*Meta) *AlertUpdateOne { return auo.RemoveMetaIDs(ids...) } +// Where appends a list predicates to the AlertUpdate builder. +func (auo *AlertUpdateOne) Where(ps ...predicate.Alert) *AlertUpdateOne { + auo.mutation.Where(ps...) + return auo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (auo *AlertUpdateOne) Select(field string, fields ...string) *AlertUpdateOne { @@ -1837,41 +1604,8 @@ func (auo *AlertUpdateOne) Select(field string, fields ...string) *AlertUpdateOn // Save executes the query and returns the updated Alert entity. 
func (auo *AlertUpdateOne) Save(ctx context.Context) (*Alert, error) { - var ( - err error - node *Alert - ) auo.defaults() - if len(auo.hooks) == 0 { - node, err = auo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - auo.mutation = mutation - node, err = auo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(auo.hooks) - 1; i >= 0; i-- { - if auo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = auo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, auo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Alert) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from AlertMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -1909,16 +1643,7 @@ func (auo *AlertUpdateOne) defaults() { } func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: alert.Table, - Columns: alert.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(alert.Table, alert.Columns, sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt)) id, ok := auo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Alert.id" for update`)} @@ -1944,319 +1669,148 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error } } if value, ok := auo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldCreatedAt, - }) + _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) } if auo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldCreatedAt, - }) + _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) } if value, ok := auo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldUpdatedAt, - }) + _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } if auo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldUpdatedAt, - }) + _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) } if value, ok := auo.mutation.Scenario(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenario, - }) + _spec.SetField(alert.FieldScenario, field.TypeString, value) } if value, ok := auo.mutation.BucketId(); ok { - _spec.Fields.Set = 
append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldBucketId, - }) + _spec.SetField(alert.FieldBucketId, field.TypeString, value) } if auo.mutation.BucketIdCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldBucketId, - }) + _spec.ClearField(alert.FieldBucketId, field.TypeString) } if value, ok := auo.mutation.Message(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldMessage, - }) + _spec.SetField(alert.FieldMessage, field.TypeString, value) } if auo.mutation.MessageCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldMessage, - }) + _spec.ClearField(alert.FieldMessage, field.TypeString) } if value, ok := auo.mutation.EventsCount(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) } if value, ok := auo.mutation.AddedEventsCount(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldEventsCount, - }) + _spec.AddField(alert.FieldEventsCount, field.TypeInt32, value) } if auo.mutation.EventsCountCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Column: alert.FieldEventsCount, - }) + _spec.ClearField(alert.FieldEventsCount, field.TypeInt32) } if value, ok := auo.mutation.StartedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStartedAt, - }) + _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) } if auo.mutation.StartedAtCleared() { - _spec.Fields.Clear = 
append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldStartedAt, - }) + _spec.ClearField(alert.FieldStartedAt, field.TypeTime) } if value, ok := auo.mutation.StoppedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: alert.FieldStoppedAt, - }) + _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) } if auo.mutation.StoppedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: alert.FieldStoppedAt, - }) + _spec.ClearField(alert.FieldStoppedAt, field.TypeTime) } if value, ok := auo.mutation.SourceIp(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceIp, - }) + _spec.SetField(alert.FieldSourceIp, field.TypeString, value) } if auo.mutation.SourceIpCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceIp, - }) + _spec.ClearField(alert.FieldSourceIp, field.TypeString) } if value, ok := auo.mutation.SourceRange(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceRange, - }) + _spec.SetField(alert.FieldSourceRange, field.TypeString, value) } if auo.mutation.SourceRangeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceRange, - }) + _spec.ClearField(alert.FieldSourceRange, field.TypeString) } if value, ok := auo.mutation.SourceAsNumber(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsNumber, - }) + _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) } if auo.mutation.SourceAsNumberCleared() { - _spec.Fields.Clear = 
append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceAsNumber, - }) + _spec.ClearField(alert.FieldSourceAsNumber, field.TypeString) } if value, ok := auo.mutation.SourceAsName(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceAsName, - }) + _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) } if auo.mutation.SourceAsNameCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceAsName, - }) + _spec.ClearField(alert.FieldSourceAsName, field.TypeString) } if value, ok := auo.mutation.SourceCountry(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceCountry, - }) + _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) } if auo.mutation.SourceCountryCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceCountry, - }) + _spec.ClearField(alert.FieldSourceCountry, field.TypeString) } if value, ok := auo.mutation.SourceLatitude(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) } if value, ok := auo.mutation.AddedSourceLatitude(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLatitude, - }) + _spec.AddField(alert.FieldSourceLatitude, field.TypeFloat32, value) } if auo.mutation.SourceLatitudeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Column: alert.FieldSourceLatitude, - }) + _spec.ClearField(alert.FieldSourceLatitude, 
field.TypeFloat32) } if value, ok := auo.mutation.SourceLongitude(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) } if value, ok := auo.mutation.AddedSourceLongitude(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Value: value, - Column: alert.FieldSourceLongitude, - }) + _spec.AddField(alert.FieldSourceLongitude, field.TypeFloat32, value) } if auo.mutation.SourceLongitudeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeFloat32, - Column: alert.FieldSourceLongitude, - }) + _spec.ClearField(alert.FieldSourceLongitude, field.TypeFloat32) } if value, ok := auo.mutation.SourceScope(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceScope, - }) + _spec.SetField(alert.FieldSourceScope, field.TypeString, value) } if auo.mutation.SourceScopeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceScope, - }) + _spec.ClearField(alert.FieldSourceScope, field.TypeString) } if value, ok := auo.mutation.SourceValue(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldSourceValue, - }) + _spec.SetField(alert.FieldSourceValue, field.TypeString, value) } if auo.mutation.SourceValueCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldSourceValue, - }) + _spec.ClearField(alert.FieldSourceValue, field.TypeString) } if value, ok := auo.mutation.Capacity(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - 
Column: alert.FieldCapacity, - }) + _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) } if value, ok := auo.mutation.AddedCapacity(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Value: value, - Column: alert.FieldCapacity, - }) + _spec.AddField(alert.FieldCapacity, field.TypeInt32, value) } if auo.mutation.CapacityCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt32, - Column: alert.FieldCapacity, - }) + _spec.ClearField(alert.FieldCapacity, field.TypeInt32) } if value, ok := auo.mutation.LeakSpeed(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldLeakSpeed, - }) + _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) } if auo.mutation.LeakSpeedCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldLeakSpeed, - }) + _spec.ClearField(alert.FieldLeakSpeed, field.TypeString) } if value, ok := auo.mutation.ScenarioVersion(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioVersion, - }) + _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) } if auo.mutation.ScenarioVersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldScenarioVersion, - }) + _spec.ClearField(alert.FieldScenarioVersion, field.TypeString) } if value, ok := auo.mutation.ScenarioHash(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldScenarioHash, - }) + _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) } if auo.mutation.ScenarioHashCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: 
field.TypeString, - Column: alert.FieldScenarioHash, - }) + _spec.ClearField(alert.FieldScenarioHash, field.TypeString) } if value, ok := auo.mutation.Simulated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: alert.FieldSimulated, - }) + _spec.SetField(alert.FieldSimulated, field.TypeBool, value) } if value, ok := auo.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: alert.FieldUUID, - }) + _spec.SetField(alert.FieldUUID, field.TypeString, value) } if auo.mutation.UUIDCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: alert.FieldUUID, - }) + _spec.ClearField(alert.FieldUUID, field.TypeString) } if auo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -2266,10 +1820,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2282,10 +1833,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2301,10 +1849,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, 
field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2317,10 +1862,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2336,10 +1878,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.DecisionsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2355,10 +1894,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2371,10 +1907,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2390,10 +1923,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.EventsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2409,10 +1939,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx 
context.Context) (_node *Alert, err error Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -2425,10 +1952,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2444,10 +1968,7 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error Columns: []string{alert.MetasColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -2466,5 +1987,6 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error } return nil, err } + auo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 068fc6c6713..fe189c3817e 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" ) @@ -37,7 +38,8 @@ type Bouncer struct { // LastPull holds the value of the "last_pull" field. LastPull time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. - AuthType string `json:"auth_type"` + AuthType string `json:"auth_type"` + selectValues sql.SelectValues } // scanValues returns the types for scanning values from sql.Rows. 
@@ -54,7 +56,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Bouncer", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -142,16 +144,24 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.AuthType = value.String } + default: + b.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Bouncer. +// This includes values selected through modifiers, order, etc. +func (b *Bouncer) Value(name string) (ent.Value, error) { + return b.selectValues.Get(name) +} + // Update returns a builder for updating this Bouncer. // Note that you need to call Bouncer.Unwrap() before calling this method if this Bouncer // was returned from a transaction, and the transaction was committed or rolled back. func (b *Bouncer) Update() *BouncerUpdateOne { - return (&BouncerClient{config: b.config}).UpdateOne(b) + return NewBouncerClient(b.config).UpdateOne(b) } // Unwrap unwraps the Bouncer entity that was returned from a transaction after it was closed, @@ -212,9 +222,3 @@ func (b *Bouncer) String() string { // Bouncers is a parsable slice of Bouncer. type Bouncers []*Bouncer - -func (b Bouncers) config(cfg config) { - for _i := range b { - b[_i].config = cfg - } -} diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index b688594ece4..24d230d3b54 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -4,6 +4,8 @@ package bouncer import ( "time" + + "entgo.io/ent/dialect/sql" ) const ( @@ -81,3 +83,66 @@ var ( // DefaultAuthType holds the default value on creation for the "auth_type" field. 
DefaultAuthType string ) + +// OrderOption defines the ordering options for the Bouncer queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByAPIKey orders the results by the api_key field. +func ByAPIKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKey, opts...).ToFunc() +} + +// ByRevoked orders the results by the revoked field. +func ByRevoked(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRevoked, opts...).ToFunc() +} + +// ByIPAddress orders the results by the ip_address field. +func ByIPAddress(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIPAddress, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByVersion orders the results by the version field. +func ByVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldVersion, opts...).ToFunc() +} + +// ByUntil orders the results by the until field. +func ByUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUntil, opts...).ToFunc() +} + +// ByLastPull orders the results by the last_pull field. 
+func ByLastPull(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastPull, opts...).ToFunc() +} + +// ByAuthType orders the results by the auth_type field. +func ByAuthType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAuthType, opts...).ToFunc() +} diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index 03a543f6d4f..5bf721dbf51 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -11,1128 +11,735 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. 
func IDGT(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldUpdatedAt, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. func Name(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldName, v)) } // APIKey applies equality check predicate on the "api_key" field. It's identical to APIKeyEQ. 
func APIKey(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldAPIKey, v)) } // Revoked applies equality check predicate on the "revoked" field. It's identical to RevokedEQ. func Revoked(v bool) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevoked), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldRevoked, v)) } // IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ. func IPAddress(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldIPAddress, v)) } // Type applies equality check predicate on the "type" field. It's identical to TypeEQ. func Type(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldType, v)) } // Version applies equality check predicate on the "version" field. It's identical to VersionEQ. func Version(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldVersion, v)) } // Until applies equality check predicate on the "until" field. It's identical to UntilEQ. func Until(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldUntil, v)) } // LastPull applies equality check predicate on the "last_pull" field. It's identical to LastPullEQ. 
func LastPull(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldLastPull, v)) } // AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. func AuthType(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. 
func CreatedAtGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. func CreatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
func UpdatedAtNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. func UpdatedAtGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. 
func UpdatedAtLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldUpdatedAt)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. func NameNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. 
func NameGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. func NameHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. 
func NameEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldName, v)) } // APIKeyEQ applies the EQ predicate on the "api_key" field. func APIKeyEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldAPIKey, v)) } // APIKeyNEQ applies the NEQ predicate on the "api_key" field. func APIKeyNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldAPIKey, v)) } // APIKeyIn applies the In predicate on the "api_key" field. func APIKeyIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAPIKey), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldAPIKey, vs...)) } // APIKeyNotIn applies the NotIn predicate on the "api_key" field. func APIKeyNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAPIKey), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldAPIKey, vs...)) } // APIKeyGT applies the GT predicate on the "api_key" field. 
func APIKeyGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldAPIKey, v)) } // APIKeyGTE applies the GTE predicate on the "api_key" field. func APIKeyGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldAPIKey, v)) } // APIKeyLT applies the LT predicate on the "api_key" field. func APIKeyLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldAPIKey, v)) } // APIKeyLTE applies the LTE predicate on the "api_key" field. func APIKeyLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldAPIKey, v)) } // APIKeyContains applies the Contains predicate on the "api_key" field. func APIKeyContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldAPIKey, v)) } // APIKeyHasPrefix applies the HasPrefix predicate on the "api_key" field. func APIKeyHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldAPIKey, v)) } // APIKeyHasSuffix applies the HasSuffix predicate on the "api_key" field. func APIKeyHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldAPIKey, v)) } // APIKeyEqualFold applies the EqualFold predicate on the "api_key" field. 
func APIKeyEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldAPIKey, v)) } // APIKeyContainsFold applies the ContainsFold predicate on the "api_key" field. func APIKeyContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAPIKey), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldAPIKey, v)) } // RevokedEQ applies the EQ predicate on the "revoked" field. func RevokedEQ(v bool) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldRevoked), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldRevoked, v)) } // RevokedNEQ applies the NEQ predicate on the "revoked" field. func RevokedNEQ(v bool) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldRevoked), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldRevoked, v)) } // IPAddressEQ applies the EQ predicate on the "ip_address" field. func IPAddressEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldIPAddress, v)) } // IPAddressNEQ applies the NEQ predicate on the "ip_address" field. func IPAddressNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldIPAddress, v)) } // IPAddressIn applies the In predicate on the "ip_address" field. 
func IPAddressIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldIPAddress), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldIPAddress, vs...)) } // IPAddressNotIn applies the NotIn predicate on the "ip_address" field. func IPAddressNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldIPAddress), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldIPAddress, vs...)) } // IPAddressGT applies the GT predicate on the "ip_address" field. func IPAddressGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldIPAddress, v)) } // IPAddressGTE applies the GTE predicate on the "ip_address" field. func IPAddressGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldIPAddress, v)) } // IPAddressLT applies the LT predicate on the "ip_address" field. func IPAddressLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldIPAddress, v)) } // IPAddressLTE applies the LTE predicate on the "ip_address" field. func IPAddressLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldIPAddress, v)) } // IPAddressContains applies the Contains predicate on the "ip_address" field. 
func IPAddressContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldIPAddress, v)) } // IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field. func IPAddressHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldIPAddress, v)) } // IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field. func IPAddressHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldIPAddress, v)) } // IPAddressIsNil applies the IsNil predicate on the "ip_address" field. func IPAddressIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldIPAddress))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldIPAddress)) } // IPAddressNotNil applies the NotNil predicate on the "ip_address" field. func IPAddressNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldIPAddress))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldIPAddress)) } // IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field. func IPAddressEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldIPAddress, v)) } // IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field. 
func IPAddressContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldIPAddress), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldIPAddress, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. func TypeNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldType, vs...)) } // TypeGT applies the GT predicate on the "type" field. func TypeGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldType, v)) } // TypeGTE applies the GTE predicate on the "type" field. func TypeGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldType, v)) } // TypeLT applies the LT predicate on the "type" field. 
func TypeLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldType, v)) } // TypeLTE applies the LTE predicate on the "type" field. func TypeLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldType, v)) } // TypeContains applies the Contains predicate on the "type" field. func TypeContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldType, v)) } // TypeHasPrefix applies the HasPrefix predicate on the "type" field. func TypeHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldType, v)) } // TypeHasSuffix applies the HasSuffix predicate on the "type" field. func TypeHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldType, v)) } // TypeIsNil applies the IsNil predicate on the "type" field. func TypeIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldType))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldType)) } // TypeNotNil applies the NotNil predicate on the "type" field. func TypeNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldType))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldType)) } // TypeEqualFold applies the EqualFold predicate on the "type" field. 
func TypeEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldType, v)) } // TypeContainsFold applies the ContainsFold predicate on the "type" field. func TypeContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldType), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldType, v)) } // VersionEQ applies the EQ predicate on the "version" field. func VersionEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldVersion, v)) } // VersionNEQ applies the NEQ predicate on the "version" field. func VersionNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldVersion, v)) } // VersionIn applies the In predicate on the "version" field. func VersionIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldVersion), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldVersion, vs...)) } // VersionNotIn applies the NotIn predicate on the "version" field. func VersionNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldVersion), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldVersion, vs...)) } // VersionGT applies the GT predicate on the "version" field. 
func VersionGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldVersion, v)) } // VersionGTE applies the GTE predicate on the "version" field. func VersionGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldVersion, v)) } // VersionLT applies the LT predicate on the "version" field. func VersionLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldVersion, v)) } // VersionLTE applies the LTE predicate on the "version" field. func VersionLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldVersion, v)) } // VersionContains applies the Contains predicate on the "version" field. func VersionContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldVersion, v)) } // VersionHasPrefix applies the HasPrefix predicate on the "version" field. func VersionHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldVersion, v)) } // VersionHasSuffix applies the HasSuffix predicate on the "version" field. func VersionHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldVersion, v)) } // VersionIsNil applies the IsNil predicate on the "version" field. 
func VersionIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldVersion))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldVersion)) } // VersionNotNil applies the NotNil predicate on the "version" field. func VersionNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldVersion))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldVersion)) } // VersionEqualFold applies the EqualFold predicate on the "version" field. func VersionEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldVersion, v)) } // VersionContainsFold applies the ContainsFold predicate on the "version" field. func VersionContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldVersion), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldVersion, v)) } // UntilEQ applies the EQ predicate on the "until" field. func UntilEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldUntil, v)) } // UntilNEQ applies the NEQ predicate on the "until" field. func UntilNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldUntil, v)) } // UntilIn applies the In predicate on the "until" field. func UntilIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUntil), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldUntil, vs...)) } // UntilNotIn applies the NotIn predicate on the "until" field. 
func UntilNotIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUntil), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldUntil, vs...)) } // UntilGT applies the GT predicate on the "until" field. func UntilGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldUntil, v)) } // UntilGTE applies the GTE predicate on the "until" field. func UntilGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldUntil, v)) } // UntilLT applies the LT predicate on the "until" field. func UntilLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldUntil, v)) } // UntilLTE applies the LTE predicate on the "until" field. func UntilLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUntil), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldUntil, v)) } // UntilIsNil applies the IsNil predicate on the "until" field. func UntilIsNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUntil))) - }) + return predicate.Bouncer(sql.FieldIsNull(FieldUntil)) } // UntilNotNil applies the NotNil predicate on the "until" field. func UntilNotNil() predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUntil))) - }) + return predicate.Bouncer(sql.FieldNotNull(FieldUntil)) } // LastPullEQ applies the EQ predicate on the "last_pull" field. 
func LastPullEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldLastPull, v)) } // LastPullNEQ applies the NEQ predicate on the "last_pull" field. func LastPullNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldLastPull, v)) } // LastPullIn applies the In predicate on the "last_pull" field. func LastPullIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldLastPull), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldLastPull, vs...)) } // LastPullNotIn applies the NotIn predicate on the "last_pull" field. func LastPullNotIn(vs ...time.Time) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldLastPull), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldLastPull, vs...)) } // LastPullGT applies the GT predicate on the "last_pull" field. func LastPullGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldLastPull, v)) } // LastPullGTE applies the GTE predicate on the "last_pull" field. func LastPullGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldLastPull, v)) } // LastPullLT applies the LT predicate on the "last_pull" field. 
func LastPullLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldLastPull, v)) } // LastPullLTE applies the LTE predicate on the "last_pull" field. func LastPullLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastPull), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldLastPull, v)) } // AuthTypeEQ applies the EQ predicate on the "auth_type" field. func AuthTypeEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) } // AuthTypeNEQ applies the NEQ predicate on the "auth_type" field. func AuthTypeNEQ(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldNEQ(FieldAuthType, v)) } // AuthTypeIn applies the In predicate on the "auth_type" field. func AuthTypeIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAuthType), v...)) - }) + return predicate.Bouncer(sql.FieldIn(FieldAuthType, vs...)) } // AuthTypeNotIn applies the NotIn predicate on the "auth_type" field. func AuthTypeNotIn(vs ...string) predicate.Bouncer { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAuthType), v...)) - }) + return predicate.Bouncer(sql.FieldNotIn(FieldAuthType, vs...)) } // AuthTypeGT applies the GT predicate on the "auth_type" field. 
func AuthTypeGT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldGT(FieldAuthType, v)) } // AuthTypeGTE applies the GTE predicate on the "auth_type" field. func AuthTypeGTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldGTE(FieldAuthType, v)) } // AuthTypeLT applies the LT predicate on the "auth_type" field. func AuthTypeLT(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldLT(FieldAuthType, v)) } // AuthTypeLTE applies the LTE predicate on the "auth_type" field. func AuthTypeLTE(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldLTE(FieldAuthType, v)) } // AuthTypeContains applies the Contains predicate on the "auth_type" field. func AuthTypeContains(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldContains(FieldAuthType, v)) } // AuthTypeHasPrefix applies the HasPrefix predicate on the "auth_type" field. func AuthTypeHasPrefix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldHasPrefix(FieldAuthType, v)) } // AuthTypeHasSuffix applies the HasSuffix predicate on the "auth_type" field. func AuthTypeHasSuffix(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldHasSuffix(FieldAuthType, v)) } // AuthTypeEqualFold applies the EqualFold predicate on the "auth_type" field. 
func AuthTypeEqualFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldEqualFold(FieldAuthType, v)) } // AuthTypeContainsFold applies the ContainsFold predicate on the "auth_type" field. func AuthTypeContainsFold(v string) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAuthType), v)) - }) + return predicate.Bouncer(sql.FieldContainsFold(FieldAuthType, v)) } // And groups predicates with the AND operator between them. func And(predicates ...predicate.Bouncer) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Bouncer(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Bouncer) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Bouncer(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Bouncer) predicate.Bouncer { - return predicate.Bouncer(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Bouncer(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 685ce089d1e..3d08277dcfb 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -157,50 +157,8 @@ func (bc *BouncerCreate) Mutation() *BouncerMutation { // Save creates the Bouncer in the database. 
func (bc *BouncerCreate) Save(ctx context.Context) (*Bouncer, error) { - var ( - err error - node *Bouncer - ) bc.defaults() - if len(bc.hooks) == 0 { - if err = bc.check(); err != nil { - return nil, err - } - node, err = bc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = bc.check(); err != nil { - return nil, err - } - bc.mutation = mutation - if node, err = bc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(bc.hooks) - 1; i >= 0; i-- { - if bc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bc.hooks[i](mut) - } - v, err := mut.Mutate(ctx, bc.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Bouncer) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from BouncerMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, bc.sqlSave, bc.mutation, bc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -274,6 +232,9 @@ func (bc *BouncerCreate) check() error { } func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) { + if err := bc.check(); err != nil { + return nil, err + } _node, _spec := bc.createSpec() if err := sqlgraph.CreateNode(ctx, bc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -283,106 +244,58 @@ func (bc *BouncerCreate) sqlSave(ctx context.Context) (*Bouncer, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + bc.mutation.id = &_node.ID + bc.mutation.done = true return _node, nil } func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { var ( _node = &Bouncer{config: bc.config} - _spec = &sqlgraph.CreateSpec{ - Table: bouncer.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(bouncer.Table, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) ) if value, ok := bc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldCreatedAt, - }) + _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := bc.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUpdatedAt, - }) + _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := bc.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldName, - }) + _spec.SetField(bouncer.FieldName, field.TypeString, value) _node.Name = value } if value, ok := bc.mutation.APIKey(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAPIKey, - }) + _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) _node.APIKey = value } if value, 
ok := bc.mutation.Revoked(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: bouncer.FieldRevoked, - }) + _spec.SetField(bouncer.FieldRevoked, field.TypeBool, value) _node.Revoked = value } if value, ok := bc.mutation.IPAddress(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldIPAddress, - }) + _spec.SetField(bouncer.FieldIPAddress, field.TypeString, value) _node.IPAddress = value } if value, ok := bc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldType, - }) + _spec.SetField(bouncer.FieldType, field.TypeString, value) _node.Type = value } if value, ok := bc.mutation.Version(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldVersion, - }) + _spec.SetField(bouncer.FieldVersion, field.TypeString, value) _node.Version = value } if value, ok := bc.mutation.Until(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUntil, - }) + _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) _node.Until = value } if value, ok := bc.mutation.LastPull(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldLastPull, - }) + _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) _node.LastPull = value } if value, ok := bc.mutation.AuthType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAuthType, - }) + _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) _node.AuthType = value } return _node, _spec @@ -391,11 +304,15 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { // 
BouncerCreateBulk is the builder for creating many Bouncer entities in bulk. type BouncerCreateBulk struct { config + err error builders []*BouncerCreate } // Save creates the Bouncer entities in the database. func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) { + if bcb.err != nil { + return nil, bcb.err + } specs := make([]*sqlgraph.CreateSpec, len(bcb.builders)) nodes := make([]*Bouncer, len(bcb.builders)) mutators := make([]Mutator, len(bcb.builders)) @@ -412,8 +329,8 @@ func (bcb *BouncerCreateBulk) Save(ctx context.Context) ([]*Bouncer, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, bcb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/bouncer_delete.go b/pkg/database/ent/bouncer_delete.go index 6bfb9459190..bf459e77e28 100644 --- a/pkg/database/ent/bouncer_delete.go +++ b/pkg/database/ent/bouncer_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (bd *BouncerDelete) Where(ps ...predicate.Bouncer) *BouncerDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (bd *BouncerDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(bd.hooks) == 0 { - affected, err = bd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - bd.mutation = mutation - affected, err = bd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(bd.hooks) - 1; i >= 0; i-- { - if bd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, bd.sqlExec, bd.mutation, bd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (bd *BouncerDelete) ExecX(ctx context.Context) int { } func (bd *BouncerDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: bouncer.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(bouncer.Table, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) if ps := bd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (bd *BouncerDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + bd.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type BouncerDeleteOne struct { bd *BouncerDelete } +// Where appends a list predicates to the BouncerDelete builder. +func (bdo *BouncerDeleteOne) Where(ps ...predicate.Bouncer) *BouncerDeleteOne { + bdo.bd.mutation.Where(ps...) + return bdo +} + // Exec executes the deletion query. 
func (bdo *BouncerDeleteOne) Exec(ctx context.Context) error { n, err := bdo.bd.Exec(ctx) @@ -111,5 +82,7 @@ func (bdo *BouncerDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (bdo *BouncerDeleteOne) ExecX(ctx context.Context) { - bdo.bd.ExecX(ctx) + if err := bdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/bouncer_query.go b/pkg/database/ent/bouncer_query.go index 2747a3e0b3a..ea2b7495733 100644 --- a/pkg/database/ent/bouncer_query.go +++ b/pkg/database/ent/bouncer_query.go @@ -17,11 +17,9 @@ import ( // BouncerQuery is the builder for querying Bouncer entities. type BouncerQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []bouncer.OrderOption + inters []Interceptor predicates []predicate.Bouncer // intermediate query (i.e. traversal path). sql *sql.Selector @@ -34,27 +32,27 @@ func (bq *BouncerQuery) Where(ps ...predicate.Bouncer) *BouncerQuery { return bq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (bq *BouncerQuery) Limit(limit int) *BouncerQuery { - bq.limit = &limit + bq.ctx.Limit = &limit return bq } -// Offset adds an offset step to the query. +// Offset to start from. func (bq *BouncerQuery) Offset(offset int) *BouncerQuery { - bq.offset = &offset + bq.ctx.Offset = &offset return bq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (bq *BouncerQuery) Unique(unique bool) *BouncerQuery { - bq.unique = &unique + bq.ctx.Unique = &unique return bq } -// Order adds an order step to the query. -func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery { +// Order specifies how the records should be ordered. +func (bq *BouncerQuery) Order(o ...bouncer.OrderOption) *BouncerQuery { bq.order = append(bq.order, o...) 
return bq } @@ -62,7 +60,7 @@ func (bq *BouncerQuery) Order(o ...OrderFunc) *BouncerQuery { // First returns the first Bouncer entity from the query. // Returns a *NotFoundError when no Bouncer was found. func (bq *BouncerQuery) First(ctx context.Context) (*Bouncer, error) { - nodes, err := bq.Limit(1).All(ctx) + nodes, err := bq.Limit(1).All(setContextOp(ctx, bq.ctx, "First")) if err != nil { return nil, err } @@ -85,7 +83,7 @@ func (bq *BouncerQuery) FirstX(ctx context.Context) *Bouncer { // Returns a *NotFoundError when no Bouncer ID was found. func (bq *BouncerQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = bq.Limit(1).IDs(ctx); err != nil { + if ids, err = bq.Limit(1).IDs(setContextOp(ctx, bq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -108,7 +106,7 @@ func (bq *BouncerQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Bouncer entity is found. // Returns a *NotFoundError when no Bouncer entities are found. func (bq *BouncerQuery) Only(ctx context.Context) (*Bouncer, error) { - nodes, err := bq.Limit(2).All(ctx) + nodes, err := bq.Limit(2).All(setContextOp(ctx, bq.ctx, "Only")) if err != nil { return nil, err } @@ -136,7 +134,7 @@ func (bq *BouncerQuery) OnlyX(ctx context.Context) *Bouncer { // Returns a *NotFoundError when no entities are found. func (bq *BouncerQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = bq.Limit(2).IDs(ctx); err != nil { + if ids, err = bq.Limit(2).IDs(setContextOp(ctx, bq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -161,10 +159,12 @@ func (bq *BouncerQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Bouncers. 
func (bq *BouncerQuery) All(ctx context.Context) ([]*Bouncer, error) { + ctx = setContextOp(ctx, bq.ctx, "All") if err := bq.prepareQuery(ctx); err != nil { return nil, err } - return bq.sqlAll(ctx) + qr := querierAll[[]*Bouncer, *BouncerQuery]() + return withInterceptors[[]*Bouncer](ctx, bq, qr, bq.inters) } // AllX is like All, but panics if an error occurs. @@ -177,9 +177,12 @@ func (bq *BouncerQuery) AllX(ctx context.Context) []*Bouncer { } // IDs executes the query and returns a list of Bouncer IDs. -func (bq *BouncerQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil { +func (bq *BouncerQuery) IDs(ctx context.Context) (ids []int, err error) { + if bq.ctx.Unique == nil && bq.path != nil { + bq.Unique(true) + } + ctx = setContextOp(ctx, bq.ctx, "IDs") + if err = bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -196,10 +199,11 @@ func (bq *BouncerQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (bq *BouncerQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, bq.ctx, "Count") if err := bq.prepareQuery(ctx); err != nil { return 0, err } - return bq.sqlCount(ctx) + return withInterceptors[int](ctx, bq, querierCount[*BouncerQuery](), bq.inters) } // CountX is like Count, but panics if an error occurs. @@ -213,10 +217,15 @@ func (bq *BouncerQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (bq *BouncerQuery) Exist(ctx context.Context) (bool, error) { - if err := bq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, bq.ctx, "Exist") + switch _, err := bq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return bq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -236,14 +245,13 @@ func (bq *BouncerQuery) Clone() *BouncerQuery { } return &BouncerQuery{ config: bq.config, - limit: bq.limit, - offset: bq.offset, - order: append([]OrderFunc{}, bq.order...), + ctx: bq.ctx.Clone(), + order: append([]bouncer.OrderOption{}, bq.order...), + inters: append([]Interceptor{}, bq.inters...), predicates: append([]predicate.Bouncer{}, bq.predicates...), // clone intermediate query. - sql: bq.sql.Clone(), - path: bq.path, - unique: bq.unique, + sql: bq.sql.Clone(), + path: bq.path, } } @@ -262,16 +270,11 @@ func (bq *BouncerQuery) Clone() *BouncerQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy { - grbuild := &BouncerGroupBy{config: bq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := bq.prepareQuery(ctx); err != nil { - return nil, err - } - return bq.sqlQuery(ctx), nil - } + bq.ctx.Fields = append([]string{field}, fields...) + grbuild := &BouncerGroupBy{build: bq} + grbuild.flds = &bq.ctx.Fields grbuild.label = bouncer.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -288,15 +291,30 @@ func (bq *BouncerQuery) GroupBy(field string, fields ...string) *BouncerGroupBy // Select(bouncer.FieldCreatedAt). // Scan(ctx, &v) func (bq *BouncerQuery) Select(fields ...string) *BouncerSelect { - bq.fields = append(bq.fields, fields...) 
- selbuild := &BouncerSelect{BouncerQuery: bq} - selbuild.label = bouncer.Label - selbuild.flds, selbuild.scan = &bq.fields, selbuild.Scan - return selbuild + bq.ctx.Fields = append(bq.ctx.Fields, fields...) + sbuild := &BouncerSelect{BouncerQuery: bq} + sbuild.label = bouncer.Label + sbuild.flds, sbuild.scan = &bq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BouncerSelect configured with the given aggregations. +func (bq *BouncerQuery) Aggregate(fns ...AggregateFunc) *BouncerSelect { + return bq.Select().Aggregate(fns...) } func (bq *BouncerQuery) prepareQuery(ctx context.Context) error { - for _, f := range bq.fields { + for _, inter := range bq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, bq); err != nil { + return err + } + } + } + for _, f := range bq.ctx.Fields { if !bouncer.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -338,41 +356,22 @@ func (bq *BouncerQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Boun func (bq *BouncerQuery) sqlCount(ctx context.Context) (int, error) { _spec := bq.querySpec() - _spec.Node.Columns = bq.fields - if len(bq.fields) > 0 { - _spec.Unique = bq.unique != nil && *bq.unique + _spec.Node.Columns = bq.ctx.Fields + if len(bq.ctx.Fields) > 0 { + _spec.Unique = bq.ctx.Unique != nil && *bq.ctx.Unique } return sqlgraph.CountNodes(ctx, bq.driver, _spec) } -func (bq *BouncerQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := bq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: bouncer.Table, - Columns: bouncer.Columns, - ID: 
&sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - }, - From: bq.sql, - Unique: true, - } - if unique := bq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) + _spec.From = bq.sql + if unique := bq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if bq.path != nil { + _spec.Unique = true } - if fields := bq.fields; len(fields) > 0 { + if fields := bq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, bouncer.FieldID) for i := range fields { @@ -388,10 +387,10 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := bq.limit; limit != nil { + if limit := bq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := bq.offset; offset != nil { + if offset := bq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := bq.order; len(ps) > 0 { @@ -407,7 +406,7 @@ func (bq *BouncerQuery) querySpec() *sqlgraph.QuerySpec { func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(bq.driver.Dialect()) t1 := builder.Table(bouncer.Table) - columns := bq.fields + columns := bq.ctx.Fields if len(columns) == 0 { columns = bouncer.Columns } @@ -416,7 +415,7 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = bq.sql selector.Select(selector.Columns(columns...)...) } - if bq.unique != nil && *bq.unique { + if bq.ctx.Unique != nil && *bq.ctx.Unique { selector.Distinct() } for _, p := range bq.predicates { @@ -425,12 +424,12 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range bq.order { p(selector) } - if offset := bq.offset; offset != nil { + if offset := bq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. 
selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := bq.limit; limit != nil { + if limit := bq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -438,13 +437,8 @@ func (bq *BouncerQuery) sqlQuery(ctx context.Context) *sql.Selector { // BouncerGroupBy is the group-by builder for Bouncer entities. type BouncerGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *BouncerQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -453,74 +447,77 @@ func (bgb *BouncerGroupBy) Aggregate(fns ...AggregateFunc) *BouncerGroupBy { return bgb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. func (bgb *BouncerGroupBy) Scan(ctx context.Context, v any) error { - query, err := bgb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, bgb.build.ctx, "GroupBy") + if err := bgb.build.prepareQuery(ctx); err != nil { return err } - bgb.sql = query - return bgb.sqlScan(ctx, v) + return scanWithInterceptors[*BouncerQuery, *BouncerGroupBy](ctx, bgb.build, bgb, bgb.build.inters, v) } -func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range bgb.fields { - if !bouncer.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (bgb *BouncerGroupBy) sqlScan(ctx context.Context, root *BouncerQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(bgb.fns)) + for _, fn := range bgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*bgb.flds)+len(bgb.fns)) + for _, f := range *bgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, 
aggregation...) + selector.Select(columns...) } - selector := bgb.sqlQuery() + selector.GroupBy(selector.Columns(*bgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := bgb.driver.Query(ctx, query, args, rows); err != nil { + if err := bgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (bgb *BouncerGroupBy) sqlQuery() *sql.Selector { - selector := bgb.sql.Select() - aggregation := make([]string, 0, len(bgb.fns)) - for _, fn := range bgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(bgb.fields)+len(bgb.fns)) - for _, f := range bgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(bgb.fields...)...) -} - // BouncerSelect is the builder for selecting fields of Bouncer entities. type BouncerSelect struct { *BouncerQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (bs *BouncerSelect) Aggregate(fns ...AggregateFunc) *BouncerSelect { + bs.fns = append(bs.fns, fns...) + return bs } // Scan applies the selector query and scans the result into the given value. 
func (bs *BouncerSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, bs.ctx, "Select") if err := bs.prepareQuery(ctx); err != nil { return err } - bs.sql = bs.BouncerQuery.sqlQuery(ctx) - return bs.sqlScan(ctx, v) + return scanWithInterceptors[*BouncerQuery, *BouncerSelect](ctx, bs.BouncerQuery, bs, bs.inters, v) } -func (bs *BouncerSelect) sqlScan(ctx context.Context, v any) error { +func (bs *BouncerSelect) sqlScan(ctx context.Context, root *BouncerQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(bs.fns)) + for _, fn := range bs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*bs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } rows := &sql.Rows{} - query, args := bs.sql.Query() + query, args := selector.Query() if err := bs.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index acf48dedeec..f7e71eb315e 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -185,35 +185,8 @@ func (bu *BouncerUpdate) Mutation() *BouncerMutation { // Save executes the query and returns the number of nodes affected by the update operation. 
func (bu *BouncerUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) bu.defaults() - if len(bu.hooks) == 0 { - affected, err = bu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - bu.mutation = mutation - affected, err = bu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(bu.hooks) - 1; i >= 0; i-- { - if bu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = bu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, bu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, bu.sqlSave, bu.mutation, bu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -251,16 +224,7 @@ func (bu *BouncerUpdate) defaults() { } func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: bouncer.Table, - Columns: bouncer.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) if ps := bu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -269,117 +233,55 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := bu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldCreatedAt, - }) + _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } if bu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: 
bouncer.FieldCreatedAt, - }) + _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) } if value, ok := bu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUpdatedAt, - }) + _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } if bu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldUpdatedAt, - }) + _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) } if value, ok := bu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldName, - }) + _spec.SetField(bouncer.FieldName, field.TypeString, value) } if value, ok := bu.mutation.APIKey(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAPIKey, - }) + _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) } if value, ok := bu.mutation.Revoked(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: bouncer.FieldRevoked, - }) + _spec.SetField(bouncer.FieldRevoked, field.TypeBool, value) } if value, ok := bu.mutation.IPAddress(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldIPAddress, - }) + _spec.SetField(bouncer.FieldIPAddress, field.TypeString, value) } if bu.mutation.IPAddressCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldIPAddress, - }) + _spec.ClearField(bouncer.FieldIPAddress, field.TypeString) } if value, ok := bu.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: 
bouncer.FieldType, - }) + _spec.SetField(bouncer.FieldType, field.TypeString, value) } if bu.mutation.TypeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldType, - }) + _spec.ClearField(bouncer.FieldType, field.TypeString) } if value, ok := bu.mutation.Version(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldVersion, - }) + _spec.SetField(bouncer.FieldVersion, field.TypeString, value) } if bu.mutation.VersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldVersion, - }) + _spec.ClearField(bouncer.FieldVersion, field.TypeString) } if value, ok := bu.mutation.Until(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUntil, - }) + _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) } if bu.mutation.UntilCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldUntil, - }) + _spec.ClearField(bouncer.FieldUntil, field.TypeTime) } if value, ok := bu.mutation.LastPull(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldLastPull, - }) + _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } if value, ok := bu.mutation.AuthType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAuthType, - }) + _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { @@ -389,6 +291,7 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err 
} + bu.mutation.done = true return n, nil } @@ -555,6 +458,12 @@ func (buo *BouncerUpdateOne) Mutation() *BouncerMutation { return buo.mutation } +// Where appends a list predicates to the BouncerUpdate builder. +func (buo *BouncerUpdateOne) Where(ps ...predicate.Bouncer) *BouncerUpdateOne { + buo.mutation.Where(ps...) + return buo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpdateOne { @@ -564,41 +473,8 @@ func (buo *BouncerUpdateOne) Select(field string, fields ...string) *BouncerUpda // Save executes the query and returns the updated Bouncer entity. func (buo *BouncerUpdateOne) Save(ctx context.Context) (*Bouncer, error) { - var ( - err error - node *Bouncer - ) buo.defaults() - if len(buo.hooks) == 0 { - node, err = buo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - buo.mutation = mutation - node, err = buo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(buo.hooks) - 1; i >= 0; i-- { - if buo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = buo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, buo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Bouncer) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from BouncerMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, buo.sqlSave, buo.mutation, buo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -636,16 +512,7 @@ func (buo *BouncerUpdateOne) defaults() { } func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: bouncer.Table, - Columns: bouncer.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: bouncer.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(bouncer.Table, bouncer.Columns, sqlgraph.NewFieldSpec(bouncer.FieldID, field.TypeInt)) id, ok := buo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Bouncer.id" for update`)} @@ -671,117 +538,55 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e } } if value, ok := buo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldCreatedAt, - }) + _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } if buo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldCreatedAt, - }) + _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) } if value, ok := buo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUpdatedAt, - }) + _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } if buo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldUpdatedAt, - }) + _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) } if value, ok := buo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldName, - }) + _spec.SetField(bouncer.FieldName, field.TypeString, value) } if value, ok := buo.mutation.APIKey(); ok { - 
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAPIKey, - }) + _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) } if value, ok := buo.mutation.Revoked(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: bouncer.FieldRevoked, - }) + _spec.SetField(bouncer.FieldRevoked, field.TypeBool, value) } if value, ok := buo.mutation.IPAddress(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldIPAddress, - }) + _spec.SetField(bouncer.FieldIPAddress, field.TypeString, value) } if buo.mutation.IPAddressCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldIPAddress, - }) + _spec.ClearField(bouncer.FieldIPAddress, field.TypeString) } if value, ok := buo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldType, - }) + _spec.SetField(bouncer.FieldType, field.TypeString, value) } if buo.mutation.TypeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldType, - }) + _spec.ClearField(bouncer.FieldType, field.TypeString) } if value, ok := buo.mutation.Version(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldVersion, - }) + _spec.SetField(bouncer.FieldVersion, field.TypeString, value) } if buo.mutation.VersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: bouncer.FieldVersion, - }) + _spec.ClearField(bouncer.FieldVersion, field.TypeString) } if value, ok := buo.mutation.Until(); ok { - _spec.Fields.Set = 
append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldUntil, - }) + _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) } if buo.mutation.UntilCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: bouncer.FieldUntil, - }) + _spec.ClearField(bouncer.FieldUntil, field.TypeTime) } if value, ok := buo.mutation.LastPull(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: bouncer.FieldLastPull, - }) + _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } if value, ok := buo.mutation.AuthType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: bouncer.FieldAuthType, - }) + _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } _node = &Bouncer{config: buo.config} _spec.Assign = _node.assignValues @@ -794,5 +599,6 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e } return nil, err } + buo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 815b1df6d16..2761ff088b5 100644 --- a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -7,9 +7,14 @@ import ( "errors" "fmt" "log" + "reflect" "github.com/crowdsecurity/crowdsec/pkg/database/ent/migrate" + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" @@ -17,10 +22,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" - - "entgo.io/ent/dialect" - 
"entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" ) // Client is the client that holds all ent builders. @@ -46,7 +47,7 @@ type Client struct { // NewClient creates a new client configured with the given options. func NewClient(opts ...Option) *Client { - cfg := config{log: log.Println, hooks: &hooks{}} + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} cfg.options(opts...) client := &Client{config: cfg} client.init() @@ -64,6 +65,55 @@ func (c *Client) init() { c.Meta = NewMetaClient(c.config) } +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + // Open opens a database/sql.DB specified by the driver name and // the data source name, and returns a new client attached to it. // Optional parameters can be added for configuring the client. 
@@ -80,11 +130,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error) } } +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + // Tx returns a new transactional client. The provided context // is used until the transaction is committed or rolled back. func (c *Client) Tx(ctx context.Context) (*Tx, error) { if _, ok := c.driver.(*txDriver); ok { - return nil, errors.New("ent: cannot start a transaction within a transaction") + return nil, ErrTxStarted } tx, err := newTx(ctx, c.driver) if err != nil { @@ -156,13 +209,43 @@ func (c *Client) Close() error { // Use adds the mutation hooks to all the entity clients. // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { - c.Alert.Use(hooks...) - c.Bouncer.Use(hooks...) - c.ConfigItem.Use(hooks...) - c.Decision.Use(hooks...) - c.Event.Use(hooks...) - c.Machine.Use(hooks...) - c.Meta.Use(hooks...) + for _, n := range []interface{ Use(...Hook) }{ + c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. 
+func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *AlertMutation: + return c.Alert.mutate(ctx, m) + case *BouncerMutation: + return c.Bouncer.mutate(ctx, m) + case *ConfigItemMutation: + return c.ConfigItem.mutate(ctx, m) + case *DecisionMutation: + return c.Decision.mutate(ctx, m) + case *EventMutation: + return c.Event.mutate(ctx, m) + case *MachineMutation: + return c.Machine.mutate(ctx, m) + case *MetaMutation: + return c.Meta.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } } // AlertClient is a client for the Alert schema. @@ -181,6 +264,12 @@ func (c *AlertClient) Use(hooks ...Hook) { c.hooks.Alert = append(c.hooks.Alert, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `alert.Intercept(f(g(h())))`. +func (c *AlertClient) Intercept(interceptors ...Interceptor) { + c.inters.Alert = append(c.inters.Alert, interceptors...) +} + // Create returns a builder for creating a Alert entity. func (c *AlertClient) Create() *AlertCreate { mutation := newAlertMutation(c.config, OpCreate) @@ -192,6 +281,21 @@ func (c *AlertClient) CreateBulk(builders ...*AlertCreate) *AlertCreateBulk { return &AlertCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *AlertClient) MapCreateBulk(slice any, setFunc func(*AlertCreate, int)) *AlertCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AlertCreateBulk{err: fmt.Errorf("calling to AlertClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AlertCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AlertCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Alert. func (c *AlertClient) Update() *AlertUpdate { mutation := newAlertMutation(c.config, OpUpdate) @@ -221,7 +325,7 @@ func (c *AlertClient) DeleteOne(a *Alert) *AlertDeleteOne { return c.DeleteOneID(a.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne { builder := c.Delete().Where(alert.ID(id)) builder.mutation.id = &id @@ -233,6 +337,8 @@ func (c *AlertClient) DeleteOneID(id int) *AlertDeleteOne { func (c *AlertClient) Query() *AlertQuery { return &AlertQuery{ config: c.config, + ctx: &QueryContext{Type: TypeAlert}, + inters: c.Interceptors(), } } @@ -252,8 +358,8 @@ func (c *AlertClient) GetX(ctx context.Context, id int) *Alert { // QueryOwner queries the owner edge of a Alert. func (c *AlertClient) QueryOwner(a *Alert) *MachineQuery { - query := &MachineQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&MachineClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(alert.Table, alert.FieldID, id), @@ -268,8 +374,8 @@ func (c *AlertClient) QueryOwner(a *Alert) *MachineQuery { // QueryDecisions queries the decisions edge of a Alert. 
func (c *AlertClient) QueryDecisions(a *Alert) *DecisionQuery { - query := &DecisionQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&DecisionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(alert.Table, alert.FieldID, id), @@ -284,8 +390,8 @@ func (c *AlertClient) QueryDecisions(a *Alert) *DecisionQuery { // QueryEvents queries the events edge of a Alert. func (c *AlertClient) QueryEvents(a *Alert) *EventQuery { - query := &EventQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&EventClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(alert.Table, alert.FieldID, id), @@ -300,8 +406,8 @@ func (c *AlertClient) QueryEvents(a *Alert) *EventQuery { // QueryMetas queries the metas edge of a Alert. func (c *AlertClient) QueryMetas(a *Alert) *MetaQuery { - query := &MetaQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&MetaClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := a.ID step := sqlgraph.NewStep( sqlgraph.From(alert.Table, alert.FieldID, id), @@ -319,6 +425,26 @@ func (c *AlertClient) Hooks() []Hook { return c.hooks.Alert } +// Interceptors returns the client interceptors. 
+func (c *AlertClient) Interceptors() []Interceptor { + return c.inters.Alert +} + +func (c *AlertClient) mutate(ctx context.Context, m *AlertMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AlertCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AlertUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AlertUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AlertDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Alert mutation op: %q", m.Op()) + } +} + // BouncerClient is a client for the Bouncer schema. type BouncerClient struct { config @@ -335,6 +461,12 @@ func (c *BouncerClient) Use(hooks ...Hook) { c.hooks.Bouncer = append(c.hooks.Bouncer, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `bouncer.Intercept(f(g(h())))`. +func (c *BouncerClient) Intercept(interceptors ...Interceptor) { + c.inters.Bouncer = append(c.inters.Bouncer, interceptors...) +} + // Create returns a builder for creating a Bouncer entity. func (c *BouncerClient) Create() *BouncerCreate { mutation := newBouncerMutation(c.config, OpCreate) @@ -346,6 +478,21 @@ func (c *BouncerClient) CreateBulk(builders ...*BouncerCreate) *BouncerCreateBul return &BouncerCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *BouncerClient) MapCreateBulk(slice any, setFunc func(*BouncerCreate, int)) *BouncerCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BouncerCreateBulk{err: fmt.Errorf("calling to BouncerClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BouncerCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BouncerCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Bouncer. func (c *BouncerClient) Update() *BouncerUpdate { mutation := newBouncerMutation(c.config, OpUpdate) @@ -375,7 +522,7 @@ func (c *BouncerClient) DeleteOne(b *Bouncer) *BouncerDeleteOne { return c.DeleteOneID(b.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne { builder := c.Delete().Where(bouncer.ID(id)) builder.mutation.id = &id @@ -387,6 +534,8 @@ func (c *BouncerClient) DeleteOneID(id int) *BouncerDeleteOne { func (c *BouncerClient) Query() *BouncerQuery { return &BouncerQuery{ config: c.config, + ctx: &QueryContext{Type: TypeBouncer}, + inters: c.Interceptors(), } } @@ -409,6 +558,26 @@ func (c *BouncerClient) Hooks() []Hook { return c.hooks.Bouncer } +// Interceptors returns the client interceptors. 
+func (c *BouncerClient) Interceptors() []Interceptor { + return c.inters.Bouncer +} + +func (c *BouncerClient) mutate(ctx context.Context, m *BouncerMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BouncerCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BouncerUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BouncerUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BouncerDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Bouncer mutation op: %q", m.Op()) + } +} + // ConfigItemClient is a client for the ConfigItem schema. type ConfigItemClient struct { config @@ -425,6 +594,12 @@ func (c *ConfigItemClient) Use(hooks ...Hook) { c.hooks.ConfigItem = append(c.hooks.ConfigItem, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `configitem.Intercept(f(g(h())))`. +func (c *ConfigItemClient) Intercept(interceptors ...Interceptor) { + c.inters.ConfigItem = append(c.inters.ConfigItem, interceptors...) +} + // Create returns a builder for creating a ConfigItem entity. func (c *ConfigItemClient) Create() *ConfigItemCreate { mutation := newConfigItemMutation(c.config, OpCreate) @@ -436,6 +611,21 @@ func (c *ConfigItemClient) CreateBulk(builders ...*ConfigItemCreate) *ConfigItem return &ConfigItemCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ConfigItemClient) MapCreateBulk(slice any, setFunc func(*ConfigItemCreate, int)) *ConfigItemCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ConfigItemCreateBulk{err: fmt.Errorf("calling to ConfigItemClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ConfigItemCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ConfigItemCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for ConfigItem. func (c *ConfigItemClient) Update() *ConfigItemUpdate { mutation := newConfigItemMutation(c.config, OpUpdate) @@ -465,7 +655,7 @@ func (c *ConfigItemClient) DeleteOne(ci *ConfigItem) *ConfigItemDeleteOne { return c.DeleteOneID(ci.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *ConfigItemClient) DeleteOneID(id int) *ConfigItemDeleteOne { builder := c.Delete().Where(configitem.ID(id)) builder.mutation.id = &id @@ -477,6 +667,8 @@ func (c *ConfigItemClient) DeleteOneID(id int) *ConfigItemDeleteOne { func (c *ConfigItemClient) Query() *ConfigItemQuery { return &ConfigItemQuery{ config: c.config, + ctx: &QueryContext{Type: TypeConfigItem}, + inters: c.Interceptors(), } } @@ -499,6 +691,26 @@ func (c *ConfigItemClient) Hooks() []Hook { return c.hooks.ConfigItem } +// Interceptors returns the client interceptors. 
+func (c *ConfigItemClient) Interceptors() []Interceptor { + return c.inters.ConfigItem +} + +func (c *ConfigItemClient) mutate(ctx context.Context, m *ConfigItemMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ConfigItemCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ConfigItemUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ConfigItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ConfigItemDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ConfigItem mutation op: %q", m.Op()) + } +} + // DecisionClient is a client for the Decision schema. type DecisionClient struct { config @@ -515,6 +727,12 @@ func (c *DecisionClient) Use(hooks ...Hook) { c.hooks.Decision = append(c.hooks.Decision, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `decision.Intercept(f(g(h())))`. +func (c *DecisionClient) Intercept(interceptors ...Interceptor) { + c.inters.Decision = append(c.inters.Decision, interceptors...) +} + // Create returns a builder for creating a Decision entity. func (c *DecisionClient) Create() *DecisionCreate { mutation := newDecisionMutation(c.config, OpCreate) @@ -526,6 +744,21 @@ func (c *DecisionClient) CreateBulk(builders ...*DecisionCreate) *DecisionCreate return &DecisionCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *DecisionClient) MapCreateBulk(slice any, setFunc func(*DecisionCreate, int)) *DecisionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &DecisionCreateBulk{err: fmt.Errorf("calling to DecisionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*DecisionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &DecisionCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Decision. func (c *DecisionClient) Update() *DecisionUpdate { mutation := newDecisionMutation(c.config, OpUpdate) @@ -555,7 +788,7 @@ func (c *DecisionClient) DeleteOne(d *Decision) *DecisionDeleteOne { return c.DeleteOneID(d.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne { builder := c.Delete().Where(decision.ID(id)) builder.mutation.id = &id @@ -567,6 +800,8 @@ func (c *DecisionClient) DeleteOneID(id int) *DecisionDeleteOne { func (c *DecisionClient) Query() *DecisionQuery { return &DecisionQuery{ config: c.config, + ctx: &QueryContext{Type: TypeDecision}, + inters: c.Interceptors(), } } @@ -586,8 +821,8 @@ func (c *DecisionClient) GetX(ctx context.Context, id int) *Decision { // QueryOwner queries the owner edge of a Decision. func (c *DecisionClient) QueryOwner(d *Decision) *AlertQuery { - query := &AlertQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AlertClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := d.ID step := sqlgraph.NewStep( sqlgraph.From(decision.Table, decision.FieldID, id), @@ -605,6 +840,26 @@ func (c *DecisionClient) Hooks() []Hook { return c.hooks.Decision } +// Interceptors returns the client interceptors. 
+func (c *DecisionClient) Interceptors() []Interceptor { + return c.inters.Decision +} + +func (c *DecisionClient) mutate(ctx context.Context, m *DecisionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&DecisionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&DecisionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&DecisionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&DecisionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Decision mutation op: %q", m.Op()) + } +} + // EventClient is a client for the Event schema. type EventClient struct { config @@ -621,6 +876,12 @@ func (c *EventClient) Use(hooks ...Hook) { c.hooks.Event = append(c.hooks.Event, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `event.Intercept(f(g(h())))`. +func (c *EventClient) Intercept(interceptors ...Interceptor) { + c.inters.Event = append(c.inters.Event, interceptors...) +} + // Create returns a builder for creating a Event entity. func (c *EventClient) Create() *EventCreate { mutation := newEventMutation(c.config, OpCreate) @@ -632,6 +893,21 @@ func (c *EventClient) CreateBulk(builders ...*EventCreate) *EventCreateBulk { return &EventCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *EventClient) MapCreateBulk(slice any, setFunc func(*EventCreate, int)) *EventCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &EventCreateBulk{err: fmt.Errorf("calling to EventClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*EventCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &EventCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Event. func (c *EventClient) Update() *EventUpdate { mutation := newEventMutation(c.config, OpUpdate) @@ -661,7 +937,7 @@ func (c *EventClient) DeleteOne(e *Event) *EventDeleteOne { return c.DeleteOneID(e.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *EventClient) DeleteOneID(id int) *EventDeleteOne { builder := c.Delete().Where(event.ID(id)) builder.mutation.id = &id @@ -673,6 +949,8 @@ func (c *EventClient) DeleteOneID(id int) *EventDeleteOne { func (c *EventClient) Query() *EventQuery { return &EventQuery{ config: c.config, + ctx: &QueryContext{Type: TypeEvent}, + inters: c.Interceptors(), } } @@ -692,8 +970,8 @@ func (c *EventClient) GetX(ctx context.Context, id int) *Event { // QueryOwner queries the owner edge of a Event. func (c *EventClient) QueryOwner(e *Event) *AlertQuery { - query := &AlertQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AlertClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := e.ID step := sqlgraph.NewStep( sqlgraph.From(event.Table, event.FieldID, id), @@ -711,6 +989,26 @@ func (c *EventClient) Hooks() []Hook { return c.hooks.Event } +// Interceptors returns the client interceptors. 
+func (c *EventClient) Interceptors() []Interceptor { + return c.inters.Event +} + +func (c *EventClient) mutate(ctx context.Context, m *EventMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&EventCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&EventUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&EventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&EventDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Event mutation op: %q", m.Op()) + } +} + // MachineClient is a client for the Machine schema. type MachineClient struct { config @@ -727,6 +1025,12 @@ func (c *MachineClient) Use(hooks ...Hook) { c.hooks.Machine = append(c.hooks.Machine, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `machine.Intercept(f(g(h())))`. +func (c *MachineClient) Intercept(interceptors ...Interceptor) { + c.inters.Machine = append(c.inters.Machine, interceptors...) +} + // Create returns a builder for creating a Machine entity. func (c *MachineClient) Create() *MachineCreate { mutation := newMachineMutation(c.config, OpCreate) @@ -738,6 +1042,21 @@ func (c *MachineClient) CreateBulk(builders ...*MachineCreate) *MachineCreateBul return &MachineCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *MachineClient) MapCreateBulk(slice any, setFunc func(*MachineCreate, int)) *MachineCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MachineCreateBulk{err: fmt.Errorf("calling to MachineClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MachineCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MachineCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Machine. func (c *MachineClient) Update() *MachineUpdate { mutation := newMachineMutation(c.config, OpUpdate) @@ -767,7 +1086,7 @@ func (c *MachineClient) DeleteOne(m *Machine) *MachineDeleteOne { return c.DeleteOneID(m.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne { builder := c.Delete().Where(machine.ID(id)) builder.mutation.id = &id @@ -779,6 +1098,8 @@ func (c *MachineClient) DeleteOneID(id int) *MachineDeleteOne { func (c *MachineClient) Query() *MachineQuery { return &MachineQuery{ config: c.config, + ctx: &QueryContext{Type: TypeMachine}, + inters: c.Interceptors(), } } @@ -798,8 +1119,8 @@ func (c *MachineClient) GetX(ctx context.Context, id int) *Machine { // QueryAlerts queries the alerts edge of a Machine. func (c *MachineClient) QueryAlerts(m *Machine) *AlertQuery { - query := &AlertQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AlertClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := m.ID step := sqlgraph.NewStep( sqlgraph.From(machine.Table, machine.FieldID, id), @@ -817,6 +1138,26 @@ func (c *MachineClient) Hooks() []Hook { return c.hooks.Machine } +// Interceptors returns the client interceptors. 
+func (c *MachineClient) Interceptors() []Interceptor { + return c.inters.Machine +} + +func (c *MachineClient) mutate(ctx context.Context, m *MachineMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&MachineCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&MachineUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&MachineUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&MachineDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Machine mutation op: %q", m.Op()) + } +} + // MetaClient is a client for the Meta schema. type MetaClient struct { config @@ -833,6 +1174,12 @@ func (c *MetaClient) Use(hooks ...Hook) { c.hooks.Meta = append(c.hooks.Meta, hooks...) } +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `meta.Intercept(f(g(h())))`. +func (c *MetaClient) Intercept(interceptors ...Interceptor) { + c.inters.Meta = append(c.inters.Meta, interceptors...) +} + // Create returns a builder for creating a Meta entity. func (c *MetaClient) Create() *MetaCreate { mutation := newMetaMutation(c.config, OpCreate) @@ -844,6 +1191,21 @@ func (c *MetaClient) CreateBulk(builders ...*MetaCreate) *MetaCreateBulk { return &MetaCreateBulk{config: c.config, builders: builders} } +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *MetaClient) MapCreateBulk(slice any, setFunc func(*MetaCreate, int)) *MetaCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MetaCreateBulk{err: fmt.Errorf("calling to MetaClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MetaCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MetaCreateBulk{config: c.config, builders: builders} +} + // Update returns an update builder for Meta. func (c *MetaClient) Update() *MetaUpdate { mutation := newMetaMutation(c.config, OpUpdate) @@ -873,7 +1235,7 @@ func (c *MetaClient) DeleteOne(m *Meta) *MetaDeleteOne { return c.DeleteOneID(m.ID) } -// DeleteOne returns a builder for deleting the given entity by its id. +// DeleteOneID returns a builder for deleting the given entity by its id. func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne { builder := c.Delete().Where(meta.ID(id)) builder.mutation.id = &id @@ -885,6 +1247,8 @@ func (c *MetaClient) DeleteOneID(id int) *MetaDeleteOne { func (c *MetaClient) Query() *MetaQuery { return &MetaQuery{ config: c.config, + ctx: &QueryContext{Type: TypeMeta}, + inters: c.Interceptors(), } } @@ -904,8 +1268,8 @@ func (c *MetaClient) GetX(ctx context.Context, id int) *Meta { // QueryOwner queries the owner edge of a Meta. func (c *MetaClient) QueryOwner(m *Meta) *AlertQuery { - query := &AlertQuery{config: c.config} - query.path = func(ctx context.Context) (fromV *sql.Selector, _ error) { + query := (&AlertClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { id := m.ID step := sqlgraph.NewStep( sqlgraph.From(meta.Table, meta.FieldID, id), @@ -922,3 +1286,33 @@ func (c *MetaClient) QueryOwner(m *Meta) *AlertQuery { func (c *MetaClient) Hooks() []Hook { return c.hooks.Meta } + +// Interceptors returns the client interceptors. 
+func (c *MetaClient) Interceptors() []Interceptor { + return c.inters.Meta +} + +func (c *MetaClient) mutate(ctx context.Context, m *MetaMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&MetaCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&MetaUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&MetaUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&MetaDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Meta mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Hook + } + inters struct { + Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Interceptor + } +) diff --git a/pkg/database/ent/config.go b/pkg/database/ent/config.go deleted file mode 100644 index 1a152809a32..00000000000 --- a/pkg/database/ent/config.go +++ /dev/null @@ -1,65 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "entgo.io/ent" - "entgo.io/ent/dialect" -) - -// Option function to configure the client. -type Option func(*config) - -// Config is the configuration for the client and its builder. -type config struct { - // driver used for executing database requests. - driver dialect.Driver - // debug enable a debug logging. - debug bool - // log used for logging on debug mode. - log func(...any) - // hooks to execute on mutations. - hooks *hooks -} - -// hooks per client, for fast access. -type hooks struct { - Alert []ent.Hook - Bouncer []ent.Hook - ConfigItem []ent.Hook - Decision []ent.Hook - Event []ent.Hook - Machine []ent.Hook - Meta []ent.Hook -} - -// Options applies the options on the config object. 
-func (c *config) options(opts ...Option) { - for _, opt := range opts { - opt(c) - } - if c.debug { - c.driver = dialect.Debug(c.driver, c.log) - } -} - -// Debug enables debug logging on the ent.Driver. -func Debug() Option { - return func(c *config) { - c.debug = true - } -} - -// Log sets the logging function for debug mode. -func Log(fn func(...any)) Option { - return func(c *config) { - c.log = fn - } -} - -// Driver configures the client driver. -func Driver(driver dialect.Driver) Option { - return func(c *config) { - c.driver = driver - } -} diff --git a/pkg/database/ent/configitem.go b/pkg/database/ent/configitem.go index 615780dbacc..467e54386f6 100644 --- a/pkg/database/ent/configitem.go +++ b/pkg/database/ent/configitem.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" ) @@ -23,7 +24,8 @@ type ConfigItem struct { // Name holds the value of the "name" field. Name string `json:"name"` // Value holds the value of the "value" field. - Value string `json:"value"` + Value string `json:"value"` + selectValues sql.SelectValues } // scanValues returns the types for scanning values from sql.Rows. @@ -38,7 +40,7 @@ func (*ConfigItem) scanValues(columns []string) ([]any, error) { case configitem.FieldCreatedAt, configitem.FieldUpdatedAt: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type ConfigItem", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -84,16 +86,24 @@ func (ci *ConfigItem) assignValues(columns []string, values []any) error { } else if value.Valid { ci.Value = value.String } + default: + ci.selectValues.Set(columns[i], values[i]) } } return nil } +// GetValue returns the ent.Value that was dynamically selected and assigned to the ConfigItem. +// This includes values selected through modifiers, order, etc. 
+func (ci *ConfigItem) GetValue(name string) (ent.Value, error) { + return ci.selectValues.Get(name) +} + // Update returns a builder for updating this ConfigItem. // Note that you need to call ConfigItem.Unwrap() before calling this method if this ConfigItem // was returned from a transaction, and the transaction was committed or rolled back. func (ci *ConfigItem) Update() *ConfigItemUpdateOne { - return (&ConfigItemClient{config: ci.config}).UpdateOne(ci) + return NewConfigItemClient(ci.config).UpdateOne(ci) } // Unwrap unwraps the ConfigItem entity that was returned from a transaction after it was closed, @@ -133,9 +143,3 @@ func (ci *ConfigItem) String() string { // ConfigItems is a parsable slice of ConfigItem. type ConfigItems []*ConfigItem - -func (ci ConfigItems) config(cfg config) { - for _i := range ci { - ci[_i].config = cfg - } -} diff --git a/pkg/database/ent/configitem/configitem.go b/pkg/database/ent/configitem/configitem.go index 80e93e4cc7e..a6ff6c32d57 100644 --- a/pkg/database/ent/configitem/configitem.go +++ b/pkg/database/ent/configitem/configitem.go @@ -4,6 +4,8 @@ package configitem import ( "time" + + "entgo.io/ent/dialect/sql" ) const ( @@ -52,3 +54,31 @@ var ( // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. UpdateDefaultUpdatedAt func() time.Time ) + +// OrderOption defines the ordering options for the ConfigItem queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} diff --git a/pkg/database/ent/configitem/where.go b/pkg/database/ent/configitem/where.go index 6d06938a855..767f0b420f1 100644 --- a/pkg/database/ent/configitem/where.go +++ b/pkg/database/ent/configitem/where.go @@ -11,485 +11,310 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. 
func IDNotIn(ids ...int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v)) } // Name applies equality check predicate on the "name" field. It's identical to NameEQ. 
func Name(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldName, v)) } // Value applies equality check predicate on the "value" field. It's identical to ValueEQ. func Value(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldValue, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. 
func CreatedAtGT(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. func CreatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.ConfigItem(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.ConfigItem(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. 
func UpdatedAtEQ(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. func UpdatedAtGT(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. 
func UpdatedAtLT(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.ConfigItem(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.ConfigItem(sql.FieldNotNull(FieldUpdatedAt)) } // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldName, v)) } // NameNEQ applies the NEQ predicate on the "name" field. func NameNEQ(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldName, v)) } // NameIn applies the In predicate on the "name" field. func NameIn(vs ...string) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldName), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldName, vs...)) } // NameNotIn applies the NotIn predicate on the "name" field. 
func NameNotIn(vs ...string) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldName), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldName, vs...)) } // NameGT applies the GT predicate on the "name" field. func NameGT(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldName, v)) } // NameGTE applies the GTE predicate on the "name" field. func NameGTE(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldName, v)) } // NameLT applies the LT predicate on the "name" field. func NameLT(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldName, v)) } // NameLTE applies the LTE predicate on the "name" field. func NameLTE(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldName, v)) } // NameContains applies the Contains predicate on the "name" field. func NameContains(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldContains(FieldName, v)) } // NameHasPrefix applies the HasPrefix predicate on the "name" field. func NameHasPrefix(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldHasPrefix(FieldName, v)) } // NameHasSuffix applies the HasSuffix predicate on the "name" field. 
func NameHasSuffix(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldHasSuffix(FieldName, v)) } // NameEqualFold applies the EqualFold predicate on the "name" field. func NameEqualFold(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldEqualFold(FieldName, v)) } // NameContainsFold applies the ContainsFold predicate on the "name" field. func NameContainsFold(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldName), v)) - }) + return predicate.ConfigItem(sql.FieldContainsFold(FieldName, v)) } // ValueEQ applies the EQ predicate on the "value" field. func ValueEQ(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldEQ(FieldValue, v)) } // ValueNEQ applies the NEQ predicate on the "value" field. func ValueNEQ(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldNEQ(FieldValue, v)) } // ValueIn applies the In predicate on the "value" field. func ValueIn(vs ...string) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldValue), v...)) - }) + return predicate.ConfigItem(sql.FieldIn(FieldValue, vs...)) } // ValueNotIn applies the NotIn predicate on the "value" field. 
func ValueNotIn(vs ...string) predicate.ConfigItem { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldValue), v...)) - }) + return predicate.ConfigItem(sql.FieldNotIn(FieldValue, vs...)) } // ValueGT applies the GT predicate on the "value" field. func ValueGT(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldGT(FieldValue, v)) } // ValueGTE applies the GTE predicate on the "value" field. func ValueGTE(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldGTE(FieldValue, v)) } // ValueLT applies the LT predicate on the "value" field. func ValueLT(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldLT(FieldValue, v)) } // ValueLTE applies the LTE predicate on the "value" field. func ValueLTE(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldLTE(FieldValue, v)) } // ValueContains applies the Contains predicate on the "value" field. func ValueContains(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldContains(FieldValue, v)) } // ValueHasPrefix applies the HasPrefix predicate on the "value" field. 
func ValueHasPrefix(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldHasPrefix(FieldValue, v)) } // ValueHasSuffix applies the HasSuffix predicate on the "value" field. func ValueHasSuffix(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldHasSuffix(FieldValue, v)) } // ValueEqualFold applies the EqualFold predicate on the "value" field. func ValueEqualFold(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldEqualFold(FieldValue, v)) } // ValueContainsFold applies the ContainsFold predicate on the "value" field. func ValueContainsFold(v string) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldValue), v)) - }) + return predicate.ConfigItem(sql.FieldContainsFold(FieldValue, v)) } // And groups predicates with the AND operator between them. func And(predicates ...predicate.ConfigItem) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ConfigItem(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.ConfigItem) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.ConfigItem(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.ConfigItem) predicate.ConfigItem { - return predicate.ConfigItem(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.ConfigItem(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/configitem_create.go b/pkg/database/ent/configitem_create.go index 736e6a50514..19e73dea41c 100644 --- a/pkg/database/ent/configitem_create.go +++ b/pkg/database/ent/configitem_create.go @@ -67,50 +67,8 @@ func (cic *ConfigItemCreate) Mutation() *ConfigItemMutation { // Save creates the ConfigItem in the database. func (cic *ConfigItemCreate) Save(ctx context.Context) (*ConfigItem, error) { - var ( - err error - node *ConfigItem - ) cic.defaults() - if len(cic.hooks) == 0 { - if err = cic.check(); err != nil { - return nil, err - } - node, err = cic.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = cic.check(); err != nil { - return nil, err - } - cic.mutation = mutation - if node, err = cic.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(cic.hooks) - 1; i >= 0; i-- { - if cic.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cic.hooks[i](mut) - } - v, err := mut.Mutate(ctx, cic.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*ConfigItem) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from ConfigItemMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, cic.sqlSave, cic.mutation, cic.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -159,6 +117,9 @@ func (cic *ConfigItemCreate) check() error { } func (cic *ConfigItemCreate) sqlSave(ctx context.Context) (*ConfigItem, error) { + if err := cic.check(); err != nil { + return nil, err + } _node, _spec := cic.createSpec() if err := sqlgraph.CreateNode(ctx, cic.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -168,50 +129,30 @@ func (cic *ConfigItemCreate) sqlSave(ctx context.Context) (*ConfigItem, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + cic.mutation.id = &_node.ID + cic.mutation.done = true return _node, nil } func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) { var ( _node = &ConfigItem{config: cic.config} - _spec = &sqlgraph.CreateSpec{ - Table: configitem.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(configitem.Table, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) ) if value, ok := cic.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldCreatedAt, - }) + _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := cic.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldUpdatedAt, - }) + _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := cic.mutation.Name(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldName, - }) + _spec.SetField(configitem.FieldName, field.TypeString, value) _node.Name = value } if value, ok := cic.mutation.Value(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldValue, - }) + 
_spec.SetField(configitem.FieldValue, field.TypeString, value) _node.Value = value } return _node, _spec @@ -220,11 +161,15 @@ func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) { // ConfigItemCreateBulk is the builder for creating many ConfigItem entities in bulk. type ConfigItemCreateBulk struct { config + err error builders []*ConfigItemCreate } // Save creates the ConfigItem entities in the database. func (cicb *ConfigItemCreateBulk) Save(ctx context.Context) ([]*ConfigItem, error) { + if cicb.err != nil { + return nil, cicb.err + } specs := make([]*sqlgraph.CreateSpec, len(cicb.builders)) nodes := make([]*ConfigItem, len(cicb.builders)) mutators := make([]Mutator, len(cicb.builders)) @@ -241,8 +186,8 @@ func (cicb *ConfigItemCreateBulk) Save(ctx context.Context) ([]*ConfigItem, erro return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, cicb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/configitem_delete.go b/pkg/database/ent/configitem_delete.go index 223fa9eefbf..a5dc811f60d 100644 --- a/pkg/database/ent/configitem_delete.go +++ b/pkg/database/ent/configitem_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (cid *ConfigItemDelete) Where(ps ...predicate.ConfigItem) *ConfigItemDelete // Exec executes the deletion query and returns how many vertices were deleted. 
func (cid *ConfigItemDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(cid.hooks) == 0 { - affected, err = cid.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - cid.mutation = mutation - affected, err = cid.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(cid.hooks) - 1; i >= 0; i-- { - if cid.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = cid.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, cid.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, cid.sqlExec, cid.mutation, cid.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (cid *ConfigItemDelete) ExecX(ctx context.Context) int { } func (cid *ConfigItemDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: configitem.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(configitem.Table, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) if ps := cid.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (cid *ConfigItemDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + cid.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type ConfigItemDeleteOne struct { cid *ConfigItemDelete } +// Where appends a list predicates to the ConfigItemDelete builder. +func (cido *ConfigItemDeleteOne) Where(ps ...predicate.ConfigItem) *ConfigItemDeleteOne { + cido.cid.mutation.Where(ps...) 
+ return cido +} + // Exec executes the deletion query. func (cido *ConfigItemDeleteOne) Exec(ctx context.Context) error { n, err := cido.cid.Exec(ctx) @@ -111,5 +82,7 @@ func (cido *ConfigItemDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (cido *ConfigItemDeleteOne) ExecX(ctx context.Context) { - cido.cid.ExecX(ctx) + if err := cido.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/configitem_query.go b/pkg/database/ent/configitem_query.go index 6c9e6732a9b..f68b8953ddb 100644 --- a/pkg/database/ent/configitem_query.go +++ b/pkg/database/ent/configitem_query.go @@ -17,11 +17,9 @@ import ( // ConfigItemQuery is the builder for querying ConfigItem entities. type ConfigItemQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []configitem.OrderOption + inters []Interceptor predicates []predicate.ConfigItem // intermediate query (i.e. traversal path). sql *sql.Selector @@ -34,27 +32,27 @@ func (ciq *ConfigItemQuery) Where(ps ...predicate.ConfigItem) *ConfigItemQuery { return ciq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (ciq *ConfigItemQuery) Limit(limit int) *ConfigItemQuery { - ciq.limit = &limit + ciq.ctx.Limit = &limit return ciq } -// Offset adds an offset step to the query. +// Offset to start from. func (ciq *ConfigItemQuery) Offset(offset int) *ConfigItemQuery { - ciq.offset = &offset + ciq.ctx.Offset = &offset return ciq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (ciq *ConfigItemQuery) Unique(unique bool) *ConfigItemQuery { - ciq.unique = &unique + ciq.ctx.Unique = &unique return ciq } -// Order adds an order step to the query. 
-func (ciq *ConfigItemQuery) Order(o ...OrderFunc) *ConfigItemQuery { +// Order specifies how the records should be ordered. +func (ciq *ConfigItemQuery) Order(o ...configitem.OrderOption) *ConfigItemQuery { ciq.order = append(ciq.order, o...) return ciq } @@ -62,7 +60,7 @@ func (ciq *ConfigItemQuery) Order(o ...OrderFunc) *ConfigItemQuery { // First returns the first ConfigItem entity from the query. // Returns a *NotFoundError when no ConfigItem was found. func (ciq *ConfigItemQuery) First(ctx context.Context) (*ConfigItem, error) { - nodes, err := ciq.Limit(1).All(ctx) + nodes, err := ciq.Limit(1).All(setContextOp(ctx, ciq.ctx, "First")) if err != nil { return nil, err } @@ -85,7 +83,7 @@ func (ciq *ConfigItemQuery) FirstX(ctx context.Context) *ConfigItem { // Returns a *NotFoundError when no ConfigItem ID was found. func (ciq *ConfigItemQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = ciq.Limit(1).IDs(ctx); err != nil { + if ids, err = ciq.Limit(1).IDs(setContextOp(ctx, ciq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -108,7 +106,7 @@ func (ciq *ConfigItemQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one ConfigItem entity is found. // Returns a *NotFoundError when no ConfigItem entities are found. func (ciq *ConfigItemQuery) Only(ctx context.Context) (*ConfigItem, error) { - nodes, err := ciq.Limit(2).All(ctx) + nodes, err := ciq.Limit(2).All(setContextOp(ctx, ciq.ctx, "Only")) if err != nil { return nil, err } @@ -136,7 +134,7 @@ func (ciq *ConfigItemQuery) OnlyX(ctx context.Context) *ConfigItem { // Returns a *NotFoundError when no entities are found. 
func (ciq *ConfigItemQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = ciq.Limit(2).IDs(ctx); err != nil { + if ids, err = ciq.Limit(2).IDs(setContextOp(ctx, ciq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -161,10 +159,12 @@ func (ciq *ConfigItemQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of ConfigItems. func (ciq *ConfigItemQuery) All(ctx context.Context) ([]*ConfigItem, error) { + ctx = setContextOp(ctx, ciq.ctx, "All") if err := ciq.prepareQuery(ctx); err != nil { return nil, err } - return ciq.sqlAll(ctx) + qr := querierAll[[]*ConfigItem, *ConfigItemQuery]() + return withInterceptors[[]*ConfigItem](ctx, ciq, qr, ciq.inters) } // AllX is like All, but panics if an error occurs. @@ -177,9 +177,12 @@ func (ciq *ConfigItemQuery) AllX(ctx context.Context) []*ConfigItem { } // IDs executes the query and returns a list of ConfigItem IDs. -func (ciq *ConfigItemQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := ciq.Select(configitem.FieldID).Scan(ctx, &ids); err != nil { +func (ciq *ConfigItemQuery) IDs(ctx context.Context) (ids []int, err error) { + if ciq.ctx.Unique == nil && ciq.path != nil { + ciq.Unique(true) + } + ctx = setContextOp(ctx, ciq.ctx, "IDs") + if err = ciq.Select(configitem.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -196,10 +199,11 @@ func (ciq *ConfigItemQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (ciq *ConfigItemQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, ciq.ctx, "Count") if err := ciq.prepareQuery(ctx); err != nil { return 0, err } - return ciq.sqlCount(ctx) + return withInterceptors[int](ctx, ciq, querierCount[*ConfigItemQuery](), ciq.inters) } // CountX is like Count, but panics if an error occurs. 
@@ -213,10 +217,15 @@ func (ciq *ConfigItemQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (ciq *ConfigItemQuery) Exist(ctx context.Context) (bool, error) { - if err := ciq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, ciq.ctx, "Exist") + switch _, err := ciq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return ciq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -236,14 +245,13 @@ func (ciq *ConfigItemQuery) Clone() *ConfigItemQuery { } return &ConfigItemQuery{ config: ciq.config, - limit: ciq.limit, - offset: ciq.offset, - order: append([]OrderFunc{}, ciq.order...), + ctx: ciq.ctx.Clone(), + order: append([]configitem.OrderOption{}, ciq.order...), + inters: append([]Interceptor{}, ciq.inters...), predicates: append([]predicate.ConfigItem{}, ciq.predicates...), // clone intermediate query. - sql: ciq.sql.Clone(), - path: ciq.path, - unique: ciq.unique, + sql: ciq.sql.Clone(), + path: ciq.path, } } @@ -262,16 +270,11 @@ func (ciq *ConfigItemQuery) Clone() *ConfigItemQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (ciq *ConfigItemQuery) GroupBy(field string, fields ...string) *ConfigItemGroupBy { - grbuild := &ConfigItemGroupBy{config: ciq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := ciq.prepareQuery(ctx); err != nil { - return nil, err - } - return ciq.sqlQuery(ctx), nil - } + ciq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &ConfigItemGroupBy{build: ciq} + grbuild.flds = &ciq.ctx.Fields grbuild.label = configitem.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -288,15 +291,30 @@ func (ciq *ConfigItemQuery) GroupBy(field string, fields ...string) *ConfigItemG // Select(configitem.FieldCreatedAt). // Scan(ctx, &v) func (ciq *ConfigItemQuery) Select(fields ...string) *ConfigItemSelect { - ciq.fields = append(ciq.fields, fields...) - selbuild := &ConfigItemSelect{ConfigItemQuery: ciq} - selbuild.label = configitem.Label - selbuild.flds, selbuild.scan = &ciq.fields, selbuild.Scan - return selbuild + ciq.ctx.Fields = append(ciq.ctx.Fields, fields...) + sbuild := &ConfigItemSelect{ConfigItemQuery: ciq} + sbuild.label = configitem.Label + sbuild.flds, sbuild.scan = &ciq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ConfigItemSelect configured with the given aggregations. +func (ciq *ConfigItemQuery) Aggregate(fns ...AggregateFunc) *ConfigItemSelect { + return ciq.Select().Aggregate(fns...) 
} func (ciq *ConfigItemQuery) prepareQuery(ctx context.Context) error { - for _, f := range ciq.fields { + for _, inter := range ciq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, ciq); err != nil { + return err + } + } + } + for _, f := range ciq.ctx.Fields { if !configitem.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -338,41 +356,22 @@ func (ciq *ConfigItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]* func (ciq *ConfigItemQuery) sqlCount(ctx context.Context) (int, error) { _spec := ciq.querySpec() - _spec.Node.Columns = ciq.fields - if len(ciq.fields) > 0 { - _spec.Unique = ciq.unique != nil && *ciq.unique + _spec.Node.Columns = ciq.ctx.Fields + if len(ciq.ctx.Fields) > 0 { + _spec.Unique = ciq.ctx.Unique != nil && *ciq.ctx.Unique } return sqlgraph.CountNodes(ctx, ciq.driver, _spec) } -func (ciq *ConfigItemQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := ciq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: configitem.Table, - Columns: configitem.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - }, - From: ciq.sql, - Unique: true, - } - if unique := ciq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) + _spec.From = ciq.sql + if unique := ciq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if ciq.path != nil { + _spec.Unique = true } - if fields := ciq.fields; len(fields) > 0 { + if fields := ciq.ctx.Fields; 
len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, configitem.FieldID) for i := range fields { @@ -388,10 +387,10 @@ func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := ciq.limit; limit != nil { + if limit := ciq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := ciq.offset; offset != nil { + if offset := ciq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := ciq.order; len(ps) > 0 { @@ -407,7 +406,7 @@ func (ciq *ConfigItemQuery) querySpec() *sqlgraph.QuerySpec { func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(ciq.driver.Dialect()) t1 := builder.Table(configitem.Table) - columns := ciq.fields + columns := ciq.ctx.Fields if len(columns) == 0 { columns = configitem.Columns } @@ -416,7 +415,7 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = ciq.sql selector.Select(selector.Columns(columns...)...) } - if ciq.unique != nil && *ciq.unique { + if ciq.ctx.Unique != nil && *ciq.ctx.Unique { selector.Distinct() } for _, p := range ciq.predicates { @@ -425,12 +424,12 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range ciq.order { p(selector) } - if offset := ciq.offset; offset != nil { + if offset := ciq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := ciq.limit; limit != nil { + if limit := ciq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -438,13 +437,8 @@ func (ciq *ConfigItemQuery) sqlQuery(ctx context.Context) *sql.Selector { // ConfigItemGroupBy is the group-by builder for ConfigItem entities. type ConfigItemGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). 
- sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *ConfigItemQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -453,74 +447,77 @@ func (cigb *ConfigItemGroupBy) Aggregate(fns ...AggregateFunc) *ConfigItemGroupB return cigb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. func (cigb *ConfigItemGroupBy) Scan(ctx context.Context, v any) error { - query, err := cigb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, cigb.build.ctx, "GroupBy") + if err := cigb.build.prepareQuery(ctx); err != nil { return err } - cigb.sql = query - return cigb.sqlScan(ctx, v) + return scanWithInterceptors[*ConfigItemQuery, *ConfigItemGroupBy](ctx, cigb.build, cigb, cigb.build.inters, v) } -func (cigb *ConfigItemGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range cigb.fields { - if !configitem.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (cigb *ConfigItemGroupBy) sqlScan(ctx context.Context, root *ConfigItemQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(cigb.fns)) + for _, fn := range cigb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*cigb.flds)+len(cigb.fns)) + for _, f := range *cigb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := cigb.sqlQuery() + selector.GroupBy(selector.Columns(*cigb.flds...)...) 
if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := cigb.driver.Query(ctx, query, args, rows); err != nil { + if err := cigb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (cigb *ConfigItemGroupBy) sqlQuery() *sql.Selector { - selector := cigb.sql.Select() - aggregation := make([]string, 0, len(cigb.fns)) - for _, fn := range cigb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(cigb.fields)+len(cigb.fns)) - for _, f := range cigb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(cigb.fields...)...) -} - // ConfigItemSelect is the builder for selecting fields of ConfigItem entities. type ConfigItemSelect struct { *ConfigItemQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (cis *ConfigItemSelect) Aggregate(fns ...AggregateFunc) *ConfigItemSelect { + cis.fns = append(cis.fns, fns...) + return cis } // Scan applies the selector query and scans the result into the given value. 
func (cis *ConfigItemSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, cis.ctx, "Select") if err := cis.prepareQuery(ctx); err != nil { return err } - cis.sql = cis.ConfigItemQuery.sqlQuery(ctx) - return cis.sqlScan(ctx, v) + return scanWithInterceptors[*ConfigItemQuery, *ConfigItemSelect](ctx, cis.ConfigItemQuery, cis, cis.inters, v) } -func (cis *ConfigItemSelect) sqlScan(ctx context.Context, v any) error { +func (cis *ConfigItemSelect) sqlScan(ctx context.Context, root *ConfigItemQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(cis.fns)) + for _, fn := range cis.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*cis.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } rows := &sql.Rows{} - query, args := cis.sql.Query() + query, args := selector.Query() if err := cis.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index e591347a0c3..0db3a0b5233 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -71,35 +71,8 @@ func (ciu *ConfigItemUpdate) Mutation() *ConfigItemMutation { // Save executes the query and returns the number of nodes affected by the update operation. 
func (ciu *ConfigItemUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) ciu.defaults() - if len(ciu.hooks) == 0 { - affected, err = ciu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ciu.mutation = mutation - affected, err = ciu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(ciu.hooks) - 1; i >= 0; i-- { - if ciu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ciu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ciu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ciu.sqlSave, ciu.mutation, ciu.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -137,16 +110,7 @@ func (ciu *ConfigItemUpdate) defaults() { } func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: configitem.Table, - Columns: configitem.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) if ps := ciu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -155,44 +119,22 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := ciu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldCreatedAt, - }) + _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) } if ciu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, 
&sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: configitem.FieldCreatedAt, - }) + _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) } if value, ok := ciu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldUpdatedAt, - }) + _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } if ciu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: configitem.FieldUpdatedAt, - }) + _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) } if value, ok := ciu.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldName, - }) + _spec.SetField(configitem.FieldName, field.TypeString, value) } if value, ok := ciu.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldValue, - }) + _spec.SetField(configitem.FieldValue, field.TypeString, value) } if n, err = sqlgraph.UpdateNodes(ctx, ciu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { @@ -202,6 +144,7 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + ciu.mutation.done = true return n, nil } @@ -254,6 +197,12 @@ func (ciuo *ConfigItemUpdateOne) Mutation() *ConfigItemMutation { return ciuo.mutation } +// Where appends a list predicates to the ConfigItemUpdate builder. +func (ciuo *ConfigItemUpdateOne) Where(ps ...predicate.ConfigItem) *ConfigItemUpdateOne { + ciuo.mutation.Where(ps...) + return ciuo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. 
func (ciuo *ConfigItemUpdateOne) Select(field string, fields ...string) *ConfigItemUpdateOne { @@ -263,41 +212,8 @@ func (ciuo *ConfigItemUpdateOne) Select(field string, fields ...string) *ConfigI // Save executes the query and returns the updated ConfigItem entity. func (ciuo *ConfigItemUpdateOne) Save(ctx context.Context) (*ConfigItem, error) { - var ( - err error - node *ConfigItem - ) ciuo.defaults() - if len(ciuo.hooks) == 0 { - node, err = ciuo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ciuo.mutation = mutation - node, err = ciuo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(ciuo.hooks) - 1; i >= 0; i-- { - if ciuo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ciuo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, ciuo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*ConfigItem) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from ConfigItemMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, ciuo.sqlSave, ciuo.mutation, ciuo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -335,16 +251,7 @@ func (ciuo *ConfigItemUpdateOne) defaults() { } func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: configitem.Table, - Columns: configitem.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: configitem.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(configitem.Table, configitem.Columns, sqlgraph.NewFieldSpec(configitem.FieldID, field.TypeInt)) id, ok := ciuo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ConfigItem.id" for update`)} @@ -370,44 +277,22 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem } } if value, ok := ciuo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldCreatedAt, - }) + _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) } if ciuo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: configitem.FieldCreatedAt, - }) + _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) } if value, ok := ciuo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: configitem.FieldUpdatedAt, - }) + _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } if ciuo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: configitem.FieldUpdatedAt, - }) + _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) } if value, ok := ciuo.mutation.Name(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldName, - }) + _spec.SetField(configitem.FieldName, 
field.TypeString, value) } if value, ok := ciuo.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: configitem.FieldValue, - }) + _spec.SetField(configitem.FieldValue, field.TypeString, value) } _node = &ConfigItem{config: ciuo.config} _spec.Assign = _node.assignValues @@ -420,5 +305,6 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem } return nil, err } + ciuo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/context.go b/pkg/database/ent/context.go deleted file mode 100644 index 7811bfa2349..00000000000 --- a/pkg/database/ent/context.go +++ /dev/null @@ -1,33 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" -) - -type clientCtxKey struct{} - -// FromContext returns a Client stored inside a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Client { - c, _ := ctx.Value(clientCtxKey{}).(*Client) - return c -} - -// NewContext returns a new context with the given Client attached. -func NewContext(parent context.Context, c *Client) context.Context { - return context.WithValue(parent, clientCtxKey{}, c) -} - -type txCtxKey struct{} - -// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. -func TxFromContext(ctx context.Context) *Tx { - tx, _ := ctx.Value(txCtxKey{}).(*Tx) - return tx -} - -// NewTxContext returns a new context with the given Tx attached. 
-func NewTxContext(parent context.Context, tx *Tx) context.Context { - return context.WithValue(parent, txCtxKey{}, tx) -} diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go index c969e576724..8a08bc1dfd4 100644 --- a/pkg/database/ent/decision.go +++ b/pkg/database/ent/decision.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" @@ -51,7 +52,8 @@ type Decision struct { AlertDecisions int `json:"alert_decisions,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the DecisionQuery when eager-loading is set. - Edges DecisionEdges `json:"edges"` + Edges DecisionEdges `json:"edges"` + selectValues sql.SelectValues } // DecisionEdges holds the relations/edges for other nodes in the graph. @@ -90,7 +92,7 @@ func (*Decision) scanValues(columns []string) ([]any, error) { case decision.FieldCreatedAt, decision.FieldUpdatedAt, decision.FieldUntil: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Decision", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -209,21 +211,29 @@ func (d *Decision) assignValues(columns []string, values []any) error { } else if value.Valid { d.AlertDecisions = int(value.Int64) } + default: + d.selectValues.Set(columns[i], values[i]) } } return nil } +// GetValue returns the ent.Value that was dynamically selected and assigned to the Decision. +// This includes values selected through modifiers, order, etc. +func (d *Decision) GetValue(name string) (ent.Value, error) { + return d.selectValues.Get(name) +} + // QueryOwner queries the "owner" edge of the Decision entity. 
func (d *Decision) QueryOwner() *AlertQuery { - return (&DecisionClient{config: d.config}).QueryOwner(d) + return NewDecisionClient(d.config).QueryOwner(d) } // Update returns a builder for updating this Decision. // Note that you need to call Decision.Unwrap() before calling this method if this Decision // was returned from a transaction, and the transaction was committed or rolled back. func (d *Decision) Update() *DecisionUpdateOne { - return (&DecisionClient{config: d.config}).UpdateOne(d) + return NewDecisionClient(d.config).UpdateOne(d) } // Unwrap unwraps the Decision entity that was returned from a transaction after it was closed, @@ -301,9 +311,3 @@ func (d *Decision) String() string { // Decisions is a parsable slice of Decision. type Decisions []*Decision - -func (d Decisions) config(cfg config) { - for _i := range d { - d[_i].config = cfg - } -} diff --git a/pkg/database/ent/decision/decision.go b/pkg/database/ent/decision/decision.go index a0012d940a8..d9f67623bd8 100644 --- a/pkg/database/ent/decision/decision.go +++ b/pkg/database/ent/decision/decision.go @@ -4,6 +4,9 @@ package decision import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -99,3 +102,105 @@ var ( // DefaultSimulated holds the default value on creation for the "simulated" field. DefaultSimulated bool ) + +// OrderOption defines the ordering options for the Decision queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByUntil orders the results by the until field. +func ByUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUntil, opts...).ToFunc() +} + +// ByScenario orders the results by the scenario field. +func ByScenario(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenario, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByStartIP orders the results by the start_ip field. +func ByStartIP(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartIP, opts...).ToFunc() +} + +// ByEndIP orders the results by the end_ip field. +func ByEndIP(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndIP, opts...).ToFunc() +} + +// ByStartSuffix orders the results by the start_suffix field. +func ByStartSuffix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartSuffix, opts...).ToFunc() +} + +// ByEndSuffix orders the results by the end_suffix field. +func ByEndSuffix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndSuffix, opts...).ToFunc() +} + +// ByIPSize orders the results by the ip_size field. +func ByIPSize(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIPSize, opts...).ToFunc() +} + +// ByScope orders the results by the scope field. +func ByScope(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScope, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByOrigin orders the results by the origin field. 
+func ByOrigin(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOrigin, opts...).ToFunc() +} + +// BySimulated orders the results by the simulated field. +func BySimulated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSimulated, opts...).ToFunc() +} + +// ByUUID orders the results by the uuid field. +func ByUUID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUUID, opts...).ToFunc() +} + +// ByAlertDecisions orders the results by the alert_decisions field. +func ByAlertDecisions(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAlertDecisions, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. +func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} diff --git a/pkg/database/ent/decision/where.go b/pkg/database/ent/decision/where.go index 18716a4a7c1..36374f5714d 100644 --- a/pkg/database/ent/decision/where.go +++ b/pkg/database/ent/decision/where.go @@ -12,1481 +12,967 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. 
func IDNEQ(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Decision(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
func CreatedAt(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUpdatedAt, v)) } // Until applies equality check predicate on the "until" field. It's identical to UntilEQ. func Until(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUntil, v)) } // Scenario applies equality check predicate on the "scenario" field. It's identical to ScenarioEQ. func Scenario(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldScenario, v)) } // Type applies equality check predicate on the "type" field. It's identical to TypeEQ. func Type(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldType, v)) } // StartIP applies equality check predicate on the "start_ip" field. It's identical to StartIPEQ. func StartIP(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldStartIP, v)) } // EndIP applies equality check predicate on the "end_ip" field. It's identical to EndIPEQ. 
func EndIP(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldEndIP, v)) } // StartSuffix applies equality check predicate on the "start_suffix" field. It's identical to StartSuffixEQ. func StartSuffix(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldStartSuffix, v)) } // EndSuffix applies equality check predicate on the "end_suffix" field. It's identical to EndSuffixEQ. func EndSuffix(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldEndSuffix, v)) } // IPSize applies equality check predicate on the "ip_size" field. It's identical to IPSizeEQ. func IPSize(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldIPSize, v)) } // Scope applies equality check predicate on the "scope" field. It's identical to ScopeEQ. func Scope(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldScope, v)) } // Value applies equality check predicate on the "value" field. It's identical to ValueEQ. func Value(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldValue, v)) } // Origin applies equality check predicate on the "origin" field. It's identical to OriginEQ. 
func Origin(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldOrigin, v)) } // Simulated applies equality check predicate on the "simulated" field. It's identical to SimulatedEQ. func Simulated(v bool) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSimulated), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldSimulated, v)) } // UUID applies equality check predicate on the "uuid" field. It's identical to UUIDEQ. func UUID(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUUID, v)) } // AlertDecisions applies equality check predicate on the "alert_decisions" field. It's identical to AlertDecisionsEQ. func AlertDecisions(v int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertDecisions), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldAlertDecisions, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. 
func CreatedAtIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. 
func CreatedAtIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Decision(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Decision(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. 
func UpdatedAtGT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Decision(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Decision(sql.FieldNotNull(FieldUpdatedAt)) } // UntilEQ applies the EQ predicate on the "until" field. func UntilEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUntil, v)) } // UntilNEQ applies the NEQ predicate on the "until" field. 
func UntilNEQ(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldUntil, v)) } // UntilIn applies the In predicate on the "until" field. func UntilIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUntil), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldUntil, vs...)) } // UntilNotIn applies the NotIn predicate on the "until" field. func UntilNotIn(vs ...time.Time) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUntil), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldUntil, vs...)) } // UntilGT applies the GT predicate on the "until" field. func UntilGT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldGT(FieldUntil, v)) } // UntilGTE applies the GTE predicate on the "until" field. func UntilGTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldUntil, v)) } // UntilLT applies the LT predicate on the "until" field. func UntilLT(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldLT(FieldUntil, v)) } // UntilLTE applies the LTE predicate on the "until" field. func UntilLTE(v time.Time) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUntil), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldUntil, v)) } // UntilIsNil applies the IsNil predicate on the "until" field. 
func UntilIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUntil))) - }) + return predicate.Decision(sql.FieldIsNull(FieldUntil)) } // UntilNotNil applies the NotNil predicate on the "until" field. func UntilNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUntil))) - }) + return predicate.Decision(sql.FieldNotNull(FieldUntil)) } // ScenarioEQ applies the EQ predicate on the "scenario" field. func ScenarioEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldScenario, v)) } // ScenarioNEQ applies the NEQ predicate on the "scenario" field. func ScenarioNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldScenario, v)) } // ScenarioIn applies the In predicate on the "scenario" field. func ScenarioIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenario), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldScenario, vs...)) } // ScenarioNotIn applies the NotIn predicate on the "scenario" field. func ScenarioNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenario), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldScenario, vs...)) } // ScenarioGT applies the GT predicate on the "scenario" field. 
func ScenarioGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldGT(FieldScenario, v)) } // ScenarioGTE applies the GTE predicate on the "scenario" field. func ScenarioGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldScenario, v)) } // ScenarioLT applies the LT predicate on the "scenario" field. func ScenarioLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldLT(FieldScenario, v)) } // ScenarioLTE applies the LTE predicate on the "scenario" field. func ScenarioLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldScenario, v)) } // ScenarioContains applies the Contains predicate on the "scenario" field. func ScenarioContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldContains(FieldScenario, v)) } // ScenarioHasPrefix applies the HasPrefix predicate on the "scenario" field. func ScenarioHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldScenario, v)) } // ScenarioHasSuffix applies the HasSuffix predicate on the "scenario" field. 
func ScenarioHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldScenario, v)) } // ScenarioEqualFold applies the EqualFold predicate on the "scenario" field. func ScenarioEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldScenario, v)) } // ScenarioContainsFold applies the ContainsFold predicate on the "scenario" field. func ScenarioContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenario), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldScenario, v)) } // TypeEQ applies the EQ predicate on the "type" field. func TypeEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldType, v)) } // TypeNEQ applies the NEQ predicate on the "type" field. func TypeNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldType, v)) } // TypeIn applies the In predicate on the "type" field. func TypeIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldType), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldType, vs...)) } // TypeNotIn applies the NotIn predicate on the "type" field. 
func TypeNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldType), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldType, vs...)) } // TypeGT applies the GT predicate on the "type" field. func TypeGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldGT(FieldType, v)) } // TypeGTE applies the GTE predicate on the "type" field. func TypeGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldType, v)) } // TypeLT applies the LT predicate on the "type" field. func TypeLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldLT(FieldType, v)) } // TypeLTE applies the LTE predicate on the "type" field. func TypeLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldType, v)) } // TypeContains applies the Contains predicate on the "type" field. func TypeContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldContains(FieldType, v)) } // TypeHasPrefix applies the HasPrefix predicate on the "type" field. func TypeHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldType, v)) } // TypeHasSuffix applies the HasSuffix predicate on the "type" field. 
func TypeHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldType, v)) } // TypeEqualFold applies the EqualFold predicate on the "type" field. func TypeEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldType, v)) } // TypeContainsFold applies the ContainsFold predicate on the "type" field. func TypeContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldType), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldType, v)) } // StartIPEQ applies the EQ predicate on the "start_ip" field. func StartIPEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldStartIP, v)) } // StartIPNEQ applies the NEQ predicate on the "start_ip" field. func StartIPNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldStartIP, v)) } // StartIPIn applies the In predicate on the "start_ip" field. func StartIPIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStartIP), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldStartIP, vs...)) } // StartIPNotIn applies the NotIn predicate on the "start_ip" field. 
func StartIPNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStartIP), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldStartIP, vs...)) } // StartIPGT applies the GT predicate on the "start_ip" field. func StartIPGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldGT(FieldStartIP, v)) } // StartIPGTE applies the GTE predicate on the "start_ip" field. func StartIPGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldStartIP, v)) } // StartIPLT applies the LT predicate on the "start_ip" field. func StartIPLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldLT(FieldStartIP, v)) } // StartIPLTE applies the LTE predicate on the "start_ip" field. func StartIPLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStartIP), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldStartIP, v)) } // StartIPIsNil applies the IsNil predicate on the "start_ip" field. func StartIPIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStartIP))) - }) + return predicate.Decision(sql.FieldIsNull(FieldStartIP)) } // StartIPNotNil applies the NotNil predicate on the "start_ip" field. func StartIPNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStartIP))) - }) + return predicate.Decision(sql.FieldNotNull(FieldStartIP)) } // EndIPEQ applies the EQ predicate on the "end_ip" field. 
func EndIPEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldEndIP, v)) } // EndIPNEQ applies the NEQ predicate on the "end_ip" field. func EndIPNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldEndIP, v)) } // EndIPIn applies the In predicate on the "end_ip" field. func EndIPIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldEndIP), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldEndIP, vs...)) } // EndIPNotIn applies the NotIn predicate on the "end_ip" field. func EndIPNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldEndIP), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldEndIP, vs...)) } // EndIPGT applies the GT predicate on the "end_ip" field. func EndIPGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldGT(FieldEndIP, v)) } // EndIPGTE applies the GTE predicate on the "end_ip" field. func EndIPGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldEndIP, v)) } // EndIPLT applies the LT predicate on the "end_ip" field. func EndIPLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldLT(FieldEndIP, v)) } // EndIPLTE applies the LTE predicate on the "end_ip" field. 
func EndIPLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEndIP), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldEndIP, v)) } // EndIPIsNil applies the IsNil predicate on the "end_ip" field. func EndIPIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldEndIP))) - }) + return predicate.Decision(sql.FieldIsNull(FieldEndIP)) } // EndIPNotNil applies the NotNil predicate on the "end_ip" field. func EndIPNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldEndIP))) - }) + return predicate.Decision(sql.FieldNotNull(FieldEndIP)) } // StartSuffixEQ applies the EQ predicate on the "start_suffix" field. func StartSuffixEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldStartSuffix, v)) } // StartSuffixNEQ applies the NEQ predicate on the "start_suffix" field. func StartSuffixNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldStartSuffix, v)) } // StartSuffixIn applies the In predicate on the "start_suffix" field. func StartSuffixIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStartSuffix), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldStartSuffix, vs...)) } // StartSuffixNotIn applies the NotIn predicate on the "start_suffix" field. 
func StartSuffixNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStartSuffix), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldStartSuffix, vs...)) } // StartSuffixGT applies the GT predicate on the "start_suffix" field. func StartSuffixGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldGT(FieldStartSuffix, v)) } // StartSuffixGTE applies the GTE predicate on the "start_suffix" field. func StartSuffixGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldStartSuffix, v)) } // StartSuffixLT applies the LT predicate on the "start_suffix" field. func StartSuffixLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldLT(FieldStartSuffix, v)) } // StartSuffixLTE applies the LTE predicate on the "start_suffix" field. func StartSuffixLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStartSuffix), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldStartSuffix, v)) } // StartSuffixIsNil applies the IsNil predicate on the "start_suffix" field. func StartSuffixIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStartSuffix))) - }) + return predicate.Decision(sql.FieldIsNull(FieldStartSuffix)) } // StartSuffixNotNil applies the NotNil predicate on the "start_suffix" field. 
func StartSuffixNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStartSuffix))) - }) + return predicate.Decision(sql.FieldNotNull(FieldStartSuffix)) } // EndSuffixEQ applies the EQ predicate on the "end_suffix" field. func EndSuffixEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldEndSuffix, v)) } // EndSuffixNEQ applies the NEQ predicate on the "end_suffix" field. func EndSuffixNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldEndSuffix, v)) } // EndSuffixIn applies the In predicate on the "end_suffix" field. func EndSuffixIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldEndSuffix), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldEndSuffix, vs...)) } // EndSuffixNotIn applies the NotIn predicate on the "end_suffix" field. func EndSuffixNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldEndSuffix), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldEndSuffix, vs...)) } // EndSuffixGT applies the GT predicate on the "end_suffix" field. func EndSuffixGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldGT(FieldEndSuffix, v)) } // EndSuffixGTE applies the GTE predicate on the "end_suffix" field. 
func EndSuffixGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldEndSuffix, v)) } // EndSuffixLT applies the LT predicate on the "end_suffix" field. func EndSuffixLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldLT(FieldEndSuffix, v)) } // EndSuffixLTE applies the LTE predicate on the "end_suffix" field. func EndSuffixLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldEndSuffix), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldEndSuffix, v)) } // EndSuffixIsNil applies the IsNil predicate on the "end_suffix" field. func EndSuffixIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldEndSuffix))) - }) + return predicate.Decision(sql.FieldIsNull(FieldEndSuffix)) } // EndSuffixNotNil applies the NotNil predicate on the "end_suffix" field. func EndSuffixNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldEndSuffix))) - }) + return predicate.Decision(sql.FieldNotNull(FieldEndSuffix)) } // IPSizeEQ applies the EQ predicate on the "ip_size" field. func IPSizeEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldIPSize, v)) } // IPSizeNEQ applies the NEQ predicate on the "ip_size" field. func IPSizeNEQ(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldIPSize, v)) } // IPSizeIn applies the In predicate on the "ip_size" field. 
func IPSizeIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldIPSize), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldIPSize, vs...)) } // IPSizeNotIn applies the NotIn predicate on the "ip_size" field. func IPSizeNotIn(vs ...int64) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldIPSize), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldIPSize, vs...)) } // IPSizeGT applies the GT predicate on the "ip_size" field. func IPSizeGT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldGT(FieldIPSize, v)) } // IPSizeGTE applies the GTE predicate on the "ip_size" field. func IPSizeGTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldIPSize, v)) } // IPSizeLT applies the LT predicate on the "ip_size" field. func IPSizeLT(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldLT(FieldIPSize, v)) } // IPSizeLTE applies the LTE predicate on the "ip_size" field. func IPSizeLTE(v int64) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldIPSize), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldIPSize, v)) } // IPSizeIsNil applies the IsNil predicate on the "ip_size" field. 
func IPSizeIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldIPSize))) - }) + return predicate.Decision(sql.FieldIsNull(FieldIPSize)) } // IPSizeNotNil applies the NotNil predicate on the "ip_size" field. func IPSizeNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldIPSize))) - }) + return predicate.Decision(sql.FieldNotNull(FieldIPSize)) } // ScopeEQ applies the EQ predicate on the "scope" field. func ScopeEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldScope, v)) } // ScopeNEQ applies the NEQ predicate on the "scope" field. func ScopeNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldScope, v)) } // ScopeIn applies the In predicate on the "scope" field. func ScopeIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScope), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldScope, vs...)) } // ScopeNotIn applies the NotIn predicate on the "scope" field. func ScopeNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScope), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldScope, vs...)) } // ScopeGT applies the GT predicate on the "scope" field. func ScopeGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldGT(FieldScope, v)) } // ScopeGTE applies the GTE predicate on the "scope" field. 
func ScopeGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldScope, v)) } // ScopeLT applies the LT predicate on the "scope" field. func ScopeLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldLT(FieldScope, v)) } // ScopeLTE applies the LTE predicate on the "scope" field. func ScopeLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldScope, v)) } // ScopeContains applies the Contains predicate on the "scope" field. func ScopeContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldContains(FieldScope, v)) } // ScopeHasPrefix applies the HasPrefix predicate on the "scope" field. func ScopeHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldScope, v)) } // ScopeHasSuffix applies the HasSuffix predicate on the "scope" field. func ScopeHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldScope, v)) } // ScopeEqualFold applies the EqualFold predicate on the "scope" field. func ScopeEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldScope, v)) } // ScopeContainsFold applies the ContainsFold predicate on the "scope" field. 
func ScopeContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScope), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldScope, v)) } // ValueEQ applies the EQ predicate on the "value" field. func ValueEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldValue, v)) } // ValueNEQ applies the NEQ predicate on the "value" field. func ValueNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldValue, v)) } // ValueIn applies the In predicate on the "value" field. func ValueIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldValue), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldValue, vs...)) } // ValueNotIn applies the NotIn predicate on the "value" field. func ValueNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldValue), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldValue, vs...)) } // ValueGT applies the GT predicate on the "value" field. func ValueGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldGT(FieldValue, v)) } // ValueGTE applies the GTE predicate on the "value" field. func ValueGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldValue, v)) } // ValueLT applies the LT predicate on the "value" field. 
func ValueLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldLT(FieldValue, v)) } // ValueLTE applies the LTE predicate on the "value" field. func ValueLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldValue, v)) } // ValueContains applies the Contains predicate on the "value" field. func ValueContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldContains(FieldValue, v)) } // ValueHasPrefix applies the HasPrefix predicate on the "value" field. func ValueHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldValue, v)) } // ValueHasSuffix applies the HasSuffix predicate on the "value" field. func ValueHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldValue, v)) } // ValueEqualFold applies the EqualFold predicate on the "value" field. func ValueEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldValue, v)) } // ValueContainsFold applies the ContainsFold predicate on the "value" field. func ValueContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldValue), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldValue, v)) } // OriginEQ applies the EQ predicate on the "origin" field. 
func OriginEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldOrigin, v)) } // OriginNEQ applies the NEQ predicate on the "origin" field. func OriginNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldOrigin, v)) } // OriginIn applies the In predicate on the "origin" field. func OriginIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldOrigin), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldOrigin, vs...)) } // OriginNotIn applies the NotIn predicate on the "origin" field. func OriginNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldOrigin), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldOrigin, vs...)) } // OriginGT applies the GT predicate on the "origin" field. func OriginGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldGT(FieldOrigin, v)) } // OriginGTE applies the GTE predicate on the "origin" field. func OriginGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldOrigin, v)) } // OriginLT applies the LT predicate on the "origin" field. 
func OriginLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldLT(FieldOrigin, v)) } // OriginLTE applies the LTE predicate on the "origin" field. func OriginLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldOrigin, v)) } // OriginContains applies the Contains predicate on the "origin" field. func OriginContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldContains(FieldOrigin, v)) } // OriginHasPrefix applies the HasPrefix predicate on the "origin" field. func OriginHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldOrigin, v)) } // OriginHasSuffix applies the HasSuffix predicate on the "origin" field. func OriginHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldOrigin, v)) } // OriginEqualFold applies the EqualFold predicate on the "origin" field. func OriginEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldOrigin, v)) } // OriginContainsFold applies the ContainsFold predicate on the "origin" field. 
func OriginContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldOrigin), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldOrigin, v)) } // SimulatedEQ applies the EQ predicate on the "simulated" field. func SimulatedEQ(v bool) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSimulated), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldSimulated, v)) } // SimulatedNEQ applies the NEQ predicate on the "simulated" field. func SimulatedNEQ(v bool) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSimulated), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldSimulated, v)) } // UUIDEQ applies the EQ predicate on the "uuid" field. func UUIDEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldUUID, v)) } // UUIDNEQ applies the NEQ predicate on the "uuid" field. func UUIDNEQ(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldUUID, v)) } // UUIDIn applies the In predicate on the "uuid" field. func UUIDIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUUID), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldUUID, vs...)) } // UUIDNotIn applies the NotIn predicate on the "uuid" field. 
func UUIDNotIn(vs ...string) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUUID), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldUUID, vs...)) } // UUIDGT applies the GT predicate on the "uuid" field. func UUIDGT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldGT(FieldUUID, v)) } // UUIDGTE applies the GTE predicate on the "uuid" field. func UUIDGTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldGTE(FieldUUID, v)) } // UUIDLT applies the LT predicate on the "uuid" field. func UUIDLT(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldLT(FieldUUID, v)) } // UUIDLTE applies the LTE predicate on the "uuid" field. func UUIDLTE(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldLTE(FieldUUID, v)) } // UUIDContains applies the Contains predicate on the "uuid" field. func UUIDContains(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldContains(FieldUUID, v)) } // UUIDHasPrefix applies the HasPrefix predicate on the "uuid" field. func UUIDHasPrefix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldHasPrefix(FieldUUID, v)) } // UUIDHasSuffix applies the HasSuffix predicate on the "uuid" field. 
func UUIDHasSuffix(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldHasSuffix(FieldUUID, v)) } // UUIDIsNil applies the IsNil predicate on the "uuid" field. func UUIDIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUUID))) - }) + return predicate.Decision(sql.FieldIsNull(FieldUUID)) } // UUIDNotNil applies the NotNil predicate on the "uuid" field. func UUIDNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUUID))) - }) + return predicate.Decision(sql.FieldNotNull(FieldUUID)) } // UUIDEqualFold applies the EqualFold predicate on the "uuid" field. func UUIDEqualFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldEqualFold(FieldUUID, v)) } // UUIDContainsFold applies the ContainsFold predicate on the "uuid" field. func UUIDContainsFold(v string) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldUUID), v)) - }) + return predicate.Decision(sql.FieldContainsFold(FieldUUID, v)) } // AlertDecisionsEQ applies the EQ predicate on the "alert_decisions" field. func AlertDecisionsEQ(v int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertDecisions), v)) - }) + return predicate.Decision(sql.FieldEQ(FieldAlertDecisions, v)) } // AlertDecisionsNEQ applies the NEQ predicate on the "alert_decisions" field. func AlertDecisionsNEQ(v int) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAlertDecisions), v)) - }) + return predicate.Decision(sql.FieldNEQ(FieldAlertDecisions, v)) } // AlertDecisionsIn applies the In predicate on the "alert_decisions" field. 
func AlertDecisionsIn(vs ...int) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAlertDecisions), v...)) - }) + return predicate.Decision(sql.FieldIn(FieldAlertDecisions, vs...)) } // AlertDecisionsNotIn applies the NotIn predicate on the "alert_decisions" field. func AlertDecisionsNotIn(vs ...int) predicate.Decision { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAlertDecisions), v...)) - }) + return predicate.Decision(sql.FieldNotIn(FieldAlertDecisions, vs...)) } // AlertDecisionsIsNil applies the IsNil predicate on the "alert_decisions" field. func AlertDecisionsIsNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldAlertDecisions))) - }) + return predicate.Decision(sql.FieldIsNull(FieldAlertDecisions)) } // AlertDecisionsNotNil applies the NotNil predicate on the "alert_decisions" field. func AlertDecisionsNotNil() predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldAlertDecisions))) - }) + return predicate.Decision(sql.FieldNotNull(FieldAlertDecisions)) } // HasOwner applies the HasEdge predicate on the "owner" edge. @@ -1494,7 +980,6 @@ func HasOwner() predicate.Decision { return predicate.Decision(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1504,11 +989,7 @@ func HasOwner() predicate.Decision { // HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). 
func HasOwnerWith(preds ...predicate.Alert) predicate.Decision { return predicate.Decision(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), - ) + step := newOwnerStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1519,32 +1000,15 @@ func HasOwnerWith(preds ...predicate.Alert) predicate.Decision { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Decision) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Decision(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Decision) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Decision(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Decision) predicate.Decision { - return predicate.Decision(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Decision(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/decision_create.go b/pkg/database/ent/decision_create.go index 64238cb7003..43a28c53114 100644 --- a/pkg/database/ent/decision_create.go +++ b/pkg/database/ent/decision_create.go @@ -231,50 +231,8 @@ func (dc *DecisionCreate) Mutation() *DecisionMutation { // Save creates the Decision in the database. 
func (dc *DecisionCreate) Save(ctx context.Context) (*Decision, error) { - var ( - err error - node *Decision - ) dc.defaults() - if len(dc.hooks) == 0 { - if err = dc.check(); err != nil { - return nil, err - } - node, err = dc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = dc.check(); err != nil { - return nil, err - } - dc.mutation = mutation - if node, err = dc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(dc.hooks) - 1; i >= 0; i-- { - if dc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dc.hooks[i](mut) - } - v, err := mut.Mutate(ctx, dc.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Decision) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from DecisionMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -339,6 +297,9 @@ func (dc *DecisionCreate) check() error { } func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) { + if err := dc.check(); err != nil { + return nil, err + } _node, _spec := dc.createSpec() if err := sqlgraph.CreateNode(ctx, dc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -348,138 +309,74 @@ func (dc *DecisionCreate) sqlSave(ctx context.Context) (*Decision, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + dc.mutation.id = &_node.ID + dc.mutation.done = true return _node, nil } func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { var ( _node = &Decision{config: dc.config} - _spec = &sqlgraph.CreateSpec{ - Table: decision.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(decision.Table, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) ) if value, ok := dc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldCreatedAt, - }) + _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := dc.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUpdatedAt, - }) + _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := dc.mutation.Until(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUntil, - }) + _spec.SetField(decision.FieldUntil, field.TypeTime, value) _node.Until = &value } if value, ok := dc.mutation.Scenario(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScenario, - }) + _spec.SetField(decision.FieldScenario, field.TypeString, value) 
_node.Scenario = value } if value, ok := dc.mutation.GetType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldType, - }) + _spec.SetField(decision.FieldType, field.TypeString, value) _node.Type = value } if value, ok := dc.mutation.StartIP(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartIP, - }) + _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) _node.StartIP = value } if value, ok := dc.mutation.EndIP(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) _node.EndIP = value } if value, ok := dc.mutation.StartSuffix(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value) _node.StartSuffix = value } if value, ok := dc.mutation.EndSuffix(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndSuffix, - }) + _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) _node.EndSuffix = value } if value, ok := dc.mutation.IPSize(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + _spec.SetField(decision.FieldIPSize, field.TypeInt64, value) _node.IPSize = value } if value, ok := dc.mutation.Scope(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScope, - }) + _spec.SetField(decision.FieldScope, field.TypeString, value) _node.Scope = value } if value, ok := dc.mutation.Value(); ok { - _spec.Fields = append(_spec.Fields, 
&sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldValue, - }) + _spec.SetField(decision.FieldValue, field.TypeString, value) _node.Value = value } if value, ok := dc.mutation.Origin(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldOrigin, - }) + _spec.SetField(decision.FieldOrigin, field.TypeString, value) _node.Origin = value } if value, ok := dc.mutation.Simulated(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: decision.FieldSimulated, - }) + _spec.SetField(decision.FieldSimulated, field.TypeBool, value) _node.Simulated = value } if value, ok := dc.mutation.UUID(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldUUID, - }) + _spec.SetField(decision.FieldUUID, field.TypeString, value) _node.UUID = value } if nodes := dc.mutation.OwnerIDs(); len(nodes) > 0 { @@ -490,10 +387,7 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -508,11 +402,15 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { // DecisionCreateBulk is the builder for creating many Decision entities in bulk. type DecisionCreateBulk struct { config + err error builders []*DecisionCreate } // Save creates the Decision entities in the database. 
func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) { + if dcb.err != nil { + return nil, dcb.err + } specs := make([]*sqlgraph.CreateSpec, len(dcb.builders)) nodes := make([]*Decision, len(dcb.builders)) mutators := make([]Mutator, len(dcb.builders)) @@ -529,8 +427,8 @@ func (dcb *DecisionCreateBulk) Save(ctx context.Context) ([]*Decision, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/decision_delete.go b/pkg/database/ent/decision_delete.go index 24b494b113e..35bb8767283 100644 --- a/pkg/database/ent/decision_delete.go +++ b/pkg/database/ent/decision_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (dd *DecisionDelete) Where(ps ...predicate.Decision) *DecisionDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (dd *DecisionDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(dd.hooks) == 0 { - affected, err = dd.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - dd.mutation = mutation - affected, err = dd.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(dd.hooks) - 1; i >= 0; i-- { - if dd.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = dd.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, dd.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (dd *DecisionDelete) ExecX(ctx context.Context) int { } func (dd *DecisionDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: decision.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(decision.Table, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) if ps := dd.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (dd *DecisionDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + dd.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type DecisionDeleteOne struct { dd *DecisionDelete } +// Where appends a list predicates to the DecisionDelete builder. +func (ddo *DecisionDeleteOne) Where(ps ...predicate.Decision) *DecisionDeleteOne { + ddo.dd.mutation.Where(ps...) + return ddo +} + // Exec executes the deletion query. 
func (ddo *DecisionDeleteOne) Exec(ctx context.Context) error { n, err := ddo.dd.Exec(ctx) @@ -111,5 +82,7 @@ func (ddo *DecisionDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (ddo *DecisionDeleteOne) ExecX(ctx context.Context) { - ddo.dd.ExecX(ctx) + if err := ddo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/decision_query.go b/pkg/database/ent/decision_query.go index 91aebded968..b050a4d9649 100644 --- a/pkg/database/ent/decision_query.go +++ b/pkg/database/ent/decision_query.go @@ -18,11 +18,9 @@ import ( // DecisionQuery is the builder for querying Decision entities. type DecisionQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []decision.OrderOption + inters []Interceptor predicates []predicate.Decision withOwner *AlertQuery // intermediate query (i.e. traversal path). @@ -36,34 +34,34 @@ func (dq *DecisionQuery) Where(ps ...predicate.Decision) *DecisionQuery { return dq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (dq *DecisionQuery) Limit(limit int) *DecisionQuery { - dq.limit = &limit + dq.ctx.Limit = &limit return dq } -// Offset adds an offset step to the query. +// Offset to start from. func (dq *DecisionQuery) Offset(offset int) *DecisionQuery { - dq.offset = &offset + dq.ctx.Offset = &offset return dq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (dq *DecisionQuery) Unique(unique bool) *DecisionQuery { - dq.unique = &unique + dq.ctx.Unique = &unique return dq } -// Order adds an order step to the query. -func (dq *DecisionQuery) Order(o ...OrderFunc) *DecisionQuery { +// Order specifies how the records should be ordered. 
+func (dq *DecisionQuery) Order(o ...decision.OrderOption) *DecisionQuery { dq.order = append(dq.order, o...) return dq } // QueryOwner chains the current query on the "owner" edge. func (dq *DecisionQuery) QueryOwner() *AlertQuery { - query := &AlertQuery{config: dq.config} + query := (&AlertClient{config: dq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := dq.prepareQuery(ctx); err != nil { return nil, err @@ -86,7 +84,7 @@ func (dq *DecisionQuery) QueryOwner() *AlertQuery { // First returns the first Decision entity from the query. // Returns a *NotFoundError when no Decision was found. func (dq *DecisionQuery) First(ctx context.Context) (*Decision, error) { - nodes, err := dq.Limit(1).All(ctx) + nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First")) if err != nil { return nil, err } @@ -109,7 +107,7 @@ func (dq *DecisionQuery) FirstX(ctx context.Context) *Decision { // Returns a *NotFoundError when no Decision ID was found. func (dq *DecisionQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = dq.Limit(1).IDs(ctx); err != nil { + if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -132,7 +130,7 @@ func (dq *DecisionQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Decision entity is found. // Returns a *NotFoundError when no Decision entities are found. func (dq *DecisionQuery) Only(ctx context.Context) (*Decision, error) { - nodes, err := dq.Limit(2).All(ctx) + nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only")) if err != nil { return nil, err } @@ -160,7 +158,7 @@ func (dq *DecisionQuery) OnlyX(ctx context.Context) *Decision { // Returns a *NotFoundError when no entities are found. 
func (dq *DecisionQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = dq.Limit(2).IDs(ctx); err != nil { + if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -185,10 +183,12 @@ func (dq *DecisionQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Decisions. func (dq *DecisionQuery) All(ctx context.Context) ([]*Decision, error) { + ctx = setContextOp(ctx, dq.ctx, "All") if err := dq.prepareQuery(ctx); err != nil { return nil, err } - return dq.sqlAll(ctx) + qr := querierAll[[]*Decision, *DecisionQuery]() + return withInterceptors[[]*Decision](ctx, dq, qr, dq.inters) } // AllX is like All, but panics if an error occurs. @@ -201,9 +201,12 @@ func (dq *DecisionQuery) AllX(ctx context.Context) []*Decision { } // IDs executes the query and returns a list of Decision IDs. -func (dq *DecisionQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil { +func (dq *DecisionQuery) IDs(ctx context.Context) (ids []int, err error) { + if dq.ctx.Unique == nil && dq.path != nil { + dq.Unique(true) + } + ctx = setContextOp(ctx, dq.ctx, "IDs") + if err = dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -220,10 +223,11 @@ func (dq *DecisionQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (dq *DecisionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, dq.ctx, "Count") if err := dq.prepareQuery(ctx); err != nil { return 0, err } - return dq.sqlCount(ctx) + return withInterceptors[int](ctx, dq, querierCount[*DecisionQuery](), dq.inters) } // CountX is like Count, but panics if an error occurs. @@ -237,10 +241,15 @@ func (dq *DecisionQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (dq *DecisionQuery) Exist(ctx context.Context) (bool, error) { - if err := dq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, dq.ctx, "Exist") + switch _, err := dq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return dq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -260,22 +269,21 @@ func (dq *DecisionQuery) Clone() *DecisionQuery { } return &DecisionQuery{ config: dq.config, - limit: dq.limit, - offset: dq.offset, - order: append([]OrderFunc{}, dq.order...), + ctx: dq.ctx.Clone(), + order: append([]decision.OrderOption{}, dq.order...), + inters: append([]Interceptor{}, dq.inters...), predicates: append([]predicate.Decision{}, dq.predicates...), withOwner: dq.withOwner.Clone(), // clone intermediate query. - sql: dq.sql.Clone(), - path: dq.path, - unique: dq.unique, + sql: dq.sql.Clone(), + path: dq.path, } } // WithOwner tells the query-builder to eager-load the nodes that are connected to // the "owner" edge. The optional arguments are used to configure the query builder of the edge. func (dq *DecisionQuery) WithOwner(opts ...func(*AlertQuery)) *DecisionQuery { - query := &AlertQuery{config: dq.config} + query := (&AlertClient{config: dq.config}).Query() for _, opt := range opts { opt(query) } @@ -298,16 +306,11 @@ func (dq *DecisionQuery) WithOwner(opts ...func(*AlertQuery)) *DecisionQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupBy { - grbuild := &DecisionGroupBy{config: dq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := dq.prepareQuery(ctx); err != nil { - return nil, err - } - return dq.sqlQuery(ctx), nil - } + dq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &DecisionGroupBy{build: dq} + grbuild.flds = &dq.ctx.Fields grbuild.label = decision.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -324,15 +327,30 @@ func (dq *DecisionQuery) GroupBy(field string, fields ...string) *DecisionGroupB // Select(decision.FieldCreatedAt). // Scan(ctx, &v) func (dq *DecisionQuery) Select(fields ...string) *DecisionSelect { - dq.fields = append(dq.fields, fields...) - selbuild := &DecisionSelect{DecisionQuery: dq} - selbuild.label = decision.Label - selbuild.flds, selbuild.scan = &dq.fields, selbuild.Scan - return selbuild + dq.ctx.Fields = append(dq.ctx.Fields, fields...) + sbuild := &DecisionSelect{DecisionQuery: dq} + sbuild.label = decision.Label + sbuild.flds, sbuild.scan = &dq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a DecisionSelect configured with the given aggregations. +func (dq *DecisionQuery) Aggregate(fns ...AggregateFunc) *DecisionSelect { + return dq.Select().Aggregate(fns...) 
} func (dq *DecisionQuery) prepareQuery(ctx context.Context) error { - for _, f := range dq.fields { + for _, inter := range dq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, dq); err != nil { + return err + } + } + } + for _, f := range dq.ctx.Fields { if !decision.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -392,6 +410,9 @@ func (dq *DecisionQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes } nodeids[fk] = append(nodeids[fk], nodes[i]) } + if len(ids) == 0 { + return nil + } query.Where(alert.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { @@ -411,41 +432,22 @@ func (dq *DecisionQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes func (dq *DecisionQuery) sqlCount(ctx context.Context) (int, error) { _spec := dq.querySpec() - _spec.Node.Columns = dq.fields - if len(dq.fields) > 0 { - _spec.Unique = dq.unique != nil && *dq.unique + _spec.Node.Columns = dq.ctx.Fields + if len(dq.ctx.Fields) > 0 { + _spec.Unique = dq.ctx.Unique != nil && *dq.ctx.Unique } return sqlgraph.CountNodes(ctx, dq.driver, _spec) } -func (dq *DecisionQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := dq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: decision.Table, - Columns: decision.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - }, - From: dq.sql, - Unique: true, - } - if unique := dq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) 
+ _spec.From = dq.sql + if unique := dq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if dq.path != nil { + _spec.Unique = true } - if fields := dq.fields; len(fields) > 0 { + if fields := dq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, decision.FieldID) for i := range fields { @@ -453,6 +455,9 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } + if dq.withOwner != nil { + _spec.Node.AddColumnOnce(decision.FieldAlertDecisions) + } } if ps := dq.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -461,10 +466,10 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := dq.limit; limit != nil { + if limit := dq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := dq.offset; offset != nil { + if offset := dq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := dq.order; len(ps) > 0 { @@ -480,7 +485,7 @@ func (dq *DecisionQuery) querySpec() *sqlgraph.QuerySpec { func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(dq.driver.Dialect()) t1 := builder.Table(decision.Table) - columns := dq.fields + columns := dq.ctx.Fields if len(columns) == 0 { columns = decision.Columns } @@ -489,7 +494,7 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = dq.sql selector.Select(selector.Columns(columns...)...) } - if dq.unique != nil && *dq.unique { + if dq.ctx.Unique != nil && *dq.ctx.Unique { selector.Distinct() } for _, p := range dq.predicates { @@ -498,12 +503,12 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range dq.order { p(selector) } - if offset := dq.offset; offset != nil { + if offset := dq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. 
selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := dq.limit; limit != nil { + if limit := dq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -511,13 +516,8 @@ func (dq *DecisionQuery) sqlQuery(ctx context.Context) *sql.Selector { // DecisionGroupBy is the group-by builder for Decision entities. type DecisionGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *DecisionQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -526,74 +526,77 @@ func (dgb *DecisionGroupBy) Aggregate(fns ...AggregateFunc) *DecisionGroupBy { return dgb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. func (dgb *DecisionGroupBy) Scan(ctx context.Context, v any) error { - query, err := dgb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy") + if err := dgb.build.prepareQuery(ctx); err != nil { return err } - dgb.sql = query - return dgb.sqlScan(ctx, v) + return scanWithInterceptors[*DecisionQuery, *DecisionGroupBy](ctx, dgb.build, dgb, dgb.build.inters, v) } -func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range dgb.fields { - if !decision.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (dgb *DecisionGroupBy) sqlScan(ctx context.Context, root *DecisionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(dgb.fns)) + for _, fn := range dgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*dgb.flds)+len(dgb.fns)) + for _, f := range *dgb.flds { + columns = append(columns, selector.C(f)) } + columns = 
append(columns, aggregation...) + selector.Select(columns...) } - selector := dgb.sqlQuery() + selector.GroupBy(selector.Columns(*dgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := dgb.driver.Query(ctx, query, args, rows); err != nil { + if err := dgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (dgb *DecisionGroupBy) sqlQuery() *sql.Selector { - selector := dgb.sql.Select() - aggregation := make([]string, 0, len(dgb.fns)) - for _, fn := range dgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(dgb.fields)+len(dgb.fns)) - for _, f := range dgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(dgb.fields...)...) -} - // DecisionSelect is the builder for selecting fields of Decision entities. type DecisionSelect struct { *DecisionQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ds *DecisionSelect) Aggregate(fns ...AggregateFunc) *DecisionSelect { + ds.fns = append(ds.fns, fns...) + return ds } // Scan applies the selector query and scans the result into the given value. 
func (ds *DecisionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ds.ctx, "Select") if err := ds.prepareQuery(ctx); err != nil { return err } - ds.sql = ds.DecisionQuery.sqlQuery(ctx) - return ds.sqlScan(ctx, v) + return scanWithInterceptors[*DecisionQuery, *DecisionSelect](ctx, ds.DecisionQuery, ds, ds.inters, v) } -func (ds *DecisionSelect) sqlScan(ctx context.Context, v any) error { +func (ds *DecisionSelect) sqlScan(ctx context.Context, root *DecisionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ds.fns)) + for _, fn := range ds.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ds.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } rows := &sql.Rows{} - query, args := ds.sql.Query() + query, args := selector.Query() if err := ds.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 64b40871eca..1b62cc54c30 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -324,35 +324,8 @@ func (du *DecisionUpdate) ClearOwner() *DecisionUpdate { // Save executes the query and returns the number of nodes affected by the update operation. 
func (du *DecisionUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) du.defaults() - if len(du.hooks) == 0 { - affected, err = du.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - du.mutation = mutation - affected, err = du.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(du.hooks) - 1; i >= 0; i-- { - if du.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = du.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, du.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, du.sqlSave, du.mutation, du.hooks) } // SaveX is like Save, but panics if an error occurs. @@ -390,16 +363,7 @@ func (du *DecisionUpdate) defaults() { } func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: decision.Table, - Columns: decision.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) if ps := du.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -408,198 +372,91 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := du.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldCreatedAt, - }) + _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) } if du.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: 
decision.FieldCreatedAt, - }) + _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) } if value, ok := du.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUpdatedAt, - }) + _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } if du.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldUpdatedAt, - }) + _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) } if value, ok := du.mutation.Until(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUntil, - }) + _spec.SetField(decision.FieldUntil, field.TypeTime, value) } if du.mutation.UntilCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldUntil, - }) + _spec.ClearField(decision.FieldUntil, field.TypeTime) } if value, ok := du.mutation.Scenario(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScenario, - }) + _spec.SetField(decision.FieldScenario, field.TypeString, value) } if value, ok := du.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldType, - }) + _spec.SetField(decision.FieldType, field.TypeString, value) } if value, ok := du.mutation.StartIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartIP, - }) + _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) } if value, ok := du.mutation.AddedStartIP(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: 
decision.FieldStartIP, - }) + _spec.AddField(decision.FieldStartIP, field.TypeInt64, value) } if du.mutation.StartIPCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldStartIP, - }) + _spec.ClearField(decision.FieldStartIP, field.TypeInt64) } if value, ok := du.mutation.EndIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) } if value, ok := du.mutation.AddedEndIP(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.AddField(decision.FieldEndIP, field.TypeInt64, value) } if du.mutation.EndIPCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldEndIP, - }) + _spec.ClearField(decision.FieldEndIP, field.TypeInt64) } if value, ok := du.mutation.StartSuffix(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value) } if value, ok := du.mutation.AddedStartSuffix(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value) } if du.mutation.StartSuffixCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldStartSuffix, - }) + _spec.ClearField(decision.FieldStartSuffix, field.TypeInt64) } if value, ok := du.mutation.EndSuffix(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: 
decision.FieldEndSuffix, - }) + _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) } if value, ok := du.mutation.AddedEndSuffix(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndSuffix, - }) + _spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value) } if du.mutation.EndSuffixCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldEndSuffix, - }) + _spec.ClearField(decision.FieldEndSuffix, field.TypeInt64) } if value, ok := du.mutation.IPSize(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + _spec.SetField(decision.FieldIPSize, field.TypeInt64, value) } if value, ok := du.mutation.AddedIPSize(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + _spec.AddField(decision.FieldIPSize, field.TypeInt64, value) } if du.mutation.IPSizeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldIPSize, - }) + _spec.ClearField(decision.FieldIPSize, field.TypeInt64) } if value, ok := du.mutation.Scope(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScope, - }) + _spec.SetField(decision.FieldScope, field.TypeString, value) } if value, ok := du.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldValue, - }) + _spec.SetField(decision.FieldValue, field.TypeString, value) } if value, ok := du.mutation.Origin(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - 
Column: decision.FieldOrigin, - }) + _spec.SetField(decision.FieldOrigin, field.TypeString, value) } if value, ok := du.mutation.Simulated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: decision.FieldSimulated, - }) + _spec.SetField(decision.FieldSimulated, field.TypeBool, value) } if value, ok := du.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldUUID, - }) + _spec.SetField(decision.FieldUUID, field.TypeString, value) } if du.mutation.UUIDCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: decision.FieldUUID, - }) + _spec.ClearField(decision.FieldUUID, field.TypeString) } if du.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -609,10 +466,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -625,10 +479,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -644,6 +495,7 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + du.mutation.done = true return n, nil } @@ -948,6 +800,12 @@ func (duo *DecisionUpdateOne) ClearOwner() *DecisionUpdateOne { return duo } +// Where appends a list predicates to the DecisionUpdate builder. 
+func (duo *DecisionUpdateOne) Where(ps ...predicate.Decision) *DecisionUpdateOne { + duo.mutation.Where(ps...) + return duo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUpdateOne { @@ -957,41 +815,8 @@ func (duo *DecisionUpdateOne) Select(field string, fields ...string) *DecisionUp // Save executes the query and returns the updated Decision entity. func (duo *DecisionUpdateOne) Save(ctx context.Context) (*Decision, error) { - var ( - err error - node *Decision - ) duo.defaults() - if len(duo.hooks) == 0 { - node, err = duo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - duo.mutation = mutation - node, err = duo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(duo.hooks) - 1; i >= 0; i-- { - if duo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = duo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, duo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Decision) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from DecisionMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -1029,16 +854,7 @@ func (duo *DecisionUpdateOne) defaults() { } func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: decision.Table, - Columns: decision.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: decision.FieldID, - }, - }, - } + _spec := sqlgraph.NewUpdateSpec(decision.Table, decision.Columns, sqlgraph.NewFieldSpec(decision.FieldID, field.TypeInt)) id, ok := duo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Decision.id" for update`)} @@ -1064,198 +880,91 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err } } if value, ok := duo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldCreatedAt, - }) + _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) } if duo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldCreatedAt, - }) + _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) } if value, ok := duo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUpdatedAt, - }) + _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } if duo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldUpdatedAt, - }) + _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) } if value, ok := duo.mutation.Until(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: decision.FieldUntil, - }) + _spec.SetField(decision.FieldUntil, field.TypeTime, value) } if 
duo.mutation.UntilCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: decision.FieldUntil, - }) + _spec.ClearField(decision.FieldUntil, field.TypeTime) } if value, ok := duo.mutation.Scenario(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScenario, - }) + _spec.SetField(decision.FieldScenario, field.TypeString, value) } if value, ok := duo.mutation.GetType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldType, - }) + _spec.SetField(decision.FieldType, field.TypeString, value) } if value, ok := duo.mutation.StartIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartIP, - }) + _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) } if value, ok := duo.mutation.AddedStartIP(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartIP, - }) + _spec.AddField(decision.FieldStartIP, field.TypeInt64, value) } if duo.mutation.StartIPCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldStartIP, - }) + _spec.ClearField(decision.FieldStartIP, field.TypeInt64) } if value, ok := duo.mutation.EndIP(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) } if value, ok := duo.mutation.AddedEndIP(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndIP, - }) + _spec.AddField(decision.FieldEndIP, field.TypeInt64, value) } if 
duo.mutation.EndIPCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldEndIP, - }) + _spec.ClearField(decision.FieldEndIP, field.TypeInt64) } if value, ok := duo.mutation.StartSuffix(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value) } if value, ok := duo.mutation.AddedStartSuffix(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldStartSuffix, - }) + _spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value) } if duo.mutation.StartSuffixCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldStartSuffix, - }) + _spec.ClearField(decision.FieldStartSuffix, field.TypeInt64) } if value, ok := duo.mutation.EndSuffix(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndSuffix, - }) + _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) } if value, ok := duo.mutation.AddedEndSuffix(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldEndSuffix, - }) + _spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value) } if duo.mutation.EndSuffixCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldEndSuffix, - }) + _spec.ClearField(decision.FieldEndSuffix, field.TypeInt64) } if value, ok := duo.mutation.IPSize(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + 
_spec.SetField(decision.FieldIPSize, field.TypeInt64, value) } if value, ok := duo.mutation.AddedIPSize(); ok { - _spec.Fields.Add = append(_spec.Fields.Add, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Value: value, - Column: decision.FieldIPSize, - }) + _spec.AddField(decision.FieldIPSize, field.TypeInt64, value) } if duo.mutation.IPSizeCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeInt64, - Column: decision.FieldIPSize, - }) + _spec.ClearField(decision.FieldIPSize, field.TypeInt64) } if value, ok := duo.mutation.Scope(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldScope, - }) + _spec.SetField(decision.FieldScope, field.TypeString, value) } if value, ok := duo.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldValue, - }) + _spec.SetField(decision.FieldValue, field.TypeString, value) } if value, ok := duo.mutation.Origin(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldOrigin, - }) + _spec.SetField(decision.FieldOrigin, field.TypeString, value) } if value, ok := duo.mutation.Simulated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: decision.FieldSimulated, - }) + _spec.SetField(decision.FieldSimulated, field.TypeBool, value) } if value, ok := duo.mutation.UUID(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: decision.FieldUUID, - }) + _spec.SetField(decision.FieldUUID, field.TypeString, value) } if duo.mutation.UUIDCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: decision.FieldUUID, - }) + 
_spec.ClearField(decision.FieldUUID, field.TypeString) } if duo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1265,10 +974,7 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -1281,10 +987,7 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err Columns: []string{decision.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1303,5 +1006,6 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err } return nil, err } + duo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index 0455af444d2..393ce9f1869 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -6,6 +6,8 @@ import ( "context" "errors" "fmt" + "reflect" + "sync" "entgo.io/ent" "entgo.io/ent/dialect/sql" @@ -21,50 +23,79 @@ import ( // ent aliases to avoid import conflicts in user's code. 
type ( - Op = ent.Op - Hook = ent.Hook - Value = ent.Value - Query = ent.Query - Policy = ent.Policy - Mutator = ent.Mutator - Mutation = ent.Mutation - MutateFunc = ent.MutateFunc + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc ) +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + // OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. type OrderFunc func(*sql.Selector) -// columnChecker returns a function indicates if the column exists in the given column. 
-func columnChecker(table string) func(string) error { - checks := map[string]func(string) bool{ - alert.Table: alert.ValidColumn, - bouncer.Table: bouncer.ValidColumn, - configitem.Table: configitem.ValidColumn, - decision.Table: decision.ValidColumn, - event.Table: event.ValidColumn, - machine.Table: machine.ValidColumn, - meta.Table: meta.ValidColumn, - } - check, ok := checks[table] - if !ok { - return func(string) error { - return fmt.Errorf("unknown table %q", table) - } - } - return func(column string) error { - if !check(column) { - return fmt.Errorf("unknown column %q for table %q", column, table) - } - return nil - } +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// columnChecker checks if the column exists in the given table. +func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + alert.Table: alert.ValidColumn, + bouncer.Table: bouncer.ValidColumn, + configitem.Table: configitem.ValidColumn, + decision.Table: decision.ValidColumn, + event.Table: event.ValidColumn, + machine.Table: machine.ValidColumn, + meta.Table: meta.ValidColumn, + }) + }) + return columnCheck(table, column) } // Asc applies the given fields in ASC order. -func Asc(fields ...string) OrderFunc { +func Asc(fields ...string) func(*sql.Selector) { return func(s *sql.Selector) { - check := columnChecker(s.TableName()) for _, f := range fields { - if err := check(f); err != nil { + if err := checkColumn(s.TableName(), f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Asc(s.C(f))) @@ -73,11 +104,10 @@ func Asc(fields ...string) OrderFunc { } // Desc applies the given fields in DESC order. 
-func Desc(fields ...string) OrderFunc { +func Desc(fields ...string) func(*sql.Selector) { return func(s *sql.Selector) { - check := columnChecker(s.TableName()) for _, f := range fields { - if err := check(f); err != nil { + if err := checkColumn(s.TableName(), f); err != nil { s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) } s.OrderBy(sql.Desc(s.C(f))) @@ -109,8 +139,7 @@ func Count() AggregateFunc { // Max applies the "max" aggregation function on the given field of each group. func Max(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -121,8 +150,7 @@ func Max(field string) AggregateFunc { // Mean applies the "mean" aggregation function on the given field of each group. func Mean(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -133,8 +161,7 @@ func Mean(field string) AggregateFunc { // Min applies the "min" aggregation function on the given field of each group. func Min(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -145,8 +172,7 @@ func Min(field string) AggregateFunc { // Sum applies the "sum" aggregation function on the given field of each group. 
func Sum(field string) AggregateFunc { return func(s *sql.Selector) string { - check := columnChecker(s.TableName()) - if err := check(field); err != nil { + if err := checkColumn(s.TableName(), field); err != nil { s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) return "" } @@ -275,6 +301,7 @@ func IsConstraintError(err error) bool { type selector struct { label string flds *[]string + fns []AggregateFunc scan func(context.Context, any) error } @@ -473,5 +500,121 @@ func (s *selector) BoolX(ctx context.Context) bool { return v } +// withHooks invokes the builder operation with the given hooks, if any. +func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. 
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + // queryHook describes an internal hook for the different sqlAll methods. type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go index 4754107fddc..df4a2d10c8b 100644 --- a/pkg/database/ent/event.go +++ b/pkg/database/ent/event.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" @@ -29,7 +30,8 @@ type Event struct { AlertEvents int `json:"alert_events,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the EventQuery when eager-loading is set. - Edges EventEdges `json:"edges"` + Edges EventEdges `json:"edges"` + selectValues sql.SelectValues } // EventEdges holds the relations/edges for other nodes in the graph. 
@@ -66,7 +68,7 @@ func (*Event) scanValues(columns []string) ([]any, error) { case event.FieldCreatedAt, event.FieldUpdatedAt, event.FieldTime: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Event", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -118,21 +120,29 @@ func (e *Event) assignValues(columns []string, values []any) error { } else if value.Valid { e.AlertEvents = int(value.Int64) } + default: + e.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Event. +// This includes values selected through modifiers, order, etc. +func (e *Event) Value(name string) (ent.Value, error) { + return e.selectValues.Get(name) +} + // QueryOwner queries the "owner" edge of the Event entity. func (e *Event) QueryOwner() *AlertQuery { - return (&EventClient{config: e.config}).QueryOwner(e) + return NewEventClient(e.config).QueryOwner(e) } // Update returns a builder for updating this Event. // Note that you need to call Event.Unwrap() before calling this method if this Event // was returned from a transaction, and the transaction was committed or rolled back. func (e *Event) Update() *EventUpdateOne { - return (&EventClient{config: e.config}).UpdateOne(e) + return NewEventClient(e.config).UpdateOne(e) } // Unwrap unwraps the Event entity that was returned from a transaction after it was closed, @@ -175,9 +185,3 @@ func (e *Event) String() string { // Events is a parsable slice of Event. 
type Events []*Event - -func (e Events) config(cfg config) { - for _i := range e { - e[_i].config = cfg - } -} diff --git a/pkg/database/ent/event/event.go b/pkg/database/ent/event/event.go index 33b9b67f8b9..48f5a355824 100644 --- a/pkg/database/ent/event/event.go +++ b/pkg/database/ent/event/event.go @@ -4,6 +4,9 @@ package event import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -66,3 +69,50 @@ var ( // SerializedValidator is a validator for the "serialized" field. It is called by the builders before save. SerializedValidator func(string) error ) + +// OrderOption defines the ordering options for the Event queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByTime orders the results by the time field. +func ByTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTime, opts...).ToFunc() +} + +// BySerialized orders the results by the serialized field. +func BySerialized(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSerialized, opts...).ToFunc() +} + +// ByAlertEvents orders the results by the alert_events field. +func ByAlertEvents(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAlertEvents, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. 
+func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} diff --git a/pkg/database/ent/event/where.go b/pkg/database/ent/event/where.go index 7554e59e678..238bea988bd 100644 --- a/pkg/database/ent/event/where.go +++ b/pkg/database/ent/event/where.go @@ -12,477 +12,307 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Event(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. 
func IDNotIn(ids ...int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Event(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v)) } // Time applies equality check predicate on the "time" field. It's identical to TimeEQ. 
func Time(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldEQ(FieldTime, v)) } // Serialized applies equality check predicate on the "serialized" field. It's identical to SerializedEQ. func Serialized(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldEQ(FieldSerialized, v)) } // AlertEvents applies equality check predicate on the "alert_events" field. It's identical to AlertEventsEQ. func AlertEvents(v int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertEvents), v)) - }) + return predicate.Event(sql.FieldEQ(FieldAlertEvents, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Event(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
func CreatedAtNotIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Event(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. func CreatedAtIsNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Event(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Event(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. 
func UpdatedAtEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Event(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. func UpdatedAtGT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. 
func UpdatedAtLT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Event(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Event(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Event(sql.FieldNotNull(FieldUpdatedAt)) } // TimeEQ applies the EQ predicate on the "time" field. func TimeEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldEQ(FieldTime, v)) } // TimeNEQ applies the NEQ predicate on the "time" field. func TimeNEQ(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldTime, v)) } // TimeIn applies the In predicate on the "time" field. func TimeIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldTime), v...)) - }) + return predicate.Event(sql.FieldIn(FieldTime, vs...)) } // TimeNotIn applies the NotIn predicate on the "time" field. 
func TimeNotIn(vs ...time.Time) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldTime), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldTime, vs...)) } // TimeGT applies the GT predicate on the "time" field. func TimeGT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldGT(FieldTime, v)) } // TimeGTE applies the GTE predicate on the "time" field. func TimeGTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldGTE(FieldTime, v)) } // TimeLT applies the LT predicate on the "time" field. func TimeLT(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldLT(FieldTime, v)) } // TimeLTE applies the LTE predicate on the "time" field. func TimeLTE(v time.Time) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldTime), v)) - }) + return predicate.Event(sql.FieldLTE(FieldTime, v)) } // SerializedEQ applies the EQ predicate on the "serialized" field. func SerializedEQ(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldEQ(FieldSerialized, v)) } // SerializedNEQ applies the NEQ predicate on the "serialized" field. func SerializedNEQ(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldSerialized, v)) } // SerializedIn applies the In predicate on the "serialized" field. 
func SerializedIn(vs ...string) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldSerialized), v...)) - }) + return predicate.Event(sql.FieldIn(FieldSerialized, vs...)) } // SerializedNotIn applies the NotIn predicate on the "serialized" field. func SerializedNotIn(vs ...string) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldSerialized), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldSerialized, vs...)) } // SerializedGT applies the GT predicate on the "serialized" field. func SerializedGT(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldGT(FieldSerialized, v)) } // SerializedGTE applies the GTE predicate on the "serialized" field. func SerializedGTE(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldGTE(FieldSerialized, v)) } // SerializedLT applies the LT predicate on the "serialized" field. func SerializedLT(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldLT(FieldSerialized, v)) } // SerializedLTE applies the LTE predicate on the "serialized" field. func SerializedLTE(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldLTE(FieldSerialized, v)) } // SerializedContains applies the Contains predicate on the "serialized" field. 
func SerializedContains(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldContains(FieldSerialized, v)) } // SerializedHasPrefix applies the HasPrefix predicate on the "serialized" field. func SerializedHasPrefix(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldHasPrefix(FieldSerialized, v)) } // SerializedHasSuffix applies the HasSuffix predicate on the "serialized" field. func SerializedHasSuffix(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldHasSuffix(FieldSerialized, v)) } // SerializedEqualFold applies the EqualFold predicate on the "serialized" field. func SerializedEqualFold(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldEqualFold(FieldSerialized, v)) } // SerializedContainsFold applies the ContainsFold predicate on the "serialized" field. func SerializedContainsFold(v string) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldSerialized), v)) - }) + return predicate.Event(sql.FieldContainsFold(FieldSerialized, v)) } // AlertEventsEQ applies the EQ predicate on the "alert_events" field. func AlertEventsEQ(v int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertEvents), v)) - }) + return predicate.Event(sql.FieldEQ(FieldAlertEvents, v)) } // AlertEventsNEQ applies the NEQ predicate on the "alert_events" field. 
func AlertEventsNEQ(v int) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAlertEvents), v)) - }) + return predicate.Event(sql.FieldNEQ(FieldAlertEvents, v)) } // AlertEventsIn applies the In predicate on the "alert_events" field. func AlertEventsIn(vs ...int) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAlertEvents), v...)) - }) + return predicate.Event(sql.FieldIn(FieldAlertEvents, vs...)) } // AlertEventsNotIn applies the NotIn predicate on the "alert_events" field. func AlertEventsNotIn(vs ...int) predicate.Event { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAlertEvents), v...)) - }) + return predicate.Event(sql.FieldNotIn(FieldAlertEvents, vs...)) } // AlertEventsIsNil applies the IsNil predicate on the "alert_events" field. func AlertEventsIsNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldAlertEvents))) - }) + return predicate.Event(sql.FieldIsNull(FieldAlertEvents)) } // AlertEventsNotNil applies the NotNil predicate on the "alert_events" field. func AlertEventsNotNil() predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldAlertEvents))) - }) + return predicate.Event(sql.FieldNotNull(FieldAlertEvents)) } // HasOwner applies the HasEdge predicate on the "owner" edge. 
@@ -490,7 +320,6 @@ func HasOwner() predicate.Event { return predicate.Event(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), ) sqlgraph.HasNeighbors(s, step) @@ -500,11 +329,7 @@ func HasOwner() predicate.Event { // HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). func HasOwnerWith(preds ...predicate.Alert) predicate.Event { return predicate.Event(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), - ) + step := newOwnerStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -515,32 +340,15 @@ func HasOwnerWith(preds ...predicate.Alert) predicate.Event { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Event) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Event(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Event) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Event(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Event) predicate.Event { - return predicate.Event(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Event(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/event_create.go b/pkg/database/ent/event_create.go index c5861305130..98194f2fd33 100644 --- a/pkg/database/ent/event_create.go +++ b/pkg/database/ent/event_create.go @@ -101,50 +101,8 @@ func (ec *EventCreate) Mutation() *EventMutation { // Save creates the Event in the database. func (ec *EventCreate) Save(ctx context.Context) (*Event, error) { - var ( - err error - node *Event - ) ec.defaults() - if len(ec.hooks) == 0 { - if err = ec.check(); err != nil { - return nil, err - } - node, err = ec.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = ec.check(); err != nil { - return nil, err - } - ec.mutation = mutation - if node, err = ec.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(ec.hooks) - 1; i >= 0; i-- { - if ec.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ec.hooks[i](mut) - } - v, err := mut.Mutate(ctx, ec.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Event) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from EventMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, ec.sqlSave, ec.mutation, ec.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -198,6 +156,9 @@ func (ec *EventCreate) check() error { } func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) { + if err := ec.check(); err != nil { + return nil, err + } _node, _spec := ec.createSpec() if err := sqlgraph.CreateNode(ctx, ec.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -207,50 +168,30 @@ func (ec *EventCreate) sqlSave(ctx context.Context) (*Event, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + ec.mutation.id = &_node.ID + ec.mutation.done = true return _node, nil } func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { var ( _node = &Event{config: ec.config} - _spec = &sqlgraph.CreateSpec{ - Table: event.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(event.Table, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) ) if value, ok := ec.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldCreatedAt, - }) + _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := ec.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldUpdatedAt, - }) + _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := ec.mutation.Time(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldTime, - }) + _spec.SetField(event.FieldTime, field.TypeTime, value) _node.Time = value } if value, ok := ec.mutation.Serialized(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: event.FieldSerialized, - }) + _spec.SetField(event.FieldSerialized, field.TypeString, value) _node.Serialized = value } if nodes := ec.mutation.OwnerIDs(); 
len(nodes) > 0 { @@ -261,10 +202,7 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -279,11 +217,15 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { // EventCreateBulk is the builder for creating many Event entities in bulk. type EventCreateBulk struct { config + err error builders []*EventCreate } // Save creates the Event entities in the database. func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) { + if ecb.err != nil { + return nil, ecb.err + } specs := make([]*sqlgraph.CreateSpec, len(ecb.builders)) nodes := make([]*Event, len(ecb.builders)) mutators := make([]Mutator, len(ecb.builders)) @@ -300,8 +242,8 @@ func (ecb *EventCreateBulk) Save(ctx context.Context) ([]*Event, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, ecb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/event_delete.go b/pkg/database/ent/event_delete.go index 0220dc71d31..93dd1246b7e 100644 --- a/pkg/database/ent/event_delete.go +++ b/pkg/database/ent/event_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (ed *EventDelete) Where(ps ...predicate.Event) *EventDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (ed *EventDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(ed.hooks) == 0 { - affected, err = ed.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - ed.mutation = mutation - affected, err = ed.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(ed.hooks) - 1; i >= 0; i-- { - if ed.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = ed.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, ed.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, ed.sqlExec, ed.mutation, ed.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (ed *EventDelete) ExecX(ctx context.Context) int { } func (ed *EventDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: event.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(event.Table, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) if ps := ed.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (ed *EventDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + ed.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type EventDeleteOne struct { ed *EventDelete } +// Where appends a list predicates to the EventDelete builder. +func (edo *EventDeleteOne) Where(ps ...predicate.Event) *EventDeleteOne { + edo.ed.mutation.Where(ps...) + return edo +} + // Exec executes the deletion query. 
func (edo *EventDeleteOne) Exec(ctx context.Context) error { n, err := edo.ed.Exec(ctx) @@ -111,5 +82,7 @@ func (edo *EventDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (edo *EventDeleteOne) ExecX(ctx context.Context) { - edo.ed.ExecX(ctx) + if err := edo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/event_query.go b/pkg/database/ent/event_query.go index 045d750f818..1493d7bd32c 100644 --- a/pkg/database/ent/event_query.go +++ b/pkg/database/ent/event_query.go @@ -18,11 +18,9 @@ import ( // EventQuery is the builder for querying Event entities. type EventQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []event.OrderOption + inters []Interceptor predicates []predicate.Event withOwner *AlertQuery // intermediate query (i.e. traversal path). @@ -36,34 +34,34 @@ func (eq *EventQuery) Where(ps ...predicate.Event) *EventQuery { return eq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (eq *EventQuery) Limit(limit int) *EventQuery { - eq.limit = &limit + eq.ctx.Limit = &limit return eq } -// Offset adds an offset step to the query. +// Offset to start from. func (eq *EventQuery) Offset(offset int) *EventQuery { - eq.offset = &offset + eq.ctx.Offset = &offset return eq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (eq *EventQuery) Unique(unique bool) *EventQuery { - eq.unique = &unique + eq.ctx.Unique = &unique return eq } -// Order adds an order step to the query. -func (eq *EventQuery) Order(o ...OrderFunc) *EventQuery { +// Order specifies how the records should be ordered. +func (eq *EventQuery) Order(o ...event.OrderOption) *EventQuery { eq.order = append(eq.order, o...) 
return eq } // QueryOwner chains the current query on the "owner" edge. func (eq *EventQuery) QueryOwner() *AlertQuery { - query := &AlertQuery{config: eq.config} + query := (&AlertClient{config: eq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := eq.prepareQuery(ctx); err != nil { return nil, err @@ -86,7 +84,7 @@ func (eq *EventQuery) QueryOwner() *AlertQuery { // First returns the first Event entity from the query. // Returns a *NotFoundError when no Event was found. func (eq *EventQuery) First(ctx context.Context) (*Event, error) { - nodes, err := eq.Limit(1).All(ctx) + nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First")) if err != nil { return nil, err } @@ -109,7 +107,7 @@ func (eq *EventQuery) FirstX(ctx context.Context) *Event { // Returns a *NotFoundError when no Event ID was found. func (eq *EventQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = eq.Limit(1).IDs(ctx); err != nil { + if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -132,7 +130,7 @@ func (eq *EventQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Event entity is found. // Returns a *NotFoundError when no Event entities are found. func (eq *EventQuery) Only(ctx context.Context) (*Event, error) { - nodes, err := eq.Limit(2).All(ctx) + nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only")) if err != nil { return nil, err } @@ -160,7 +158,7 @@ func (eq *EventQuery) OnlyX(ctx context.Context) *Event { // Returns a *NotFoundError when no entities are found. 
func (eq *EventQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = eq.Limit(2).IDs(ctx); err != nil { + if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -185,10 +183,12 @@ func (eq *EventQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Events. func (eq *EventQuery) All(ctx context.Context) ([]*Event, error) { + ctx = setContextOp(ctx, eq.ctx, "All") if err := eq.prepareQuery(ctx); err != nil { return nil, err } - return eq.sqlAll(ctx) + qr := querierAll[[]*Event, *EventQuery]() + return withInterceptors[[]*Event](ctx, eq, qr, eq.inters) } // AllX is like All, but panics if an error occurs. @@ -201,9 +201,12 @@ func (eq *EventQuery) AllX(ctx context.Context) []*Event { } // IDs executes the query and returns a list of Event IDs. -func (eq *EventQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := eq.Select(event.FieldID).Scan(ctx, &ids); err != nil { +func (eq *EventQuery) IDs(ctx context.Context) (ids []int, err error) { + if eq.ctx.Unique == nil && eq.path != nil { + eq.Unique(true) + } + ctx = setContextOp(ctx, eq.ctx, "IDs") + if err = eq.Select(event.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -220,10 +223,11 @@ func (eq *EventQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (eq *EventQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, eq.ctx, "Count") if err := eq.prepareQuery(ctx); err != nil { return 0, err } - return eq.sqlCount(ctx) + return withInterceptors[int](ctx, eq, querierCount[*EventQuery](), eq.inters) } // CountX is like Count, but panics if an error occurs. @@ -237,10 +241,15 @@ func (eq *EventQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (eq *EventQuery) Exist(ctx context.Context) (bool, error) { - if err := eq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, eq.ctx, "Exist") + switch _, err := eq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return eq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -260,22 +269,21 @@ func (eq *EventQuery) Clone() *EventQuery { } return &EventQuery{ config: eq.config, - limit: eq.limit, - offset: eq.offset, - order: append([]OrderFunc{}, eq.order...), + ctx: eq.ctx.Clone(), + order: append([]event.OrderOption{}, eq.order...), + inters: append([]Interceptor{}, eq.inters...), predicates: append([]predicate.Event{}, eq.predicates...), withOwner: eq.withOwner.Clone(), // clone intermediate query. - sql: eq.sql.Clone(), - path: eq.path, - unique: eq.unique, + sql: eq.sql.Clone(), + path: eq.path, } } // WithOwner tells the query-builder to eager-load the nodes that are connected to // the "owner" edge. The optional arguments are used to configure the query builder of the edge. func (eq *EventQuery) WithOwner(opts ...func(*AlertQuery)) *EventQuery { - query := &AlertQuery{config: eq.config} + query := (&AlertClient{config: eq.config}).Query() for _, opt := range opts { opt(query) } @@ -298,16 +306,11 @@ func (eq *EventQuery) WithOwner(opts ...func(*AlertQuery)) *EventQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy { - grbuild := &EventGroupBy{config: eq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := eq.prepareQuery(ctx); err != nil { - return nil, err - } - return eq.sqlQuery(ctx), nil - } + eq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &EventGroupBy{build: eq} + grbuild.flds = &eq.ctx.Fields grbuild.label = event.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -324,15 +327,30 @@ func (eq *EventQuery) GroupBy(field string, fields ...string) *EventGroupBy { // Select(event.FieldCreatedAt). // Scan(ctx, &v) func (eq *EventQuery) Select(fields ...string) *EventSelect { - eq.fields = append(eq.fields, fields...) - selbuild := &EventSelect{EventQuery: eq} - selbuild.label = event.Label - selbuild.flds, selbuild.scan = &eq.fields, selbuild.Scan - return selbuild + eq.ctx.Fields = append(eq.ctx.Fields, fields...) + sbuild := &EventSelect{EventQuery: eq} + sbuild.label = event.Label + sbuild.flds, sbuild.scan = &eq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a EventSelect configured with the given aggregations. +func (eq *EventQuery) Aggregate(fns ...AggregateFunc) *EventSelect { + return eq.Select().Aggregate(fns...) } func (eq *EventQuery) prepareQuery(ctx context.Context) error { - for _, f := range eq.fields { + for _, inter := range eq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, eq); err != nil { + return err + } + } + } + for _, f := range eq.ctx.Fields { if !event.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -392,6 +410,9 @@ func (eq *EventQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes [] } nodeids[fk] = append(nodeids[fk], nodes[i]) } + if len(ids) == 0 { + return nil + } query.Where(alert.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { @@ -411,41 +432,22 @@ func (eq *EventQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes [] func (eq *EventQuery) sqlCount(ctx context.Context) (int, error) { _spec := eq.querySpec() - _spec.Node.Columns = 
eq.fields - if len(eq.fields) > 0 { - _spec.Unique = eq.unique != nil && *eq.unique + _spec.Node.Columns = eq.ctx.Fields + if len(eq.ctx.Fields) > 0 { + _spec.Unique = eq.ctx.Unique != nil && *eq.ctx.Unique } return sqlgraph.CountNodes(ctx, eq.driver, _spec) } -func (eq *EventQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := eq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: event.Table, - Columns: event.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - }, - From: eq.sql, - Unique: true, - } - if unique := eq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) + _spec.From = eq.sql + if unique := eq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if eq.path != nil { + _spec.Unique = true } - if fields := eq.fields; len(fields) > 0 { + if fields := eq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, event.FieldID) for i := range fields { @@ -453,6 +455,9 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } + if eq.withOwner != nil { + _spec.Node.AddColumnOnce(event.FieldAlertEvents) + } } if ps := eq.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -461,10 +466,10 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := eq.limit; limit != nil { + if limit := eq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := eq.offset; offset != nil { + if offset := eq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := eq.order; len(ps) > 0 { @@ 
-480,7 +485,7 @@ func (eq *EventQuery) querySpec() *sqlgraph.QuerySpec { func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(eq.driver.Dialect()) t1 := builder.Table(event.Table) - columns := eq.fields + columns := eq.ctx.Fields if len(columns) == 0 { columns = event.Columns } @@ -489,7 +494,7 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = eq.sql selector.Select(selector.Columns(columns...)...) } - if eq.unique != nil && *eq.unique { + if eq.ctx.Unique != nil && *eq.ctx.Unique { selector.Distinct() } for _, p := range eq.predicates { @@ -498,12 +503,12 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range eq.order { p(selector) } - if offset := eq.offset; offset != nil { + if offset := eq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := eq.limit; limit != nil { + if limit := eq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -511,13 +516,8 @@ func (eq *EventQuery) sqlQuery(ctx context.Context) *sql.Selector { // EventGroupBy is the group-by builder for Event entities. type EventGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *EventQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -526,74 +526,77 @@ func (egb *EventGroupBy) Aggregate(fns ...AggregateFunc) *EventGroupBy { return egb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. 
func (egb *EventGroupBy) Scan(ctx context.Context, v any) error { - query, err := egb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, egb.build.ctx, "GroupBy") + if err := egb.build.prepareQuery(ctx); err != nil { return err } - egb.sql = query - return egb.sqlScan(ctx, v) + return scanWithInterceptors[*EventQuery, *EventGroupBy](ctx, egb.build, egb, egb.build.inters, v) } -func (egb *EventGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range egb.fields { - if !event.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (egb *EventGroupBy) sqlScan(ctx context.Context, root *EventQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(egb.fns)) + for _, fn := range egb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*egb.flds)+len(egb.fns)) + for _, f := range *egb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := egb.sqlQuery() + selector.GroupBy(selector.Columns(*egb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := egb.driver.Query(ctx, query, args, rows); err != nil { + if err := egb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (egb *EventGroupBy) sqlQuery() *sql.Selector { - selector := egb.sql.Select() - aggregation := make([]string, 0, len(egb.fns)) - for _, fn := range egb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(egb.fields)+len(egb.fns)) - for _, f := range egb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(egb.fields...)...) -} - // EventSelect is the builder for selecting fields of Event entities. type EventSelect struct { *EventQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (es *EventSelect) Aggregate(fns ...AggregateFunc) *EventSelect { + es.fns = append(es.fns, fns...) + return es } // Scan applies the selector query and scans the result into the given value. func (es *EventSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, es.ctx, "Select") if err := es.prepareQuery(ctx); err != nil { return err } - es.sql = es.EventQuery.sqlQuery(ctx) - return es.sqlScan(ctx, v) + return scanWithInterceptors[*EventQuery, *EventSelect](ctx, es.EventQuery, es, es.inters, v) } -func (es *EventSelect) sqlScan(ctx context.Context, v any) error { +func (es *EventSelect) sqlScan(ctx context.Context, root *EventQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(es.fns)) + for _, fn := range es.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*es.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } rows := &sql.Rows{} - query, args := es.sql.Query() + query, args := selector.Query() if err := es.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index fcd0cc50c99..db748101519 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -117,41 +117,8 @@ func (eu *EventUpdate) ClearOwner() *EventUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (eu *EventUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) eu.defaults() - if len(eu.hooks) == 0 { - if err = eu.check(); err != nil { - return 0, err - } - affected, err = eu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = eu.check(); err != nil { - return 0, err - } - eu.mutation = mutation - affected, err = eu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(eu.hooks) - 1; i >= 0; i-- { - if eu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = eu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, eu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, eu.sqlSave, eu.mutation, eu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -199,16 +166,10 @@ func (eu *EventUpdate) check() error { } func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: event.Table, - Columns: event.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - }, + if err := eu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) if ps := eu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -217,44 +178,22 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := eu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldCreatedAt, - }) + _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) } if eu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: event.FieldCreatedAt, - }) + _spec.ClearField(event.FieldCreatedAt, field.TypeTime) } if value, ok := eu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldUpdatedAt, - }) + _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } if eu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: event.FieldUpdatedAt, - }) + _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) } if value, ok := eu.mutation.Time(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldTime, - }) + _spec.SetField(event.FieldTime, field.TypeTime, value) } if value, ok := eu.mutation.Serialized(); ok { - _spec.Fields.Set = 
append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: event.FieldSerialized, - }) + _spec.SetField(event.FieldSerialized, field.TypeString, value) } if eu.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -264,10 +203,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -280,10 +216,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -299,6 +232,7 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + eu.mutation.done = true return n, nil } @@ -396,6 +330,12 @@ func (euo *EventUpdateOne) ClearOwner() *EventUpdateOne { return euo } +// Where appends a list predicates to the EventUpdate builder. +func (euo *EventUpdateOne) Where(ps ...predicate.Event) *EventUpdateOne { + euo.mutation.Where(ps...) + return euo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOne { @@ -405,47 +345,8 @@ func (euo *EventUpdateOne) Select(field string, fields ...string) *EventUpdateOn // Save executes the query and returns the updated Event entity. 
func (euo *EventUpdateOne) Save(ctx context.Context) (*Event, error) { - var ( - err error - node *Event - ) euo.defaults() - if len(euo.hooks) == 0 { - if err = euo.check(); err != nil { - return nil, err - } - node, err = euo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = euo.check(); err != nil { - return nil, err - } - euo.mutation = mutation - node, err = euo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(euo.hooks) - 1; i >= 0; i-- { - if euo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = euo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, euo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Event) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from EventMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, euo.sqlSave, euo.mutation, euo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -493,16 +394,10 @@ func (euo *EventUpdateOne) check() error { } func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: event.Table, - Columns: event.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: event.FieldID, - }, - }, + if err := euo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) id, ok := euo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Event.id" for update`)} @@ -528,44 +423,22 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error } } if value, ok := euo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldCreatedAt, - }) + _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) } if euo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: event.FieldCreatedAt, - }) + _spec.ClearField(event.FieldCreatedAt, field.TypeTime) } if value, ok := euo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldUpdatedAt, - }) + _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } if euo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: event.FieldUpdatedAt, - }) + _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) } if value, ok := euo.mutation.Time(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: event.FieldTime, - }) + _spec.SetField(event.FieldTime, field.TypeTime, value) } if value, ok := 
euo.mutation.Serialized(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: event.FieldSerialized, - }) + _spec.SetField(event.FieldSerialized, field.TypeString, value) } if euo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -575,10 +448,7 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -591,10 +461,7 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error Columns: []string{event.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -613,5 +480,6 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error } return nil, err } + euo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go index 85ab00b01fb..7ec9c3ab1d8 100644 --- a/pkg/database/ent/hook/hook.go +++ b/pkg/database/ent/hook/hook.go @@ -15,11 +15,10 @@ type AlertFunc func(context.Context, *ent.AlertMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f AlertFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.AlertMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AlertMutation", m) + if mv, ok := m.(*ent.AlertMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.AlertMutation", m) } // The BouncerFunc type is an adapter to allow the use of ordinary @@ -28,11 +27,10 @@ type BouncerFunc func(context.Context, *ent.BouncerMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f BouncerFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.BouncerMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BouncerMutation", m) + if mv, ok := m.(*ent.BouncerMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BouncerMutation", m) } // The ConfigItemFunc type is an adapter to allow the use of ordinary @@ -41,11 +39,10 @@ type ConfigItemFunc func(context.Context, *ent.ConfigItemMutation) (ent.Value, e // Mutate calls f(ctx, m). func (f ConfigItemFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.ConfigItemMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ConfigItemMutation", m) + if mv, ok := m.(*ent.ConfigItemMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ConfigItemMutation", m) } // The DecisionFunc type is an adapter to allow the use of ordinary @@ -54,11 +51,10 @@ type DecisionFunc func(context.Context, *ent.DecisionMutation) (ent.Value, error // Mutate calls f(ctx, m). func (f DecisionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.DecisionMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.DecisionMutation", m) + if mv, ok := m.(*ent.DecisionMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.DecisionMutation", m) } // The EventFunc type is an adapter to allow the use of ordinary @@ -67,11 +63,10 @@ type EventFunc func(context.Context, *ent.EventMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f EventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.EventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m) + if mv, ok := m.(*ent.EventMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m) } // The MachineFunc type is an adapter to allow the use of ordinary @@ -80,11 +75,10 @@ type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f MachineFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m) + if mv, ok := m.(*ent.MachineMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MachineMutation", m) } // The MetaFunc type is an adapter to allow the use of ordinary @@ -93,11 +87,10 @@ type MetaFunc func(context.Context, *ent.MetaMutation) (ent.Value, error) // Mutate calls f(ctx, m). func (f MetaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - mv, ok := m.(*ent.MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) + if mv, ok := m.(*ent.MetaMutation); ok { + return f(ctx, mv) } - return f(ctx, mv) + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) } // Condition is a hook condition function. 
diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index dc2b18ee81c..346a8d084ba 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" ) @@ -42,7 +43,8 @@ type Machine struct { AuthType string `json:"auth_type"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MachineQuery when eager-loading is set. - Edges MachineEdges `json:"edges"` + Edges MachineEdges `json:"edges"` + selectValues sql.SelectValues } // MachineEdges holds the relations/edges for other nodes in the graph. @@ -77,7 +79,7 @@ func (*Machine) scanValues(columns []string) ([]any, error) { case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Machine", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -173,21 +175,29 @@ func (m *Machine) assignValues(columns []string, values []any) error { } else if value.Valid { m.AuthType = value.String } + default: + m.selectValues.Set(columns[i], values[i]) } } return nil } +// Value returns the ent.Value that was dynamically selected and assigned to the Machine. +// This includes values selected through modifiers, order, etc. +func (m *Machine) Value(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + // QueryAlerts queries the "alerts" edge of the Machine entity. func (m *Machine) QueryAlerts() *AlertQuery { - return (&MachineClient{config: m.config}).QueryAlerts(m) + return NewMachineClient(m.config).QueryAlerts(m) } // Update returns a builder for updating this Machine. 
// Note that you need to call Machine.Unwrap() before calling this method if this Machine // was returned from a transaction, and the transaction was committed or rolled back. func (m *Machine) Update() *MachineUpdateOne { - return (&MachineClient{config: m.config}).UpdateOne(m) + return NewMachineClient(m.config).UpdateOne(m) } // Unwrap unwraps the Machine entity that was returned from a transaction after it was closed, @@ -254,9 +264,3 @@ func (m *Machine) String() string { // Machines is a parsable slice of Machine. type Machines []*Machine - -func (m Machines) config(cfg config) { - for _i := range m { - m[_i].config = cfg - } -} diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index e6900dd21e1..5456935e04c 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -4,6 +4,9 @@ package machine import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -99,3 +102,92 @@ var ( // DefaultAuthType holds the default value on creation for the "auth_type" field. DefaultAuthType string ) + +// OrderOption defines the ordering options for the Machine queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByLastPush orders the results by the last_push field. 
+func ByLastPush(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastPush, opts...).ToFunc() +} + +// ByLastHeartbeat orders the results by the last_heartbeat field. +func ByLastHeartbeat(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastHeartbeat, opts...).ToFunc() +} + +// ByMachineId orders the results by the machineId field. +func ByMachineId(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMachineId, opts...).ToFunc() +} + +// ByPassword orders the results by the password field. +func ByPassword(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPassword, opts...).ToFunc() +} + +// ByIpAddress orders the results by the ipAddress field. +func ByIpAddress(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIpAddress, opts...).ToFunc() +} + +// ByScenarios orders the results by the scenarios field. +func ByScenarios(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScenarios, opts...).ToFunc() +} + +// ByVersion orders the results by the version field. +func ByVersion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldVersion, opts...).ToFunc() +} + +// ByIsValidated orders the results by the isValidated field. +func ByIsValidated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsValidated, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByAuthType orders the results by the auth_type field. +func ByAuthType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAuthType, opts...).ToFunc() +} + +// ByAlertsCount orders the results by alerts count. +func ByAlertsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAlertsStep(), opts...) 
+ } +} + +// ByAlerts orders the results by alerts terms. +func ByAlerts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAlertsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAlertsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AlertsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), + ) +} diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index 7d0227731cc..e9d00e7e01e 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -12,1218 +12,802 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. 
func IDNotIn(ids ...int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. func IDGT(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Machine(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldUpdatedAt, v)) } // LastPush applies equality check predicate on the "last_push" field. It's identical to LastPushEQ. 
func LastPush(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldLastPush, v)) } // LastHeartbeat applies equality check predicate on the "last_heartbeat" field. It's identical to LastHeartbeatEQ. func LastHeartbeat(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldLastHeartbeat, v)) } // MachineId applies equality check predicate on the "machineId" field. It's identical to MachineIdEQ. func MachineId(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldMachineId, v)) } // Password applies equality check predicate on the "password" field. It's identical to PasswordEQ. func Password(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldPassword, v)) } // IpAddress applies equality check predicate on the "ipAddress" field. It's identical to IpAddressEQ. func IpAddress(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldIpAddress, v)) } // Scenarios applies equality check predicate on the "scenarios" field. It's identical to ScenariosEQ. func Scenarios(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldScenarios, v)) } // Version applies equality check predicate on the "version" field. It's identical to VersionEQ. 
func Version(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldVersion, v)) } // IsValidated applies equality check predicate on the "isValidated" field. It's identical to IsValidatedEQ. func IsValidated(v bool) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIsValidated), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldIsValidated, v)) } // Status applies equality check predicate on the "status" field. It's identical to StatusEQ. func Status(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldStatus, v)) } // AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. func AuthType(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. 
func CreatedAtIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. func CreatedAtGT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. 
func CreatedAtIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Machine(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Machine(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. func UpdatedAtNEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. 
func UpdatedAtGT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. func UpdatedAtLTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Machine(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Machine(sql.FieldNotNull(FieldUpdatedAt)) } // LastPushEQ applies the EQ predicate on the "last_push" field. func LastPushEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldLastPush, v)) } // LastPushNEQ applies the NEQ predicate on the "last_push" field. 
func LastPushNEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldLastPush, v)) } // LastPushIn applies the In predicate on the "last_push" field. func LastPushIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldLastPush), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldLastPush, vs...)) } // LastPushNotIn applies the NotIn predicate on the "last_push" field. func LastPushNotIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldLastPush), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldLastPush, vs...)) } // LastPushGT applies the GT predicate on the "last_push" field. func LastPushGT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldGT(FieldLastPush, v)) } // LastPushGTE applies the GTE predicate on the "last_push" field. func LastPushGTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldLastPush, v)) } // LastPushLT applies the LT predicate on the "last_push" field. func LastPushLT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldLT(FieldLastPush, v)) } // LastPushLTE applies the LTE predicate on the "last_push" field. 
func LastPushLTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastPush), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldLastPush, v)) } // LastPushIsNil applies the IsNil predicate on the "last_push" field. func LastPushIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldLastPush))) - }) + return predicate.Machine(sql.FieldIsNull(FieldLastPush)) } // LastPushNotNil applies the NotNil predicate on the "last_push" field. func LastPushNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldLastPush))) - }) + return predicate.Machine(sql.FieldNotNull(FieldLastPush)) } // LastHeartbeatEQ applies the EQ predicate on the "last_heartbeat" field. func LastHeartbeatEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldLastHeartbeat, v)) } // LastHeartbeatNEQ applies the NEQ predicate on the "last_heartbeat" field. func LastHeartbeatNEQ(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldLastHeartbeat, v)) } // LastHeartbeatIn applies the In predicate on the "last_heartbeat" field. func LastHeartbeatIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldLastHeartbeat), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldLastHeartbeat, vs...)) } // LastHeartbeatNotIn applies the NotIn predicate on the "last_heartbeat" field. 
func LastHeartbeatNotIn(vs ...time.Time) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldLastHeartbeat), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldLastHeartbeat, vs...)) } // LastHeartbeatGT applies the GT predicate on the "last_heartbeat" field. func LastHeartbeatGT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldGT(FieldLastHeartbeat, v)) } // LastHeartbeatGTE applies the GTE predicate on the "last_heartbeat" field. func LastHeartbeatGTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldLastHeartbeat, v)) } // LastHeartbeatLT applies the LT predicate on the "last_heartbeat" field. func LastHeartbeatLT(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldLT(FieldLastHeartbeat, v)) } // LastHeartbeatLTE applies the LTE predicate on the "last_heartbeat" field. func LastHeartbeatLTE(v time.Time) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldLastHeartbeat), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldLastHeartbeat, v)) } // LastHeartbeatIsNil applies the IsNil predicate on the "last_heartbeat" field. func LastHeartbeatIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldLastHeartbeat))) - }) + return predicate.Machine(sql.FieldIsNull(FieldLastHeartbeat)) } // LastHeartbeatNotNil applies the NotNil predicate on the "last_heartbeat" field. 
func LastHeartbeatNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldLastHeartbeat))) - }) + return predicate.Machine(sql.FieldNotNull(FieldLastHeartbeat)) } // MachineIdEQ applies the EQ predicate on the "machineId" field. func MachineIdEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldMachineId, v)) } // MachineIdNEQ applies the NEQ predicate on the "machineId" field. func MachineIdNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldMachineId, v)) } // MachineIdIn applies the In predicate on the "machineId" field. func MachineIdIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldMachineId), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldMachineId, vs...)) } // MachineIdNotIn applies the NotIn predicate on the "machineId" field. func MachineIdNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldMachineId), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldMachineId, vs...)) } // MachineIdGT applies the GT predicate on the "machineId" field. func MachineIdGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldGT(FieldMachineId, v)) } // MachineIdGTE applies the GTE predicate on the "machineId" field. 
func MachineIdGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldMachineId, v)) } // MachineIdLT applies the LT predicate on the "machineId" field. func MachineIdLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldLT(FieldMachineId, v)) } // MachineIdLTE applies the LTE predicate on the "machineId" field. func MachineIdLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldMachineId, v)) } // MachineIdContains applies the Contains predicate on the "machineId" field. func MachineIdContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldContains(FieldMachineId, v)) } // MachineIdHasPrefix applies the HasPrefix predicate on the "machineId" field. func MachineIdHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldMachineId, v)) } // MachineIdHasSuffix applies the HasSuffix predicate on the "machineId" field. func MachineIdHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldMachineId, v)) } // MachineIdEqualFold applies the EqualFold predicate on the "machineId" field. 
func MachineIdEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldMachineId, v)) } // MachineIdContainsFold applies the ContainsFold predicate on the "machineId" field. func MachineIdContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldMachineId), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldMachineId, v)) } // PasswordEQ applies the EQ predicate on the "password" field. func PasswordEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldPassword, v)) } // PasswordNEQ applies the NEQ predicate on the "password" field. func PasswordNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldPassword, v)) } // PasswordIn applies the In predicate on the "password" field. func PasswordIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldPassword), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldPassword, vs...)) } // PasswordNotIn applies the NotIn predicate on the "password" field. func PasswordNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldPassword), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldPassword, vs...)) } // PasswordGT applies the GT predicate on the "password" field. 
func PasswordGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldGT(FieldPassword, v)) } // PasswordGTE applies the GTE predicate on the "password" field. func PasswordGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldPassword, v)) } // PasswordLT applies the LT predicate on the "password" field. func PasswordLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldLT(FieldPassword, v)) } // PasswordLTE applies the LTE predicate on the "password" field. func PasswordLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldPassword, v)) } // PasswordContains applies the Contains predicate on the "password" field. func PasswordContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldContains(FieldPassword, v)) } // PasswordHasPrefix applies the HasPrefix predicate on the "password" field. func PasswordHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldPassword, v)) } // PasswordHasSuffix applies the HasSuffix predicate on the "password" field. func PasswordHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldPassword, v)) } // PasswordEqualFold applies the EqualFold predicate on the "password" field. 
func PasswordEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldPassword, v)) } // PasswordContainsFold applies the ContainsFold predicate on the "password" field. func PasswordContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldPassword), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldPassword, v)) } // IpAddressEQ applies the EQ predicate on the "ipAddress" field. func IpAddressEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldIpAddress, v)) } // IpAddressNEQ applies the NEQ predicate on the "ipAddress" field. func IpAddressNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldIpAddress, v)) } // IpAddressIn applies the In predicate on the "ipAddress" field. func IpAddressIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldIpAddress), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldIpAddress, vs...)) } // IpAddressNotIn applies the NotIn predicate on the "ipAddress" field. func IpAddressNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldIpAddress), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldIpAddress, vs...)) } // IpAddressGT applies the GT predicate on the "ipAddress" field. 
func IpAddressGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldGT(FieldIpAddress, v)) } // IpAddressGTE applies the GTE predicate on the "ipAddress" field. func IpAddressGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldIpAddress, v)) } // IpAddressLT applies the LT predicate on the "ipAddress" field. func IpAddressLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldLT(FieldIpAddress, v)) } // IpAddressLTE applies the LTE predicate on the "ipAddress" field. func IpAddressLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldIpAddress, v)) } // IpAddressContains applies the Contains predicate on the "ipAddress" field. func IpAddressContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldContains(FieldIpAddress, v)) } // IpAddressHasPrefix applies the HasPrefix predicate on the "ipAddress" field. func IpAddressHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldIpAddress, v)) } // IpAddressHasSuffix applies the HasSuffix predicate on the "ipAddress" field. 
func IpAddressHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldIpAddress, v)) } // IpAddressEqualFold applies the EqualFold predicate on the "ipAddress" field. func IpAddressEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldIpAddress, v)) } // IpAddressContainsFold applies the ContainsFold predicate on the "ipAddress" field. func IpAddressContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldIpAddress), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldIpAddress, v)) } // ScenariosEQ applies the EQ predicate on the "scenarios" field. func ScenariosEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldScenarios, v)) } // ScenariosNEQ applies the NEQ predicate on the "scenarios" field. func ScenariosNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldScenarios, v)) } // ScenariosIn applies the In predicate on the "scenarios" field. func ScenariosIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldScenarios), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldScenarios, vs...)) } // ScenariosNotIn applies the NotIn predicate on the "scenarios" field. 
func ScenariosNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldScenarios), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldScenarios, vs...)) } // ScenariosGT applies the GT predicate on the "scenarios" field. func ScenariosGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldGT(FieldScenarios, v)) } // ScenariosGTE applies the GTE predicate on the "scenarios" field. func ScenariosGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldScenarios, v)) } // ScenariosLT applies the LT predicate on the "scenarios" field. func ScenariosLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldLT(FieldScenarios, v)) } // ScenariosLTE applies the LTE predicate on the "scenarios" field. func ScenariosLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldScenarios, v)) } // ScenariosContains applies the Contains predicate on the "scenarios" field. func ScenariosContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldContains(FieldScenarios, v)) } // ScenariosHasPrefix applies the HasPrefix predicate on the "scenarios" field. 
func ScenariosHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldScenarios, v)) } // ScenariosHasSuffix applies the HasSuffix predicate on the "scenarios" field. func ScenariosHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldScenarios, v)) } // ScenariosIsNil applies the IsNil predicate on the "scenarios" field. func ScenariosIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldScenarios))) - }) + return predicate.Machine(sql.FieldIsNull(FieldScenarios)) } // ScenariosNotNil applies the NotNil predicate on the "scenarios" field. func ScenariosNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldScenarios))) - }) + return predicate.Machine(sql.FieldNotNull(FieldScenarios)) } // ScenariosEqualFold applies the EqualFold predicate on the "scenarios" field. func ScenariosEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldScenarios, v)) } // ScenariosContainsFold applies the ContainsFold predicate on the "scenarios" field. func ScenariosContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldScenarios), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldScenarios, v)) } // VersionEQ applies the EQ predicate on the "version" field. 
func VersionEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldVersion, v)) } // VersionNEQ applies the NEQ predicate on the "version" field. func VersionNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldVersion, v)) } // VersionIn applies the In predicate on the "version" field. func VersionIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldVersion), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldVersion, vs...)) } // VersionNotIn applies the NotIn predicate on the "version" field. func VersionNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldVersion), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldVersion, vs...)) } // VersionGT applies the GT predicate on the "version" field. func VersionGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldGT(FieldVersion, v)) } // VersionGTE applies the GTE predicate on the "version" field. func VersionGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldVersion, v)) } // VersionLT applies the LT predicate on the "version" field. 
func VersionLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldLT(FieldVersion, v)) } // VersionLTE applies the LTE predicate on the "version" field. func VersionLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldVersion, v)) } // VersionContains applies the Contains predicate on the "version" field. func VersionContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldContains(FieldVersion, v)) } // VersionHasPrefix applies the HasPrefix predicate on the "version" field. func VersionHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldVersion, v)) } // VersionHasSuffix applies the HasSuffix predicate on the "version" field. func VersionHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldVersion, v)) } // VersionIsNil applies the IsNil predicate on the "version" field. func VersionIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldVersion))) - }) + return predicate.Machine(sql.FieldIsNull(FieldVersion)) } // VersionNotNil applies the NotNil predicate on the "version" field. func VersionNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldVersion))) - }) + return predicate.Machine(sql.FieldNotNull(FieldVersion)) } // VersionEqualFold applies the EqualFold predicate on the "version" field. 
func VersionEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldVersion, v)) } // VersionContainsFold applies the ContainsFold predicate on the "version" field. func VersionContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldVersion), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldVersion, v)) } // IsValidatedEQ applies the EQ predicate on the "isValidated" field. func IsValidatedEQ(v bool) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldIsValidated), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldIsValidated, v)) } // IsValidatedNEQ applies the NEQ predicate on the "isValidated" field. func IsValidatedNEQ(v bool) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldIsValidated), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldIsValidated, v)) } // StatusEQ applies the EQ predicate on the "status" field. func StatusEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldStatus, v)) } // StatusNEQ applies the NEQ predicate on the "status" field. func StatusNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldStatus, v)) } // StatusIn applies the In predicate on the "status" field. 
func StatusIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldStatus), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldStatus, vs...)) } // StatusNotIn applies the NotIn predicate on the "status" field. func StatusNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldStatus), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldStatus, vs...)) } // StatusGT applies the GT predicate on the "status" field. func StatusGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldGT(FieldStatus, v)) } // StatusGTE applies the GTE predicate on the "status" field. func StatusGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldStatus, v)) } // StatusLT applies the LT predicate on the "status" field. func StatusLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldLT(FieldStatus, v)) } // StatusLTE applies the LTE predicate on the "status" field. func StatusLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldStatus, v)) } // StatusContains applies the Contains predicate on the "status" field. 
func StatusContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldContains(FieldStatus, v)) } // StatusHasPrefix applies the HasPrefix predicate on the "status" field. func StatusHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldStatus, v)) } // StatusHasSuffix applies the HasSuffix predicate on the "status" field. func StatusHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldStatus, v)) } // StatusIsNil applies the IsNil predicate on the "status" field. func StatusIsNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldStatus))) - }) + return predicate.Machine(sql.FieldIsNull(FieldStatus)) } // StatusNotNil applies the NotNil predicate on the "status" field. func StatusNotNil() predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldStatus))) - }) + return predicate.Machine(sql.FieldNotNull(FieldStatus)) } // StatusEqualFold applies the EqualFold predicate on the "status" field. func StatusEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldStatus, v)) } // StatusContainsFold applies the ContainsFold predicate on the "status" field. func StatusContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldStatus), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldStatus, v)) } // AuthTypeEQ applies the EQ predicate on the "auth_type" field. 
func AuthTypeEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) } // AuthTypeNEQ applies the NEQ predicate on the "auth_type" field. func AuthTypeNEQ(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldNEQ(FieldAuthType, v)) } // AuthTypeIn applies the In predicate on the "auth_type" field. func AuthTypeIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAuthType), v...)) - }) + return predicate.Machine(sql.FieldIn(FieldAuthType, vs...)) } // AuthTypeNotIn applies the NotIn predicate on the "auth_type" field. func AuthTypeNotIn(vs ...string) predicate.Machine { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAuthType), v...)) - }) + return predicate.Machine(sql.FieldNotIn(FieldAuthType, vs...)) } // AuthTypeGT applies the GT predicate on the "auth_type" field. func AuthTypeGT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldGT(FieldAuthType, v)) } // AuthTypeGTE applies the GTE predicate on the "auth_type" field. func AuthTypeGTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldGTE(FieldAuthType, v)) } // AuthTypeLT applies the LT predicate on the "auth_type" field. 
func AuthTypeLT(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldLT(FieldAuthType, v)) } // AuthTypeLTE applies the LTE predicate on the "auth_type" field. func AuthTypeLTE(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldLTE(FieldAuthType, v)) } // AuthTypeContains applies the Contains predicate on the "auth_type" field. func AuthTypeContains(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldContains(FieldAuthType, v)) } // AuthTypeHasPrefix applies the HasPrefix predicate on the "auth_type" field. func AuthTypeHasPrefix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldHasPrefix(FieldAuthType, v)) } // AuthTypeHasSuffix applies the HasSuffix predicate on the "auth_type" field. func AuthTypeHasSuffix(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldHasSuffix(FieldAuthType, v)) } // AuthTypeEqualFold applies the EqualFold predicate on the "auth_type" field. func AuthTypeEqualFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldEqualFold(FieldAuthType, v)) } // AuthTypeContainsFold applies the ContainsFold predicate on the "auth_type" field. 
func AuthTypeContainsFold(v string) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldAuthType), v)) - }) + return predicate.Machine(sql.FieldContainsFold(FieldAuthType, v)) } // HasAlerts applies the HasEdge predicate on the "alerts" edge. @@ -1231,7 +815,6 @@ func HasAlerts() predicate.Machine { return predicate.Machine(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(AlertsTable, FieldID), sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), ) sqlgraph.HasNeighbors(s, step) @@ -1241,11 +824,7 @@ func HasAlerts() predicate.Machine { // HasAlertsWith applies the HasEdge predicate on the "alerts" edge with a given conditions (other predicates). func HasAlertsWith(preds ...predicate.Alert) predicate.Machine { return predicate.Machine(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(AlertsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, false, AlertsTable, AlertsColumn), - ) + step := newAlertsStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -1256,32 +835,15 @@ func HasAlertsWith(preds ...predicate.Alert) predicate.Machine { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Machine) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Machine(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. 
func Or(predicates ...predicate.Machine) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Machine(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. func Not(p predicate.Machine) predicate.Machine { - return predicate.Machine(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Machine(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index efe02782f6b..ff704e6ab74 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -187,50 +187,8 @@ func (mc *MachineCreate) Mutation() *MachineMutation { // Save creates the Machine in the database. func (mc *MachineCreate) Save(ctx context.Context) (*Machine, error) { - var ( - err error - node *Machine - ) mc.defaults() - if len(mc.hooks) == 0 { - if err = mc.check(); err != nil { - return nil, err - } - node, err = mc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = mc.check(); err != nil { - return nil, err - } - mc.mutation = mutation - if node, err = mc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(mc.hooks) - 1; i >= 0; i-- { - if mc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = mc.hooks[i](mut) - } - v, err := mut.Mutate(ctx, mc.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Machine) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from MachineMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, mc.sqlSave, mc.mutation, 
mc.hooks) } // SaveX calls Save and panics if Save returns an error. @@ -309,6 +267,9 @@ func (mc *MachineCreate) check() error { } func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) { + if err := mc.check(); err != nil { + return nil, err + } _node, _spec := mc.createSpec() if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -318,114 +279,62 @@ func (mc *MachineCreate) sqlSave(ctx context.Context) (*Machine, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + mc.mutation.id = &_node.ID + mc.mutation.done = true return _node, nil } func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { var ( _node = &Machine{config: mc.config} - _spec = &sqlgraph.CreateSpec{ - Table: machine.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(machine.Table, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) ) if value, ok := mc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldCreatedAt, - }) + _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := mc.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldUpdatedAt, - }) + _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := mc.mutation.LastPush(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastPush, - }) + _spec.SetField(machine.FieldLastPush, field.TypeTime, value) _node.LastPush = &value } if value, ok := mc.mutation.LastHeartbeat(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastHeartbeat, - }) + 
_spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value) _node.LastHeartbeat = &value } if value, ok := mc.mutation.MachineId(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldMachineId, - }) + _spec.SetField(machine.FieldMachineId, field.TypeString, value) _node.MachineId = value } if value, ok := mc.mutation.Password(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldPassword, - }) + _spec.SetField(machine.FieldPassword, field.TypeString, value) _node.Password = value } if value, ok := mc.mutation.IpAddress(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldIpAddress, - }) + _spec.SetField(machine.FieldIpAddress, field.TypeString, value) _node.IpAddress = value } if value, ok := mc.mutation.Scenarios(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldScenarios, - }) + _spec.SetField(machine.FieldScenarios, field.TypeString, value) _node.Scenarios = value } if value, ok := mc.mutation.Version(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldVersion, - }) + _spec.SetField(machine.FieldVersion, field.TypeString, value) _node.Version = value } if value, ok := mc.mutation.IsValidated(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: machine.FieldIsValidated, - }) + _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) _node.IsValidated = value } if value, ok := mc.mutation.Status(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldStatus, - }) + _spec.SetField(machine.FieldStatus, field.TypeString, value) 
_node.Status = value } if value, ok := mc.mutation.AuthType(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldAuthType, - }) + _spec.SetField(machine.FieldAuthType, field.TypeString, value) _node.AuthType = value } if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 { @@ -436,10 +345,7 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -453,11 +359,15 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { // MachineCreateBulk is the builder for creating many Machine entities in bulk. type MachineCreateBulk struct { config + err error builders []*MachineCreate } // Save creates the Machine entities in the database. 
func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) { + if mcb.err != nil { + return nil, mcb.err + } specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) nodes := make([]*Machine, len(mcb.builders)) mutators := make([]Mutator, len(mcb.builders)) @@ -474,8 +384,8 @@ func (mcb *MachineCreateBulk) Save(ctx context.Context) ([]*Machine, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/machine_delete.go b/pkg/database/ent/machine_delete.go index bead8acb46d..ac3aa751d5e 100644 --- a/pkg/database/ent/machine_delete.go +++ b/pkg/database/ent/machine_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (md *MachineDelete) Where(ps ...predicate.Machine) *MachineDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (md *MachineDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(md.hooks) == 0 { - affected, err = md.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - md.mutation = mutation - affected, err = md.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(md.hooks) - 1; i >= 0; i-- { - if md.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = md.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, md.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (md *MachineDelete) ExecX(ctx context.Context) int { } func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: machine.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(machine.Table, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) if ps := md.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (md *MachineDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + md.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type MachineDeleteOne struct { md *MachineDelete } +// Where appends a list predicates to the MachineDelete builder. +func (mdo *MachineDeleteOne) Where(ps ...predicate.Machine) *MachineDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + // Exec executes the deletion query. 
func (mdo *MachineDeleteOne) Exec(ctx context.Context) error { n, err := mdo.md.Exec(ctx) @@ -111,5 +82,7 @@ func (mdo *MachineDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (mdo *MachineDeleteOne) ExecX(ctx context.Context) { - mdo.md.ExecX(ctx) + if err := mdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/machine_query.go b/pkg/database/ent/machine_query.go index 2839142196b..462c2cf35b1 100644 --- a/pkg/database/ent/machine_query.go +++ b/pkg/database/ent/machine_query.go @@ -19,11 +19,9 @@ import ( // MachineQuery is the builder for querying Machine entities. type MachineQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []machine.OrderOption + inters []Interceptor predicates []predicate.Machine withAlerts *AlertQuery // intermediate query (i.e. traversal path). @@ -37,34 +35,34 @@ func (mq *MachineQuery) Where(ps ...predicate.Machine) *MachineQuery { return mq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (mq *MachineQuery) Limit(limit int) *MachineQuery { - mq.limit = &limit + mq.ctx.Limit = &limit return mq } -// Offset adds an offset step to the query. +// Offset to start from. func (mq *MachineQuery) Offset(offset int) *MachineQuery { - mq.offset = &offset + mq.ctx.Offset = &offset return mq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (mq *MachineQuery) Unique(unique bool) *MachineQuery { - mq.unique = &unique + mq.ctx.Unique = &unique return mq } -// Order adds an order step to the query. -func (mq *MachineQuery) Order(o ...OrderFunc) *MachineQuery { +// Order specifies how the records should be ordered. +func (mq *MachineQuery) Order(o ...machine.OrderOption) *MachineQuery { mq.order = append(mq.order, o...) 
return mq } // QueryAlerts chains the current query on the "alerts" edge. func (mq *MachineQuery) QueryAlerts() *AlertQuery { - query := &AlertQuery{config: mq.config} + query := (&AlertClient{config: mq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := mq.prepareQuery(ctx); err != nil { return nil, err @@ -87,7 +85,7 @@ func (mq *MachineQuery) QueryAlerts() *AlertQuery { // First returns the first Machine entity from the query. // Returns a *NotFoundError when no Machine was found. func (mq *MachineQuery) First(ctx context.Context) (*Machine, error) { - nodes, err := mq.Limit(1).All(ctx) + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) if err != nil { return nil, err } @@ -110,7 +108,7 @@ func (mq *MachineQuery) FirstX(ctx context.Context) *Machine { // Returns a *NotFoundError when no Machine ID was found. func (mq *MachineQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(1).IDs(ctx); err != nil { + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -133,7 +131,7 @@ func (mq *MachineQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Machine entity is found. // Returns a *NotFoundError when no Machine entities are found. func (mq *MachineQuery) Only(ctx context.Context) (*Machine, error) { - nodes, err := mq.Limit(2).All(ctx) + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) if err != nil { return nil, err } @@ -161,7 +159,7 @@ func (mq *MachineQuery) OnlyX(ctx context.Context) *Machine { // Returns a *NotFoundError when no entities are found. 
func (mq *MachineQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(2).IDs(ctx); err != nil { + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -186,10 +184,12 @@ func (mq *MachineQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Machines. func (mq *MachineQuery) All(ctx context.Context) ([]*Machine, error) { + ctx = setContextOp(ctx, mq.ctx, "All") if err := mq.prepareQuery(ctx); err != nil { return nil, err } - return mq.sqlAll(ctx) + qr := querierAll[[]*Machine, *MachineQuery]() + return withInterceptors[[]*Machine](ctx, mq, qr, mq.inters) } // AllX is like All, but panics if an error occurs. @@ -202,9 +202,12 @@ func (mq *MachineQuery) AllX(ctx context.Context) []*Machine { } // IDs executes the query and returns a list of Machine IDs. -func (mq *MachineQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil { +func (mq *MachineQuery) IDs(ctx context.Context) (ids []int, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, "IDs") + if err = mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -221,10 +224,11 @@ func (mq *MachineQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (mq *MachineQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, "Count") if err := mq.prepareQuery(ctx); err != nil { return 0, err } - return mq.sqlCount(ctx) + return withInterceptors[int](ctx, mq, querierCount[*MachineQuery](), mq.inters) } // CountX is like Count, but panics if an error occurs. @@ -238,10 +242,15 @@ func (mq *MachineQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (mq *MachineQuery) Exist(ctx context.Context) (bool, error) { - if err := mq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, mq.ctx, "Exist") + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return mq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. @@ -261,22 +270,21 @@ func (mq *MachineQuery) Clone() *MachineQuery { } return &MachineQuery{ config: mq.config, - limit: mq.limit, - offset: mq.offset, - order: append([]OrderFunc{}, mq.order...), + ctx: mq.ctx.Clone(), + order: append([]machine.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), predicates: append([]predicate.Machine{}, mq.predicates...), withAlerts: mq.withAlerts.Clone(), // clone intermediate query. - sql: mq.sql.Clone(), - path: mq.path, - unique: mq.unique, + sql: mq.sql.Clone(), + path: mq.path, } } // WithAlerts tells the query-builder to eager-load the nodes that are connected to // the "alerts" edge. The optional arguments are used to configure the query builder of the edge. func (mq *MachineQuery) WithAlerts(opts ...func(*AlertQuery)) *MachineQuery { - query := &AlertQuery{config: mq.config} + query := (&AlertClient{config: mq.config}).Query() for _, opt := range opts { opt(query) } @@ -299,16 +307,11 @@ func (mq *MachineQuery) WithAlerts(opts ...func(*AlertQuery)) *MachineQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy { - grbuild := &MachineGroupBy{config: mq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := mq.prepareQuery(ctx); err != nil { - return nil, err - } - return mq.sqlQuery(ctx), nil - } + mq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &MachineGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields grbuild.label = machine.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -325,15 +328,30 @@ func (mq *MachineQuery) GroupBy(field string, fields ...string) *MachineGroupBy // Select(machine.FieldCreatedAt). // Scan(ctx, &v) func (mq *MachineQuery) Select(fields ...string) *MachineSelect { - mq.fields = append(mq.fields, fields...) - selbuild := &MachineSelect{MachineQuery: mq} - selbuild.label = machine.Label - selbuild.flds, selbuild.scan = &mq.fields, selbuild.Scan - return selbuild + mq.ctx.Fields = append(mq.ctx.Fields, fields...) + sbuild := &MachineSelect{MachineQuery: mq} + sbuild.label = machine.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MachineSelect configured with the given aggregations. +func (mq *MachineQuery) Aggregate(fns ...AggregateFunc) *MachineSelect { + return mq.Select().Aggregate(fns...) 
} func (mq *MachineQuery) prepareQuery(ctx context.Context) error { - for _, f := range mq.fields { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { if !machine.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -396,7 +414,7 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes } query.withFKs = true query.Where(predicate.Alert(func(s *sql.Selector) { - s.Where(sql.InValues(machine.AlertsColumn, fks...)) + s.Where(sql.InValues(s.C(machine.AlertsColumn), fks...)) })) neighbors, err := query.All(ctx) if err != nil { @@ -409,7 +427,7 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes } node, ok := nodeids[*fk] if !ok { - return fmt.Errorf(`unexpected foreign-key "machine_alerts" returned %v for node %v`, *fk, n.ID) + return fmt.Errorf(`unexpected referenced foreign-key "machine_alerts" returned %v for node %v`, *fk, n.ID) } assign(node, n) } @@ -418,41 +436,22 @@ func (mq *MachineQuery) loadAlerts(ctx context.Context, query *AlertQuery, nodes func (mq *MachineQuery) sqlCount(ctx context.Context) (int, error) { _spec := mq.querySpec() - _spec.Node.Columns = mq.fields - if len(mq.fields) > 0 { - _spec.Unique = mq.unique != nil && *mq.unique + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique } return sqlgraph.CountNodes(ctx, mq.driver, _spec) } -func (mq *MachineQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := mq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - func (mq *MachineQuery) 
querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: machine.Table, - Columns: machine.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - }, - From: mq.sql, - Unique: true, - } - if unique := mq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true } - if fields := mq.fields; len(fields) > 0 { + if fields := mq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, machine.FieldID) for i := range fields { @@ -468,10 +467,10 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := mq.limit; limit != nil { + if limit := mq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := mq.offset; offset != nil { + if offset := mq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := mq.order; len(ps) > 0 { @@ -487,7 +486,7 @@ func (mq *MachineQuery) querySpec() *sqlgraph.QuerySpec { func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(mq.driver.Dialect()) t1 := builder.Table(machine.Table) - columns := mq.fields + columns := mq.ctx.Fields if len(columns) == 0 { columns = machine.Columns } @@ -496,7 +495,7 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = mq.sql selector.Select(selector.Columns(columns...)...) 
} - if mq.unique != nil && *mq.unique { + if mq.ctx.Unique != nil && *mq.ctx.Unique { selector.Distinct() } for _, p := range mq.predicates { @@ -505,12 +504,12 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range mq.order { p(selector) } - if offset := mq.offset; offset != nil { + if offset := mq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := mq.limit; limit != nil { + if limit := mq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -518,13 +517,8 @@ func (mq *MachineQuery) sqlQuery(ctx context.Context) *sql.Selector { // MachineGroupBy is the group-by builder for Machine entities. type MachineGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *MachineQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -533,74 +527,77 @@ func (mgb *MachineGroupBy) Aggregate(fns ...AggregateFunc) *MachineGroupBy { return mgb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. 
func (mgb *MachineGroupBy) Scan(ctx context.Context, v any) error { - query, err := mgb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + if err := mgb.build.prepareQuery(ctx); err != nil { return err } - mgb.sql = query - return mgb.sqlScan(ctx, v) + return scanWithInterceptors[*MachineQuery, *MachineGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) } -func (mgb *MachineGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range mgb.fields { - if !machine.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (mgb *MachineGroupBy) sqlScan(ctx context.Context, root *MachineQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := mgb.sqlQuery() + selector.GroupBy(selector.Columns(*mgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := mgb.driver.Query(ctx, query, args, rows); err != nil { + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (mgb *MachineGroupBy) sqlQuery() *sql.Selector { - selector := mgb.sql.Select() - aggregation := make([]string, 0, len(mgb.fns)) - for _, fn := range mgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(mgb.fields)+len(mgb.fns)) - for _, f := range mgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(mgb.fields...)...) -} - // MachineSelect is the builder for selecting fields of Machine entities. type MachineSelect struct { *MachineQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MachineSelect) Aggregate(fns ...AggregateFunc) *MachineSelect { + ms.fns = append(ms.fns, fns...) + return ms } // Scan applies the selector query and scans the result into the given value. func (ms *MachineSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, "Select") if err := ms.prepareQuery(ctx); err != nil { return err } - ms.sql = ms.MachineQuery.sqlQuery(ctx) - return ms.sqlScan(ctx, v) + return scanWithInterceptors[*MachineQuery, *MachineSelect](ctx, ms.MachineQuery, ms, ms.inters, v) } -func (ms *MachineSelect) sqlScan(ctx context.Context, v any) error { +func (ms *MachineSelect) sqlScan(ctx context.Context, root *MachineQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } rows := &sql.Rows{} - query, args := ms.sql.Query() + query, args := selector.Query() if err := ms.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index de9f8d12460..eb517081174 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -226,41 +226,8 @@ func (mu *MachineUpdate) RemoveAlerts(a ...*Alert) *MachineUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (mu *MachineUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) mu.defaults() - if len(mu.hooks) == 0 { - if err = mu.check(); err != nil { - return 0, err - } - affected, err = mu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = mu.check(); err != nil { - return 0, err - } - mu.mutation = mutation - affected, err = mu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(mu.hooks) - 1; i >= 0; i-- { - if mu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = mu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, mu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -316,16 +283,10 @@ func (mu *MachineUpdate) check() error { } func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: machine.Table, - Columns: machine.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - }, + if err := mu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) if ps := mu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -334,130 +295,61 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := mu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldCreatedAt, - }) + _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) } if mu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldCreatedAt, - }) + _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) } if value, ok := mu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldUpdatedAt, - }) + _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } if mu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldUpdatedAt, - }) + _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) } if value, ok := mu.mutation.LastPush(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastPush, - }) + _spec.SetField(machine.FieldLastPush, field.TypeTime, value) } if mu.mutation.LastPushCleared() 
{ - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldLastPush, - }) + _spec.ClearField(machine.FieldLastPush, field.TypeTime) } if value, ok := mu.mutation.LastHeartbeat(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastHeartbeat, - }) + _spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value) } if mu.mutation.LastHeartbeatCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldLastHeartbeat, - }) + _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } if value, ok := mu.mutation.MachineId(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldMachineId, - }) + _spec.SetField(machine.FieldMachineId, field.TypeString, value) } if value, ok := mu.mutation.Password(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldPassword, - }) + _spec.SetField(machine.FieldPassword, field.TypeString, value) } if value, ok := mu.mutation.IpAddress(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldIpAddress, - }) + _spec.SetField(machine.FieldIpAddress, field.TypeString, value) } if value, ok := mu.mutation.Scenarios(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldScenarios, - }) + _spec.SetField(machine.FieldScenarios, field.TypeString, value) } if mu.mutation.ScenariosCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldScenarios, - }) + _spec.ClearField(machine.FieldScenarios, field.TypeString) } if value, 
ok := mu.mutation.Version(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldVersion, - }) + _spec.SetField(machine.FieldVersion, field.TypeString, value) } if mu.mutation.VersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldVersion, - }) + _spec.ClearField(machine.FieldVersion, field.TypeString) } if value, ok := mu.mutation.IsValidated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: machine.FieldIsValidated, - }) + _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } if value, ok := mu.mutation.Status(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldStatus, - }) + _spec.SetField(machine.FieldStatus, field.TypeString, value) } if mu.mutation.StatusCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldStatus, - }) + _spec.ClearField(machine.FieldStatus, field.TypeString) } if value, ok := mu.mutation.AuthType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldAuthType, - }) + _spec.SetField(machine.FieldAuthType, field.TypeString, value) } if mu.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ @@ -467,10 +359,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -483,10 +372,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n 
int, err error) { Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -502,10 +388,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -521,6 +404,7 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + mu.mutation.done = true return n, nil } @@ -727,6 +611,12 @@ func (muo *MachineUpdateOne) RemoveAlerts(a ...*Alert) *MachineUpdateOne { return muo.RemoveAlertIDs(ids...) } +// Where appends a list predicates to the MachineUpdate builder. +func (muo *MachineUpdateOne) Where(ps ...predicate.Machine) *MachineUpdateOne { + muo.mutation.Where(ps...) + return muo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpdateOne { @@ -736,47 +626,8 @@ func (muo *MachineUpdateOne) Select(field string, fields ...string) *MachineUpda // Save executes the query and returns the updated Machine entity. 
func (muo *MachineUpdateOne) Save(ctx context.Context) (*Machine, error) { - var ( - err error - node *Machine - ) muo.defaults() - if len(muo.hooks) == 0 { - if err = muo.check(); err != nil { - return nil, err - } - node, err = muo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MachineMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = muo.check(); err != nil { - return nil, err - } - muo.mutation = mutation - node, err = muo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(muo.hooks) - 1; i >= 0; i-- { - if muo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = muo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, muo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Machine) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from MachineMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -832,16 +683,10 @@ func (muo *MachineUpdateOne) check() error { } func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: machine.Table, - Columns: machine.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: machine.FieldID, - }, - }, + if err := muo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(machine.Table, machine.Columns, sqlgraph.NewFieldSpec(machine.FieldID, field.TypeInt)) id, ok := muo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Machine.id" for update`)} @@ -867,130 +712,61 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e } } if value, ok := muo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldCreatedAt, - }) + _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) } if muo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldCreatedAt, - }) + _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) } if value, ok := muo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldUpdatedAt, - }) + _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } if muo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldUpdatedAt, - }) + _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) } if value, ok := muo.mutation.LastPush(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastPush, - }) + _spec.SetField(machine.FieldLastPush, 
field.TypeTime, value) } if muo.mutation.LastPushCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldLastPush, - }) + _spec.ClearField(machine.FieldLastPush, field.TypeTime) } if value, ok := muo.mutation.LastHeartbeat(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: machine.FieldLastHeartbeat, - }) + _spec.SetField(machine.FieldLastHeartbeat, field.TypeTime, value) } if muo.mutation.LastHeartbeatCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: machine.FieldLastHeartbeat, - }) + _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } if value, ok := muo.mutation.MachineId(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldMachineId, - }) + _spec.SetField(machine.FieldMachineId, field.TypeString, value) } if value, ok := muo.mutation.Password(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldPassword, - }) + _spec.SetField(machine.FieldPassword, field.TypeString, value) } if value, ok := muo.mutation.IpAddress(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldIpAddress, - }) + _spec.SetField(machine.FieldIpAddress, field.TypeString, value) } if value, ok := muo.mutation.Scenarios(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldScenarios, - }) + _spec.SetField(machine.FieldScenarios, field.TypeString, value) } if muo.mutation.ScenariosCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldScenarios, - }) + 
_spec.ClearField(machine.FieldScenarios, field.TypeString) } if value, ok := muo.mutation.Version(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldVersion, - }) + _spec.SetField(machine.FieldVersion, field.TypeString, value) } if muo.mutation.VersionCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldVersion, - }) + _spec.ClearField(machine.FieldVersion, field.TypeString) } if value, ok := muo.mutation.IsValidated(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeBool, - Value: value, - Column: machine.FieldIsValidated, - }) + _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } if value, ok := muo.mutation.Status(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldStatus, - }) + _spec.SetField(machine.FieldStatus, field.TypeString, value) } if muo.mutation.StatusCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Column: machine.FieldStatus, - }) + _spec.ClearField(machine.FieldStatus, field.TypeString) } if value, ok := muo.mutation.AuthType(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: machine.FieldAuthType, - }) + _spec.SetField(machine.FieldAuthType, field.TypeString, value) } if muo.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ @@ -1000,10 +776,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = 
append(_spec.Edges.Clear, edge) @@ -1016,10 +789,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1035,10 +805,7 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e Columns: []string{machine.AlertsColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -1057,5 +824,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e } return nil, err } + muo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go index 660f1a4db73..cadc210937e 100644 --- a/pkg/database/ent/meta.go +++ b/pkg/database/ent/meta.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" @@ -29,7 +30,8 @@ type Meta struct { AlertMetas int `json:"alert_metas,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MetaQuery when eager-loading is set. - Edges MetaEdges `json:"edges"` + Edges MetaEdges `json:"edges"` + selectValues sql.SelectValues } // MetaEdges holds the relations/edges for other nodes in the graph. 
@@ -66,7 +68,7 @@ func (*Meta) scanValues(columns []string) ([]any, error) { case meta.FieldCreatedAt, meta.FieldUpdatedAt: values[i] = new(sql.NullTime) default: - return nil, fmt.Errorf("unexpected column %q for type Meta", columns[i]) + values[i] = new(sql.UnknownType) } } return values, nil @@ -118,21 +120,29 @@ func (m *Meta) assignValues(columns []string, values []any) error { } else if value.Valid { m.AlertMetas = int(value.Int64) } + default: + m.selectValues.Set(columns[i], values[i]) } } return nil } +// GetValue returns the ent.Value that was dynamically selected and assigned to the Meta. +// This includes values selected through modifiers, order, etc. +func (m *Meta) GetValue(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + // QueryOwner queries the "owner" edge of the Meta entity. func (m *Meta) QueryOwner() *AlertQuery { - return (&MetaClient{config: m.config}).QueryOwner(m) + return NewMetaClient(m.config).QueryOwner(m) } // Update returns a builder for updating this Meta. // Note that you need to call Meta.Unwrap() before calling this method if this Meta // was returned from a transaction, and the transaction was committed or rolled back. func (m *Meta) Update() *MetaUpdateOne { - return (&MetaClient{config: m.config}).UpdateOne(m) + return NewMetaClient(m.config).UpdateOne(m) } // Unwrap unwraps the Meta entity that was returned from a transaction after it was closed, @@ -175,9 +185,3 @@ func (m *Meta) String() string { // MetaSlice is a parsable slice of Meta. 
type MetaSlice []*Meta - -func (m MetaSlice) config(cfg config) { - for _i := range m { - m[_i].config = cfg - } -} diff --git a/pkg/database/ent/meta/meta.go b/pkg/database/ent/meta/meta.go index 6d10f258919..583496fb710 100644 --- a/pkg/database/ent/meta/meta.go +++ b/pkg/database/ent/meta/meta.go @@ -4,6 +4,9 @@ package meta import ( "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" ) const ( @@ -66,3 +69,50 @@ var ( // ValueValidator is a validator for the "value" field. It is called by the builders before save. ValueValidator func(string) error ) + +// OrderOption defines the ordering options for the Meta queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByKey orders the results by the key field. +func ByKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKey, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByAlertMetas orders the results by the alert_metas field. +func ByAlertMetas(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAlertMetas, opts...).ToFunc() +} + +// ByOwnerField orders the results by owner field. 
+func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newOwnerStep(), sql.OrderByField(field, opts...)) + } +} +func newOwnerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(OwnerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), + ) +} diff --git a/pkg/database/ent/meta/where.go b/pkg/database/ent/meta/where.go index 479792fd4a6..7fc99136972 100644 --- a/pkg/database/ent/meta/where.go +++ b/pkg/database/ent/meta/where.go @@ -12,512 +12,332 @@ import ( // ID filters vertices based on their ID field. func ID(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldEQ(FieldID, id)) } // IDEQ applies the EQ predicate on the ID field. func IDEQ(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldEQ(FieldID, id)) } // IDNEQ applies the NEQ predicate on the ID field. func IDNEQ(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldNEQ(FieldID, id)) } // IDIn applies the In predicate on the ID field. func IDIn(ids ...int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.In(s.C(FieldID), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldID, ids...)) } // IDNotIn applies the NotIn predicate on the ID field. func IDNotIn(ids ...int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - v := make([]any, len(ids)) - for i := range v { - v[i] = ids[i] - } - s.Where(sql.NotIn(s.C(FieldID), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldID, ids...)) } // IDGT applies the GT predicate on the ID field. 
func IDGT(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldGT(FieldID, id)) } // IDGTE applies the GTE predicate on the ID field. func IDGTE(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldGTE(FieldID, id)) } // IDLT applies the LT predicate on the ID field. func IDLT(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldLT(FieldID, id)) } // IDLTE applies the LTE predicate on the ID field. func IDLTE(id int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldID), id)) - }) + return predicate.Meta(sql.FieldLTE(FieldID, id)) } // CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. func CreatedAt(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldCreatedAt, v)) } // UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. func UpdatedAt(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v)) } // Key applies equality check predicate on the "key" field. It's identical to KeyEQ. func Key(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldKey, v)) } // Value applies equality check predicate on the "value" field. It's identical to ValueEQ. 
func Value(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldValue, v)) } // AlertMetas applies equality check predicate on the "alert_metas" field. It's identical to AlertMetasEQ. func AlertMetas(v int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertMetas), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldAlertMetas, v)) } // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldCreatedAt, v)) } // CreatedAtNEQ applies the NEQ predicate on the "created_at" field. func CreatedAtNEQ(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldCreatedAt, v)) } // CreatedAtIn applies the In predicate on the "created_at" field. func CreatedAtIn(vs ...time.Time) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldCreatedAt), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldCreatedAt, vs...)) } // CreatedAtNotIn applies the NotIn predicate on the "created_at" field. func CreatedAtNotIn(vs ...time.Time) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldCreatedAt), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldCreatedAt, vs...)) } // CreatedAtGT applies the GT predicate on the "created_at" field. 
func CreatedAtGT(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldGT(FieldCreatedAt, v)) } // CreatedAtGTE applies the GTE predicate on the "created_at" field. func CreatedAtGTE(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldGTE(FieldCreatedAt, v)) } // CreatedAtLT applies the LT predicate on the "created_at" field. func CreatedAtLT(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldLT(FieldCreatedAt, v)) } // CreatedAtLTE applies the LTE predicate on the "created_at" field. func CreatedAtLTE(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldCreatedAt), v)) - }) + return predicate.Meta(sql.FieldLTE(FieldCreatedAt, v)) } // CreatedAtIsNil applies the IsNil predicate on the "created_at" field. func CreatedAtIsNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldCreatedAt))) - }) + return predicate.Meta(sql.FieldIsNull(FieldCreatedAt)) } // CreatedAtNotNil applies the NotNil predicate on the "created_at" field. func CreatedAtNotNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldCreatedAt))) - }) + return predicate.Meta(sql.FieldNotNull(FieldCreatedAt)) } // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v)) } // UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
func UpdatedAtNEQ(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldUpdatedAt, v)) } // UpdatedAtIn applies the In predicate on the "updated_at" field. func UpdatedAtIn(vs ...time.Time) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldUpdatedAt, vs...)) } // UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. func UpdatedAtNotIn(vs ...time.Time) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldUpdatedAt, vs...)) } // UpdatedAtGT applies the GT predicate on the "updated_at" field. func UpdatedAtGT(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldGT(FieldUpdatedAt, v)) } // UpdatedAtGTE applies the GTE predicate on the "updated_at" field. func UpdatedAtGTE(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldGTE(FieldUpdatedAt, v)) } // UpdatedAtLT applies the LT predicate on the "updated_at" field. func UpdatedAtLT(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldLT(FieldUpdatedAt, v)) } // UpdatedAtLTE applies the LTE predicate on the "updated_at" field. 
func UpdatedAtLTE(v time.Time) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldUpdatedAt), v)) - }) + return predicate.Meta(sql.FieldLTE(FieldUpdatedAt, v)) } // UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. func UpdatedAtIsNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldUpdatedAt))) - }) + return predicate.Meta(sql.FieldIsNull(FieldUpdatedAt)) } // UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. func UpdatedAtNotNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldUpdatedAt))) - }) + return predicate.Meta(sql.FieldNotNull(FieldUpdatedAt)) } // KeyEQ applies the EQ predicate on the "key" field. func KeyEQ(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldKey, v)) } // KeyNEQ applies the NEQ predicate on the "key" field. func KeyNEQ(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldKey, v)) } // KeyIn applies the In predicate on the "key" field. func KeyIn(vs ...string) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldKey), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldKey, vs...)) } // KeyNotIn applies the NotIn predicate on the "key" field. func KeyNotIn(vs ...string) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldKey), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldKey, vs...)) } // KeyGT applies the GT predicate on the "key" field. 
func KeyGT(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldGT(FieldKey, v)) } // KeyGTE applies the GTE predicate on the "key" field. func KeyGTE(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldGTE(FieldKey, v)) } // KeyLT applies the LT predicate on the "key" field. func KeyLT(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldLT(FieldKey, v)) } // KeyLTE applies the LTE predicate on the "key" field. func KeyLTE(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldLTE(FieldKey, v)) } // KeyContains applies the Contains predicate on the "key" field. func KeyContains(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldContains(FieldKey, v)) } // KeyHasPrefix applies the HasPrefix predicate on the "key" field. func KeyHasPrefix(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldHasPrefix(FieldKey, v)) } // KeyHasSuffix applies the HasSuffix predicate on the "key" field. func KeyHasSuffix(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldHasSuffix(FieldKey, v)) } // KeyEqualFold applies the EqualFold predicate on the "key" field. 
func KeyEqualFold(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldEqualFold(FieldKey, v)) } // KeyContainsFold applies the ContainsFold predicate on the "key" field. func KeyContainsFold(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldKey), v)) - }) + return predicate.Meta(sql.FieldContainsFold(FieldKey, v)) } // ValueEQ applies the EQ predicate on the "value" field. func ValueEQ(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldValue, v)) } // ValueNEQ applies the NEQ predicate on the "value" field. func ValueNEQ(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldValue, v)) } // ValueIn applies the In predicate on the "value" field. func ValueIn(vs ...string) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldValue), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldValue, vs...)) } // ValueNotIn applies the NotIn predicate on the "value" field. func ValueNotIn(vs ...string) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldValue), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldValue, vs...)) } // ValueGT applies the GT predicate on the "value" field. func ValueGT(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GT(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldGT(FieldValue, v)) } // ValueGTE applies the GTE predicate on the "value" field. 
func ValueGTE(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.GTE(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldGTE(FieldValue, v)) } // ValueLT applies the LT predicate on the "value" field. func ValueLT(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LT(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldLT(FieldValue, v)) } // ValueLTE applies the LTE predicate on the "value" field. func ValueLTE(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.LTE(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldLTE(FieldValue, v)) } // ValueContains applies the Contains predicate on the "value" field. func ValueContains(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.Contains(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldContains(FieldValue, v)) } // ValueHasPrefix applies the HasPrefix predicate on the "value" field. func ValueHasPrefix(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.HasPrefix(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldHasPrefix(FieldValue, v)) } // ValueHasSuffix applies the HasSuffix predicate on the "value" field. func ValueHasSuffix(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.HasSuffix(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldHasSuffix(FieldValue, v)) } // ValueEqualFold applies the EqualFold predicate on the "value" field. func ValueEqualFold(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EqualFold(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldEqualFold(FieldValue, v)) } // ValueContainsFold applies the ContainsFold predicate on the "value" field. 
func ValueContainsFold(v string) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.ContainsFold(s.C(FieldValue), v)) - }) + return predicate.Meta(sql.FieldContainsFold(FieldValue, v)) } // AlertMetasEQ applies the EQ predicate on the "alert_metas" field. func AlertMetasEQ(v int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.EQ(s.C(FieldAlertMetas), v)) - }) + return predicate.Meta(sql.FieldEQ(FieldAlertMetas, v)) } // AlertMetasNEQ applies the NEQ predicate on the "alert_metas" field. func AlertMetasNEQ(v int) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NEQ(s.C(FieldAlertMetas), v)) - }) + return predicate.Meta(sql.FieldNEQ(FieldAlertMetas, v)) } // AlertMetasIn applies the In predicate on the "alert_metas" field. func AlertMetasIn(vs ...int) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.In(s.C(FieldAlertMetas), v...)) - }) + return predicate.Meta(sql.FieldIn(FieldAlertMetas, vs...)) } // AlertMetasNotIn applies the NotIn predicate on the "alert_metas" field. func AlertMetasNotIn(vs ...int) predicate.Meta { - v := make([]any, len(vs)) - for i := range v { - v[i] = vs[i] - } - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotIn(s.C(FieldAlertMetas), v...)) - }) + return predicate.Meta(sql.FieldNotIn(FieldAlertMetas, vs...)) } // AlertMetasIsNil applies the IsNil predicate on the "alert_metas" field. func AlertMetasIsNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.IsNull(s.C(FieldAlertMetas))) - }) + return predicate.Meta(sql.FieldIsNull(FieldAlertMetas)) } // AlertMetasNotNil applies the NotNil predicate on the "alert_metas" field. 
func AlertMetasNotNil() predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s.Where(sql.NotNull(s.C(FieldAlertMetas))) - }) + return predicate.Meta(sql.FieldNotNull(FieldAlertMetas)) } // HasOwner applies the HasEdge predicate on the "owner" edge. @@ -525,7 +345,6 @@ func HasOwner() predicate.Meta { return predicate.Meta(func(s *sql.Selector) { step := sqlgraph.NewStep( sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerTable, FieldID), sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), ) sqlgraph.HasNeighbors(s, step) @@ -535,11 +354,7 @@ func HasOwner() predicate.Meta { // HasOwnerWith applies the HasEdge predicate on the "owner" edge with a given conditions (other predicates). func HasOwnerWith(preds ...predicate.Alert) predicate.Meta { return predicate.Meta(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(OwnerInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, true, OwnerTable, OwnerColumn), - ) + step := newOwnerStep() sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { for _, p := range preds { p(s) @@ -550,32 +365,15 @@ func HasOwnerWith(preds ...predicate.Alert) predicate.Meta { // And groups predicates with the AND operator between them. func And(predicates ...predicate.Meta) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for _, p := range predicates { - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Meta(sql.AndPredicates(predicates...)) } // Or groups predicates with the OR operator between them. func Or(predicates ...predicate.Meta) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - s1 := s.Clone().SetP(nil) - for i, p := range predicates { - if i > 0 { - s1.Or() - } - p(s1) - } - s.Where(s1.P()) - }) + return predicate.Meta(sql.OrPredicates(predicates...)) } // Not applies the not operator on the given predicate. 
func Not(p predicate.Meta) predicate.Meta { - return predicate.Meta(func(s *sql.Selector) { - p(s.Not()) - }) + return predicate.Meta(sql.NotPredicates(p)) } diff --git a/pkg/database/ent/meta_create.go b/pkg/database/ent/meta_create.go index df4f6315911..3bf30f0def9 100644 --- a/pkg/database/ent/meta_create.go +++ b/pkg/database/ent/meta_create.go @@ -101,50 +101,8 @@ func (mc *MetaCreate) Mutation() *MetaMutation { // Save creates the Meta in the database. func (mc *MetaCreate) Save(ctx context.Context) (*Meta, error) { - var ( - err error - node *Meta - ) mc.defaults() - if len(mc.hooks) == 0 { - if err = mc.check(); err != nil { - return nil, err - } - node, err = mc.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = mc.check(); err != nil { - return nil, err - } - mc.mutation = mutation - if node, err = mc.sqlSave(ctx); err != nil { - return nil, err - } - mutation.id = &node.ID - mutation.done = true - return node, err - }) - for i := len(mc.hooks) - 1; i >= 0; i-- { - if mc.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = mc.hooks[i](mut) - } - v, err := mut.Mutate(ctx, mc.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Meta) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from MetaMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) } // SaveX calls Save and panics if Save returns an error. 
@@ -198,6 +156,9 @@ func (mc *MetaCreate) check() error { } func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) { + if err := mc.check(); err != nil { + return nil, err + } _node, _spec := mc.createSpec() if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { if sqlgraph.IsConstraintError(err) { @@ -207,50 +168,30 @@ func (mc *MetaCreate) sqlSave(ctx context.Context) (*Meta, error) { } id := _spec.ID.Value.(int64) _node.ID = int(id) + mc.mutation.id = &_node.ID + mc.mutation.done = true return _node, nil } func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { var ( _node = &Meta{config: mc.config} - _spec = &sqlgraph.CreateSpec{ - Table: meta.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - } + _spec = sqlgraph.NewCreateSpec(meta.Table, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) ) if value, ok := mc.mutation.CreatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldCreatedAt, - }) + _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) _node.CreatedAt = &value } if value, ok := mc.mutation.UpdatedAt(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldUpdatedAt, - }) + _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) _node.UpdatedAt = &value } if value, ok := mc.mutation.Key(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldKey, - }) + _spec.SetField(meta.FieldKey, field.TypeString, value) _node.Key = value } if value, ok := mc.mutation.Value(); ok { - _spec.Fields = append(_spec.Fields, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldValue, - }) + _spec.SetField(meta.FieldValue, field.TypeString, value) _node.Value = value } if nodes := mc.mutation.OwnerIDs(); len(nodes) > 0 { @@ -261,10 +202,7 @@ func 
(mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -279,11 +217,15 @@ func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { // MetaCreateBulk is the builder for creating many Meta entities in bulk. type MetaCreateBulk struct { config + err error builders []*MetaCreate } // Save creates the Meta entities in the database. func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) { + if mcb.err != nil { + return nil, mcb.err + } specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) nodes := make([]*Meta, len(mcb.builders)) mutators := make([]Mutator, len(mcb.builders)) @@ -300,8 +242,8 @@ func (mcb *MetaCreateBulk) Save(ctx context.Context) ([]*Meta, error) { return nil, err } builder.mutation = mutation - nodes[i], specs[i] = builder.createSpec() var err error + nodes[i], specs[i] = builder.createSpec() if i < len(mutators)-1 { _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) } else { diff --git a/pkg/database/ent/meta_delete.go b/pkg/database/ent/meta_delete.go index e1e49d2acdc..ee25dd07eb9 100644 --- a/pkg/database/ent/meta_delete.go +++ b/pkg/database/ent/meta_delete.go @@ -4,7 +4,6 @@ package ent import ( "context" - "fmt" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,34 +27,7 @@ func (md *MetaDelete) Where(ps ...predicate.Meta) *MetaDelete { // Exec executes the deletion query and returns how many vertices were deleted. 
func (md *MetaDelete) Exec(ctx context.Context) (int, error) { - var ( - err error - affected int - ) - if len(md.hooks) == 0 { - affected, err = md.sqlExec(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - md.mutation = mutation - affected, err = md.sqlExec(ctx) - mutation.done = true - return affected, err - }) - for i := len(md.hooks) - 1; i >= 0; i-- { - if md.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = md.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, md.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) } // ExecX is like Exec, but panics if an error occurs. @@ -68,15 +40,7 @@ func (md *MetaDelete) ExecX(ctx context.Context) int { } func (md *MetaDelete) sqlExec(ctx context.Context) (int, error) { - _spec := &sqlgraph.DeleteSpec{ - Node: &sqlgraph.NodeSpec{ - Table: meta.Table, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - }, - } + _spec := sqlgraph.NewDeleteSpec(meta.Table, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) if ps := md.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -88,6 +52,7 @@ func (md *MetaDelete) sqlExec(ctx context.Context) (int, error) { if err != nil && sqlgraph.IsConstraintError(err) { err = &ConstraintError{msg: err.Error(), wrap: err} } + md.mutation.done = true return affected, err } @@ -96,6 +61,12 @@ type MetaDeleteOne struct { md *MetaDelete } +// Where appends a list predicates to the MetaDelete builder. +func (mdo *MetaDeleteOne) Where(ps ...predicate.Meta) *MetaDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + // Exec executes the deletion query. 
func (mdo *MetaDeleteOne) Exec(ctx context.Context) error { n, err := mdo.md.Exec(ctx) @@ -111,5 +82,7 @@ func (mdo *MetaDeleteOne) Exec(ctx context.Context) error { // ExecX is like Exec, but panics if an error occurs. func (mdo *MetaDeleteOne) ExecX(ctx context.Context) { - mdo.md.ExecX(ctx) + if err := mdo.Exec(ctx); err != nil { + panic(err) + } } diff --git a/pkg/database/ent/meta_query.go b/pkg/database/ent/meta_query.go index d6fd4f3d522..87d91d09e0e 100644 --- a/pkg/database/ent/meta_query.go +++ b/pkg/database/ent/meta_query.go @@ -18,11 +18,9 @@ import ( // MetaQuery is the builder for querying Meta entities. type MetaQuery struct { config - limit *int - offset *int - unique *bool - order []OrderFunc - fields []string + ctx *QueryContext + order []meta.OrderOption + inters []Interceptor predicates []predicate.Meta withOwner *AlertQuery // intermediate query (i.e. traversal path). @@ -36,34 +34,34 @@ func (mq *MetaQuery) Where(ps ...predicate.Meta) *MetaQuery { return mq } -// Limit adds a limit step to the query. +// Limit the number of records to be returned by this query. func (mq *MetaQuery) Limit(limit int) *MetaQuery { - mq.limit = &limit + mq.ctx.Limit = &limit return mq } -// Offset adds an offset step to the query. +// Offset to start from. func (mq *MetaQuery) Offset(offset int) *MetaQuery { - mq.offset = &offset + mq.ctx.Offset = &offset return mq } // Unique configures the query builder to filter duplicate records on query. // By default, unique is set to true, and can be disabled using this method. func (mq *MetaQuery) Unique(unique bool) *MetaQuery { - mq.unique = &unique + mq.ctx.Unique = &unique return mq } -// Order adds an order step to the query. -func (mq *MetaQuery) Order(o ...OrderFunc) *MetaQuery { +// Order specifies how the records should be ordered. +func (mq *MetaQuery) Order(o ...meta.OrderOption) *MetaQuery { mq.order = append(mq.order, o...) return mq } // QueryOwner chains the current query on the "owner" edge. 
func (mq *MetaQuery) QueryOwner() *AlertQuery { - query := &AlertQuery{config: mq.config} + query := (&AlertClient{config: mq.config}).Query() query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { if err := mq.prepareQuery(ctx); err != nil { return nil, err @@ -86,7 +84,7 @@ func (mq *MetaQuery) QueryOwner() *AlertQuery { // First returns the first Meta entity from the query. // Returns a *NotFoundError when no Meta was found. func (mq *MetaQuery) First(ctx context.Context) (*Meta, error) { - nodes, err := mq.Limit(1).All(ctx) + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) if err != nil { return nil, err } @@ -109,7 +107,7 @@ func (mq *MetaQuery) FirstX(ctx context.Context) *Meta { // Returns a *NotFoundError when no Meta ID was found. func (mq *MetaQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(1).IDs(ctx); err != nil { + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { return } if len(ids) == 0 { @@ -132,7 +130,7 @@ func (mq *MetaQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Meta entity is found. // Returns a *NotFoundError when no Meta entities are found. func (mq *MetaQuery) Only(ctx context.Context) (*Meta, error) { - nodes, err := mq.Limit(2).All(ctx) + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) if err != nil { return nil, err } @@ -160,7 +158,7 @@ func (mq *MetaQuery) OnlyX(ctx context.Context) *Meta { // Returns a *NotFoundError when no entities are found. func (mq *MetaQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(2).IDs(ctx); err != nil { + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { return } switch len(ids) { @@ -185,10 +183,12 @@ func (mq *MetaQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of MetaSlice. 
func (mq *MetaQuery) All(ctx context.Context) ([]*Meta, error) { + ctx = setContextOp(ctx, mq.ctx, "All") if err := mq.prepareQuery(ctx); err != nil { return nil, err } - return mq.sqlAll(ctx) + qr := querierAll[[]*Meta, *MetaQuery]() + return withInterceptors[[]*Meta](ctx, mq, qr, mq.inters) } // AllX is like All, but panics if an error occurs. @@ -201,9 +201,12 @@ func (mq *MetaQuery) AllX(ctx context.Context) []*Meta { } // IDs executes the query and returns a list of Meta IDs. -func (mq *MetaQuery) IDs(ctx context.Context) ([]int, error) { - var ids []int - if err := mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil { +func (mq *MetaQuery) IDs(ctx context.Context) (ids []int, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, "IDs") + if err = mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil { return nil, err } return ids, nil @@ -220,10 +223,11 @@ func (mq *MetaQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (mq *MetaQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, "Count") if err := mq.prepareQuery(ctx); err != nil { return 0, err } - return mq.sqlCount(ctx) + return withInterceptors[int](ctx, mq, querierCount[*MetaQuery](), mq.inters) } // CountX is like Count, but panics if an error occurs. @@ -237,10 +241,15 @@ func (mq *MetaQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (mq *MetaQuery) Exist(ctx context.Context) (bool, error) { - if err := mq.prepareQuery(ctx); err != nil { - return false, err + ctx = setContextOp(ctx, mq.ctx, "Exist") + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil } - return mq.sqlExist(ctx) } // ExistX is like Exist, but panics if an error occurs. 
@@ -260,22 +269,21 @@ func (mq *MetaQuery) Clone() *MetaQuery { } return &MetaQuery{ config: mq.config, - limit: mq.limit, - offset: mq.offset, - order: append([]OrderFunc{}, mq.order...), + ctx: mq.ctx.Clone(), + order: append([]meta.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), predicates: append([]predicate.Meta{}, mq.predicates...), withOwner: mq.withOwner.Clone(), // clone intermediate query. - sql: mq.sql.Clone(), - path: mq.path, - unique: mq.unique, + sql: mq.sql.Clone(), + path: mq.path, } } // WithOwner tells the query-builder to eager-load the nodes that are connected to // the "owner" edge. The optional arguments are used to configure the query builder of the edge. func (mq *MetaQuery) WithOwner(opts ...func(*AlertQuery)) *MetaQuery { - query := &AlertQuery{config: mq.config} + query := (&AlertClient{config: mq.config}).Query() for _, opt := range opts { opt(query) } @@ -298,16 +306,11 @@ func (mq *MetaQuery) WithOwner(opts ...func(*AlertQuery)) *MetaQuery { // Aggregate(ent.Count()). // Scan(ctx, &v) func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy { - grbuild := &MetaGroupBy{config: mq.config} - grbuild.fields = append([]string{field}, fields...) - grbuild.path = func(ctx context.Context) (prev *sql.Selector, err error) { - if err := mq.prepareQuery(ctx); err != nil { - return nil, err - } - return mq.sqlQuery(ctx), nil - } + mq.ctx.Fields = append([]string{field}, fields...) + grbuild := &MetaGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields grbuild.label = meta.Label - grbuild.flds, grbuild.scan = &grbuild.fields, grbuild.Scan + grbuild.scan = grbuild.Scan return grbuild } @@ -324,15 +327,30 @@ func (mq *MetaQuery) GroupBy(field string, fields ...string) *MetaGroupBy { // Select(meta.FieldCreatedAt). // Scan(ctx, &v) func (mq *MetaQuery) Select(fields ...string) *MetaSelect { - mq.fields = append(mq.fields, fields...) 
- selbuild := &MetaSelect{MetaQuery: mq} - selbuild.label = meta.Label - selbuild.flds, selbuild.scan = &mq.fields, selbuild.Scan - return selbuild + mq.ctx.Fields = append(mq.ctx.Fields, fields...) + sbuild := &MetaSelect{MetaQuery: mq} + sbuild.label = meta.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MetaSelect configured with the given aggregations. +func (mq *MetaQuery) Aggregate(fns ...AggregateFunc) *MetaSelect { + return mq.Select().Aggregate(fns...) } func (mq *MetaQuery) prepareQuery(ctx context.Context) error { - for _, f := range mq.fields { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { if !meta.ValidColumn(f) { return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} } @@ -392,6 +410,9 @@ func (mq *MetaQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []* } nodeids[fk] = append(nodeids[fk], nodes[i]) } + if len(ids) == 0 { + return nil + } query.Where(alert.IDIn(ids...)) neighbors, err := query.All(ctx) if err != nil { @@ -411,41 +432,22 @@ func (mq *MetaQuery) loadOwner(ctx context.Context, query *AlertQuery, nodes []* func (mq *MetaQuery) sqlCount(ctx context.Context) (int, error) { _spec := mq.querySpec() - _spec.Node.Columns = mq.fields - if len(mq.fields) > 0 { - _spec.Unique = mq.unique != nil && *mq.unique + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique } return sqlgraph.CountNodes(ctx, mq.driver, _spec) } -func (mq *MetaQuery) sqlExist(ctx context.Context) (bool, error) { - switch _, err := mq.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: 
%w", err) - default: - return true, nil - } -} - func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { - _spec := &sqlgraph.QuerySpec{ - Node: &sqlgraph.NodeSpec{ - Table: meta.Table, - Columns: meta.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - }, - From: mq.sql, - Unique: true, - } - if unique := mq.unique; unique != nil { + _spec := sqlgraph.NewQuerySpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true } - if fields := mq.fields; len(fields) > 0 { + if fields := mq.ctx.Fields; len(fields) > 0 { _spec.Node.Columns = make([]string, 0, len(fields)) _spec.Node.Columns = append(_spec.Node.Columns, meta.FieldID) for i := range fields { @@ -453,6 +455,9 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) } } + if mq.withOwner != nil { + _spec.Node.AddColumnOnce(meta.FieldAlertMetas) + } } if ps := mq.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -461,10 +466,10 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { } } } - if limit := mq.limit; limit != nil { + if limit := mq.ctx.Limit; limit != nil { _spec.Limit = *limit } - if offset := mq.offset; offset != nil { + if offset := mq.ctx.Offset; offset != nil { _spec.Offset = *offset } if ps := mq.order; len(ps) > 0 { @@ -480,7 +485,7 @@ func (mq *MetaQuery) querySpec() *sqlgraph.QuerySpec { func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { builder := sql.Dialect(mq.driver.Dialect()) t1 := builder.Table(meta.Table) - columns := mq.fields + columns := mq.ctx.Fields if len(columns) == 0 { columns = meta.Columns } @@ -489,7 +494,7 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { selector = mq.sql selector.Select(selector.Columns(columns...)...) 
} - if mq.unique != nil && *mq.unique { + if mq.ctx.Unique != nil && *mq.ctx.Unique { selector.Distinct() } for _, p := range mq.predicates { @@ -498,12 +503,12 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { for _, p := range mq.order { p(selector) } - if offset := mq.offset; offset != nil { + if offset := mq.ctx.Offset; offset != nil { // limit is mandatory for offset clause. We start // with default value, and override it below if needed. selector.Offset(*offset).Limit(math.MaxInt32) } - if limit := mq.limit; limit != nil { + if limit := mq.ctx.Limit; limit != nil { selector.Limit(*limit) } return selector @@ -511,13 +516,8 @@ func (mq *MetaQuery) sqlQuery(ctx context.Context) *sql.Selector { // MetaGroupBy is the group-by builder for Meta entities. type MetaGroupBy struct { - config selector - fields []string - fns []AggregateFunc - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) + build *MetaQuery } // Aggregate adds the given aggregation functions to the group-by query. @@ -526,74 +526,77 @@ func (mgb *MetaGroupBy) Aggregate(fns ...AggregateFunc) *MetaGroupBy { return mgb } -// Scan applies the group-by query and scans the result into the given value. +// Scan applies the selector query and scans the result into the given value. 
func (mgb *MetaGroupBy) Scan(ctx context.Context, v any) error { - query, err := mgb.path(ctx) - if err != nil { + ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + if err := mgb.build.prepareQuery(ctx); err != nil { return err } - mgb.sql = query - return mgb.sqlScan(ctx, v) + return scanWithInterceptors[*MetaQuery, *MetaGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) } -func (mgb *MetaGroupBy) sqlScan(ctx context.Context, v any) error { - for _, f := range mgb.fields { - if !meta.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("invalid field %q for group-by", f)} +func (mgb *MetaGroupBy) sqlScan(ctx context.Context, root *MetaQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) } + columns = append(columns, aggregation...) + selector.Select(columns...) } - selector := mgb.sqlQuery() + selector.GroupBy(selector.Columns(*mgb.flds...)...) if err := selector.Err(); err != nil { return err } rows := &sql.Rows{} query, args := selector.Query() - if err := mgb.driver.Query(ctx, query, args, rows); err != nil { + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { return err } defer rows.Close() return sql.ScanSlice(rows, v) } -func (mgb *MetaGroupBy) sqlQuery() *sql.Selector { - selector := mgb.sql.Select() - aggregation := make([]string, 0, len(mgb.fns)) - for _, fn := range mgb.fns { - aggregation = append(aggregation, fn(selector)) - } - // If no columns were selected in a custom aggregation function, the default - // selection is the fields used for "group-by", and the aggregation functions. 
- if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(mgb.fields)+len(mgb.fns)) - for _, f := range mgb.fields { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - return selector.GroupBy(selector.Columns(mgb.fields...)...) -} - // MetaSelect is the builder for selecting fields of Meta entities. type MetaSelect struct { *MetaQuery selector - // intermediate query (i.e. traversal path). - sql *sql.Selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MetaSelect) Aggregate(fns ...AggregateFunc) *MetaSelect { + ms.fns = append(ms.fns, fns...) + return ms } // Scan applies the selector query and scans the result into the given value. func (ms *MetaSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, "Select") if err := ms.prepareQuery(ctx); err != nil { return err } - ms.sql = ms.MetaQuery.sqlQuery(ctx) - return ms.sqlScan(ctx, v) + return scanWithInterceptors[*MetaQuery, *MetaSelect](ctx, ms.MetaQuery, ms, ms.inters, v) } -func (ms *MetaSelect) sqlScan(ctx context.Context, v any) error { +func (ms *MetaSelect) sqlScan(ctx context.Context, root *MetaQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) 
+ } rows := &sql.Rows{} - query, args := ms.sql.Query() + query, args := selector.Query() if err := ms.driver.Query(ctx, query, args, rows); err != nil { return err } diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index 67a198dddfa..8071c4f0df5 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -117,41 +117,8 @@ func (mu *MetaUpdate) ClearOwner() *MetaUpdate { // Save executes the query and returns the number of nodes affected by the update operation. func (mu *MetaUpdate) Save(ctx context.Context) (int, error) { - var ( - err error - affected int - ) mu.defaults() - if len(mu.hooks) == 0 { - if err = mu.check(); err != nil { - return 0, err - } - affected, err = mu.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = mu.check(); err != nil { - return 0, err - } - mu.mutation = mutation - affected, err = mu.sqlSave(ctx) - mutation.done = true - return affected, err - }) - for i := len(mu.hooks) - 1; i >= 0; i-- { - if mu.hooks[i] == nil { - return 0, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = mu.hooks[i](mut) - } - if _, err := mut.Mutate(ctx, mu.mutation); err != nil { - return 0, err - } - } - return affected, err + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -199,16 +166,10 @@ func (mu *MetaUpdate) check() error { } func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: meta.Table, - Columns: meta.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - }, + if err := mu.check(); err != nil { + return n, err } + _spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) if ps := mu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { for i := range ps { @@ -217,44 +178,22 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { } } if value, ok := mu.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldCreatedAt, - }) + _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } if mu.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: meta.FieldCreatedAt, - }) + _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) } if value, ok := mu.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldUpdatedAt, - }) + _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } if mu.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: meta.FieldUpdatedAt, - }) + _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) } if value, ok := mu.mutation.Key(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldKey, - }) + _spec.SetField(meta.FieldKey, field.TypeString, value) } if value, ok := mu.mutation.Value(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, 
&sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldValue, - }) + _spec.SetField(meta.FieldValue, field.TypeString, value) } if mu.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -264,10 +203,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -280,10 +216,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -299,6 +232,7 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { } return 0, err } + mu.mutation.done = true return n, nil } @@ -396,6 +330,12 @@ func (muo *MetaUpdateOne) ClearOwner() *MetaUpdateOne { return muo } +// Where appends a list predicates to the MetaUpdate builder. +func (muo *MetaUpdateOne) Where(ps ...predicate.Meta) *MetaUpdateOne { + muo.mutation.Where(ps...) + return muo +} + // Select allows selecting one or more fields (columns) of the returned entity. // The default is selecting all fields defined in the entity schema. func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne { @@ -405,47 +345,8 @@ func (muo *MetaUpdateOne) Select(field string, fields ...string) *MetaUpdateOne // Save executes the query and returns the updated Meta entity. 
func (muo *MetaUpdateOne) Save(ctx context.Context) (*Meta, error) { - var ( - err error - node *Meta - ) muo.defaults() - if len(muo.hooks) == 0 { - if err = muo.check(); err != nil { - return nil, err - } - node, err = muo.sqlSave(ctx) - } else { - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*MetaMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err = muo.check(); err != nil { - return nil, err - } - muo.mutation = mutation - node, err = muo.sqlSave(ctx) - mutation.done = true - return node, err - }) - for i := len(muo.hooks) - 1; i >= 0; i-- { - if muo.hooks[i] == nil { - return nil, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = muo.hooks[i](mut) - } - v, err := mut.Mutate(ctx, muo.mutation) - if err != nil { - return nil, err - } - nv, ok := v.(*Meta) - if !ok { - return nil, fmt.Errorf("unexpected node type %T returned from MetaMutation", v) - } - node = nv - } - return node, err + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) } // SaveX is like Save, but panics if an error occurs. 
@@ -493,16 +394,10 @@ func (muo *MetaUpdateOne) check() error { } func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) { - _spec := &sqlgraph.UpdateSpec{ - Node: &sqlgraph.NodeSpec{ - Table: meta.Table, - Columns: meta.Columns, - ID: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: meta.FieldID, - }, - }, + if err := muo.check(); err != nil { + return _node, err } + _spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) id, ok := muo.mutation.ID() if !ok { return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Meta.id" for update`)} @@ -528,44 +423,22 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) } } if value, ok := muo.mutation.CreatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldCreatedAt, - }) + _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } if muo.mutation.CreatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: meta.FieldCreatedAt, - }) + _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) } if value, ok := muo.mutation.UpdatedAt(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Value: value, - Column: meta.FieldUpdatedAt, - }) + _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } if muo.mutation.UpdatedAtCleared() { - _spec.Fields.Clear = append(_spec.Fields.Clear, &sqlgraph.FieldSpec{ - Type: field.TypeTime, - Column: meta.FieldUpdatedAt, - }) + _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) } if value, ok := muo.mutation.Key(); ok { - _spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldKey, - }) + _spec.SetField(meta.FieldKey, field.TypeString, value) } if value, ok := muo.mutation.Value(); ok { - 
_spec.Fields.Set = append(_spec.Fields.Set, &sqlgraph.FieldSpec{ - Type: field.TypeString, - Value: value, - Column: meta.FieldValue, - }) + _spec.SetField(meta.FieldValue, field.TypeString, value) } if muo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ @@ -575,10 +448,7 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } _spec.Edges.Clear = append(_spec.Edges.Clear, edge) @@ -591,10 +461,7 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) Columns: []string{meta.OwnerColumn}, Bidi: false, Target: &sqlgraph.EdgeTarget{ - IDSpec: &sqlgraph.FieldSpec{ - Type: field.TypeInt, - Column: alert.FieldID, - }, + IDSpec: sqlgraph.NewFieldSpec(alert.FieldID, field.TypeInt), }, } for _, k := range nodes { @@ -613,5 +480,6 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) } return nil, err } + muo.mutation.done = true return _node, nil } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 907c1ef015e..c5808d0d9b8 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -9,6 +9,8 @@ import ( "sync" "time" + "entgo.io/ent" + "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" @@ -17,8 +19,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" - - "entgo.io/ent" ) const ( @@ -1578,11 +1578,26 @@ func (m *AlertMutation) Where(ps ...predicate.Alert) { m.predicates = append(m.predicates, ps...) 
} +// WhereP appends storage-level predicates to the AlertMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AlertMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Alert, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *AlertMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *AlertMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Alert). func (m *AlertMutation) Type() string { return m.typ @@ -2997,11 +3012,26 @@ func (m *BouncerMutation) Where(ps ...predicate.Bouncer) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the BouncerMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BouncerMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Bouncer, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *BouncerMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *BouncerMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Bouncer). func (m *BouncerMutation) Type() string { return m.typ @@ -3654,11 +3684,26 @@ func (m *ConfigItemMutation) Where(ps ...predicate.ConfigItem) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the ConfigItemMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ConfigItemMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.ConfigItem, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. 
func (m *ConfigItemMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *ConfigItemMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (ConfigItem). func (m *ConfigItemMutation) Type() string { return m.typ @@ -4830,6 +4875,7 @@ func (m *DecisionMutation) SetOwnerID(id int) { // ClearOwner clears the "owner" edge to the Alert entity. func (m *DecisionMutation) ClearOwner() { m.clearedowner = true + m.clearedFields[decision.FieldAlertDecisions] = struct{}{} } // OwnerCleared reports if the "owner" edge to the Alert entity was cleared. @@ -4866,11 +4912,26 @@ func (m *DecisionMutation) Where(ps ...predicate.Decision) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the DecisionMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *DecisionMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Decision, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *DecisionMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *DecisionMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Decision). func (m *DecisionMutation) Type() string { return m.typ @@ -5775,6 +5836,7 @@ func (m *EventMutation) SetOwnerID(id int) { // ClearOwner clears the "owner" edge to the Alert entity. func (m *EventMutation) ClearOwner() { m.clearedowner = true + m.clearedFields[event.FieldAlertEvents] = struct{}{} } // OwnerCleared reports if the "owner" edge to the Alert entity was cleared. @@ -5811,11 +5873,26 @@ func (m *EventMutation) Where(ps ...predicate.Event) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the EventMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *EventMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Event, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *EventMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *EventMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Event). func (m *EventMutation) Type() string { return m.typ @@ -6795,11 +6872,26 @@ func (m *MachineMutation) Where(ps ...predicate.Machine) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the MachineMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *MachineMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Machine, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *MachineMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *MachineMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Machine). func (m *MachineMutation) Type() string { return m.typ @@ -7565,6 +7657,7 @@ func (m *MetaMutation) SetOwnerID(id int) { // ClearOwner clears the "owner" edge to the Alert entity. func (m *MetaMutation) ClearOwner() { m.clearedowner = true + m.clearedFields[meta.FieldAlertMetas] = struct{}{} } // OwnerCleared reports if the "owner" edge to the Alert entity was cleared. @@ -7601,11 +7694,26 @@ func (m *MetaMutation) Where(ps ...predicate.Meta) { m.predicates = append(m.predicates, ps...) } +// WhereP appends storage-level predicates to the MetaMutation builder. 
Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *MetaMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Meta, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + // Op returns the operation name. func (m *MetaMutation) Op() Op { return m.op } +// SetOp allows setting the mutation operation. +func (m *MetaMutation) SetOp(op Op) { + m.op = op +} + // Type returns the node type of this mutation (Meta). func (m *MetaMutation) Type() string { return m.typ diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go index e64f7bd7554..2a645f624d7 100644 --- a/pkg/database/ent/runtime/runtime.go +++ b/pkg/database/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go const ( - Version = "v0.11.3" // Version of ent codegen. - Sum = "h1:F5FBGAWiDCGder7YT+lqMnyzXl6d0xU3xMBM/SO3CMc=" // Sum of ent codegen. + Version = "v0.12.4" // Version of ent codegen. + Sum = "h1:LddPnAyxls/O7DTXZvUGDj0NZIdGSu317+aoNLJWbD8=" // Sum of ent codegen. ) diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go index 2a1efd152a0..65c2ed00a44 100644 --- a/pkg/database/ent/tx.go +++ b/pkg/database/ent/tx.go @@ -30,12 +30,6 @@ type Tx struct { // lazily loaded. client *Client clientOnce sync.Once - - // completion callbacks. - mu sync.Mutex - onCommit []CommitHook - onRollback []RollbackHook - // ctx lives for the life of the transaction. It is // the same context used by the underlying connection. ctx context.Context @@ -80,9 +74,9 @@ func (tx *Tx) Commit() error { var fn Committer = CommitFunc(func(context.Context, *Tx) error { return txDriver.tx.Commit() }) - tx.mu.Lock() - hooks := append([]CommitHook(nil), tx.onCommit...) - tx.mu.Unlock() + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) 
+ txDriver.mu.Unlock() for i := len(hooks) - 1; i >= 0; i-- { fn = hooks[i](fn) } @@ -91,9 +85,10 @@ func (tx *Tx) Commit() error { // OnCommit adds a hook to call on commit. func (tx *Tx) OnCommit(f CommitHook) { - tx.mu.Lock() - defer tx.mu.Unlock() - tx.onCommit = append(tx.onCommit, f) + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() } type ( @@ -135,9 +130,9 @@ func (tx *Tx) Rollback() error { var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { return txDriver.tx.Rollback() }) - tx.mu.Lock() - hooks := append([]RollbackHook(nil), tx.onRollback...) - tx.mu.Unlock() + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() for i := len(hooks) - 1; i >= 0; i-- { fn = hooks[i](fn) } @@ -146,9 +141,10 @@ func (tx *Tx) Rollback() error { // OnRollback adds a hook to call on rollback. func (tx *Tx) OnRollback(f RollbackHook) { - tx.mu.Lock() - defer tx.mu.Unlock() - tx.onRollback = append(tx.onRollback, f) + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() } // Client returns a Client that binds to current transaction. @@ -186,6 +182,10 @@ type txDriver struct { drv dialect.Driver // tx is the underlying transaction. tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook } // newTx creates a new transactional driver. 
From 8de8bf0e0653beb80d847741b416ad51edd00272 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 14 Feb 2024 11:53:12 +0100 Subject: [PATCH 028/581] pkg/hubtest: extract methods + consistent error handling (#2756) * pkg/hubtest: extract methods + consistent error handling * lint * rename variables for further refactor --- pkg/hubtest/appsecrule.go | 80 ++++++++++++++++-------------- pkg/hubtest/parser.go | 99 +++++++++++++++++++------------------ pkg/hubtest/postoverflow.go | 98 ++++++++++++++++++------------------ pkg/hubtest/scenario.go | 78 +++++++++++++++-------------- 4 files changed, 185 insertions(+), 170 deletions(-) diff --git a/pkg/hubtest/appsecrule.go b/pkg/hubtest/appsecrule.go index 9b70e1441ac..fb4ad78cc18 100644 --- a/pkg/hubtest/appsecrule.go +++ b/pkg/hubtest/appsecrule.go @@ -11,75 +11,81 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (t *HubTestItem) installAppsecRuleItem(hubAppsecRule *cwhub.Item) error { - appsecRuleSource, err := filepath.Abs(filepath.Join(t.HubPath, hubAppsecRule.RemotePath)) +func (t *HubTestItem) installAppsecRuleItem(item *cwhub.Item) error { + sourcePath, err := filepath.Abs(filepath.Join(t.HubPath, item.RemotePath)) if err != nil { - return fmt.Errorf("can't get absolute path of '%s': %s", appsecRuleSource, err) + return fmt.Errorf("can't get absolute path of '%s': %w", sourcePath, err) } - appsecRuleFilename := filepath.Base(appsecRuleSource) + sourceFilename := filepath.Base(sourcePath) // runtime/hub/appsec-rules/author/appsec-rule - hubDirAppsecRuleDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubAppsecRule.RemotePath)) + hubDirAppsecRuleDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(item.RemotePath)) // runtime/appsec-rules/ - appsecRuleDirDest := fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) + itemTypeDirDest := fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) if err := os.MkdirAll(hubDirAppsecRuleDest, os.ModePerm); err != nil { - return 
fmt.Errorf("unable to create folder '%s': %s", hubDirAppsecRuleDest, err) + return fmt.Errorf("unable to create folder '%s': %w", hubDirAppsecRuleDest, err) } - if err := os.MkdirAll(appsecRuleDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", appsecRuleDirDest, err) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) } // runtime/hub/appsec-rules/crowdsecurity/rule.yaml - hubDirAppsecRulePath := filepath.Join(appsecRuleDirDest, appsecRuleFilename) - if err := Copy(appsecRuleSource, hubDirAppsecRulePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %s", appsecRuleSource, hubDirAppsecRulePath, err) + hubDirAppsecRulePath := filepath.Join(itemTypeDirDest, sourceFilename) + if err := Copy(sourcePath, hubDirAppsecRulePath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %w", sourcePath, hubDirAppsecRulePath, err) } // runtime/appsec-rules/rule.yaml - appsecRulePath := filepath.Join(appsecRuleDirDest, appsecRuleFilename) + appsecRulePath := filepath.Join(itemTypeDirDest, sourceFilename) if err := os.Symlink(hubDirAppsecRulePath, appsecRulePath); err != nil { if !os.IsExist(err) { - return fmt.Errorf("unable to symlink appsec-rule '%s' to '%s': %s", hubDirAppsecRulePath, appsecRulePath, err) + return fmt.Errorf("unable to symlink appsec-rule '%s' to '%s': %w", hubDirAppsecRulePath, appsecRulePath, err) } } return nil } +func (t *HubTestItem) installAppsecRuleCustomFrom(appsecrule string, customPath string) (bool, error) { + // we check if its a custom appsec-rule + customAppsecRulePath := filepath.Join(customPath, appsecrule) + if _, err := os.Stat(customAppsecRulePath); os.IsNotExist(err) { + return false, nil + } + + customAppsecRulePathSplit := strings.Split(customAppsecRulePath, "/") + customAppsecRuleName := customAppsecRulePathSplit[len(customAppsecRulePathSplit)-1] + + itemTypeDirDest := 
fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return false, fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + } + + customAppsecRuleDest := fmt.Sprintf("%s/appsec-rules/%s", t.RuntimePath, customAppsecRuleName) + if err := Copy(customAppsecRulePath, customAppsecRuleDest); err != nil { + return false, fmt.Errorf("unable to copy appsec-rule from '%s' to '%s': %w", customAppsecRulePath, customAppsecRuleDest, err) + } + + return true, nil +} + func (t *HubTestItem) installAppsecRuleCustom(appsecrule string) error { - customAppsecRuleExist := false for _, customPath := range t.CustomItemsLocation { - // we check if its a custom appsec-rule - customAppsecRulePath := filepath.Join(customPath, appsecrule) - if _, err := os.Stat(customAppsecRulePath); os.IsNotExist(err) { - continue - } - customAppsecRulePathSplit := strings.Split(customAppsecRulePath, "/") - customAppsecRuleName := customAppsecRulePathSplit[len(customAppsecRulePathSplit)-1] - - appsecRuleDirDest := fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) - if err := os.MkdirAll(appsecRuleDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", appsecRuleDirDest, err) + found, err := t.installAppsecRuleCustomFrom(appsecrule, customPath) + if err != nil { + return err } - // runtime/appsec-rules/ - customAppsecRuleDest := fmt.Sprintf("%s/appsec-rules/%s", t.RuntimePath, customAppsecRuleName) - // if path to postoverflow exist, copy it - if err := Copy(customAppsecRulePath, customAppsecRuleDest); err != nil { - continue + if found { + return nil } - customAppsecRuleExist = true - break - } - if !customAppsecRuleExist { - return fmt.Errorf("couldn't find custom appsec-rule '%s' in the following location: %+v", appsecrule, t.CustomItemsLocation) } - return nil + return fmt.Errorf("couldn't find custom appsec-rule '%s' in the following location: %+v", appsecrule, t.CustomItemsLocation) } func (t 
*HubTestItem) installAppsecRule(name string) error { diff --git a/pkg/hubtest/parser.go b/pkg/hubtest/parser.go index b8dcdb8b1d0..d40301e3015 100644 --- a/pkg/hubtest/parser.go +++ b/pkg/hubtest/parser.go @@ -9,89 +9,90 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (t *HubTestItem) installParserItem(hubParser *cwhub.Item) error { - parserSource, err := filepath.Abs(filepath.Join(t.HubPath, hubParser.RemotePath)) +func (t *HubTestItem) installParserItem(item *cwhub.Item) error { + sourcePath, err := filepath.Abs(filepath.Join(t.HubPath, item.RemotePath)) if err != nil { - return fmt.Errorf("can't get absolute path of '%s': %s", parserSource, err) + return fmt.Errorf("can't get absolute path of '%s': %w", sourcePath, err) } - parserFileName := filepath.Base(parserSource) + sourceFilename := filepath.Base(sourcePath) // runtime/hub/parsers/s00-raw/crowdsecurity/ - hubDirParserDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubParser.RemotePath)) + hubDirParserDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(item.RemotePath)) // runtime/parsers/s00-raw/ - parserDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, hubParser.Stage) + itemTypeDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, item.Stage) if err := os.MkdirAll(hubDirParserDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", hubDirParserDest, err) + return fmt.Errorf("unable to create folder '%s': %w", hubDirParserDest, err) } - if err := os.MkdirAll(parserDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", parserDirDest, err) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) } // runtime/hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml - hubDirParserPath := filepath.Join(hubDirParserDest, parserFileName) - if err := Copy(parserSource, hubDirParserPath); err != nil { - return fmt.Errorf("unable to 
copy '%s' to '%s': %s", parserSource, hubDirParserPath, err) + hubDirParserPath := filepath.Join(hubDirParserDest, sourceFilename) + if err := Copy(sourcePath, hubDirParserPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %w", sourcePath, hubDirParserPath, err) } // runtime/parsers/s00-raw/syslog-logs.yaml - parserDirParserPath := filepath.Join(parserDirDest, parserFileName) + parserDirParserPath := filepath.Join(itemTypeDirDest, sourceFilename) if err := os.Symlink(hubDirParserPath, parserDirParserPath); err != nil { if !os.IsExist(err) { - return fmt.Errorf("unable to symlink parser '%s' to '%s': %s", hubDirParserPath, parserDirParserPath, err) + return fmt.Errorf("unable to symlink parser '%s' to '%s': %w", hubDirParserPath, parserDirParserPath, err) } } return nil } -func (t *HubTestItem) installParserCustom(parser string) error { - customParserExist := false - for _, customPath := range t.CustomItemsLocation { - // we check if its a custom parser - customParserPath := filepath.Join(customPath, parser) - if _, err := os.Stat(customParserPath); os.IsNotExist(err) { - continue - //return fmt.Errorf("parser '%s' doesn't exist in the hub and doesn't appear to be a custom one.", parser) - } +func (t *HubTestItem) installParserCustomFrom(parser string, customPath string) (bool, error) { + // we check if its a custom parser + customParserPath := filepath.Join(customPath, parser) + if _, err := os.Stat(customParserPath); os.IsNotExist(err) { + return false, nil + } - customParserPathSplit, customParserName := filepath.Split(customParserPath) - // because path is parsers///parser.yaml and we wan't the stage - splittedPath := strings.Split(customParserPathSplit, string(os.PathSeparator)) - customParserStage := splittedPath[len(splittedPath)-3] + customParserPathSplit, customParserName := filepath.Split(customParserPath) + // because path is parsers///parser.yaml and we wan't the stage + splitPath := strings.Split(customParserPathSplit, 
string(os.PathSeparator)) + customParserStage := splitPath[len(splitPath)-3] - // check if stage exist - hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("parsers/%s", customParserStage)) + // check if stage exist + hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("parsers/%s", customParserStage)) + if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { + return false, fmt.Errorf("stage '%s' extracted from '%s' doesn't exist in the hub", customParserStage, hubStagePath) + } - if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { - continue - //return fmt.Errorf("stage '%s' extracted from '%s' doesn't exist in the hub", customParserStage, hubStagePath) - } + stageDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, customParserStage) + if err := os.MkdirAll(stageDirDest, os.ModePerm); err != nil { + return false, fmt.Errorf("unable to create folder '%s': %w", stageDirDest, err) + } - parserDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, customParserStage) - if err := os.MkdirAll(parserDirDest, os.ModePerm); err != nil { - continue - //return fmt.Errorf("unable to create folder '%s': %s", parserDirDest, err) - } + customParserDest := filepath.Join(stageDirDest, customParserName) + // if path to parser exist, copy it + if err := Copy(customParserPath, customParserDest); err != nil { + return false, fmt.Errorf("unable to copy custom parser '%s' to '%s': %w", customParserPath, customParserDest, err) + } + + return true, nil +} - customParserDest := filepath.Join(parserDirDest, customParserName) - // if path to parser exist, copy it - if err := Copy(customParserPath, customParserDest); err != nil { - continue - //return fmt.Errorf("unable to copy custom parser '%s' to '%s': %s", customParserPath, customParserDest, err) +func (t *HubTestItem) installParserCustom(parser string) error { + for _, customPath := range t.CustomItemsLocation { + found, err := t.installParserCustomFrom(parser, customPath) + if err != nil { + return err } - 
customParserExist = true - break - } - if !customParserExist { - return fmt.Errorf("couldn't find custom parser '%s' in the following location: %+v", parser, t.CustomItemsLocation) + if found { + return nil + } } - return nil + return fmt.Errorf("couldn't find custom parser '%s' in the following locations: %+v", parser, t.CustomItemsLocation) } func (t *HubTestItem) installParser(name string) error { diff --git a/pkg/hubtest/postoverflow.go b/pkg/hubtest/postoverflow.go index d5d43ddc742..76a67b58b76 100644 --- a/pkg/hubtest/postoverflow.go +++ b/pkg/hubtest/postoverflow.go @@ -9,88 +9,90 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (t *HubTestItem) installPostoverflowItem(hubPostOverflow *cwhub.Item) error { - postoverflowSource, err := filepath.Abs(filepath.Join(t.HubPath, hubPostOverflow.RemotePath)) +func (t *HubTestItem) installPostoverflowItem(item *cwhub.Item) error { + sourcePath, err := filepath.Abs(filepath.Join(t.HubPath, item.RemotePath)) if err != nil { - return fmt.Errorf("can't get absolute path of '%s': %s", postoverflowSource, err) + return fmt.Errorf("can't get absolute path of '%s': %w", sourcePath, err) } - postoverflowFileName := filepath.Base(postoverflowSource) + sourceFilename := filepath.Base(sourcePath) // runtime/hub/postoverflows/s00-enrich/crowdsecurity/ - hubDirPostoverflowDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubPostOverflow.RemotePath)) + hubDirPostoverflowDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(item.RemotePath)) // runtime/postoverflows/s00-enrich - postoverflowDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, hubPostOverflow.Stage) + itemTypeDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, item.Stage) if err := os.MkdirAll(hubDirPostoverflowDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", hubDirPostoverflowDest, err) + return fmt.Errorf("unable to create folder '%s': %w", hubDirPostoverflowDest, err) } - if err := 
os.MkdirAll(postoverflowDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", postoverflowDirDest, err) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) } // runtime/hub/postoverflows/s00-enrich/crowdsecurity/rdns.yaml - hubDirPostoverflowPath := filepath.Join(hubDirPostoverflowDest, postoverflowFileName) - if err := Copy(postoverflowSource, hubDirPostoverflowPath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %s", postoverflowSource, hubDirPostoverflowPath, err) + hubDirPostoverflowPath := filepath.Join(hubDirPostoverflowDest, sourceFilename) + if err := Copy(sourcePath, hubDirPostoverflowPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %w", sourcePath, hubDirPostoverflowPath, err) } // runtime/postoverflows/s00-enrich/rdns.yaml - postoverflowDirParserPath := filepath.Join(postoverflowDirDest, postoverflowFileName) + postoverflowDirParserPath := filepath.Join(itemTypeDirDest, sourceFilename) if err := os.Symlink(hubDirPostoverflowPath, postoverflowDirParserPath); err != nil { if !os.IsExist(err) { - return fmt.Errorf("unable to symlink postoverflow '%s' to '%s': %s", hubDirPostoverflowPath, postoverflowDirParserPath, err) + return fmt.Errorf("unable to symlink postoverflow '%s' to '%s': %w", hubDirPostoverflowPath, postoverflowDirParserPath, err) } } return nil } -func (t *HubTestItem) installPostoverflowCustom(postoverflow string) error { - customPostoverflowExist := false - for _, customPath := range t.CustomItemsLocation { - // we check if its a custom postoverflow - customPostOverflowPath := filepath.Join(customPath, postoverflow) - if _, err := os.Stat(customPostOverflowPath); os.IsNotExist(err) { - continue - //return fmt.Errorf("postoverflow '%s' doesn't exist in the hub and doesn't appear to be a custom one.", postoverflow) - } +func (t *HubTestItem) 
installPostoverflowCustomFrom(postoverflow string, customPath string) (bool, error) { + // we check if its a custom postoverflow + customPostOverflowPath := filepath.Join(customPath, postoverflow) + if _, err := os.Stat(customPostOverflowPath); os.IsNotExist(err) { + return false, nil + } - customPostOverflowPathSplit := strings.Split(customPostOverflowPath, "/") - customPostoverflowName := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-1] - // because path is postoverflows///parser.yaml and we wan't the stage - customPostoverflowStage := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-3] + customPostOverflowPathSplit := strings.Split(customPostOverflowPath, "/") + customPostoverflowName := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-1] + // because path is postoverflows///parser.yaml and we wan't the stage + customPostoverflowStage := customPostOverflowPathSplit[len(customPostOverflowPathSplit)-3] - // check if stage exist - hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("postoverflows/%s", customPostoverflowStage)) + // check if stage exist + hubStagePath := filepath.Join(t.HubPath, fmt.Sprintf("postoverflows/%s", customPostoverflowStage)) + if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { + return false, fmt.Errorf("stage '%s' from extracted '%s' doesn't exist in the hub", customPostoverflowStage, hubStagePath) + } - if _, err := os.Stat(hubStagePath); os.IsNotExist(err) { - continue - //return fmt.Errorf("stage '%s' from extracted '%s' doesn't exist in the hub", customPostoverflowStage, hubStagePath) - } + stageDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, customPostoverflowStage) + if err := os.MkdirAll(stageDirDest, os.ModePerm); err != nil { + return false, fmt.Errorf("unable to create folder '%s': %w", stageDirDest, err) + } - postoverflowDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, customPostoverflowStage) - if err := os.MkdirAll(postoverflowDirDest, os.ModePerm); 
err != nil { - continue - //return fmt.Errorf("unable to create folder '%s': %s", postoverflowDirDest, err) + customPostoverflowDest := filepath.Join(stageDirDest, customPostoverflowName) + // if path to postoverflow exist, copy it + if err := Copy(customPostOverflowPath, customPostoverflowDest); err != nil { + return false, fmt.Errorf("unable to copy custom parser '%s' to '%s': %w", customPostOverflowPath, customPostoverflowDest, err) + } + + return true, nil +} + +func (t *HubTestItem) installPostoverflowCustom(postoverflow string) error { + for _, customPath := range t.CustomItemsLocation { + found, err := t.installPostoverflowCustomFrom(postoverflow, customPath) + if err != nil { + return err } - customPostoverflowDest := filepath.Join(postoverflowDirDest, customPostoverflowName) - // if path to postoverflow exist, copy it - if err := Copy(customPostOverflowPath, customPostoverflowDest); err != nil { - continue - //return fmt.Errorf("unable to copy custom parser '%s' to '%s': %s", customPostOverflowPath, customPostoverflowDest, err) + if found { + return nil } - customPostoverflowExist = true - break - } - if !customPostoverflowExist { - return fmt.Errorf("couldn't find custom postoverflow '%s' in the following location: %+v", postoverflow, t.CustomItemsLocation) } - return nil + return fmt.Errorf("couldn't find custom postoverflow '%s' in the following location: %+v", postoverflow, t.CustomItemsLocation) } func (t *HubTestItem) installPostoverflow(name string) error { diff --git a/pkg/hubtest/scenario.go b/pkg/hubtest/scenario.go index eaa831d8013..35ea465b7c0 100644 --- a/pkg/hubtest/scenario.go +++ b/pkg/hubtest/scenario.go @@ -8,74 +8,80 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (t *HubTestItem) installScenarioItem(hubScenario *cwhub.Item) error { - scenarioSource, err := filepath.Abs(filepath.Join(t.HubPath, hubScenario.RemotePath)) +func (t *HubTestItem) installScenarioItem(item *cwhub.Item) error { + sourcePath, err := 
filepath.Abs(filepath.Join(t.HubPath, item.RemotePath)) if err != nil { - return fmt.Errorf("can't get absolute path to: %s", scenarioSource) + return fmt.Errorf("can't get absolute path of '%s': %w", sourcePath, err) } - scenarioFileName := filepath.Base(scenarioSource) + sourceFilename := filepath.Base(sourcePath) // runtime/hub/scenarios/crowdsecurity/ - hubDirScenarioDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(hubScenario.RemotePath)) + hubDirScenarioDest := filepath.Join(t.RuntimeHubPath, filepath.Dir(item.RemotePath)) // runtime/parsers/scenarios/ - scenarioDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) + itemTypeDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) if err := os.MkdirAll(hubDirScenarioDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", hubDirScenarioDest, err) + return fmt.Errorf("unable to create folder '%s': %w", hubDirScenarioDest, err) } - if err := os.MkdirAll(scenarioDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", scenarioDirDest, err) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) } // runtime/hub/scenarios/crowdsecurity/ssh-bf.yaml - hubDirScenarioPath := filepath.Join(hubDirScenarioDest, scenarioFileName) - if err := Copy(scenarioSource, hubDirScenarioPath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %s", scenarioSource, hubDirScenarioPath, err) + hubDirScenarioPath := filepath.Join(hubDirScenarioDest, sourceFilename) + if err := Copy(sourcePath, hubDirScenarioPath); err != nil { + return fmt.Errorf("unable to copy '%s' to '%s': %w", sourcePath, hubDirScenarioPath, err) } // runtime/scenarios/ssh-bf.yaml - scenarioDirParserPath := filepath.Join(scenarioDirDest, scenarioFileName) + scenarioDirParserPath := filepath.Join(itemTypeDirDest, sourceFilename) if err := os.Symlink(hubDirScenarioPath, scenarioDirParserPath); err 
!= nil { if !os.IsExist(err) { - return fmt.Errorf("unable to symlink scenario '%s' to '%s': %s", hubDirScenarioPath, scenarioDirParserPath, err) + return fmt.Errorf("unable to symlink scenario '%s' to '%s': %w", hubDirScenarioPath, scenarioDirParserPath, err) } } return nil } +func (t *HubTestItem) installScenarioCustomFrom(scenario string, customPath string) (bool, error) { + // we check if its a custom scenario + customScenarioPath := filepath.Join(customPath, scenario) + if _, err := os.Stat(customScenarioPath); os.IsNotExist(err) { + return false, nil + } + + itemTypeDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) + if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { + return false, fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + } + + scenarioFileName := filepath.Base(customScenarioPath) + + scenarioFileDest := filepath.Join(itemTypeDirDest, scenarioFileName) + if err := Copy(customScenarioPath, scenarioFileDest); err != nil { + return false, fmt.Errorf("unable to copy scenario from '%s' to '%s': %w", customScenarioPath, scenarioFileDest, err) + } + + return true, nil +} + func (t *HubTestItem) installScenarioCustom(scenario string) error { - customScenarioExist := false for _, customPath := range t.CustomItemsLocation { - // we check if its a custom scenario - customScenarioPath := filepath.Join(customPath, scenario) - if _, err := os.Stat(customScenarioPath); os.IsNotExist(err) { - continue - //return fmt.Errorf("scenarios '%s' doesn't exist in the hub and doesn't appear to be a custom one.", scenario) - } - - scenarioDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) - if err := os.MkdirAll(scenarioDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %s", scenarioDirDest, err) + found, err := t.installScenarioCustomFrom(scenario, customPath) + if err != nil { + return err } - scenarioFileName := filepath.Base(customScenarioPath) - scenarioFileDest := 
filepath.Join(scenarioDirDest, scenarioFileName) - if err := Copy(customScenarioPath, scenarioFileDest); err != nil { - continue - //return fmt.Errorf("unable to copy scenario from '%s' to '%s': %s", customScenarioPath, scenarioFileDest, err) + if found { + return nil } - customScenarioExist = true - break - } - if !customScenarioExist { - return fmt.Errorf("couldn't find custom scenario '%s' in the following location: %+v", scenario, t.CustomItemsLocation) } - return nil + return fmt.Errorf("couldn't find custom scenario '%s' in the following location: %+v", scenario, t.CustomItemsLocation) } func (t *HubTestItem) installScenario(name string) error { From 97c441dab6c387d8aff77c2b07a256b0b0321f16 Mon Sep 17 00:00:00 2001 From: he2ss Date: Wed, 14 Feb 2024 12:26:42 +0100 Subject: [PATCH 029/581] implement highAvailability feature (#2506) * implement highAvailability feature --------- Co-authored-by: Marco Mariani --- pkg/apiserver/apic.go | 12 + pkg/apiserver/apic_metrics_test.go | 8 +- pkg/database/ent/client.go | 152 ++++++- pkg/database/ent/ent.go | 2 + pkg/database/ent/hook/hook.go | 12 + pkg/database/ent/lock.go | 117 ++++++ pkg/database/ent/lock/lock.go | 62 +++ pkg/database/ent/lock/where.go | 185 +++++++++ pkg/database/ent/lock_create.go | 215 ++++++++++ pkg/database/ent/lock_delete.go | 88 ++++ pkg/database/ent/lock_query.go | 526 ++++++++++++++++++++++++ pkg/database/ent/lock_update.go | 228 ++++++++++ pkg/database/ent/migrate/schema.go | 13 + pkg/database/ent/mutation.go | 382 +++++++++++++++++ pkg/database/ent/predicate/predicate.go | 3 + pkg/database/ent/runtime.go | 7 + pkg/database/ent/schema/lock.go | 22 + pkg/database/ent/tx.go | 3 + pkg/database/lock.go | 67 +++ 19 files changed, 2096 insertions(+), 8 deletions(-) create mode 100644 pkg/database/ent/lock.go create mode 100644 pkg/database/ent/lock/lock.go create mode 100644 pkg/database/ent/lock/where.go create mode 100644 pkg/database/ent/lock_create.go create mode 100644 
pkg/database/ent/lock_delete.go create mode 100644 pkg/database/ent/lock_query.go create mode 100644 pkg/database/ent/lock_update.go create mode 100644 pkg/database/ent/schema/lock.go create mode 100644 pkg/database/lock.go diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index d0b205c254d..2fdb01144a0 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -633,6 +633,13 @@ func (a *apic) PullTop(forcePull bool) error { } } + log.Debug("Acquiring lock for pullCAPI") + err = a.dbClient.AcquirePullCAPILock() + if a.dbClient.IsLocked(err) { + log.Info("PullCAPI is already running, skipping") + return nil + } + log.Infof("Starting community-blocklist update") data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup}) @@ -684,6 +691,11 @@ func (a *apic) PullTop(forcePull bool) error { return fmt.Errorf("while updating blocklists: %w", err) } + log.Debug("Releasing lock for pullCAPI") + if err := a.dbClient.ReleasePullCAPILock(); err != nil { + return fmt.Errorf("while releasing lock: %w", err) + } + return nil } diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 2bc0dd26966..529dd6c6839 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -26,15 +26,15 @@ func TestAPICSendMetrics(t *testing.T) { }{ { name: "basic", - duration: time.Millisecond * 60, - metricsInterval: time.Millisecond * 10, + duration: time.Millisecond * 120, + metricsInterval: time.Millisecond * 20, expectedCalls: 5, setUp: func(api *apic) {}, }, { name: "with some metrics", - duration: time.Millisecond * 60, - metricsInterval: time.Millisecond * 10, + duration: time.Millisecond * 120, + metricsInterval: time.Millisecond * 20, expectedCalls: 5, setUp: func(api *apic) { api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 2761ff088b5..006d52ef9ba 100644 --- 
a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" ) @@ -39,6 +40,8 @@ type Client struct { Decision *DecisionClient // Event is the client for interacting with the Event builders. Event *EventClient + // Lock is the client for interacting with the Lock builders. + Lock *LockClient // Machine is the client for interacting with the Machine builders. Machine *MachineClient // Meta is the client for interacting with the Meta builders. @@ -61,6 +64,7 @@ func (c *Client) init() { c.ConfigItem = NewConfigItemClient(c.config) c.Decision = NewDecisionClient(c.config) c.Event = NewEventClient(c.config) + c.Lock = NewLockClient(c.config) c.Machine = NewMachineClient(c.config) c.Meta = NewMetaClient(c.config) } @@ -153,6 +157,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { ConfigItem: NewConfigItemClient(cfg), Decision: NewDecisionClient(cfg), Event: NewEventClient(cfg), + Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), }, nil @@ -179,6 +184,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) ConfigItem: NewConfigItemClient(cfg), Decision: NewDecisionClient(cfg), Event: NewEventClient(cfg), + Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), }, nil @@ -210,7 +216,8 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. 
func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ - c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta, + c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, + c.Meta, } { n.Use(hooks...) } @@ -220,7 +227,8 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ - c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Machine, c.Meta, + c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, + c.Meta, } { n.Intercept(interceptors...) } @@ -239,6 +247,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Decision.mutate(ctx, m) case *EventMutation: return c.Event.mutate(ctx, m) + case *LockMutation: + return c.Lock.mutate(ctx, m) case *MachineMutation: return c.Machine.mutate(ctx, m) case *MetaMutation: @@ -1009,6 +1019,139 @@ func (c *EventClient) mutate(ctx context.Context, m *EventMutation) (Value, erro } } +// LockClient is a client for the Lock schema. +type LockClient struct { + config +} + +// NewLockClient returns a client for the Lock from the given config. +func NewLockClient(c config) *LockClient { + return &LockClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `lock.Hooks(f(g(h())))`. +func (c *LockClient) Use(hooks ...Hook) { + c.hooks.Lock = append(c.hooks.Lock, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `lock.Intercept(f(g(h())))`. +func (c *LockClient) Intercept(interceptors ...Interceptor) { + c.inters.Lock = append(c.inters.Lock, interceptors...) +} + +// Create returns a builder for creating a Lock entity. 
+func (c *LockClient) Create() *LockCreate { + mutation := newLockMutation(c.config, OpCreate) + return &LockCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Lock entities. +func (c *LockClient) CreateBulk(builders ...*LockCreate) *LockCreateBulk { + return &LockCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *LockClient) MapCreateBulk(slice any, setFunc func(*LockCreate, int)) *LockCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &LockCreateBulk{err: fmt.Errorf("calling to LockClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*LockCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &LockCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Lock. +func (c *LockClient) Update() *LockUpdate { + mutation := newLockMutation(c.config, OpUpdate) + return &LockUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *LockClient) UpdateOne(l *Lock) *LockUpdateOne { + mutation := newLockMutation(c.config, OpUpdateOne, withLock(l)) + return &LockUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *LockClient) UpdateOneID(id int) *LockUpdateOne { + mutation := newLockMutation(c.config, OpUpdateOne, withLockID(id)) + return &LockUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Lock. 
+func (c *LockClient) Delete() *LockDelete { + mutation := newLockMutation(c.config, OpDelete) + return &LockDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *LockClient) DeleteOne(l *Lock) *LockDeleteOne { + return c.DeleteOneID(l.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *LockClient) DeleteOneID(id int) *LockDeleteOne { + builder := c.Delete().Where(lock.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &LockDeleteOne{builder} +} + +// Query returns a query builder for Lock. +func (c *LockClient) Query() *LockQuery { + return &LockQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeLock}, + inters: c.Interceptors(), + } +} + +// Get returns a Lock entity by its id. +func (c *LockClient) Get(ctx context.Context, id int) (*Lock, error) { + return c.Query().Where(lock.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *LockClient) GetX(ctx context.Context, id int) *Lock { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *LockClient) Hooks() []Hook { + return c.hooks.Lock +} + +// Interceptors returns the client interceptors. 
+func (c *LockClient) Interceptors() []Interceptor { + return c.inters.Lock +} + +func (c *LockClient) mutate(ctx context.Context, m *LockMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&LockCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&LockUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&LockUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&LockDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Lock mutation op: %q", m.Op()) + } +} + // MachineClient is a client for the Machine schema. type MachineClient struct { config @@ -1310,9 +1453,10 @@ func (c *MetaClient) mutate(ctx context.Context, m *MetaMutation) (Value, error) // hooks and interceptors per client, for fast access. type ( hooks struct { - Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Hook + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta []ent.Hook } inters struct { - Alert, Bouncer, ConfigItem, Decision, Event, Machine, Meta []ent.Interceptor + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, + Meta []ent.Interceptor } ) diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index 393ce9f1869..cb98ee9301c 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" ) @@ -84,6 +85,7 @@ func checkColumn(table, column string) error { configitem.Table: configitem.ValidColumn, decision.Table: decision.ValidColumn, 
event.Table: event.ValidColumn, + lock.Table: lock.ValidColumn, machine.Table: machine.ValidColumn, meta.Table: meta.ValidColumn, }) diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go index 7ec9c3ab1d8..fdc31539679 100644 --- a/pkg/database/ent/hook/hook.go +++ b/pkg/database/ent/hook/hook.go @@ -69,6 +69,18 @@ func (f EventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.EventMutation", m) } +// The LockFunc type is an adapter to allow the use of ordinary +// function as Lock mutator. +type LockFunc func(context.Context, *ent.LockMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f LockFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.LockMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.LockMutation", m) +} + // The MachineFunc type is an adapter to allow the use of ordinary // function as Machine mutator. type MachineFunc func(context.Context, *ent.MachineMutation) (ent.Value, error) diff --git a/pkg/database/ent/lock.go b/pkg/database/ent/lock.go new file mode 100644 index 00000000000..85556a30644 --- /dev/null +++ b/pkg/database/ent/lock.go @@ -0,0 +1,117 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" +) + +// Lock is the model entity for the Lock schema. +type Lock struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Lock) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case lock.FieldID: + values[i] = new(sql.NullInt64) + case lock.FieldName: + values[i] = new(sql.NullString) + case lock.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Lock fields. +func (l *Lock) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case lock.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + l.ID = int(value.Int64) + case lock.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + l.Name = value.String + } + case lock.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + l.CreatedAt = value.Time + } + default: + l.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Lock. +// This includes values selected through modifiers, order, etc. +func (l *Lock) Value(name string) (ent.Value, error) { + return l.selectValues.Get(name) +} + +// Update returns a builder for updating this Lock. +// Note that you need to call Lock.Unwrap() before calling this method if this Lock +// was returned from a transaction, and the transaction was committed or rolled back. 
+func (l *Lock) Update() *LockUpdateOne { + return NewLockClient(l.config).UpdateOne(l) +} + +// Unwrap unwraps the Lock entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (l *Lock) Unwrap() *Lock { + _tx, ok := l.config.driver.(*txDriver) + if !ok { + panic("ent: Lock is not a transactional entity") + } + l.config.driver = _tx.drv + return l +} + +// String implements the fmt.Stringer. +func (l *Lock) String() string { + var builder strings.Builder + builder.WriteString("Lock(") + builder.WriteString(fmt.Sprintf("id=%v, ", l.ID)) + builder.WriteString("name=") + builder.WriteString(l.Name) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(l.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Locks is a parsable slice of Lock. +type Locks []*Lock diff --git a/pkg/database/ent/lock/lock.go b/pkg/database/ent/lock/lock.go new file mode 100644 index 00000000000..d0143470a75 --- /dev/null +++ b/pkg/database/ent/lock/lock.go @@ -0,0 +1,62 @@ +// Code generated by ent, DO NOT EDIT. + +package lock + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the lock type in the database. + Label = "lock" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // Table holds the table name of the lock in the database. + Table = "locks" +) + +// Columns holds all SQL columns for lock fields. +var Columns = []string{ + FieldID, + FieldName, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Lock queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} diff --git a/pkg/database/ent/lock/where.go b/pkg/database/ent/lock/where.go new file mode 100644 index 00000000000..cf59362d203 --- /dev/null +++ b/pkg/database/ent/lock/where.go @@ -0,0 +1,185 @@ +// Code generated by ent, DO NOT EDIT. + +package lock + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Lock { + return predicate.Lock(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Lock { + return predicate.Lock(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
+func IDNotIn(ids ...int) predicate.Lock { + return predicate.Lock(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Lock { + return predicate.Lock(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Lock { + return predicate.Lock(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Lock { + return predicate.Lock(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Lock { + return predicate.Lock(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldName, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldCreatedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Lock { + return predicate.Lock(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Lock { + return predicate.Lock(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Lock { + return predicate.Lock(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Lock { + return predicate.Lock(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. 
+func NameGTE(v string) predicate.Lock { + return predicate.Lock(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Lock { + return predicate.Lock(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Lock { + return predicate.Lock(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Lock { + return predicate.Lock(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Lock { + return predicate.Lock(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Lock { + return predicate.Lock(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Lock { + return predicate.Lock(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Lock { + return predicate.Lock(sql.FieldContainsFold(FieldName, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Lock { + return predicate.Lock(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
+func CreatedAtNotIn(vs ...time.Time) predicate.Lock { + return predicate.Lock(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Lock { + return predicate.Lock(sql.FieldLTE(FieldCreatedAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Lock) predicate.Lock { + return predicate.Lock(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Lock) predicate.Lock { + return predicate.Lock(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Lock) predicate.Lock { + return predicate.Lock(sql.NotPredicates(p)) +} diff --git a/pkg/database/ent/lock_create.go b/pkg/database/ent/lock_create.go new file mode 100644 index 00000000000..e2c29c88324 --- /dev/null +++ b/pkg/database/ent/lock_create.go @@ -0,0 +1,215 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" +) + +// LockCreate is the builder for creating a Lock entity. +type LockCreate struct { + config + mutation *LockMutation + hooks []Hook +} + +// SetName sets the "name" field. 
+func (lc *LockCreate) SetName(s string) *LockCreate { + lc.mutation.SetName(s) + return lc +} + +// SetCreatedAt sets the "created_at" field. +func (lc *LockCreate) SetCreatedAt(t time.Time) *LockCreate { + lc.mutation.SetCreatedAt(t) + return lc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (lc *LockCreate) SetNillableCreatedAt(t *time.Time) *LockCreate { + if t != nil { + lc.SetCreatedAt(*t) + } + return lc +} + +// Mutation returns the LockMutation object of the builder. +func (lc *LockCreate) Mutation() *LockMutation { + return lc.mutation +} + +// Save creates the Lock in the database. +func (lc *LockCreate) Save(ctx context.Context) (*Lock, error) { + lc.defaults() + return withHooks(ctx, lc.sqlSave, lc.mutation, lc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (lc *LockCreate) SaveX(ctx context.Context) *Lock { + v, err := lc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (lc *LockCreate) Exec(ctx context.Context) error { + _, err := lc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (lc *LockCreate) ExecX(ctx context.Context) { + if err := lc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (lc *LockCreate) defaults() { + if _, ok := lc.mutation.CreatedAt(); !ok { + v := lock.DefaultCreatedAt() + lc.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (lc *LockCreate) check() error { + if _, ok := lc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Lock.name"`)} + } + if _, ok := lc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Lock.created_at"`)} + } + return nil +} + +func (lc *LockCreate) sqlSave(ctx context.Context) (*Lock, error) { + if err := lc.check(); err != nil { + return nil, err + } + _node, _spec := lc.createSpec() + if err := sqlgraph.CreateNode(ctx, lc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + lc.mutation.id = &_node.ID + lc.mutation.done = true + return _node, nil +} + +func (lc *LockCreate) createSpec() (*Lock, *sqlgraph.CreateSpec) { + var ( + _node = &Lock{config: lc.config} + _spec = sqlgraph.NewCreateSpec(lock.Table, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + ) + if value, ok := lc.mutation.Name(); ok { + _spec.SetField(lock.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := lc.mutation.CreatedAt(); ok { + _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + return _node, _spec +} + +// LockCreateBulk is the builder for creating many Lock entities in bulk. +type LockCreateBulk struct { + config + err error + builders []*LockCreate +} + +// Save creates the Lock entities in the database. 
+func (lcb *LockCreateBulk) Save(ctx context.Context) ([]*Lock, error) { + if lcb.err != nil { + return nil, lcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(lcb.builders)) + nodes := make([]*Lock, len(lcb.builders)) + mutators := make([]Mutator, len(lcb.builders)) + for i := range lcb.builders { + func(i int, root context.Context) { + builder := lcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*LockMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, lcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, lcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, lcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (lcb *LockCreateBulk) SaveX(ctx context.Context) []*Lock { + v, err := lcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (lcb *LockCreateBulk) Exec(ctx context.Context) error { + _, err := lcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (lcb *LockCreateBulk) ExecX(ctx context.Context) { + if err := lcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/lock_delete.go b/pkg/database/ent/lock_delete.go new file mode 100644 index 00000000000..2275c608f75 --- /dev/null +++ b/pkg/database/ent/lock_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// LockDelete is the builder for deleting a Lock entity. +type LockDelete struct { + config + hooks []Hook + mutation *LockMutation +} + +// Where appends a list predicates to the LockDelete builder. +func (ld *LockDelete) Where(ps ...predicate.Lock) *LockDelete { + ld.mutation.Where(ps...) + return ld +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ld *LockDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ld.sqlExec, ld.mutation, ld.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ld *LockDelete) ExecX(ctx context.Context) int { + n, err := ld.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ld *LockDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(lock.Table, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + if ps := ld.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ld.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ld.mutation.done = true + return affected, err +} + +// LockDeleteOne is the builder for deleting a single Lock entity. +type LockDeleteOne struct { + ld *LockDelete +} + +// Where appends a list predicates to the LockDelete builder. +func (ldo *LockDeleteOne) Where(ps ...predicate.Lock) *LockDeleteOne { + ldo.ld.mutation.Where(ps...) + return ldo +} + +// Exec executes the deletion query. +func (ldo *LockDeleteOne) Exec(ctx context.Context) error { + n, err := ldo.ld.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{lock.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ldo *LockDeleteOne) ExecX(ctx context.Context) { + if err := ldo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/lock_query.go b/pkg/database/ent/lock_query.go new file mode 100644 index 00000000000..75e5da48a94 --- /dev/null +++ b/pkg/database/ent/lock_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// LockQuery is the builder for querying Lock entities. 
+type LockQuery struct { + config + ctx *QueryContext + order []lock.OrderOption + inters []Interceptor + predicates []predicate.Lock + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the LockQuery builder. +func (lq *LockQuery) Where(ps ...predicate.Lock) *LockQuery { + lq.predicates = append(lq.predicates, ps...) + return lq +} + +// Limit the number of records to be returned by this query. +func (lq *LockQuery) Limit(limit int) *LockQuery { + lq.ctx.Limit = &limit + return lq +} + +// Offset to start from. +func (lq *LockQuery) Offset(offset int) *LockQuery { + lq.ctx.Offset = &offset + return lq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (lq *LockQuery) Unique(unique bool) *LockQuery { + lq.ctx.Unique = &unique + return lq +} + +// Order specifies how the records should be ordered. +func (lq *LockQuery) Order(o ...lock.OrderOption) *LockQuery { + lq.order = append(lq.order, o...) + return lq +} + +// First returns the first Lock entity from the query. +// Returns a *NotFoundError when no Lock was found. +func (lq *LockQuery) First(ctx context.Context) (*Lock, error) { + nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{lock.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (lq *LockQuery) FirstX(ctx context.Context) *Lock { + node, err := lq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Lock ID from the query. +// Returns a *NotFoundError when no Lock ID was found. 
+func (lq *LockQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{lock.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (lq *LockQuery) FirstIDX(ctx context.Context) int { + id, err := lq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Lock entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Lock entity is found. +// Returns a *NotFoundError when no Lock entities are found. +func (lq *LockQuery) Only(ctx context.Context) (*Lock, error) { + nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{lock.Label} + default: + return nil, &NotSingularError{lock.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (lq *LockQuery) OnlyX(ctx context.Context) *Lock { + node, err := lq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Lock ID in the query. +// Returns a *NotSingularError when more than one Lock ID is found. +// Returns a *NotFoundError when no entities are found. +func (lq *LockQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{lock.Label} + default: + err = &NotSingularError{lock.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
+func (lq *LockQuery) OnlyIDX(ctx context.Context) int { + id, err := lq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Locks. +func (lq *LockQuery) All(ctx context.Context) ([]*Lock, error) { + ctx = setContextOp(ctx, lq.ctx, "All") + if err := lq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Lock, *LockQuery]() + return withInterceptors[[]*Lock](ctx, lq, qr, lq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (lq *LockQuery) AllX(ctx context.Context) []*Lock { + nodes, err := lq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Lock IDs. +func (lq *LockQuery) IDs(ctx context.Context) (ids []int, err error) { + if lq.ctx.Unique == nil && lq.path != nil { + lq.Unique(true) + } + ctx = setContextOp(ctx, lq.ctx, "IDs") + if err = lq.Select(lock.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (lq *LockQuery) IDsX(ctx context.Context) []int { + ids, err := lq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (lq *LockQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, lq.ctx, "Count") + if err := lq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, lq, querierCount[*LockQuery](), lq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (lq *LockQuery) CountX(ctx context.Context) int { + count, err := lq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (lq *LockQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, lq.ctx, "Exist") + switch _, err := lq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (lq *LockQuery) ExistX(ctx context.Context) bool { + exist, err := lq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the LockQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (lq *LockQuery) Clone() *LockQuery { + if lq == nil { + return nil + } + return &LockQuery{ + config: lq.config, + ctx: lq.ctx.Clone(), + order: append([]lock.OrderOption{}, lq.order...), + inters: append([]Interceptor{}, lq.inters...), + predicates: append([]predicate.Lock{}, lq.predicates...), + // clone intermediate query. + sql: lq.sql.Clone(), + path: lq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name"` +// Count int `json:"count,omitempty"` +// } +// +// client.Lock.Query(). +// GroupBy(lock.FieldName). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (lq *LockQuery) GroupBy(field string, fields ...string) *LockGroupBy { + lq.ctx.Fields = append([]string{field}, fields...) + grbuild := &LockGroupBy{build: lq} + grbuild.flds = &lq.ctx.Fields + grbuild.label = lock.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name"` +// } +// +// client.Lock.Query(). +// Select(lock.FieldName). 
+// Scan(ctx, &v) +func (lq *LockQuery) Select(fields ...string) *LockSelect { + lq.ctx.Fields = append(lq.ctx.Fields, fields...) + sbuild := &LockSelect{LockQuery: lq} + sbuild.label = lock.Label + sbuild.flds, sbuild.scan = &lq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a LockSelect configured with the given aggregations. +func (lq *LockQuery) Aggregate(fns ...AggregateFunc) *LockSelect { + return lq.Select().Aggregate(fns...) +} + +func (lq *LockQuery) prepareQuery(ctx context.Context) error { + for _, inter := range lq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, lq); err != nil { + return err + } + } + } + for _, f := range lq.ctx.Fields { + if !lock.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if lq.path != nil { + prev, err := lq.path(ctx) + if err != nil { + return err + } + lq.sql = prev + } + return nil +} + +func (lq *LockQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Lock, error) { + var ( + nodes = []*Lock{} + _spec = lq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Lock).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Lock{config: lq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, lq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (lq *LockQuery) sqlCount(ctx context.Context) (int, error) { + _spec := lq.querySpec() + _spec.Node.Columns = lq.ctx.Fields + if len(lq.ctx.Fields) > 0 { + _spec.Unique = lq.ctx.Unique != nil && *lq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, lq.driver, _spec) +} + +func (lq 
*LockQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(lock.Table, lock.Columns, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + _spec.From = lq.sql + if unique := lq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if lq.path != nil { + _spec.Unique = true + } + if fields := lq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, lock.FieldID) + for i := range fields { + if fields[i] != lock.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := lq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := lq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := lq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := lq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (lq *LockQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(lq.driver.Dialect()) + t1 := builder.Table(lock.Table) + columns := lq.ctx.Fields + if len(columns) == 0 { + columns = lock.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if lq.sql != nil { + selector = lq.sql + selector.Select(selector.Columns(columns...)...) + } + if lq.ctx.Unique != nil && *lq.ctx.Unique { + selector.Distinct() + } + for _, p := range lq.predicates { + p(selector) + } + for _, p := range lq.order { + p(selector) + } + if offset := lq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := lq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// LockGroupBy is the group-by builder for Lock entities. 
+type LockGroupBy struct { + selector + build *LockQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (lgb *LockGroupBy) Aggregate(fns ...AggregateFunc) *LockGroupBy { + lgb.fns = append(lgb.fns, fns...) + return lgb +} + +// Scan applies the selector query and scans the result into the given value. +func (lgb *LockGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, lgb.build.ctx, "GroupBy") + if err := lgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*LockQuery, *LockGroupBy](ctx, lgb.build, lgb, lgb.build.inters, v) +} + +func (lgb *LockGroupBy) sqlScan(ctx context.Context, root *LockQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(lgb.fns)) + for _, fn := range lgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*lgb.flds)+len(lgb.fns)) + for _, f := range *lgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*lgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := lgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// LockSelect is the builder for selecting fields of Lock entities. +type LockSelect struct { + *LockQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ls *LockSelect) Aggregate(fns ...AggregateFunc) *LockSelect { + ls.fns = append(ls.fns, fns...) + return ls +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ls *LockSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ls.ctx, "Select") + if err := ls.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*LockQuery, *LockSelect](ctx, ls.LockQuery, ls, ls.inters, v) +} + +func (ls *LockSelect) sqlScan(ctx context.Context, root *LockQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ls.fns)) + for _, fn := range ls.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ls.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ls.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go new file mode 100644 index 00000000000..f4deda6e3a8 --- /dev/null +++ b/pkg/database/ent/lock_update.go @@ -0,0 +1,228 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// LockUpdate is the builder for updating Lock entities. +type LockUpdate struct { + config + hooks []Hook + mutation *LockMutation +} + +// Where appends a list predicates to the LockUpdate builder. +func (lu *LockUpdate) Where(ps ...predicate.Lock) *LockUpdate { + lu.mutation.Where(ps...) + return lu +} + +// SetName sets the "name" field. +func (lu *LockUpdate) SetName(s string) *LockUpdate { + lu.mutation.SetName(s) + return lu +} + +// SetCreatedAt sets the "created_at" field. 
+func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { + lu.mutation.SetCreatedAt(t) + return lu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (lu *LockUpdate) SetNillableCreatedAt(t *time.Time) *LockUpdate { + if t != nil { + lu.SetCreatedAt(*t) + } + return lu +} + +// Mutation returns the LockMutation object of the builder. +func (lu *LockUpdate) Mutation() *LockMutation { + return lu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (lu *LockUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, lu.sqlSave, lu.mutation, lu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (lu *LockUpdate) SaveX(ctx context.Context) int { + affected, err := lu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (lu *LockUpdate) Exec(ctx context.Context) error { + _, err := lu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (lu *LockUpdate) ExecX(ctx context.Context) { + if err := lu.Exec(ctx); err != nil { + panic(err) + } +} + +func (lu *LockUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(lock.Table, lock.Columns, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + if ps := lu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := lu.mutation.Name(); ok { + _spec.SetField(lock.FieldName, field.TypeString, value) + } + if value, ok := lu.mutation.CreatedAt(); ok { + _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, lu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{lock.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + lu.mutation.done = true + return n, nil +} + +// LockUpdateOne is the builder for updating a single Lock entity. +type LockUpdateOne struct { + config + fields []string + hooks []Hook + mutation *LockMutation +} + +// SetName sets the "name" field. +func (luo *LockUpdateOne) SetName(s string) *LockUpdateOne { + luo.mutation.SetName(s) + return luo +} + +// SetCreatedAt sets the "created_at" field. +func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { + luo.mutation.SetCreatedAt(t) + return luo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (luo *LockUpdateOne) SetNillableCreatedAt(t *time.Time) *LockUpdateOne { + if t != nil { + luo.SetCreatedAt(*t) + } + return luo +} + +// Mutation returns the LockMutation object of the builder. +func (luo *LockUpdateOne) Mutation() *LockMutation { + return luo.mutation +} + +// Where appends a list predicates to the LockUpdate builder. +func (luo *LockUpdateOne) Where(ps ...predicate.Lock) *LockUpdateOne { + luo.mutation.Where(ps...) 
+ return luo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (luo *LockUpdateOne) Select(field string, fields ...string) *LockUpdateOne { + luo.fields = append([]string{field}, fields...) + return luo +} + +// Save executes the query and returns the updated Lock entity. +func (luo *LockUpdateOne) Save(ctx context.Context) (*Lock, error) { + return withHooks(ctx, luo.sqlSave, luo.mutation, luo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (luo *LockUpdateOne) SaveX(ctx context.Context) *Lock { + node, err := luo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (luo *LockUpdateOne) Exec(ctx context.Context) error { + _, err := luo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (luo *LockUpdateOne) ExecX(ctx context.Context) { + if err := luo.Exec(ctx); err != nil { + panic(err) + } +} + +func (luo *LockUpdateOne) sqlSave(ctx context.Context) (_node *Lock, err error) { + _spec := sqlgraph.NewUpdateSpec(lock.Table, lock.Columns, sqlgraph.NewFieldSpec(lock.FieldID, field.TypeInt)) + id, ok := luo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Lock.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := luo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, lock.FieldID) + for _, f := range fields { + if !lock.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != lock.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := luo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := 
luo.mutation.Name(); ok { + _spec.SetField(lock.FieldName, field.TypeString, value) + } + if value, ok := luo.mutation.CreatedAt(); ok { + _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) + } + _node = &Lock{config: luo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, luo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{lock.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + luo.mutation.done = true + return _node, nil +} diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 375fd4e784a..c3ffed42239 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -178,6 +178,18 @@ var ( }, }, } + // LocksColumns holds the columns for the "locks" table. + LocksColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString, Unique: true}, + {Name: "created_at", Type: field.TypeTime}, + } + // LocksTable holds the schema information for the "locks" table. + LocksTable = &schema.Table{ + Name: "locks", + Columns: LocksColumns, + PrimaryKey: []*schema.Column{LocksColumns[0]}, + } // MachinesColumns holds the columns for the "machines" table. 
MachinesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, @@ -237,6 +249,7 @@ var ( ConfigItemsTable, DecisionsTable, EventsTable, + LocksTable, MachinesTable, MetaTable, } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index c5808d0d9b8..365824de739 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -16,6 +16,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" @@ -35,6 +36,7 @@ const ( TypeConfigItem = "ConfigItem" TypeDecision = "Decision" TypeEvent = "Event" + TypeLock = "Lock" TypeMachine = "Machine" TypeMeta = "Meta" ) @@ -6165,6 +6167,386 @@ func (m *EventMutation) ResetEdge(name string) error { return fmt.Errorf("unknown Event edge %s", name) } +// LockMutation represents an operation that mutates the Lock nodes in the graph. +type LockMutation struct { + config + op Op + typ string + id *int + name *string + created_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Lock, error) + predicates []predicate.Lock +} + +var _ ent.Mutation = (*LockMutation)(nil) + +// lockOption allows management of the mutation configuration using functional options. +type lockOption func(*LockMutation) + +// newLockMutation creates new mutation for the Lock entity. +func newLockMutation(c config, op Op, opts ...lockOption) *LockMutation { + m := &LockMutation{ + config: c, + op: op, + typ: TypeLock, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withLockID sets the ID field of the mutation. 
+func withLockID(id int) lockOption { + return func(m *LockMutation) { + var ( + err error + once sync.Once + value *Lock + ) + m.oldValue = func(ctx context.Context) (*Lock, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Lock.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withLock sets the old Lock of the mutation. +func withLock(node *Lock) lockOption { + return func(m *LockMutation) { + m.oldValue = func(context.Context) (*Lock, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m LockMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m LockMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *LockMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *LockMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Lock.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetName sets the "name" field. +func (m *LockMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *LockMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Lock entity. +// If the Lock object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *LockMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *LockMutation) ResetName() { + m.name = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *LockMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *LockMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Lock entity. 
+// If the Lock object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *LockMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *LockMutation) ResetCreatedAt() { + m.created_at = nil +} + +// Where appends a list predicates to the LockMutation builder. +func (m *LockMutation) Where(ps ...predicate.Lock) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the LockMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *LockMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Lock, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *LockMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *LockMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Lock). +func (m *LockMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *LockMutation) Fields() []string { + fields := make([]string, 0, 2) + if m.name != nil { + fields = append(fields, lock.FieldName) + } + if m.created_at != nil { + fields = append(fields, lock.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *LockMutation) Field(name string) (ent.Value, bool) { + switch name { + case lock.FieldName: + return m.Name() + case lock.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *LockMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case lock.FieldName: + return m.OldName(ctx) + case lock.FieldCreatedAt: + return m.OldCreatedAt(ctx) + } + return nil, fmt.Errorf("unknown Lock field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *LockMutation) SetField(name string, value ent.Value) error { + switch name { + case lock.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case lock.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown Lock field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *LockMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. 
The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *LockMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *LockMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Lock numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *LockMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *LockMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *LockMutation) ClearField(name string) error { + return fmt.Errorf("unknown Lock nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *LockMutation) ResetField(name string) error { + switch name { + case lock.FieldName: + m.ResetName() + return nil + case lock.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown Lock field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *LockMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *LockMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *LockMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *LockMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *LockMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *LockMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *LockMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Lock unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *LockMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Lock edge %s", name) +} + // MachineMutation represents an operation that mutates the Machine nodes in the graph. type MachineMutation struct { config diff --git a/pkg/database/ent/predicate/predicate.go b/pkg/database/ent/predicate/predicate.go index e95abcec343..ad2e6d3f327 100644 --- a/pkg/database/ent/predicate/predicate.go +++ b/pkg/database/ent/predicate/predicate.go @@ -21,6 +21,9 @@ type Decision func(*sql.Selector) // Event is the predicate function for event builders. type Event func(*sql.Selector) +// Lock is the predicate function for lock builders. +type Lock func(*sql.Selector) + // Machine is the predicate function for machine builders. 
type Machine func(*sql.Selector) diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index bceea37b3a7..87073074563 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -10,6 +10,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" @@ -137,6 +138,12 @@ func init() { eventDescSerialized := eventFields[3].Descriptor() // event.SerializedValidator is a validator for the "serialized" field. It is called by the builders before save. event.SerializedValidator = eventDescSerialized.Validators[0].(func(string) error) + lockFields := schema.Lock{}.Fields() + _ = lockFields + // lockDescCreatedAt is the schema descriptor for created_at field. + lockDescCreatedAt := lockFields[1].Descriptor() + // lock.DefaultCreatedAt holds the default value on creation for the created_at field. + lock.DefaultCreatedAt = lockDescCreatedAt.Default.(func() time.Time) machineFields := schema.Machine{}.Fields() _ = machineFields // machineDescCreatedAt is the schema descriptor for created_at field. 
diff --git a/pkg/database/ent/schema/lock.go b/pkg/database/ent/schema/lock.go new file mode 100644 index 00000000000..de87efff3f7 --- /dev/null +++ b/pkg/database/ent/schema/lock.go @@ -0,0 +1,22 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +type Lock struct { + ent.Schema +} + +func (Lock) Fields() []ent.Field { + return []ent.Field{ + field.String("name").Unique().StructTag(`json:"name"`), + field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`), + } +} + +func (Lock) Edges() []ent.Edge { + return nil +} diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go index 65c2ed00a44..27b39c12502 100644 --- a/pkg/database/ent/tx.go +++ b/pkg/database/ent/tx.go @@ -22,6 +22,8 @@ type Tx struct { Decision *DecisionClient // Event is the client for interacting with the Event builders. Event *EventClient + // Lock is the client for interacting with the Lock builders. + Lock *LockClient // Machine is the client for interacting with the Machine builders. Machine *MachineClient // Meta is the client for interacting with the Meta builders. 
@@ -162,6 +164,7 @@ func (tx *Tx) init() { tx.ConfigItem = NewConfigItemClient(tx.config) tx.Decision = NewDecisionClient(tx.config) tx.Event = NewEventClient(tx.config) + tx.Lock = NewLockClient(tx.config) tx.Machine = NewMachineClient(tx.config) tx.Meta = NewMetaClient(tx.config) } diff --git a/pkg/database/lock.go b/pkg/database/lock.go new file mode 100644 index 00000000000..339226e8592 --- /dev/null +++ b/pkg/database/lock.go @@ -0,0 +1,67 @@ +package database + +import ( + "time" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +const ( + CAPIPullLockTimeout = 120 +) + +func (c *Client) AcquireLock(name string) error { + _, err := c.Ent.Lock.Create(). + SetName(name). + SetCreatedAt(types.UtcNow()). + Save(c.CTX) + if ent.IsConstraintError(err) { + return err + } + if err != nil { + return errors.Wrapf(InsertFail, "insert lock: %s", err) + } + return nil +} + +func (c *Client) ReleaseLock(name string) error { + _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(c.CTX) + if err != nil { + return errors.Wrapf(DeleteFail, "delete lock: %s", err) + } + return nil +} + +func (c *Client) ReleaseLockWithTimeout(name string, timeout int) error { + log.Debugf("(%s) releasing orphin locks", name) + _, err := c.Ent.Lock.Delete().Where( + lock.NameEQ(name), + lock.CreatedAtLT(time.Now().Add(-time.Duration(timeout)*time.Minute)), + ).Exec(c.CTX) + if err != nil { + return errors.Wrapf(DeleteFail, "delete lock: %s", err) + } + return nil +} + +func (c *Client) IsLocked(err error) bool { + return ent.IsConstraintError(err) +} + +func (c *Client) AcquirePullCAPILock() error { + lockName := "pullCAPI" + err := c.ReleaseLockWithTimeout(lockName, CAPIPullLockTimeout) + if err != nil { + log.Errorf("unable to release pullCAPI lock: %s", err) + } + return c.AcquireLock(lockName) +} + +func 
(c *Client) ReleasePullCAPILock() error { + return c.ReleaseLockWithTimeout("pullCAPI", CAPIPullLockTimeout) +} From 717fc97ca039a2fdf2afbdd73b2a8b417b48c69e Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 14 Feb 2024 13:38:40 +0100 Subject: [PATCH 030/581] add SetMeta and SetParsed helpers (#2845) * add SetMeta and SetParsed helpers --- pkg/types/event.go | 16 ++++++++ pkg/types/event_test.go | 82 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) diff --git a/pkg/types/event.go b/pkg/types/event.go index 074241918d8..c7b19fe3ca4 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -46,6 +46,22 @@ type Event struct { Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` } +func (e *Event) SetMeta(key string, value string) bool { + if e.Meta == nil { + e.Meta = make(map[string]string) + } + e.Meta[key] = value + return true +} + +func (e *Event) SetParsed(key string, value string) bool { + if e.Parsed == nil { + e.Parsed = make(map[string]string) + } + e.Parsed[key] = value + return true +} + func (e *Event) GetType() string { if e.Type == OVFLW { return "overflow" diff --git a/pkg/types/event_test.go b/pkg/types/event_test.go index 14ca48cd2a8..a2fad9ebcc7 100644 --- a/pkg/types/event_test.go +++ b/pkg/types/event_test.go @@ -9,6 +9,88 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) +func TestSetParsed(t *testing.T) { + tests := []struct { + name string + evt *Event + key string + value string + expected bool + }{ + { + name: "SetParsed: Valid", + evt: &Event{}, + key: "test", + value: "test", + expected: true, + }, + { + name: "SetParsed: Existing map", + evt: &Event{Parsed: map[string]string{}}, + key: "test", + value: "test", + expected: true, + }, + { + name: "SetParsed: Existing map+key", + evt: &Event{Parsed: map[string]string{"test": "foobar"}}, + key: "test", + value: "test", + expected: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t 
*testing.T) { + tt.evt.SetParsed(tt.key, tt.value) + assert.Equal(t, tt.value, tt.evt.Parsed[tt.key]) + }) + } + +} + +func TestSetMeta(t *testing.T) { + tests := []struct { + name string + evt *Event + key string + value string + expected bool + }{ + { + name: "SetMeta: Valid", + evt: &Event{}, + key: "test", + value: "test", + expected: true, + }, + { + name: "SetMeta: Existing map", + evt: &Event{Meta: map[string]string{}}, + key: "test", + value: "test", + expected: true, + }, + { + name: "SetMeta: Existing map+key", + evt: &Event{Meta: map[string]string{"test": "foobar"}}, + key: "test", + value: "test", + expected: true, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + tt.evt.SetMeta(tt.key, tt.value) + assert.Equal(t, tt.value, tt.evt.GetMeta(tt.key)) + }) + } + +} + func TestParseIPSources(t *testing.T) { tests := []struct { name string From e976614645aba906a096f4bdf46e09709f71d096 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 15 Feb 2024 14:34:12 +0100 Subject: [PATCH 031/581] cscli metrics: rename buckets -> scenarios (#2848) * cscli metrics: rename buckets -> scenarios * update lint configuration * lint --- .golangci.yml | 6 +++++- cmd/crowdsec-cli/metrics.go | 20 ++++++++++++-------- cmd/crowdsec-cli/metrics_table.go | 18 ++++++++++++++---- pkg/exprhelpers/exprlib_test.go | 4 ++-- pkg/parser/README.md | 2 +- pkg/setup/README.md | 2 +- 6 files changed, 35 insertions(+), 17 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e605ac079d4..29332447b61 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -73,6 +73,10 @@ linters-settings: - pkg: "github.com/pkg/errors" desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" + wsl: + # Allow blocks to end with comments + allow-trailing-comment: true + linters: enable-all: true disable: @@ -105,6 +109,7 @@ linters: # - durationcheck # check for two durations multiplied together # - errcheck # Errcheck is a program 
for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + # - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds # - exportloopref # checks for pointers to enclosing loop variables # - funlen # Tool for detection of long functions # - ginkgolinter # enforces standards of using ginkgo and gomega @@ -203,7 +208,6 @@ linters: # # Too strict / too many false positives (for now?) # - - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds - exhaustruct # Checks if all structure fields are initialized - forbidigo # Forbids identifiers - gochecknoglobals # check that no global variables exist diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 6e23bcf12e4..0f92343868d 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -44,9 +44,8 @@ type ( ) var ( - ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") + ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") - ) type metricSection interface { @@ -59,7 +58,7 @@ type metricStore map[string]metricSection func NewMetricStore() metricStore { return metricStore{ "acquisition": statAcquis{}, - "buckets": statBucket{}, + "scenarios": statBucket{}, "parsers": statParser{}, "lapi": statLapi{}, "lapi-machine": statLapiMachine{}, @@ -110,7 +109,7 @@ func (ms metricStore) Fetch(url string) error { mAcquis := ms["acquisition"].(statAcquis) mParser := ms["parsers"].(statParser) - mBucket := ms["buckets"].(statBucket) + mBucket := ms["scenarios"].(statBucket) mLapi := ms["lapi"].(statLapi) 
mLapiMachine := ms["lapi-machine"].(statLapiMachine) mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) @@ -361,7 +360,7 @@ cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers cscli metrics list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return cli.show(nil, url, noUnit) }, } @@ -383,7 +382,7 @@ func (cli *cliMetrics) expandSectionGroups(args []string) []string { for _, section := range args { switch section { case "engine": - ret = append(ret, "acquisition", "parsers", "buckets", "stash", "whitelists") + ret = append(ret, "acquisition", "parsers", "scenarios", "stash", "whitelists") case "lapi": ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") case "appsec": @@ -413,10 +412,13 @@ cscli metrics show cscli metrics show engine # Show some specific metrics, show empty tables, connect to a different url -cscli metrics show acquisition parsers buckets stash --url http://lapi.local:6060/metrics +cscli metrics show acquisition parsers scenarios stash --url http://lapi.local:6060/metrics + +# To list available metric types, use "cscli metrics list" +cscli metrics list; cscli metrics list -o json # Show metrics in json format -cscli metrics show acquisition parsers buckets stash -o json`, +cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { @@ -467,12 +469,14 @@ func (cli *cliMetrics) list() error { if err != nil { return fmt.Errorf("failed to marshal metric types: %w", err) } + fmt.Println(string(x)) case "raw": x, err := yaml.Marshal(allMetrics) if err != nil { return fmt.Errorf("failed to marshal metric types: %w", err) } + fmt.Println(string(x)) } diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index da6ea3d9f1d..689929500ad 
100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "io" "sort" @@ -13,7 +14,7 @@ import ( ) // ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. -var ErrNilTable = fmt.Errorf("nil table") +var ErrNilTable = errors.New("nil table") func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count @@ -44,6 +45,7 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i } t.AddRow(row...) + numRows++ } } @@ -82,6 +84,7 @@ func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int } t.AddRow(row...) + numRows++ } } @@ -120,6 +123,7 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri } t.AddRow(row...) + numRows++ } @@ -127,7 +131,7 @@ func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []stri } func (s statBucket) Description() (string, string) { - return "Bucket Metrics", + return "Scenario Metrics", `Measure events in different scenarios. Current count is the number of buckets during metrics collection. 
` + `Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` } @@ -143,13 +147,13 @@ func (s statBucket) Process(bucket, metric string, val int) { func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { t := newTable(out) t.SetRowLines(false) - t.SetHeaders("Bucket", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") + t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting bucket stats: %s", err) + log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() renderTableTitle(out, "\n"+title+":") @@ -352,6 +356,7 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { strconv.Itoa(astats.Count), } t.AddRow(row...) + numRows++ } @@ -400,7 +405,9 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { sl, strconv.Itoa(astats[sl]), } + t.AddRow(row...) 
+ numRows++ } } @@ -515,6 +522,7 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { strconv.Itoa(hits.Empty), strconv.Itoa(hits.NonEmpty), ) + numRows++ } @@ -560,6 +568,7 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { action, strconv.Itoa(hits), ) + numRows++ } } @@ -594,6 +603,7 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { scenario, strconv.Itoa(hits), ) + numRows++ } diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 6b9cd15c73b..9d5a6556b25 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -200,7 +200,7 @@ func TestDistanceHelper(t *testing.T) { ret, err := expr.Run(vm, env) if test.valid { require.NoError(t, err) - assert.Equal(t, test.dist, ret) + assert.InDelta(t, test.dist, ret, 0.000001) } else { require.Error(t, err) } @@ -592,7 +592,7 @@ func TestAtof(t *testing.T) { require.NoError(t, err) output, err := expr.Run(program, test.env) require.NoError(t, err) - require.Equal(t, test.result, output) + require.InDelta(t, test.result, output, 0.000001) } } diff --git a/pkg/parser/README.md b/pkg/parser/README.md index 62a56e61820..0fcccc811e4 100644 --- a/pkg/parser/README.md +++ b/pkg/parser/README.md @@ -45,7 +45,7 @@ statics: > `filter: "Line.Src endsWith '/foobar'"` - - *optional* `filter` : an [expression](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md) that will be evaluated against the runtime of a line (`Event`) + - *optional* `filter` : an [expression](https://github.com/antonmedv/expr/blob/master/docs/language-definition.md) that will be evaluated against the runtime of a line (`Event`) - if the `filter` is present and returns false, node is not evaluated - if `filter` is absent or present and returns true, node is evaluated diff --git a/pkg/setup/README.md b/pkg/setup/README.md index 3585ee8b141..9cdc7243975 100644 --- a/pkg/setup/README.md +++ b/pkg/setup/README.md 
@@ -129,7 +129,7 @@ services: and must all return true for a service to be detected (implied *and* clause, no short-circuit). A missing or empty `when:` section is evaluated as true. The [expression -engine](https://github.com/antonmedv/expr/blob/master/docs/Language-Definition.md) +engine](https://github.com/antonmedv/expr/blob/master/docs/language-definition.md) is the same one used by CrowdSec parser filters. You can force the detection of a process by using the `cscli setup detect... --force-process ` flag. It will always behave as if `` was running. From f3ea88f64ce7a594830558c84bc6f196ddddc323 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Wed, 21 Feb 2024 13:40:38 +0000 Subject: [PATCH 032/581] Appsec unix socket (#2737) * Appsec socket * Patch detection of nil listenaddr * Allow TLS unix socket * Merge diff issue --- pkg/acquisition/modules/appsec/appsec.go | 55 ++++++++++++++++++------ 1 file changed, 42 insertions(+), 13 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 4e2ff0bd22b..a3c8c7dd8ee 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -4,7 +4,9 @@ import ( "context" "encoding/json" "fmt" + "net" "net/http" + "os" "sync" "time" @@ -34,6 +36,7 @@ var ( // configuration structure of the acquis for the application security engine type AppsecSourceConfig struct { ListenAddr string `yaml:"listen_addr"` + ListenSocket string `yaml:"listen_socket"` CertFilePath string `yaml:"cert_file"` KeyFilePath string `yaml:"key_file"` Path string `yaml:"path"` @@ -97,7 +100,7 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { return errors.Wrap(err, "Cannot parse appsec configuration") } - if w.config.ListenAddr == "" { + if w.config.ListenAddr == "" && w.config.ListenSocket == "" { w.config.ListenAddr = "127.0.0.1:7422" } @@ -123,7 +126,12 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { } if 
w.config.Name == "" { - w.config.Name = fmt.Sprintf("%s%s", w.config.ListenAddr, w.config.Path) + if w.config.ListenSocket != "" && w.config.ListenAddr == "" { + w.config.Name = w.config.ListenSocket + } + if w.config.ListenSocket == "" { + w.config.Name = fmt.Sprintf("%s%s", w.config.ListenAddr, w.config.Path) + } } csConfig := csconfig.GetConfig() @@ -251,23 +259,44 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) return runner.Run(t) }) } - - w.logger.Infof("Starting Appsec server on %s%s", w.config.ListenAddr, w.config.Path) t.Go(func() error { - var err error - if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { - err = w.server.ListenAndServeTLS(w.config.CertFilePath, w.config.KeyFilePath) - } else { - err = w.server.ListenAndServe() + if w.config.ListenSocket != "" { + w.logger.Infof("creating unix socket %s", w.config.ListenSocket) + _ = os.RemoveAll(w.config.ListenSocket) + listener, err := net.Listen("unix", w.config.ListenSocket) + if err != nil { + return errors.Wrap(err, "Appsec server failed") + } + defer listener.Close() + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { + err = w.server.ServeTLS(listener, w.config.CertFilePath, w.config.KeyFilePath) + } else { + err = w.server.Serve(listener) + } + if err != nil && err != http.ErrServerClosed { + return errors.Wrap(err, "Appsec server failed") + } } - - if err != nil && err != http.ErrServerClosed { - return errors.Wrap(err, "Appsec server failed") + return nil + }) + t.Go(func() error { + var err error + if w.config.ListenAddr != "" { + w.logger.Infof("creating TCP server on %s", w.config.ListenAddr) + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { + err = w.server.ListenAndServeTLS(w.config.CertFilePath, w.config.KeyFilePath) + } else { + err = w.server.ListenAndServe() + } + + if err != nil && err != http.ErrServerClosed { + return errors.Wrap(err, "Appsec server failed") + } } return nil }) <-t.Dying() - 
w.logger.Infof("Stopping Appsec server on %s%s", w.config.ListenAddr, w.config.Path) + w.logger.Info("Shutting down Appsec server") //xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) w.server.Shutdown(context.TODO()) From 3e3df5e4c6e6deb1ef36bb406e86a7ebc8c30f06 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 22 Feb 2024 11:04:36 +0100 Subject: [PATCH 033/581] refact "cscli config", remove flag "cscli restore --old-backup" (#2832) * refact "cscli config show" * refact "cscli config backup" * refact "cscli confgi show-yaml" * refact "cscli config restore" * refact "cscli config feature-flags" * cscli restore: remove 'old-backup' option * lint (whitespace, wrapped errors) --- cmd/crowdsec-cli/config.go | 26 ++-- cmd/crowdsec-cli/config_backup.go | 99 ++++++------- cmd/crowdsec-cli/config_feature_flags.go | 25 ++-- cmd/crowdsec-cli/config_restore.go | 175 ++++++++--------------- cmd/crowdsec-cli/config_show.go | 37 +++-- cmd/crowdsec-cli/config_showyaml.go | 12 +- cmd/crowdsec-cli/main.go | 2 +- 7 files changed, 167 insertions(+), 209 deletions(-) diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/config.go index e60246db790..e88845798e2 100644 --- a/cmd/crowdsec-cli/config.go +++ b/cmd/crowdsec-cli/config.go @@ -4,19 +4,29 @@ import ( "github.com/spf13/cobra" ) -func NewConfigCmd() *cobra.Command { - cmdConfig := &cobra.Command{ +type cliConfig struct { + cfg configGetter +} + +func NewCLIConfig(cfg configGetter) *cliConfig { + return &cliConfig{ + cfg: cfg, + } +} + +func (cli *cliConfig) NewCommand() *cobra.Command { + cmd := &cobra.Command{ Use: "config [command]", Short: "Allows to view current config", Args: cobra.ExactArgs(0), DisableAutoGenTag: true, } - cmdConfig.AddCommand(NewConfigShowCmd()) - cmdConfig.AddCommand(NewConfigShowYAMLCmd()) - cmdConfig.AddCommand(NewConfigBackupCmd()) - cmdConfig.AddCommand(NewConfigRestoreCmd()) - 
cmdConfig.AddCommand(NewConfigFeatureFlagsCmd()) + cmd.AddCommand(cli.newShowCmd()) + cmd.AddCommand(cli.newShowYAMLCmd()) + cmd.AddCommand(cli.newBackupCmd()) + cmd.AddCommand(cli.newRestoreCmd()) + cmd.AddCommand(cli.newFeatureFlagsCmd()) - return cmdConfig + return cmd } diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index 9414fa51033..d1e4a393555 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ b/cmd/crowdsec-cli/config_backup.go @@ -2,6 +2,7 @@ package main import ( "encoding/json" + "errors" "fmt" "os" "path/filepath" @@ -13,8 +14,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func backupHub(dirPath string) error { - hub, err := require.Hub(csConfig, nil, nil) +func (cli *cliConfig) backupHub(dirPath string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { return err } @@ -32,7 +33,7 @@ func backupHub(dirPath string) error { itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itemType) if err = os.MkdirAll(itemDirectory, os.ModePerm); err != nil { - return fmt.Errorf("error while creating %s : %s", itemDirectory, err) + return fmt.Errorf("error while creating %s: %w", itemDirectory, err) } upstreamParsers := []string{} @@ -41,18 +42,18 @@ func backupHub(dirPath string) error { clog = clog.WithFields(log.Fields{ "file": v.Name, }) - if !v.State.Installed { //only backup installed ones - clog.Debugf("[%s] : not installed", k) + if !v.State.Installed { // only backup installed ones + clog.Debugf("[%s]: not installed", k) continue } - //for the local/tainted ones, we back up the full file + // for the local/tainted ones, we back up the full file if v.State.Tainted || v.State.IsLocal() || !v.State.UpToDate { - //we need to backup stages for parsers + // we need to backup stages for parsers if itemType == cwhub.PARSERS || itemType == cwhub.POSTOVERFLOWS { fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage) if err = os.MkdirAll(fstagedir, os.ModePerm); err != nil { - return 
fmt.Errorf("error while creating stage dir %s : %s", fstagedir, err) + return fmt.Errorf("error while creating stage dir %s: %w", fstagedir, err) } } @@ -60,7 +61,7 @@ func backupHub(dirPath string) error { tfile := fmt.Sprintf("%s%s/%s", itemDirectory, v.Stage, v.FileName) if err = CopyFile(v.State.LocalPath, tfile); err != nil { - return fmt.Errorf("failed copy %s %s to %s : %s", itemType, v.State.LocalPath, tfile, err) + return fmt.Errorf("failed copy %s %s to %s: %w", itemType, v.State.LocalPath, tfile, err) } clog.Infof("local/tainted saved %s to %s", v.State.LocalPath, tfile) @@ -68,21 +69,21 @@ func backupHub(dirPath string) error { continue } - clog.Debugf("[%s] : from hub, just backup name (up-to-date:%t)", k, v.State.UpToDate) + clog.Debugf("[%s]: from hub, just backup name (up-to-date:%t)", k, v.State.UpToDate) clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.State.UpToDate) upstreamParsers = append(upstreamParsers, v.Name) } - //write the upstream items + // write the upstream items upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType) upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") if err != nil { - return fmt.Errorf("failed marshaling upstream parsers : %s", err) + return fmt.Errorf("failed marshaling upstream parsers: %w", err) } err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0o644) if err != nil { - return fmt.Errorf("unable to write to %s %s : %s", itemType, upstreamParsersFname, err) + return fmt.Errorf("unable to write to %s %s: %w", itemType, upstreamParsersFname, err) } clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname) @@ -102,11 +103,13 @@ func backupHub(dirPath string) error { - Tainted/local/out-of-date scenarios, parsers, postoverflows and collections - Acquisition files (acquis.yaml, acquis.d/*.yaml) */ -func backupConfigToDirectory(dirPath string) error { +func (cli *cliConfig) backup(dirPath string) 
error { var err error + cfg := cli.cfg() + if dirPath == "" { - return fmt.Errorf("directory path can't be empty") + return errors.New("directory path can't be empty") } log.Infof("Starting configuration backup") @@ -121,10 +124,10 @@ func backupConfigToDirectory(dirPath string) error { return fmt.Errorf("while creating %s: %w", dirPath, err) } - if csConfig.ConfigPaths.SimulationFilePath != "" { + if cfg.ConfigPaths.SimulationFilePath != "" { backupSimulation := filepath.Join(dirPath, "simulation.yaml") - if err = CopyFile(csConfig.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", csConfig.ConfigPaths.SimulationFilePath, backupSimulation, err) + if err = CopyFile(cfg.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.ConfigPaths.SimulationFilePath, backupSimulation, err) } log.Infof("Saved simulation to %s", backupSimulation) @@ -134,22 +137,22 @@ func backupConfigToDirectory(dirPath string) error { - backup AcquisitionFilePath - backup the other files of acquisition directory */ - if csConfig.Crowdsec != nil && csConfig.Crowdsec.AcquisitionFilePath != "" { + if cfg.Crowdsec != nil && cfg.Crowdsec.AcquisitionFilePath != "" { backupAcquisition := filepath.Join(dirPath, "acquis.yaml") - if err = CopyFile(csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", csConfig.Crowdsec.AcquisitionFilePath, backupAcquisition, err) + if err = CopyFile(cfg.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.Crowdsec.AcquisitionFilePath, backupAcquisition, err) } } acquisBackupDir := filepath.Join(dirPath, "acquis") if err = os.Mkdir(acquisBackupDir, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %s", acquisBackupDir, err) + return fmt.Errorf("error while creating %s: %w", acquisBackupDir, err) } - if 
csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles { + if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { + for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { /*if it was the default one, it was already backup'ed*/ - if csConfig.Crowdsec.AcquisitionFilePath == acquisFile { + if cfg.Crowdsec.AcquisitionFilePath == acquisFile { continue } @@ -169,56 +172,48 @@ func backupConfigToDirectory(dirPath string) error { if ConfigFilePath != "" { backupMain := fmt.Sprintf("%s/config.yaml", dirPath) if err = CopyFile(ConfigFilePath, backupMain); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", ConfigFilePath, backupMain, err) + return fmt.Errorf("failed copy %s to %s: %w", ConfigFilePath, backupMain, err) } log.Infof("Saved default yaml to %s", backupMain) } - if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { + if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.OnlineClient != nil && cfg.API.Server.OnlineClient.CredentialsFilePath != "" { backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if err = CopyFile(csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", csConfig.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) + if err = CopyFile(cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) } log.Infof("Saved online API credentials to %s", backupCAPICreds) } - if csConfig.API != nil && csConfig.API.Client != nil && csConfig.API.Client.CredentialsFilePath != "" { + if cfg.API != nil && cfg.API.Client != nil && cfg.API.Client.CredentialsFilePath != "" { backupLAPICreds := 
fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if err = CopyFile(csConfig.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", csConfig.API.Client.CredentialsFilePath, backupLAPICreds, err) + if err = CopyFile(cfg.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Client.CredentialsFilePath, backupLAPICreds, err) } log.Infof("Saved local API credentials to %s", backupLAPICreds) } - if csConfig.API != nil && csConfig.API.Server != nil && csConfig.API.Server.ProfilesPath != "" { + if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.ProfilesPath != "" { backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if err = CopyFile(csConfig.API.Server.ProfilesPath, backupProfiles); err != nil { - return fmt.Errorf("failed copy %s to %s: %s", csConfig.API.Server.ProfilesPath, backupProfiles, err) + if err = CopyFile(cfg.API.Server.ProfilesPath, backupProfiles); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.ProfilesPath, backupProfiles, err) } log.Infof("Saved profiles to %s", backupProfiles) } - if err = backupHub(dirPath); err != nil { - return fmt.Errorf("failed to backup hub config: %s", err) - } - - return nil -} - -func runConfigBackup(cmd *cobra.Command, args []string) error { - if err := backupConfigToDirectory(args[0]); err != nil { - return fmt.Errorf("failed to backup config: %w", err) + if err = cli.backupHub(dirPath); err != nil { + return fmt.Errorf("failed to backup hub config: %w", err) } return nil } -func NewConfigBackupCmd() *cobra.Command { - cmdConfigBackup := &cobra.Command{ +func (cli *cliConfig) newBackupCmd() *cobra.Command { + cmd := &cobra.Command{ Use: `backup "directory"`, Short: "Backup current config", Long: `Backup the current crowdsec configuration including : @@ -232,8 +227,14 @@ func NewConfigBackupCmd() *cobra.Command { Example: `cscli config backup ./my-backup`, 
Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runConfigBackup, + RunE: func(_ *cobra.Command, args []string) error { + if err := cli.backup(args[0]); err != nil { + return fmt.Errorf("failed to backup config: %w", err) + } + + return nil + }, } - return cmdConfigBackup + return cmd } diff --git a/cmd/crowdsec-cli/config_feature_flags.go b/cmd/crowdsec-cli/config_feature_flags.go index fbba1f56736..d1dbe2b93b7 100644 --- a/cmd/crowdsec-cli/config_feature_flags.go +++ b/cmd/crowdsec-cli/config_feature_flags.go @@ -11,14 +11,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/fflag" ) -func runConfigFeatureFlags(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - showRetired, err := flags.GetBool("retired") - if err != nil { - return err - } - +func (cli *cliConfig) featureFlags(showRetired bool) error { green := color.New(color.FgGreen).SprintFunc() red := color.New(color.FgRed).SprintFunc() yellow := color.New(color.FgYellow).SprintFunc() @@ -121,18 +114,22 @@ func runConfigFeatureFlags(cmd *cobra.Command, args []string) error { return nil } -func NewConfigFeatureFlagsCmd() *cobra.Command { - cmdConfigFeatureFlags := &cobra.Command{ +func (cli *cliConfig) newFeatureFlagsCmd() *cobra.Command { + var showRetired bool + + cmd := &cobra.Command{ Use: "feature-flags", Short: "Displays feature flag status", Long: `Displays the supported feature flags and their current status.`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: runConfigFeatureFlags, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.featureFlags(showRetired) + }, } - flags := cmdConfigFeatureFlags.Flags() - flags.Bool("retired", false, "Show retired features") + flags := cmd.Flags() + flags.BoolVar(&showRetired, "retired", false, "Show retired features") - return cmdConfigFeatureFlags + return cmd } diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index 17d7494c60f..513f993ba80 100644 --- 
a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -3,25 +3,17 @@ package main import ( "encoding/json" "fmt" - "io" "os" "path/filepath" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type OldAPICfg struct { - MachineID string `json:"machine_id"` - Password string `json:"password"` -} - -func restoreHub(dirPath string) error { +func (cli *cliConfig) restoreHub(dirPath string) error { hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), nil) if err != nil { return err @@ -38,14 +30,14 @@ func restoreHub(dirPath string) error { file, err := os.ReadFile(upstreamListFN) if err != nil { - return fmt.Errorf("error while opening %s : %s", upstreamListFN, err) + return fmt.Errorf("error while opening %s: %w", upstreamListFN, err) } var upstreamList []string err = json.Unmarshal(file, &upstreamList) if err != nil { - return fmt.Errorf("error unmarshaling %s : %s", upstreamListFN, err) + return fmt.Errorf("error unmarshaling %s: %w", upstreamListFN, err) } for _, toinstall := range upstreamList { @@ -55,8 +47,7 @@ func restoreHub(dirPath string) error { continue } - err := item.Install(false, false) - if err != nil { + if err = item.Install(false, false); err != nil { log.Errorf("Error while installing %s : %s", toinstall, err) } } @@ -64,17 +55,17 @@ func restoreHub(dirPath string) error { /*restore the local and tainted items*/ files, err := os.ReadDir(itemDirectory) if err != nil { - return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory, err) + return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory, err) } for _, file := range files { - //this was the upstream data + // this was the upstream data if file.Name() == fmt.Sprintf("upstream-%s.json", itype) { continue } if itype == cwhub.PARSERS || itype == 
cwhub.POSTOVERFLOWS { - //we expect a stage here + // we expect a stage here if !file.IsDir() { continue } @@ -84,22 +75,23 @@ func restoreHub(dirPath string) error { log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage directory %s : %s", stagedir, err) + return fmt.Errorf("error while creating stage directory %s: %w", stagedir, err) } // find items ifiles, err := os.ReadDir(itemDirectory + "/" + stage + "/") if err != nil { - return fmt.Errorf("failed enumerating files of %s : %s", itemDirectory+"/"+stage, err) + return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory+"/"+stage, err) } - //finally copy item + + // finally copy item for _, tfile := range ifiles { log.Infof("Going to restore local/tainted [%s]", tfile.Name()) sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name()) destinationFile := fmt.Sprintf("%s%s", stagedir, tfile.Name()) if err = CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err) + return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) } log.Infof("restored %s to %s", sourceFile, destinationFile) @@ -108,9 +100,11 @@ func restoreHub(dirPath string) error { log.Infof("Going to restore local/tainted [%s]", file.Name()) sourceFile := fmt.Sprintf("%s/%s", itemDirectory, file.Name()) destinationFile := fmt.Sprintf("%s/%s/%s", csConfig.ConfigPaths.ConfigDir, itype, file.Name()) + if err = CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s : %s", itype, sourceFile, destinationFile, err) + return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) } + log.Infof("restored %s to %s", sourceFile, destinationFile) } } @@ -130,95 +124,60 @@ func restoreHub(dirPath string) error { - 
Tainted/local/out-of-date scenarios, parsers, postoverflows and collections - Acquisition files (acquis.yaml, acquis.d/*.yaml) */ -func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { +func (cli *cliConfig) restore(dirPath string) error { var err error - if !oldBackup { - backupMain := fmt.Sprintf("%s/config.yaml", dirPath) - if _, err = os.Stat(backupMain); err == nil { - if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" { - if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupMain, csConfig.ConfigPaths.ConfigDir, err) - } + backupMain := fmt.Sprintf("%s/config.yaml", dirPath) + if _, err = os.Stat(backupMain); err == nil { + if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" { + if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupMain, csConfig.ConfigPaths.ConfigDir, err) } } + } - // Now we have config.yaml, we should regenerate config struct to have rights paths etc - ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) - - log.Debug("Reloading configuration") + // Now we have config.yaml, we should regenerate config struct to have rights paths etc + ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) - csConfig, _, err = loadConfigFor("config") - if err != nil { - return fmt.Errorf("failed to reload configuration: %s", err) - } + log.Debug("Reloading configuration") - backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupCAPICreds); err == nil { - if err = CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err) - } - } + 
csConfig, _, err = loadConfigFor("config") + if err != nil { + return fmt.Errorf("failed to reload configuration: %w", err) + } - backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupLAPICreds); err == nil { - if err = CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err) - } + backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) + if _, err = os.Stat(backupCAPICreds); err == nil { + if err = CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err) } + } - backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if _, err = os.Stat(backupProfiles); err == nil { - if err = CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupProfiles, csConfig.API.Server.ProfilesPath, err) - } + backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) + if _, err = os.Stat(backupLAPICreds); err == nil { + if err = CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err) } - } else { - var oldAPICfg OldAPICfg - backupOldAPICfg := fmt.Sprintf("%s/api_creds.json", dirPath) - - jsonFile, err := os.Open(backupOldAPICfg) - if err != nil { - log.Warningf("failed to open %s : %s", backupOldAPICfg, err) - } else { - byteValue, _ := io.ReadAll(jsonFile) - err = json.Unmarshal(byteValue, &oldAPICfg) - if err != nil { - return fmt.Errorf("failed to load json file %s : %s", backupOldAPICfg, err) - } + } - apiCfg := csconfig.ApiCredentialsCfg{ - Login: oldAPICfg.MachineID, - Password: oldAPICfg.Password, - 
URL: CAPIBaseURL, - } - apiConfigDump, err := yaml.Marshal(apiCfg) - if err != nil { - return fmt.Errorf("unable to dump api credentials: %s", err) - } - apiConfigDumpFile := fmt.Sprintf("%s/online_api_credentials.yaml", csConfig.ConfigPaths.ConfigDir) - if csConfig.API.Server.OnlineClient != nil && csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { - apiConfigDumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath - } - err = os.WriteFile(apiConfigDumpFile, apiConfigDump, 0o600) - if err != nil { - return fmt.Errorf("write api credentials in '%s' failed: %s", apiConfigDumpFile, err) - } - log.Infof("Saved API credentials to %s", apiConfigDumpFile) + backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) + if _, err = os.Stat(backupProfiles); err == nil { + if err = CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, csConfig.API.Server.ProfilesPath, err) } } backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) if _, err = os.Stat(backupSimulation); err == nil { if err = CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err) } } /*if there is a acquisition dir, restore its content*/ if csConfig.Crowdsec.AcquisitionDirPath != "" { if err = os.MkdirAll(csConfig.Crowdsec.AcquisitionDirPath, 0o700); err != nil { - return fmt.Errorf("error while creating %s : %s", csConfig.Crowdsec.AcquisitionDirPath, err) + return fmt.Errorf("error while creating %s: %w", csConfig.Crowdsec.AcquisitionDirPath, err) } } @@ -228,7 +187,7 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { log.Debugf("restoring backup'ed %s", backupAcquisition) if err = CopyFile(backupAcquisition, 
csConfig.Crowdsec.AcquisitionFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err) + return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err) } } @@ -244,7 +203,7 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { log.Debugf("restoring %s to %s", acquisFile, targetFname) if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err) + return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) } } } @@ -265,37 +224,22 @@ func restoreConfigFromDirectory(dirPath string, oldBackup bool) error { } if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s : %s", acquisFile, targetFname, err) + return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) } log.Infof("Saved acquis %s to %s", acquisFile, targetFname) } } - if err = restoreHub(dirPath); err != nil { - return fmt.Errorf("failed to restore hub config : %s", err) + if err = cli.restoreHub(dirPath); err != nil { + return fmt.Errorf("failed to restore hub config: %w", err) } return nil } -func runConfigRestore(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - oldBackup, err := flags.GetBool("old-backup") - if err != nil { - return err - } - - if err := restoreConfigFromDirectory(args[0], oldBackup); err != nil { - return fmt.Errorf("failed to restore config from %s: %w", args[0], err) - } - - return nil -} - -func NewConfigRestoreCmd() *cobra.Command { - cmdConfigRestore := &cobra.Command{ +func (cli *cliConfig) newRestoreCmd() *cobra.Command { + cmd := &cobra.Command{ Use: `restore "directory"`, Short: `Restore config in backup "directory"`, Long: `Restore the crowdsec configuration from specified backup "directory" including: @@ -308,11 +252,16 @@ func NewConfigRestoreCmd() *cobra.Command { - 
Backup of API credentials (local API and online API)`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runConfigRestore, - } + RunE: func(_ *cobra.Command, args []string) error { + dirPath := args[0] - flags := cmdConfigRestore.Flags() - flags.BoolP("old-backup", "", false, "To use when you are upgrading crowdsec v0.X to v1.X and you need to restore backup from v0.X") + if err := cli.restore(dirPath); err != nil { + return fmt.Errorf("failed to restore config from %s: %w", dirPath, err) + } + + return nil + }, + } - return cmdConfigRestore + return cmd } diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index bab911cc340..634ca77410e 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -182,31 +182,26 @@ Central API: {{- end }} ` -func runConfigShow(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() +func (cli *cliConfig) show(key string) error { + cfg := cli.cfg() - if err := csConfig.LoadAPIClient(); err != nil { + if err := cfg.LoadAPIClient(); err != nil { log.Errorf("failed to load API client configuration: %s", err) // don't return, we can still show the configuration } - key, err := flags.GetString("key") - if err != nil { - return err - } - if key != "" { return showConfigKey(key) } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": // The tests on .Enable look funny because the option has a true default which has // not been set yet (we don't really load the LAPI) and go templates don't dereference // pointers in boolean tests. Prefix notation is the cherry on top. 
funcs := template.FuncMap{ // can't use generics here - "ValueBool": func(b *bool) bool { return b!=nil && *b }, + "ValueBool": func(b *bool) bool { return b != nil && *b }, } tmp, err := template.New("config").Funcs(funcs).Parse(configShowTemplate) @@ -214,19 +209,19 @@ func runConfigShow(cmd *cobra.Command, args []string) error { return err } - err = tmp.Execute(os.Stdout, csConfig) + err = tmp.Execute(os.Stdout, cfg) if err != nil { return err } case "json": - data, err := json.MarshalIndent(csConfig, "", " ") + data, err := json.MarshalIndent(cfg, "", " ") if err != nil { return fmt.Errorf("failed to marshal configuration: %w", err) } fmt.Printf("%s\n", string(data)) case "raw": - data, err := yaml.Marshal(csConfig) + data, err := yaml.Marshal(cfg) if err != nil { return fmt.Errorf("failed to marshal configuration: %w", err) } @@ -237,18 +232,22 @@ func runConfigShow(cmd *cobra.Command, args []string) error { return nil } -func NewConfigShowCmd() *cobra.Command { - cmdConfigShow := &cobra.Command{ +func (cli *cliConfig) newShowCmd() *cobra.Command { + var key string + + cmd := &cobra.Command{ Use: "show", Short: "Displays current config", Long: `Displays the current cli configuration.`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: runConfigShow, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.show(key) + }, } - flags := cmdConfigShow.Flags() - flags.StringP("key", "", "", "Display only this value (Config.API.Server.ListenURI)") + flags := cmd.Flags() + flags.StringVarP(&key, "key", "", "", "Display only this value (Config.API.Server.ListenURI)") - return cmdConfigShow + return cmd } diff --git a/cmd/crowdsec-cli/config_showyaml.go b/cmd/crowdsec-cli/config_showyaml.go index 82bc67ffcb8..52daee6a65e 100644 --- a/cmd/crowdsec-cli/config_showyaml.go +++ b/cmd/crowdsec-cli/config_showyaml.go @@ -6,19 +6,21 @@ import ( "github.com/spf13/cobra" ) -func runConfigShowYAML(cmd *cobra.Command, args []string) error { +func (cli *cliConfig) 
showYAML() error { fmt.Println(mergedConfig) return nil } -func NewConfigShowYAMLCmd() *cobra.Command { - cmdConfigShow := &cobra.Command{ +func (cli *cliConfig) newShowYAMLCmd() *cobra.Command { + cmd := &cobra.Command{ Use: "show-yaml", Short: "Displays merged config.yaml + config.yaml.local", Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: runConfigShowYAML, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.showYAML() + }, } - return cmdConfigShow + return cmd } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 27ac17d554f..1f87390b636 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -231,7 +231,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) - cmd.AddCommand(NewConfigCmd()) + cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) From 8da490f5930406180bef6f4b0b99e0b0dc86dff8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 22 Feb 2024 11:42:33 +0100 Subject: [PATCH 034/581] refact pkg/apiclient (#2846) * extract resperr.go * extract method prepareRequest() * reset token inside mutex --- pkg/apiclient/auth_jwt.go | 37 +++++++++++++++++++++---------- pkg/apiclient/client.go | 36 ------------------------------ pkg/apiclient/resperr.go | 46 +++++++++++++++++++++++++++++++++++++++ pkg/apiserver/apic.go | 1 - 4 files changed, 72 insertions(+), 48 deletions(-) create mode 100644 pkg/apiclient/resperr.go diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 71b0e273105..2ead10cf6da 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -130,20 +130,24 @@ func (t *JWTTransport) refreshJwtToken() error { return nil } -// RoundTrip implements the 
RoundTripper interface. -func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { - // In a few occasions several goroutines will execute refreshJwtToken concurrently which is useless and will cause overload on CAPI - // we use a mutex to avoid this - // We also bypass the refresh if we are requesting the login endpoint, as it does not require a token, and it leads to do 2 requests instead of one (refresh + actual login request) +func (t *JWTTransport) needsTokenRefresh() bool { + return t.Token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC()) +} + +// prepareRequest returns a copy of the request with the necessary authentication headers. +func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) { + // In a few occasions several goroutines will execute refreshJwtToken concurrently which is useless + // and will cause overload on CAPI. We use a mutex to avoid this. t.refreshTokenMutex.Lock() - if req.URL.Path != "/"+t.VersionPrefix+"/watchers/login" && (t.Token == "" || t.Expiration.Add(-time.Minute).Before(time.Now().UTC())) { - if err := t.refreshJwtToken(); err != nil { - t.refreshTokenMutex.Unlock() + defer t.refreshTokenMutex.Unlock() + // We bypass the refresh if we are requesting the login endpoint, as it does not require a token, + // and it leads to do 2 requests instead of one (refresh + actual login request). + if req.URL.Path != "/"+t.VersionPrefix+"/watchers/login" && t.needsTokenRefresh() { + if err := t.refreshJwtToken(); err != nil { return nil, err } } - t.refreshTokenMutex.Unlock() if t.UserAgent != "" { req.Header.Add("User-Agent", t.UserAgent) @@ -151,6 +155,16 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.Token)) + return req, nil +} + +// RoundTrip implements the RoundTripper interface. 
+func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req, err := t.prepareRequest(req) + if err != nil { + return nil, err + } + if log.GetLevel() >= log.TraceLevel { //requestToDump := cloneRequest(req) dump, _ := httputil.DumpRequest(req, true) @@ -166,7 +180,7 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { if err != nil { // we had an error (network error for example, or 401 because token is refused), reset the token? - t.Token = "" + t.ResetToken() return resp, fmt.Errorf("performing jwt auth: %w", err) } @@ -189,7 +203,8 @@ func (t *JWTTransport) ResetToken() { t.refreshTokenMutex.Unlock() } -// transport() returns a round tripper that retries once when the status is unauthorized, and 5 times when the infrastructure is overloaded. +// transport() returns a round tripper that retries once when the status is unauthorized, +// and 5 times when the infrastructure is overloaded. func (t *JWTTransport) transport() http.RoundTripper { transport := t.Transport if transport == nil { diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index b183a8c7909..b487f68a698 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -4,9 +4,7 @@ import ( "context" "crypto/tls" "crypto/x509" - "encoding/json" "fmt" - "io" "net/http" "net/url" @@ -167,44 +165,10 @@ type Response struct { //... 
} -type ErrorResponse struct { - models.ErrorResponse -} - -func (e *ErrorResponse) Error() string { - err := fmt.Sprintf("API error: %s", *e.Message) - if len(e.Errors) > 0 { - err += fmt.Sprintf(" (%s)", e.Errors) - } - - return err -} - func newResponse(r *http.Response) *Response { return &Response{Response: r} } -func CheckResponse(r *http.Response) error { - if c := r.StatusCode; 200 <= c && c <= 299 || c == 304 { - return nil - } - - errorResponse := &ErrorResponse{} - - data, err := io.ReadAll(r.Body) - if err == nil && len(data)>0 { - err := json.Unmarshal(data, errorResponse) - if err != nil { - return fmt.Errorf("http code %d, invalid body: %w", r.StatusCode, err) - } - } else { - errorResponse.Message = new(string) - *errorResponse.Message = fmt.Sprintf("http code %d, no error message", r.StatusCode) - } - - return errorResponse -} - type ListOpts struct { //Page int //PerPage int diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go new file mode 100644 index 00000000000..ff954a73609 --- /dev/null +++ b/pkg/apiclient/resperr.go @@ -0,0 +1,46 @@ +package apiclient + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/crowdsecurity/go-cs-lib/ptr" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type ErrorResponse struct { + models.ErrorResponse +} + +func (e *ErrorResponse) Error() string { + err := fmt.Sprintf("API error: %s", *e.Message) + if len(e.Errors) > 0 { + err += fmt.Sprintf(" (%s)", e.Errors) + } + + return err +} + +// CheckResponse verifies the API response and builds an appropriate Go error if necessary. 
+func CheckResponse(r *http.Response) error { + if c := r.StatusCode; 200 <= c && c <= 299 || c == 304 { + return nil + } + + ret := &ErrorResponse{} + + data, err := io.ReadAll(r.Body) + if err != nil || len(data) == 0 { + ret.Message = ptr.Of(fmt.Sprintf("http code %d, no error message", r.StatusCode)) + return ret + } + + if err := json.Unmarshal(data, ret); err != nil { + return fmt.Errorf("http code %d, invalid body: %w", r.StatusCode, err) + } + + return ret +} diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 2fdb01144a0..f57ae685e45 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -539,7 +539,6 @@ func createAlertForDecision(decision *models.Decision) *models.Alert { scenario = *decision.Scenario scope = types.ListOrigin default: - // XXX: this or nil? scenario = "" scope = "" From 0df8f54fbbd08ab857e153229a43cf9e3c3f258e Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Thu, 22 Feb 2024 11:18:29 +0000 Subject: [PATCH 035/581] Add unix socket option to http plugin, we have to use this in conjunction with URL parameter as we dont know which path the user wants so if they would like to communicate over unix socket they need to use both, however, the hostname can be whatever they want. 
We could be a little smarter and actually parse the url, however, increasing code when a user can just define it correctly make no sense (#2764) --- cmd/notification-http/main.go | 42 +++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/cmd/notification-http/main.go b/cmd/notification-http/main.go index 340d462c175..382f30fea53 100644 --- a/cmd/notification-http/main.go +++ b/cmd/notification-http/main.go @@ -7,8 +7,10 @@ import ( "crypto/x509" "fmt" "io" + "net" "net/http" "os" + "strings" "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" @@ -19,6 +21,7 @@ import ( type PluginConfig struct { Name string `yaml:"name"` URL string `yaml:"url"` + UnixSocket string `yaml:"unix_socket"` Headers map[string]string `yaml:"headers"` SkipTLSVerification bool `yaml:"skip_tls_verification"` Method string `yaml:"method"` @@ -66,36 +69,40 @@ func getCertPool(caPath string) (*x509.CertPool, error) { return cp, nil } -func getTLSClient(tlsVerify bool, caPath, certPath, keyPath string) (*http.Client, error) { - var client *http.Client - - caCertPool, err := getCertPool(caPath) +func getTLSClient(c *PluginConfig) error { + caCertPool, err := getCertPool(c.CAPath) if err != nil { - return nil, err + return err } tlsConfig := &tls.Config{ RootCAs: caCertPool, - InsecureSkipVerify: tlsVerify, + InsecureSkipVerify: c.SkipTLSVerification, } - if certPath != "" && keyPath != "" { - logger.Info(fmt.Sprintf("Using client certificate '%s' and key '%s'", certPath, keyPath)) + if c.CertPath != "" && c.KeyPath != "" { + logger.Info(fmt.Sprintf("Using client certificate '%s' and key '%s'", c.CertPath, c.KeyPath)) - cert, err := tls.LoadX509KeyPair(certPath, keyPath) + cert, err := tls.LoadX509KeyPair(c.CertPath, c.KeyPath) if err != nil { - return nil, fmt.Errorf("unable to load client certificate '%s' and key '%s': %w", certPath, keyPath, err) + return fmt.Errorf("unable to load client certificate '%s' and key '%s': 
%w", c.CertPath, c.KeyPath, err) } tlsConfig.Certificates = []tls.Certificate{cert} } - - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsConfig, - }, + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + if c.UnixSocket != "" { + logger.Info(fmt.Sprintf("Using socket '%s'", c.UnixSocket)) + transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", strings.TrimSuffix(c.UnixSocket, "/")) + } + } + c.Client = &http.Client{ + Transport: transport, } - return client, err + return nil } func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { @@ -135,6 +142,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific if resp.StatusCode < 200 || resp.StatusCode >= 300 { logger.Warn(fmt.Sprintf("HTTP server returned non 200 status code: %d", resp.StatusCode)) + logger.Debug(fmt.Sprintf("HTTP server returned body: %s", string(respData))) return &protobufs.Empty{}, nil } @@ -147,7 +155,7 @@ func (s *HTTPPlugin) Configure(ctx context.Context, config *protobufs.Config) (* if err != nil { return nil, err } - d.Client, err = getTLSClient(d.SkipTLSVerification, d.CAPath, d.CertPath, d.KeyPath) + err = getTLSClient(&d) if err != nil { return nil, err } From e34af358d7b96df49634f28696e9c1b1f01e097c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Feb 2024 10:37:04 +0100 Subject: [PATCH 036/581] refact cscli (globals) (#2854) * cscli capi: avoid globals, extract methods * cscli config restore: avoid global * cscli hubtest: avoid global * lint (whitespace, wrapped errors) --- cmd/crowdsec-cli/bouncers.go | 24 +-- cmd/crowdsec-cli/capi.go | 245 ++++++++++++++++------------- cmd/crowdsec-cli/config_restore.go | 54 ++++--- cmd/crowdsec-cli/hubtest.go | 170 +++++++++++--------- cmd/crowdsec-cli/main.go | 4 +- 5 files changed, 281 insertions(+), 216 deletions(-) diff --git 
a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 717e9aef5fe..35f4320c520 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -3,6 +3,7 @@ package main import ( "encoding/csv" "encoding/json" + "errors" "fmt" "os" "slices" @@ -58,13 +59,16 @@ Note: This command requires database direct access, so is intended to be run on DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { var err error - if err = require.LAPI(cli.cfg()); err != nil { + + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { return err } - cli.db, err = database.NewClient(cli.cfg().DbConfig) + cli.db, err = database.NewClient(cfg.DbConfig) if err != nil { - return fmt.Errorf("can't connect to the database: %s", err) + return fmt.Errorf("can't connect to the database: %w", err) } return nil @@ -84,7 +88,7 @@ func (cli *cliBouncers) list() error { bouncers, err := cli.db.ListBouncers() if err != nil { - return fmt.Errorf("unable to list bouncers: %s", err) + return fmt.Errorf("unable to list bouncers: %w", err) } switch cli.cfg().Cscli.Output { @@ -146,13 +150,13 @@ func (cli *cliBouncers) add(bouncerName string, key string) error { if key == "" { key, err = middlewares.GenerateAPIKey(keyLength) if err != nil { - return fmt.Errorf("unable to generate api key: %s", err) + return fmt.Errorf("unable to generate api key: %w", err) } } _, err = cli.db.CreateBouncer(bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) if err != nil { - return fmt.Errorf("unable to create bouncer: %s", err) + return fmt.Errorf("unable to create bouncer: %w", err) } switch cli.cfg().Cscli.Output { @@ -165,7 +169,7 @@ func (cli *cliBouncers) add(bouncerName string, key string) error { case "json": j, err := json.Marshal(key) if err != nil { - return fmt.Errorf("unable to marshal api key") + return errors.New("unable to marshal api key") } fmt.Print(string(j)) @@ -191,7 +195,7 @@ cscli bouncers add MyBouncerName --key `, 
flags := cmd.Flags() flags.StringP("length", "l", "", "length of the api key") - flags.MarkDeprecated("length", "use --key instead") + _ = flags.MarkDeprecated("length", "use --key instead") flags.StringVarP(&key, "key", "k", "", "api key for the bouncer") return cmd @@ -218,7 +222,7 @@ func (cli *cliBouncers) delete(bouncers []string) error { for _, bouncerID := range bouncers { err := cli.db.DeleteBouncer(bouncerID) if err != nil { - return fmt.Errorf("unable to delete bouncer '%s': %s", bouncerID, err) + return fmt.Errorf("unable to delete bouncer '%s': %w", bouncerID, err) } log.Infof("bouncer '%s' deleted successfully", bouncerID) @@ -280,7 +284,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { deleted, err := cli.db.BulkDeleteBouncers(bouncers) if err != nil { - return fmt.Errorf("unable to prune bouncers: %s", err) + return fmt.Errorf("unable to prune bouncers: %w", err) } fmt.Fprintf(os.Stderr, "Successfully deleted %d bouncers\n", deleted) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 358d91ee215..e56a8a74707 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -2,6 +2,7 @@ package main import ( "context" + "errors" "fmt" "net/url" "os" @@ -26,24 +27,29 @@ const ( CAPIURLPrefix = "v3" ) -type cliCapi struct{} +type cliCapi struct { + cfg configGetter +} -func NewCLICapi() *cliCapi { - return &cliCapi{} +func NewCLICapi(cfg configGetter) *cliCapi { + return &cliCapi{ + cfg: cfg, + } } -func (cli cliCapi) NewCommand() *cobra.Command { - var cmd = &cobra.Command{ +func (cli *cliCapi) NewCommand() *cobra.Command { + cmd := &cobra.Command{ Use: "capi [action]", Short: "Manage interaction with Central API (CAPI)", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - if err := require.LAPI(csConfig); err != nil { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { return err } - if err := require.CAPI(csConfig); err 
!= nil { + if err := require.CAPI(cfg); err != nil { return err } @@ -51,78 +57,92 @@ func (cli cliCapi) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewRegisterCmd()) - cmd.AddCommand(cli.NewStatusCmd()) + cmd.AddCommand(cli.newRegisterCmd()) + cmd.AddCommand(cli.newStatusCmd()) return cmd } -func (cli cliCapi) NewRegisterCmd() *cobra.Command { +func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { + cfg := cli.cfg() + + capiUser, err := generateID(capiUserPrefix) + if err != nil { + return fmt.Errorf("unable to generate machine id: %w", err) + } + + password := strfmt.Password(generatePassword(passwordLength)) + + apiurl, err := url.Parse(types.CAPIBaseURL) + if err != nil { + return fmt.Errorf("unable to parse api url %s: %w", types.CAPIBaseURL, err) + } + + _, err = apiclient.RegisterClient(&apiclient.Config{ + MachineID: capiUser, + Password: password, + UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + URL: apiurl, + VersionPrefix: CAPIURLPrefix, + }, nil) + + if err != nil { + return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) + } + + log.Infof("Successfully registered to Central API (CAPI)") + + var dumpFile string + + switch { + case outputFile != "": + dumpFile = outputFile + case cfg.API.Server.OnlineClient.CredentialsFilePath != "": + dumpFile = cfg.API.Server.OnlineClient.CredentialsFilePath + default: + dumpFile = "" + } + + apiCfg := csconfig.ApiCredentialsCfg{ + Login: capiUser, + Password: password.String(), + URL: types.CAPIBaseURL, + } + + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + return fmt.Errorf("unable to marshal api credentials: %w", err) + } + + if dumpFile != "" { + err = os.WriteFile(dumpFile, apiConfigDump, 0o600) + if err != nil { + return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) + } + + log.Infof("Central API credentials written to '%s'", dumpFile) + } else { + fmt.Println(string(apiConfigDump)) + } + + 
log.Warning(ReloadMessage()) + + return nil +} + +func (cli *cliCapi) newRegisterCmd() *cobra.Command { var ( capiUserPrefix string - outputFile string + outputFile string ) - var cmd = &cobra.Command{ + cmd := &cobra.Command{ Use: "register", Short: "Register to Central API (CAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - var err error - capiUser, err := generateID(capiUserPrefix) - if err != nil { - return fmt.Errorf("unable to generate machine id: %s", err) - } - password := strfmt.Password(generatePassword(passwordLength)) - apiurl, err := url.Parse(types.CAPIBaseURL) - if err != nil { - return fmt.Errorf("unable to parse api url %s: %w", types.CAPIBaseURL, err) - } - _, err = apiclient.RegisterClient(&apiclient.Config{ - MachineID: capiUser, - Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), - URL: apiurl, - VersionPrefix: CAPIURLPrefix, - }, nil) - - if err != nil { - return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) - } - log.Printf("Successfully registered to Central API (CAPI)") - - var dumpFile string - - if outputFile != "" { - dumpFile = outputFile - } else if csConfig.API.Server.OnlineClient.CredentialsFilePath != "" { - dumpFile = csConfig.API.Server.OnlineClient.CredentialsFilePath - } else { - dumpFile = "" - } - apiCfg := csconfig.ApiCredentialsCfg{ - Login: capiUser, - Password: password.String(), - URL: types.CAPIBaseURL, - } - apiConfigDump, err := yaml.Marshal(apiCfg) - if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) - } - if dumpFile != "" { - err = os.WriteFile(dumpFile, apiConfigDump, 0o600) - if err != nil { - return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) - } - log.Printf("Central API credentials written to '%s'", dumpFile) - } else { - fmt.Println(string(apiConfigDump)) - } - - log.Warning(ReloadMessage()) - - return nil + return 
cli.register(capiUserPrefix, outputFile) }, } @@ -136,59 +156,66 @@ func (cli cliCapi) NewRegisterCmd() *cobra.Command { return cmd } -func (cli cliCapi) NewStatusCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "status", - Short: "Check status with the Central API (CAPI)", - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - if err := require.CAPIRegistered(csConfig); err != nil { - return err - } +func (cli *cliCapi) status() error { + cfg := cli.cfg() - password := strfmt.Password(csConfig.API.Server.OnlineClient.Credentials.Password) + if err := require.CAPIRegistered(cfg); err != nil { + return err + } - apiurl, err := url.Parse(csConfig.API.Server.OnlineClient.Credentials.URL) - if err != nil { - return fmt.Errorf("parsing api url ('%s'): %w", csConfig.API.Server.OnlineClient.Credentials.URL, err) - } + password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) - hub, err := require.Hub(csConfig, nil, nil) - if err != nil { - return err - } + apiurl, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) + if err != nil { + return fmt.Errorf("parsing api url ('%s'): %w", cfg.API.Server.OnlineClient.Credentials.URL, err) + } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("failed to get scenarios: %w", err) - } + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } - if len(scenarios) == 0 { - return fmt.Errorf("no scenarios installed, abort") - } + scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + if err != nil { + return fmt.Errorf("failed to get scenarios: %w", err) + } - Client, err = apiclient.NewDefaultClient(apiurl, CAPIURLPrefix, fmt.Sprintf("crowdsec/%s", version.String()), nil) - if err != nil { - return fmt.Errorf("init default client: %w", err) - } + if len(scenarios) == 0 { + return errors.New("no scenarios installed, abort") + } - t := models.WatcherAuthRequest{ - 
MachineID: &csConfig.API.Server.OnlineClient.Credentials.Login, - Password: &password, - Scenarios: scenarios, - } + Client, err = apiclient.NewDefaultClient(apiurl, CAPIURLPrefix, fmt.Sprintf("crowdsec/%s", version.String()), nil) + if err != nil { + return fmt.Errorf("init default client: %w", err) + } - log.Infof("Loaded credentials from %s", csConfig.API.Server.OnlineClient.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", csConfig.API.Server.OnlineClient.Credentials.Login, apiurl) + t := models.WatcherAuthRequest{ + MachineID: &cfg.API.Server.OnlineClient.Credentials.Login, + Password: &password, + Scenarios: scenarios, + } - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) - if err != nil { - return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) - } - log.Infof("You can successfully interact with Central API (CAPI)") + log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", cfg.API.Server.OnlineClient.Credentials.Login, apiurl) - return nil + _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + if err != nil { + return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) + } + + log.Info("You can successfully interact with Central API (CAPI)") + + return nil +} + +func (cli *cliCapi) newStatusCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "status", + Short: "Check status with the Central API (CAPI)", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.status() }, } diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index 513f993ba80..ee7179b73c5 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -14,7 +14,9 @@ import ( ) func (cli *cliConfig) restoreHub(dirPath string) error { - hub, err := require.Hub(csConfig, 
require.RemoteHub(csConfig), nil) + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(cfg), nil) if err != nil { return err } @@ -71,7 +73,7 @@ func (cli *cliConfig) restoreHub(dirPath string) error { } stage := file.Name() - stagedir := fmt.Sprintf("%s/%s/%s/", csConfig.ConfigPaths.ConfigDir, itype, stage) + stagedir := fmt.Sprintf("%s/%s/%s/", cfg.ConfigPaths.ConfigDir, itype, stage) log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { @@ -99,7 +101,7 @@ func (cli *cliConfig) restoreHub(dirPath string) error { } else { log.Infof("Going to restore local/tainted [%s]", file.Name()) sourceFile := fmt.Sprintf("%s/%s", itemDirectory, file.Name()) - destinationFile := fmt.Sprintf("%s/%s/%s", csConfig.ConfigPaths.ConfigDir, itype, file.Name()) + destinationFile := fmt.Sprintf("%s/%s/%s", cfg.ConfigPaths.ConfigDir, itype, file.Name()) if err = CopyFile(sourceFile, destinationFile); err != nil { return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) @@ -127,17 +129,19 @@ func (cli *cliConfig) restoreHub(dirPath string) error { func (cli *cliConfig) restore(dirPath string) error { var err error + cfg := cli.cfg() + backupMain := fmt.Sprintf("%s/config.yaml", dirPath) if _, err = os.Stat(backupMain); err == nil { - if csConfig.ConfigPaths != nil && csConfig.ConfigPaths.ConfigDir != "" { - if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir)); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupMain, csConfig.ConfigPaths.ConfigDir, err) + if cfg.ConfigPaths != nil && cfg.ConfigPaths.ConfigDir != "" { + if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir)); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupMain, cfg.ConfigPaths.ConfigDir, err) } } } // Now we have config.yaml, we should regenerate config struct to have rights paths etc 
- ConfigFilePath = fmt.Sprintf("%s/config.yaml", csConfig.ConfigPaths.ConfigDir) + ConfigFilePath = fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir) log.Debug("Reloading configuration") @@ -146,38 +150,40 @@ func (cli *cliConfig) restore(dirPath string) error { return fmt.Errorf("failed to reload configuration: %w", err) } + cfg = cli.cfg() + backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) if _, err = os.Stat(backupCAPICreds); err == nil { - if err = CopyFile(backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, csConfig.API.Server.OnlineClient.CredentialsFilePath, err) + if err = CopyFile(backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath, err) } } backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) if _, err = os.Stat(backupLAPICreds); err == nil { - if err = CopyFile(backupLAPICreds, csConfig.API.Client.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, csConfig.API.Client.CredentialsFilePath, err) + if err = CopyFile(backupLAPICreds, cfg.API.Client.CredentialsFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, cfg.API.Client.CredentialsFilePath, err) } } backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) if _, err = os.Stat(backupProfiles); err == nil { - if err = CopyFile(backupProfiles, csConfig.API.Server.ProfilesPath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, csConfig.API.Server.ProfilesPath, err) + if err = CopyFile(backupProfiles, cfg.API.Server.ProfilesPath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, cfg.API.Server.ProfilesPath, err) } } backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) if _, 
err = os.Stat(backupSimulation); err == nil { - if err = CopyFile(backupSimulation, csConfig.ConfigPaths.SimulationFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, csConfig.ConfigPaths.SimulationFilePath, err) + if err = CopyFile(backupSimulation, cfg.ConfigPaths.SimulationFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, cfg.ConfigPaths.SimulationFilePath, err) } } /*if there is a acquisition dir, restore its content*/ - if csConfig.Crowdsec.AcquisitionDirPath != "" { - if err = os.MkdirAll(csConfig.Crowdsec.AcquisitionDirPath, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %w", csConfig.Crowdsec.AcquisitionDirPath, err) + if cfg.Crowdsec.AcquisitionDirPath != "" { + if err = os.MkdirAll(cfg.Crowdsec.AcquisitionDirPath, 0o700); err != nil { + return fmt.Errorf("error while creating %s: %w", cfg.Crowdsec.AcquisitionDirPath, err) } } @@ -186,8 +192,8 @@ func (cli *cliConfig) restore(dirPath string) error { if _, err = os.Stat(backupAcquisition); err == nil { log.Debugf("restoring backup'ed %s", backupAcquisition) - if err = CopyFile(backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, csConfig.Crowdsec.AcquisitionFilePath, err) + if err = CopyFile(backupAcquisition, cfg.Crowdsec.AcquisitionFilePath); err != nil { + return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, cfg.Crowdsec.AcquisitionFilePath, err) } } @@ -195,7 +201,7 @@ func (cli *cliConfig) restore(dirPath string) error { acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml") if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil { for _, acquisFile := range acquisFiles { - targetFname, err := filepath.Abs(csConfig.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile)) + targetFname, err := filepath.Abs(cfg.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile)) if err != nil { 
return fmt.Errorf("while saving %s to %s: %w", acquisFile, targetFname, err) } @@ -208,12 +214,12 @@ func (cli *cliConfig) restore(dirPath string) error { } } - if csConfig.Crowdsec != nil && len(csConfig.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range csConfig.Crowdsec.AcquisitionFiles { + if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { + for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { log.Infof("backup filepath from dir -> %s", acquisFile) // if it was the default one, it has already been backed up - if csConfig.Crowdsec.AcquisitionFilePath == acquisFile { + if cfg.Crowdsec.AcquisitionFilePath == acquisFile { log.Infof("skip this one") continue } diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 1860540e7dc..8f5ab087370 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -2,6 +2,7 @@ package main import ( "encoding/json" + "errors" "fmt" "math" "os" @@ -20,21 +21,29 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -var HubTest hubtest.HubTest -var HubAppsecTests hubtest.HubTest -var hubPtr *hubtest.HubTest -var isAppsecTest bool +var ( + HubTest hubtest.HubTest + HubAppsecTests hubtest.HubTest + hubPtr *hubtest.HubTest + isAppsecTest bool +) -type cliHubTest struct{} +type cliHubTest struct { + cfg configGetter +} -func NewCLIHubTest() *cliHubTest { - return &cliHubTest{} +func NewCLIHubTest(cfg configGetter) *cliHubTest { + return &cliHubTest{ + cfg: cfg, + } } -func (cli cliHubTest) NewCommand() *cobra.Command { - var hubPath string - var crowdsecPath string - var cscliPath string +func (cli *cliHubTest) NewCommand() *cobra.Command { + var ( + hubPath string + crowdsecPath string + cscliPath string + ) cmd := &cobra.Command{ Use: "hubtest", @@ -53,11 +62,13 @@ func (cli cliHubTest) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("unable to load appsec specific hubtest: %+v", err) } - /*commands will use the hubPtr, will point to the 
default hubTest object, or the one dedicated to appsec tests*/ + + // commands will use the hubPtr, will point to the default hubTest object, or the one dedicated to appsec tests hubPtr = &HubTest if isAppsecTest { hubPtr = &HubAppsecTests } + return nil }, } @@ -79,13 +90,16 @@ func (cli cliHubTest) NewCommand() *cobra.Command { return cmd } -func (cli cliHubTest) NewCreateCmd() *cobra.Command { +func (cli *cliHubTest) NewCreateCmd() *cobra.Command { + var ( + ignoreParsers bool + labels map[string]string + logType string + ) + parsers := []string{} postoverflows := []string{} scenarios := []string{} - var ignoreParsers bool - var labels map[string]string - var logType string cmd := &cobra.Command{ Use: "create", @@ -107,7 +121,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios } if logType == "" { - return fmt.Errorf("please provide a type (--type) for the test") + return errors.New("please provide a type (--type) for the test") } if err := os.MkdirAll(testPath, os.ModePerm); err != nil { @@ -118,7 +132,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios configFileData := &hubtest.HubTestItemConfig{} if logType == "appsec" { - //create empty nuclei template file + // create empty nuclei template file nucleiFileName := fmt.Sprintf("%s.yaml", testName) nucleiFilePath := filepath.Join(testPath, nucleiFileName) nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0755) @@ -128,7 +142,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios ntpl := template.Must(template.New("nuclei").Parse(hubtest.TemplateNucleiFile)) if ntpl == nil { - return fmt.Errorf("unable to parse nuclei template") + return errors.New("unable to parse nuclei template") } ntpl.ExecuteTemplate(nucleiFile, "nuclei", struct{ TestName string }{TestName: testName}) nucleiFile.Close() @@ -188,24 +202,24 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios 
fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) fmt.Printf(" Configuration File : %s (please fill it with parsers, scenarios...)\n", configFilePath) - } fd, err := os.Create(configFilePath) if err != nil { - return fmt.Errorf("open: %s", err) + return fmt.Errorf("open: %w", err) } data, err := yaml.Marshal(configFileData) if err != nil { - return fmt.Errorf("marshal: %s", err) + return fmt.Errorf("marshal: %w", err) } _, err = fd.Write(data) if err != nil { - return fmt.Errorf("write: %s", err) + return fmt.Errorf("write: %w", err) } if err := fd.Close(); err != nil { - return fmt.Errorf("close: %s", err) + return fmt.Errorf("close: %w", err) } + return nil }, } @@ -219,20 +233,25 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios return cmd } -func (cli cliHubTest) NewRunCmd() *cobra.Command { - var noClean bool - var runAll bool - var forceClean bool - var NucleiTargetHost string - var AppSecHost string - var cmd = &cobra.Command{ +func (cli *cliHubTest) NewRunCmd() *cobra.Command { + var ( + noClean bool + runAll bool + forceClean bool + NucleiTargetHost string + AppSecHost string + ) + + cmd := &cobra.Command{ Use: "run", Short: "run [test_name]", DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { + cfg := cli.cfg() + if !runAll && len(args) == 0 { printHelp(cmd) - return fmt.Errorf("please provide test to run or --all flag") + return errors.New("please provide test to run or --all flag") } hubPtr.NucleiTargetHost = NucleiTargetHost hubPtr.AppSecHost = AppSecHost @@ -244,7 +263,7 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { for _, testName := range args { _, err := hubPtr.LoadTestItem(testName) if err != nil { - return fmt.Errorf("unable to load test '%s': %s", testName, err) + return fmt.Errorf("unable to load test '%s': %w", testName, err) } 
} } @@ -252,7 +271,7 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { // set timezone to avoid DST issues os.Setenv("TZ", "UTC") for _, test := range hubPtr.Tests { - if csConfig.Cscli.Output == "human" { + if cfg.Cscli.Output == "human" { log.Infof("Running test '%s'", test.Name) } err := test.Run() @@ -264,6 +283,8 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { return nil }, PersistentPostRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + success := true testResult := make(map[string]bool) for _, test := range hubPtr.Tests { @@ -280,7 +301,7 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { } if !noClean { if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %s", test.Name, err) + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } fmt.Printf("\nPlease fill your assert file(s) for test '%s', exiting\n", test.Name) @@ -288,18 +309,18 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { } testResult[test.Name] = test.Success if test.Success { - if csConfig.Cscli.Output == "human" { + if cfg.Cscli.Output == "human" { log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) } if !noClean { if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %s", test.Name, err) + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } } else { success = false cleanTestEnv := false - if csConfig.Cscli.Output == "human" { + if cfg.Cscli.Output == "human" { if len(test.ParserAssert.Fails) > 0 { fmt.Println() log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) @@ -330,20 +351,20 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { Default: true, } if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { - return fmt.Errorf("unable to ask to remove runtime folder: %s", err) + return fmt.Errorf("unable to ask to remove 
runtime folder: %w", err) } } } if cleanTestEnv || forceClean { if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %s", test.Name, err) + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } } } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": hubTestResultTable(color.Output, testResult) case "json": @@ -359,11 +380,11 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { } jsonStr, err := json.Marshal(jsonResult) if err != nil { - return fmt.Errorf("unable to json test result: %s", err) + return fmt.Errorf("unable to json test result: %w", err) } fmt.Println(string(jsonStr)) default: - return fmt.Errorf("only human/json output modes are supported") + return errors.New("only human/json output modes are supported") } if !success { @@ -383,7 +404,7 @@ func (cli cliHubTest) NewRunCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewCleanCmd() *cobra.Command { +func (cli *cliHubTest) NewCleanCmd() *cobra.Command { var cmd = &cobra.Command{ Use: "clean", Short: "clean [test_name]", @@ -393,10 +414,10 @@ func (cli cliHubTest) NewCleanCmd() *cobra.Command { for _, testName := range args { test, err := hubPtr.LoadTestItem(testName) if err != nil { - return fmt.Errorf("unable to load test '%s': %s", testName, err) + return fmt.Errorf("unable to load test '%s': %w", testName, err) } if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %s", test.Name, err) + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } @@ -407,7 +428,7 @@ func (cli cliHubTest) NewCleanCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewInfoCmd() *cobra.Command { +func (cli *cliHubTest) NewInfoCmd() *cobra.Command { cmd := &cobra.Command{ Use: "info", Short: "info [test_name]", @@ -417,7 +438,7 @@ func (cli cliHubTest) NewInfoCmd() *cobra.Command { for _, testName := range args { test, err := hubPtr.LoadTestItem(testName) if err != nil { - 
return fmt.Errorf("unable to load test '%s': %s", testName, err) + return fmt.Errorf("unable to load test '%s': %w", testName, err) } fmt.Println() fmt.Printf(" Test name : %s\n", test.Name) @@ -440,17 +461,19 @@ func (cli cliHubTest) NewInfoCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewListCmd() *cobra.Command { +func (cli *cliHubTest) NewListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %s", err) + return fmt.Errorf("unable to load all tests: %w", err) } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": hubTestListTable(color.Output, hubPtr.Tests) case "json": @@ -460,7 +483,7 @@ func (cli cliHubTest) NewListCmd() *cobra.Command { } fmt.Println(string(j)) default: - return fmt.Errorf("only human/json output modes are supported") + return errors.New("only human/json output modes are supported") } return nil @@ -470,18 +493,22 @@ func (cli cliHubTest) NewListCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewCoverageCmd() *cobra.Command { - var showParserCov bool - var showScenarioCov bool - var showOnlyPercent bool - var showAppsecCov bool +func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { + var ( + showParserCov bool + showScenarioCov bool + showOnlyPercent bool + showAppsecCov bool + ) cmd := &cobra.Command{ Use: "coverage", Short: "coverage", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - //for this one we explicitly don't do for appsec + cfg := cli.cfg() + + // for this one we explicitly don't do for appsec if err := HubTest.LoadAllTests(); err != nil { return fmt.Errorf("unable to load all tests: %+v", err) } @@ -499,7 +526,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { if showParserCov || showAll { parserCoverage, err = 
HubTest.GetParsersCoverage() if err != nil { - return fmt.Errorf("while getting parser coverage: %s", err) + return fmt.Errorf("while getting parser coverage: %w", err) } parserTested := 0 for _, test := range parserCoverage { @@ -513,7 +540,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { if showScenarioCov || showAll { scenarioCoverage, err = HubTest.GetScenariosCoverage() if err != nil { - return fmt.Errorf("while getting scenario coverage: %s", err) + return fmt.Errorf("while getting scenario coverage: %w", err) } scenarioTested := 0 @@ -529,7 +556,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { if showAppsecCov || showAll { appsecRuleCoverage, err = HubTest.GetAppsecCoverage() if err != nil { - return fmt.Errorf("while getting scenario coverage: %s", err) + return fmt.Errorf("while getting scenario coverage: %w", err) } appsecRuleTested := 0 @@ -542,19 +569,20 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { } if showOnlyPercent { - if showAll { + switch { + case showAll: fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) - } else if showParserCov { + case showParserCov: fmt.Printf("parsers=%d%%", parserCoveragePercent) - } else if showScenarioCov { + case showScenarioCov: fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) - } else if showAppsecCov { + case showAppsecCov: fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) } os.Exit(0) } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human": if showParserCov || showAll { hubTestParserCoverageTable(color.Output, parserCoverage) @@ -595,7 +623,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { } fmt.Printf("%s", dump) default: - return fmt.Errorf("only human/json output modes are supported") + return errors.New("only human/json output modes are supported") } return nil @@ -610,7 +638,7 @@ func (cli cliHubTest) NewCoverageCmd() *cobra.Command { return cmd 
} -func (cli cliHubTest) NewEvalCmd() *cobra.Command { +func (cli *cliHubTest) NewEvalCmd() *cobra.Command { var evalExpression string cmd := &cobra.Command{ @@ -647,7 +675,7 @@ func (cli cliHubTest) NewEvalCmd() *cobra.Command { return cmd } -func (cli cliHubTest) NewExplainCmd() *cobra.Command { +func (cli *cliHubTest) NewExplainCmd() *cobra.Command { cmd := &cobra.Command{ Use: "explain", Short: "explain [test_name]", @@ -666,7 +694,7 @@ func (cli cliHubTest) NewExplainCmd() *cobra.Command { } if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { - return fmt.Errorf("unable to load parser result after run: %s", err) + return fmt.Errorf("unable to load parser result after run: %w", err) } } @@ -677,7 +705,7 @@ func (cli cliHubTest) NewExplainCmd() *cobra.Command { } if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { - return fmt.Errorf("unable to load scenario result after run: %s", err) + return fmt.Errorf("unable to load scenario result after run: %w", err) } } opts := dumps.DumpOpts{} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 1f87390b636..446901e4aa9 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -240,12 +240,12 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICapi().NewCommand()) + cmd.AddCommand(NewCLICapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(NewCLIConsole(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIHubTest().NewCommand()) + cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) 
cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) From 4bf640c6e86185b506fde7332a338ccf2eb711ca Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Feb 2024 14:03:50 +0100 Subject: [PATCH 037/581] refact pkg/apiserver (auth helpers) (#2856) --- pkg/apiserver/controllers/v1/alerts.go | 5 +--- pkg/apiserver/controllers/v1/heartbeat.go | 5 +--- pkg/apiserver/controllers/v1/metrics.go | 34 ++++++++++------------- pkg/apiserver/controllers/v1/utils.go | 32 +++++++++++++++++---- pkg/apiserver/middlewares/v1/api_key.go | 11 ++------ pkg/apiserver/middlewares/v1/jwt.go | 8 +++--- 6 files changed, 50 insertions(+), 45 deletions(-) diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index e7d106d72a3..ad183e4ba80 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -9,7 +9,6 @@ import ( "strings" "time" - jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" "github.com/go-openapi/strfmt" "github.com/google/uuid" @@ -143,9 +142,7 @@ func normalizeScope(scope string) string { func (c *Controller) CreateAlert(gctx *gin.Context) { var input models.AddAlertsRequest - claims := jwt.ExtractClaims(gctx) - // TBD: use defined rather than hardcoded key to find back owner - machineID := claims["id"].(string) + machineID, _ := getMachineIDFromContext(gctx) if err := gctx.ShouldBindJSON(&input); err != nil { gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) diff --git a/pkg/apiserver/controllers/v1/heartbeat.go b/pkg/apiserver/controllers/v1/heartbeat.go index b19b450f0d5..e1231eaa9ec 100644 --- a/pkg/apiserver/controllers/v1/heartbeat.go +++ b/pkg/apiserver/controllers/v1/heartbeat.go @@ -3,14 +3,11 @@ package v1 import ( "net/http" - jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" ) func (c *Controller) HeartBeat(gctx *gin.Context) { - claims := jwt.ExtractClaims(gctx) - // TBD: use defined rather than hardcoded key to 
find back owner - machineID := claims["id"].(string) + machineID, _ := getMachineIDFromContext(gctx) if err := c.DBClient.UpdateMachineLastHeartBeat(machineID); err != nil { c.HandleDBErrors(gctx, err) diff --git a/pkg/apiserver/controllers/v1/metrics.go b/pkg/apiserver/controllers/v1/metrics.go index 13ccf9ac94f..ddb38512a11 100644 --- a/pkg/apiserver/controllers/v1/metrics.go +++ b/pkg/apiserver/controllers/v1/metrics.go @@ -3,7 +3,6 @@ package v1 import ( "time" - jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus" ) @@ -66,32 +65,29 @@ var LapiResponseTime = prometheus.NewHistogramVec( []string{"endpoint", "method"}) func PrometheusBouncersHasEmptyDecision(c *gin.Context) { - name, ok := c.Get("BOUNCER_NAME") - if ok { + bouncer, _ := getBouncerFromContext(c) + if bouncer != nil { LapiNilDecisions.With(prometheus.Labels{ - "bouncer": name.(string)}).Inc() + "bouncer": bouncer.Name}).Inc() } } func PrometheusBouncersHasNonEmptyDecision(c *gin.Context) { - name, ok := c.Get("BOUNCER_NAME") - if ok { + bouncer, _ := getBouncerFromContext(c) + if bouncer != nil { LapiNonNilDecisions.With(prometheus.Labels{ - "bouncer": name.(string)}).Inc() + "bouncer": bouncer.Name}).Inc() } } func PrometheusMachinesMiddleware() gin.HandlerFunc { return func(c *gin.Context) { - claims := jwt.ExtractClaims(c) - if claims != nil { - if rawID, ok := claims["id"]; ok { - machineID := rawID.(string) - LapiMachineHits.With(prometheus.Labels{ - "machine": machineID, - "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() - } + machineID, _ := getMachineIDFromContext(c) + if machineID != "" { + LapiMachineHits.With(prometheus.Labels{ + "machine": machineID, + "route": c.Request.URL.Path, + "method": c.Request.Method}).Inc() } c.Next() @@ -100,10 +96,10 @@ func PrometheusMachinesMiddleware() gin.HandlerFunc { func PrometheusBouncersMiddleware() gin.HandlerFunc { return func(c *gin.Context) { - name, ok := 
c.Get("BOUNCER_NAME") - if ok { + bouncer, _ := getBouncerFromContext(c) + if bouncer != nil { LapiBouncerHits.With(prometheus.Labels{ - "bouncer": name.(string), + "bouncer": bouncer.Name, "route": c.Request.URL.Path, "method": c.Request.Method}).Inc() } diff --git a/pkg/apiserver/controllers/v1/utils.go b/pkg/apiserver/controllers/v1/utils.go index 6afd005132a..6f14dd9204e 100644 --- a/pkg/apiserver/controllers/v1/utils.go +++ b/pkg/apiserver/controllers/v1/utils.go @@ -1,30 +1,50 @@ package v1 import ( - "fmt" + "errors" "net/http" + jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database/ent" ) -const bouncerContextKey = "bouncer_info" - func getBouncerFromContext(ctx *gin.Context) (*ent.Bouncer, error) { - bouncerInterface, exist := ctx.Get(bouncerContextKey) + bouncerInterface, exist := ctx.Get(middlewares.BouncerContextKey) if !exist { - return nil, fmt.Errorf("bouncer not found") + return nil, errors.New("bouncer not found") } bouncerInfo, ok := bouncerInterface.(*ent.Bouncer) if !ok { - return nil, fmt.Errorf("bouncer not found") + return nil, errors.New("bouncer not found") } return bouncerInfo, nil } +func getMachineIDFromContext(ctx *gin.Context) (string, error) { + claims := jwt.ExtractClaims(ctx) + if claims == nil { + return "", errors.New("failed to extract claims") + } + + rawID, ok := claims[middlewares.MachineIDKey] + if !ok { + return "", errors.New("MachineID not found in claims") + } + + id, ok := rawID.(string) + if !ok { + // should never happen + return "", errors.New("failed to cast machineID to string") + } + + return id, nil +} + func (c *Controller) AbortRemoteIf(option bool) gin.HandlerFunc { return func(gctx *gin.Context) { incomingIP := gctx.ClientIP() diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 41ee15b4417..4e273371bfe 100644 --- 
a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -18,9 +18,9 @@ import ( const ( APIKeyHeader = "X-Api-Key" - bouncerContextKey = "bouncer_info" - // max allowed by bcrypt 72 = 54 bytes in base64 + BouncerContextKey = "bouncer_info" dummyAPIKeySize = 54 + // max allowed by bcrypt 72 = 54 bytes in base64 ) type APIKey struct { @@ -159,11 +159,6 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { "name": bouncer.Name, }) - // maybe we want to store the whole bouncer object in the context instead, this would avoid another db query - // in StreamDecision - c.Set("BOUNCER_NAME", bouncer.Name) - c.Set("BOUNCER_HASHED_KEY", bouncer.APIKey) - if bouncer.IPAddress == "" { if err := a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) @@ -203,7 +198,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } } - c.Set(bouncerContextKey, bouncer) + c.Set(BouncerContextKey, bouncer) c.Next() } } diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index ed4ad107b96..6fe053713bc 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var identityKey = "id" +const MachineIDKey = "id" type JWT struct { Middleware *jwt.GinJWTMiddleware @@ -33,7 +33,7 @@ type JWT struct { func PayloadFunc(data interface{}) jwt.MapClaims { if value, ok := data.(*models.WatcherAuthRequest); ok { return jwt.MapClaims{ - identityKey: &value.MachineID, + MachineIDKey: &value.MachineID, } } @@ -42,7 +42,7 @@ func PayloadFunc(data interface{}) jwt.MapClaims { func IdentityHandler(c *gin.Context) interface{} { claims := jwt.ExtractClaims(c) - machineID := claims[identityKey].(string) + machineID := claims[MachineIDKey].(string) return &models.WatcherAuthRequest{ MachineID: &machineID, @@ -307,7 +307,7 @@ func NewJWT(dbClient 
*database.Client) (*JWT, error) { Key: secret, Timeout: time.Hour, MaxRefresh: time.Hour, - IdentityKey: identityKey, + IdentityKey: MachineIDKey, PayloadFunc: PayloadFunc, IdentityHandler: IdentityHandler, Authenticator: jwtMiddleware.Authenticator, From a23fe06d6878c2770c524e905bcf7fd611abb3cc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Feb 2024 16:05:01 +0100 Subject: [PATCH 038/581] remove dependencies on enescakir/emoji, gotest.tools (#2837) * wrap emoji package in pkg/emoji * remove dependency on enescakir/emoji * remove dependency on gotest.tools * lint (whitespace) --- cmd/crowdsec-cli/bouncers_table.go | 10 ++-- cmd/crowdsec-cli/console_table.go | 14 ++--- cmd/crowdsec-cli/hubtest.go | 2 +- cmd/crowdsec-cli/hubtest_table.go | 21 ++++---- cmd/crowdsec-cli/machines_table.go | 11 ++-- cmd/crowdsec-cli/notifications_table.go | 14 +++-- cmd/crowdsec-cli/utils_table.go | 5 +- go.mod | 3 +- go.sum | 2 - pkg/acquisition/modules/loki/loki_test.go | 62 +++++++++++++++++++---- pkg/cwhub/item.go | 5 +- pkg/cwhub/itemupgrade.go | 5 +- pkg/dumps/parser_dump.go | 39 +++++++------- pkg/emoji/emoji.go | 14 +++++ 14 files changed, 140 insertions(+), 67 deletions(-) create mode 100644 pkg/emoji/emoji.go diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go index 0ea725f5598..5fe48b49047 100644 --- a/cmd/crowdsec-cli/bouncers_table.go +++ b/cmd/crowdsec-cli/bouncers_table.go @@ -5,9 +5,9 @@ import ( "time" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { @@ -17,11 +17,9 @@ func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) for _, b := range bouncers { - var revoked string - if !b.Revoked { - 
revoked = emoji.CheckMark.String() - } else { - revoked = emoji.Prohibited.String() + revoked := emoji.CheckMark + if b.Revoked { + revoked = emoji.Prohibited } t.AddRow(b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType) diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go index e71ea8113fb..8f7ebb2100c 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/console_table.go @@ -4,9 +4,9 @@ import ( "io" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) { @@ -17,28 +17,28 @@ func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) { t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) for _, option := range csconfig.CONSOLE_CONFIGS { - activated := string(emoji.CrossMark) + activated := emoji.CrossMark switch option { case csconfig.SEND_CUSTOM_SCENARIOS: if *consoleCfg.ShareCustomScenarios { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } case csconfig.SEND_MANUAL_SCENARIOS: if *consoleCfg.ShareManualDecisions { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } case csconfig.SEND_TAINTED_SCENARIOS: if *consoleCfg.ShareTaintedScenarios { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } case csconfig.SEND_CONTEXT: if *consoleCfg.ShareContext { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } case csconfig.CONSOLE_MANAGEMENT: if *consoleCfg.ConsoleManagement { - activated = string(emoji.CheckMarkButton) + activated = emoji.CheckMarkButton } } diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 8f5ab087370..d6ed4560056 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -11,13 
+11,13 @@ import ( "text/template" "github.com/AlecAivazis/survey/v2" - "github.com/enescakir/emoji" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/pkg/dumps" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/hubtest_table.go index 4034da7e519..e6c5ee80abd 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/hubtest_table.go @@ -5,8 +5,8 @@ import ( "io" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) @@ -17,9 +17,9 @@ func hubTestResultTable(out io.Writer, testResult map[string]bool) { t.SetAlignment(table.AlignLeft) for testName, success := range testResult { - status := emoji.CheckMarkButton.String() + status := emoji.CheckMarkButton if !success { - status = emoji.CrossMark.String() + status = emoji.CrossMark } t.AddRow(testName, status) @@ -50,11 +50,12 @@ func hubTestParserCoverageTable(out io.Writer, coverage []hubtest.Coverage) { parserTested := 0 for _, test := range coverage { - status := emoji.RedCircle.String() + status := emoji.RedCircle if test.TestsCount > 0 { - status = emoji.GreenCircle.String() + status = emoji.GreenCircle parserTested++ } + t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) } @@ -70,11 +71,12 @@ func hubTestAppsecRuleCoverageTable(out io.Writer, coverage []hubtest.Coverage) parserTested := 0 for _, test := range coverage { - status := emoji.RedCircle.String() + status := emoji.RedCircle if test.TestsCount > 0 { - status = emoji.GreenCircle.String() + status = emoji.GreenCircle parserTested++ } + t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) } @@ -90,11 +92,12 @@ func 
hubTestScenarioCoverageTable(out io.Writer, coverage []hubtest.Coverage) { parserTested := 0 for _, test := range coverage { - status := emoji.RedCircle.String() + status := emoji.RedCircle if test.TestsCount > 0 { - status = emoji.GreenCircle.String() + status = emoji.GreenCircle parserTested++ } + t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) } diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go index e166fb785a6..120929ea654 100644 --- a/cmd/crowdsec-cli/machines_table.go +++ b/cmd/crowdsec-cli/machines_table.go @@ -5,9 +5,9 @@ import ( "time" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func getAgentsTable(out io.Writer, machines []*ent.Machine) { @@ -17,17 +17,16 @@ func getAgentsTable(out io.Writer, machines []*ent.Machine) { t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) for _, m := range machines { - var validated string + validated := emoji.Prohibited if m.IsValidated { - validated = emoji.CheckMark.String() - } else { - validated = emoji.Prohibited.String() + validated = emoji.CheckMark } hb, active := getLastHeartbeat(m) if !active { - hb = emoji.Warning.String() + " " + hb + hb = emoji.Warning + " " + hb } + t.AddRow(m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb) } diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/notifications_table.go index e0f61d9cebe..19d11cea741 100644 --- a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/notifications_table.go @@ -6,7 +6,8 @@ import ( "strings" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func notificationListTable(out io.Writer, ncfgs 
map[string]NotificationsCfg) { @@ -14,24 +15,31 @@ func notificationListTable(out io.Writer, ncfgs map[string]NotificationsCfg) { t.SetHeaders("Active", "Name", "Type", "Profile name") t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + keys := make([]string, 0, len(ncfgs)) for k := range ncfgs { keys = append(keys, k) } + sort.Slice(keys, func(i, j int) bool { return len(ncfgs[keys[i]].Profiles) > len(ncfgs[keys[j]].Profiles) }) + for _, k := range keys { b := ncfgs[k] profilesList := []string{} + for _, p := range b.Profiles { profilesList = append(profilesList, p.Name) } - active := emoji.CheckMark.String() + + active := emoji.CheckMark if len(profilesList) == 0 { - active = emoji.Prohibited.String() + active = emoji.Prohibited } + t.AddRow(active, b.Config.Name, b.Config.Type, strings.Join(profilesList, ", ")) } + t.Render() } diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go index b1e4b6950b3..23bcff4e5c6 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/utils_table.go @@ -6,9 +6,9 @@ import ( "strconv" "github.com/aquasecurity/table" - "github.com/enescakir/emoji" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) func listHubItemTable(out io.Writer, title string, items []*cwhub.Item) { @@ -21,6 +21,7 @@ func listHubItemTable(out io.Writer, title string, items []*cwhub.Item) { status := fmt.Sprintf("%v %s", item.State.Emoji(), item.State.Text()) t.AddRow(item.Name, status, item.State.LocalVersion, item.State.LocalPath) } + renderTableTitle(out, title) t.Render() } @@ -42,6 +43,7 @@ func scenarioMetricsTable(out io.Writer, itemName string, metrics map[string]int if metrics["instantiation"] == 0 { return } + t := newTable(out) t.SetHeaders("Current Count", "Overflows", "Instantiated", "Poured", "Expired") @@ -72,6 +74,7 @@ func parserMetricsTable(out 
io.Writer, itemName string, metrics map[string]map[s strconv.Itoa(stats["parsed"]), strconv.Itoa(stats["unparsed"]), ) + showTable = true } } diff --git a/go.mod b/go.mod index e1da18387a5..c2d6ca2c148 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,6 @@ require ( github.com/dghubble/sling v1.3.0 github.com/docker/docker v24.0.7+incompatible github.com/docker/go-connections v0.4.0 - github.com/enescakir/emoji v1.0.0 github.com/fatih/color v1.15.0 github.com/fsnotify/fsnotify v1.6.0 github.com/gin-gonic/gin v1.9.1 @@ -92,7 +91,6 @@ require ( gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - gotest.tools/v3 v3.5.0 k8s.io/apiserver v0.28.4 ) @@ -210,6 +208,7 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gotest.tools/v3 v3.5.0 // indirect k8s.io/api v0.28.4 // indirect k8s.io/apimachinery v0.28.4 // indirect k8s.io/klog/v2 v2.100.1 // indirect diff --git a/go.sum b/go.sum index 2daf22cc99c..7e860300089 100644 --- a/go.sum +++ b/go.sum @@ -124,8 +124,6 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog= -github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= diff --git a/pkg/acquisition/modules/loki/loki_test.go 
b/pkg/acquisition/modules/loki/loki_test.go index 6cac1c0fec3..8511d5445af 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -2,6 +2,7 @@ package loki_test import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -13,19 +14,17 @@ import ( "testing" "time" - "context" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + tomb "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - tomb "gopkg.in/tomb.v2" - "gotest.tools/v3/assert" ) func TestConfiguration(t *testing.T) { - log.Infof("Test 'TestConfigure'") tests := []struct { @@ -127,22 +126,26 @@ query: > subLogger := log.WithFields(log.Fields{ "type": "loki", }) + for _, test := range tests { t.Run(test.testName, func(t *testing.T) { lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(test.config), subLogger) cstest.AssertErrorContains(t, err, test.expectedErr) + if test.password != "" { p := lokiSource.Config.Auth.Password if test.password != p { t.Fatalf("Password mismatch : %s != %s", test.password, p) } } + if test.waitForReady != 0 { if lokiSource.Config.WaitForReady != test.waitForReady { t.Fatalf("Wrong WaitForReady %v != %v", lokiSource.Config.WaitForReady, test.waitForReady) } } + if test.delayFor != 0 { if lokiSource.Config.DelayFor != test.delayFor { t.Fatalf("Wrong DelayFor %v != %v", lokiSource.Config.DelayFor, test.delayFor) @@ -154,6 +157,7 @@ query: > func TestConfigureDSN(t *testing.T) { log.Infof("Test 'TestConfigureDSN'") + tests := []struct { name string dsn string @@ -218,7 +222,9 @@ func TestConfigureDSN(t *testing.T) { "type": "loki", "name": test.name, }) + t.Logf("Test : %s", test.name) + lokiSource := &loki.LokiSource{} err := lokiSource.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") 
cstest.AssertErrorContains(t, err, test.expectedErr) @@ -234,17 +240,20 @@ func TestConfigureDSN(t *testing.T) { t.Fatalf("Password mismatch : %s != %s", test.password, p) } } + if test.scheme != "" { url, _ := url.Parse(lokiSource.Config.URL) if test.scheme != url.Scheme { t.Fatalf("Schema mismatch : %s != %s", test.scheme, url.Scheme) } } + if test.waitForReady != 0 { if lokiSource.Config.WaitForReady != test.waitForReady { t.Fatalf("Wrong WaitForReady %v != %v", lokiSource.Config.WaitForReady, test.waitForReady) } } + if test.delayFor != 0 { if lokiSource.Config.DelayFor != test.delayFor { t.Fatalf("Wrong DelayFor %v != %v", lokiSource.Config.DelayFor, test.delayFor) @@ -272,27 +281,36 @@ func feedLoki(logger *log.Entry, n int, title string) error { Line: fmt.Sprintf("Log line #%d %v", i, title), } } + buff, err := json.Marshal(streams) if err != nil { return err } + req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) if err != nil { return err } + req.Header.Set("Content-Type", "application/json") req.Header.Set("X-Scope-OrgID", "1234") + resp, err := http.DefaultClient.Do(req) if err != nil { return err } + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { b, _ := io.ReadAll(resp.Body) logger.Error(string(b)) + return fmt.Errorf("Bad post status %d", resp.StatusCode) } + logger.Info(n, " Events sent") + return nil } @@ -300,9 +318,11 @@ func TestOneShotAcquisition(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) log.Info("Test 'TestStreamingAcquisition'") + title := time.Now().String() // Loki will be messy, with a lot of stuff, lets use a unique key tests := []struct { config string @@ -327,6 +347,7 @@ since: 1h }) lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(ts.config), subLogger) + if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -338,19 +359,23 @@ since: 1h 
out := make(chan types.Event) read := 0 + go func() { for { <-out + read++ } }() + lokiTomb := tomb.Tomb{} + err = lokiSource.OneShotAcquisition(out, &lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } - assert.Equal(t, 20, read) + assert.Equal(t, 20, read) } } @@ -358,9 +383,11 @@ func TestStreamingAcquisition(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) log.Info("Test 'TestStreamingAcquisition'") + title := time.Now().String() tests := []struct { name string @@ -396,6 +423,7 @@ query: > expectedLines: 20, }, } + for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { logger := log.New() @@ -407,10 +435,12 @@ query: > out := make(chan types.Event) lokiTomb := tomb.Tomb{} lokiSource := loki.LokiSource{} + err := lokiSource.Configure([]byte(ts.config), subLogger) if err != nil { t.Fatalf("Unexpected error : %s", err) } + err = lokiSource.StreamingAcquisition(out, &lokiTomb) cstest.AssertErrorContains(t, err, ts.streamErr) @@ -418,22 +448,26 @@ query: > return } - time.Sleep(time.Second * 2) //We need to give time to start reading from the WS + time.Sleep(time.Second * 2) // We need to give time to start reading from the WS + readTomb := tomb.Tomb{} readCtx, cancel := context.WithTimeout(context.Background(), time.Second*10) count := 0 readTomb.Go(func() error { defer cancel() + for { select { case <-readCtx.Done(): return readCtx.Err() case evt := <-out: count++ + if !strings.HasSuffix(evt.Line.Raw, title) { return fmt.Errorf("Incorrect suffix : %s", evt.Line.Raw) } + if count == ts.expectedLines { return nil } @@ -447,20 +481,23 @@ query: > } err = readTomb.Wait() + cancel() + if err != nil { t.Fatalf("Unexpected error : %s", err) } - assert.Equal(t, count, ts.expectedLines) + + assert.Equal(t, ts.expectedLines, count) }) } - } func TestStopStreaming(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + config := ` 
mode: tail source: loki @@ -476,24 +513,30 @@ query: > }) title := time.Now().String() lokiSource := loki.LokiSource{} + err := lokiSource.Configure([]byte(config), subLogger) if err != nil { t.Fatalf("Unexpected error : %s", err) } + out := make(chan types.Event) lokiTomb := &tomb.Tomb{} + err = lokiSource.StreamingAcquisition(out, lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } + time.Sleep(time.Second * 2) + err = feedLoki(subLogger, 1, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } lokiTomb.Kill(nil) + err = lokiTomb.Wait() if err != nil { t.Fatalf("Unexpected error : %s", err) @@ -519,5 +562,6 @@ func (l *LogValue) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } + return []byte(fmt.Sprintf(`["%d",%s]`, l.Time.UnixNano(), string(line))), nil } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 6c7da06c313..6cdb5cadcb9 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -7,7 +7,8 @@ import ( "slices" "github.com/Masterminds/semver/v3" - "github.com/enescakir/emoji" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) const ( @@ -84,7 +85,7 @@ func (s *ItemState) Text() string { } // Emoji returns the status of the item as an emoji (eg. emoji.Warning). -func (s *ItemState) Emoji() emoji.Emoji { +func (s *ItemState) Emoji() string { switch { case s.IsLocal(): return emoji.House diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index ac3b94f9836..6a8dc2f44b6 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -13,7 +13,7 @@ import ( "os" "path/filepath" - "github.com/enescakir/emoji" + "github.com/crowdsecurity/crowdsec/pkg/emoji" ) // Upgrade downloads and applies the last version of the item from the hub. 
@@ -60,6 +60,7 @@ func (i *Item) Upgrade(force bool) (bool, error) { // TODO: use a better way to communicate this fmt.Printf("updated %s\n", i.Name) i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) + updated = true } @@ -151,7 +152,7 @@ func (i *Item) FetchLatest() ([]byte, string, error) { i.hub.logger.Errorf("Downloaded version doesn't match index, please 'hub update'") i.hub.logger.Debugf("got %s, expected %s", meow, i.Versions[i.Version].Digest) - return nil, "", fmt.Errorf("invalid download hash") + return nil, "", errors.New("invalid download hash") } return body, url, nil diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index 566b87a0803..9b4cdb1c22b 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -1,6 +1,7 @@ package dumps import ( + "errors" "fmt" "io" "os" @@ -8,13 +9,15 @@ import ( "strings" "time" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/maptools" - "github.com/enescakir/emoji" "github.com/fatih/color" diff "github.com/r3labs/diff/v2" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" + "github.com/crowdsecurity/crowdsec/pkg/types" ) type ParserResult struct { @@ -56,7 +59,7 @@ func LoadParserDump(filepath string) (*ParserResults, error) { var lastStage string - //Loop over stages to find last successful one with at least one parser + // Loop over stages to find last successful one with at least one parser for i := len(stages) - 2; i >= 0; i-- { if len(pdump[stages[i]]) != 0 { lastStage = stages[i] @@ -73,7 +76,7 @@ func LoadParserDump(filepath string) (*ParserResults, error) { sort.Strings(parsers) if len(parsers) == 0 { - return nil, fmt.Errorf("no parser found. Please install the appropriate parser and retry") + return nil, errors.New("no parser found. 
Please install the appropriate parser and retry") } lastParser := parsers[len(parsers)-1] @@ -90,14 +93,15 @@ func LoadParserDump(filepath string) (*ParserResults, error) { } func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpOpts) { - //note : we can use line -> time as the unique identifier (of acquisition) + // note : we can use line -> time as the unique identifier (of acquisition) state := make(map[time.Time]map[string]map[string]ParserResult) assoc := make(map[time.Time]string, 0) parser_order := make(map[string][]string) for stage, parsers := range parserResults { - //let's process parsers in the order according to idx + // let's process parsers in the order according to idx parser_order[stage] = make([]string, len(parsers)) + for pname, parser := range parsers { if len(parser) > 0 { parser_order[stage][parser[0].Idx-1] = pname @@ -128,14 +132,14 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO continue } - //it might be bucket overflow being reprocessed, skip this + // it might be bucket overflow being reprocessed, skip this if _, ok := state[evt.Line.Time]; !ok { state[evt.Line.Time] = make(map[string]map[string]ParserResult) assoc[evt.Line.Time] = evt.Line.Raw } - //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase - //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if _, ok := state[evt.Line.Time]["buckets"]; !ok { state[evt.Line.Time]["buckets"] = make(map[string]ParserResult) } @@ -148,7 +152,7 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO red := color.New(color.FgRed).SprintFunc() green := color.New(color.FgGreen).SprintFunc() whitelistReason := "" - //get each line 
+ // get each line for tstamp, rawstr := range assoc { if opts.SkipOk { if _, ok := state[tstamp]["buckets"]["OK"]; ok { @@ -161,8 +165,8 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO skeys := make([]string, 0, len(state[tstamp])) for k := range state[tstamp] { - //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase - //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if k == "buckets" { continue } @@ -216,6 +220,7 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO whitelistReason = parsers[parser].Evt.WhitelistReason } } + updated++ case "delete": deleted++ @@ -277,7 +282,7 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO sep = "├" } - //did the event enter the bucket pour phase ? + // did the event enter the bucket pour phase ? 
if _, ok := state[tstamp]["buckets"]["OK"]; ok { fmt.Printf("\t%s-------- parser success %s\n", sep, emoji.GreenCircle) } else if whitelistReason != "" { @@ -286,7 +291,7 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO fmt.Printf("\t%s-------- parser failure %s\n", sep, emoji.RedCircle) } - //now print bucket info + // now print bucket info if len(state[tstamp]["buckets"]) > 0 { fmt.Printf("\t├ Scenarios\n") } @@ -294,8 +299,8 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO bnames := make([]string, 0, len(state[tstamp]["buckets"])) for k := range state[tstamp]["buckets"] { - //there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase - //we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered + // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if k == "OK" { continue } diff --git a/pkg/emoji/emoji.go b/pkg/emoji/emoji.go new file mode 100644 index 00000000000..51295a85411 --- /dev/null +++ b/pkg/emoji/emoji.go @@ -0,0 +1,14 @@ +package emoji + +const ( + CheckMarkButton = "\u2705" // ✅ + CheckMark = "\u2714\ufe0f" // ✔️ + CrossMark = "\u274c" // ❌ + GreenCircle = "\U0001f7e2" // 🟢 + House = "\U0001f3e0" // 🏠 + Package = "\U0001f4e6" // 📦 + Prohibited = "\U0001f6ab" // 🚫 + QuestionMark = "\u2753" // ❓ + RedCircle = "\U0001f534" // 🔴 + Warning = "\u26a0\ufe0f" // ⚠️ +) From 8e9e091656f2a72a37e21c173ed0c2015e97e726 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 26 Feb 2024 13:44:40 +0100 Subject: [PATCH 039/581] systemd: check configuration before attempting reload (#2861) --- config/crowdsec.service | 1 + debian/crowdsec.service | 1 + rpm/SOURCES/crowdsec.unit.patch | 2 ++ 3 files changed, 4 insertions(+) diff --git 
a/config/crowdsec.service b/config/crowdsec.service index 147cae4946e..65a8d30bc5f 100644 --- a/config/crowdsec.service +++ b/config/crowdsec.service @@ -8,6 +8,7 @@ Environment=LC_ALL=C LANG=C ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml #ExecStartPost=/bin/sleep 0.1 +ExecReload=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecReload=/bin/kill -HUP $MAINPID Restart=always RestartSec=60 diff --git a/debian/crowdsec.service b/debian/crowdsec.service index b65558f70d3..c1a5e403745 100644 --- a/debian/crowdsec.service +++ b/debian/crowdsec.service @@ -8,6 +8,7 @@ Environment=LC_ALL=C LANG=C ExecStartPre=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml #ExecStartPost=/bin/sleep 0.1 +ExecReload=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecReload=/bin/kill -HUP $MAINPID Restart=always RestartSec=60 diff --git a/rpm/SOURCES/crowdsec.unit.patch b/rpm/SOURCES/crowdsec.unit.patch index af9fe5c31e3..5173beb2710 100644 --- a/rpm/SOURCES/crowdsec.unit.patch +++ b/rpm/SOURCES/crowdsec.unit.patch @@ -9,5 +9,7 @@ +ExecStartPre=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error +ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml #ExecStartPost=/bin/sleep 0.1 +-ExecReload=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ++ExecReload=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error ExecReload=/bin/kill -HUP $MAINPID Restart=always From 41b43733b03d9fa3609c2ff333461bfa49ae587f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 1 Mar 2024 10:52:35 +0100 Subject: [PATCH 040/581] fix: log stack trace while computing metrics (#2865) --- cmd/crowdsec/metrics.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index 1199af0fe16..563bb56bfc9 100644 --- a/cmd/crowdsec/metrics.go 
+++ b/cmd/crowdsec/metrics.go @@ -102,6 +102,8 @@ var globalPourHistogram = prometheus.NewHistogramVec( func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // catch panics here because they are not handled by servePrometheus + defer trace.CatchPanic("crowdsec/computeDynamicMetrics") //update cache metrics (stash) cache.UpdateCacheMetrics() //update cache metrics (regexp) From e7ecea764e99f873e1644b409d8eebec8e1c2630 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 4 Mar 2024 14:22:53 +0100 Subject: [PATCH 041/581] pkg/csconfig: use yaml.v3; deprecate yaml.v2 for new code (#2867) * pkg/csconfig: use yaml.v3; deprecate yaml.v2 for new code * yaml.v3: handle empty files * Lint whitespace, errors --- .golangci.yml | 50 ++++++++++++++++++++++++++++++++ pkg/csconfig/api.go | 33 ++++++++++++++------- pkg/csconfig/api_test.go | 12 ++++++-- pkg/csconfig/config.go | 17 +++++++---- pkg/csconfig/config_test.go | 2 +- pkg/csconfig/console.go | 12 ++++++-- pkg/csconfig/crowdsec_service.go | 11 +++---- pkg/csconfig/database.go | 9 +++++- pkg/csconfig/profiles.go | 23 ++++++++++----- pkg/csconfig/simulation.go | 23 +++++++++++++-- pkg/csconfig/simulation_test.go | 4 +-- 11 files changed, 153 insertions(+), 43 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 29332447b61..396da2141f1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -72,6 +72,56 @@ linters-settings: deny: - pkg: "github.com/pkg/errors" desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" + yaml: + files: + - "!**/cmd/crowdsec-cli/alerts.go" + - "!**/cmd/crowdsec-cli/capi.go" + - "!**/cmd/crowdsec-cli/config_show.go" + - "!**/cmd/crowdsec-cli/hubtest.go" + - "!**/cmd/crowdsec-cli/lapi.go" + - "!**/cmd/crowdsec-cli/simulation.go" + - "!**/cmd/crowdsec/crowdsec.go" + - "!**/cmd/notification-dummy/main.go" + - "!**/cmd/notification-email/main.go" + 
- "!**/cmd/notification-http/main.go" + - "!**/cmd/notification-slack/main.go" + - "!**/cmd/notification-splunk/main.go" + - "!**/pkg/acquisition/acquisition.go" + - "!**/pkg/acquisition/acquisition_test.go" + - "!**/pkg/acquisition/modules/appsec/appsec.go" + - "!**/pkg/acquisition/modules/cloudwatch/cloudwatch.go" + - "!**/pkg/acquisition/modules/docker/docker.go" + - "!**/pkg/acquisition/modules/file/file.go" + - "!**/pkg/acquisition/modules/journalctl/journalctl.go" + - "!**/pkg/acquisition/modules/kafka/kafka.go" + - "!**/pkg/acquisition/modules/kinesis/kinesis.go" + - "!**/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go" + - "!**/pkg/acquisition/modules/loki/loki.go" + - "!**/pkg/acquisition/modules/loki/timestamp_test.go" + - "!**/pkg/acquisition/modules/s3/s3.go" + - "!**/pkg/acquisition/modules/syslog/syslog.go" + - "!**/pkg/acquisition/modules/wineventlog/wineventlog_windows.go" + - "!**/pkg/appsec/appsec.go" + - "!**/pkg/appsec/loader.go" + - "!**/pkg/csplugin/broker.go" + - "!**/pkg/csplugin/broker_test.go" + - "!**/pkg/dumps/bucker_dump.go" + - "!**/pkg/dumps/bucket_dump.go" + - "!**/pkg/dumps/parser_dump.go" + - "!**/pkg/hubtest/coverage.go" + - "!**/pkg/hubtest/hubtest_item.go" + - "!**/pkg/hubtest/parser_assert.go" + - "!**/pkg/hubtest/scenario_assert.go" + - "!**/pkg/leakybucket/buckets_test.go" + - "!**/pkg/leakybucket/manager_load.go" + - "!**/pkg/metabase/metabase.go" + - "!**/pkg/parser/node.go" + - "!**/pkg/parser/node_test.go" + - "!**/pkg/parser/parsing_test.go" + - "!**/pkg/parser/stage.go" + deny: + - pkg: "gopkg.in/yaml.v2" + desc: "yaml.v2 is deprecated for new code in favor of yaml.v3" wsl: # Allow blocks to end with comments diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index de8ee4934a7..7fd1f588897 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -1,6 +1,7 @@ package csconfig import ( + "bytes" "crypto/tls" "crypto/x509" "errors" @@ -12,7 +13,7 @@ import ( "time" log "github.com/sirupsen/logrus" - 
"gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/yamlpatch" @@ -63,7 +64,7 @@ func (a *CTICfg) Load() error { } if a.Key != nil && *a.Key == "" { - return fmt.Errorf("empty cti key") + return errors.New("empty cti key") } if a.Enabled == nil { @@ -92,9 +93,14 @@ func (o *OnlineApiClientCfg) Load() error { return err } - err = yaml.UnmarshalStrict(fcontent, o.Credentials) + dec := yaml.NewDecoder(bytes.NewReader(fcontent)) + dec.KnownFields(true) + + err = dec.Decode(o.Credentials) if err != nil { - return fmt.Errorf("failed unmarshaling api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) + if !errors.Is(err, io.EOF) { + return fmt.Errorf("failed unmarshaling api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) + } } switch { @@ -120,9 +126,14 @@ func (l *LocalApiClientCfg) Load() error { return err } - err = yaml.UnmarshalStrict(fcontent, &l.Credentials) + dec := yaml.NewDecoder(bytes.NewReader(fcontent)) + dec.KnownFields(true) + + err = dec.Decode(&l.Credentials) if err != nil { - return fmt.Errorf("failed unmarshaling api client credential configuration file '%s': %w", l.CredentialsFilePath, err) + if !errors.Is(err, io.EOF) { + return fmt.Errorf("failed unmarshaling api client credential configuration file '%s': %w", l.CredentialsFilePath, err) + } } if l.Credentials == nil || l.Credentials.URL == "" { @@ -136,7 +147,7 @@ func (l *LocalApiClientCfg) Load() error { } if l.Credentials.Login != "" && (l.Credentials.CertPath != "" || l.Credentials.KeyPath != "") { - return fmt.Errorf("user/password authentication and TLS authentication are mutually exclusive") + return errors.New("user/password authentication and TLS authentication are mutually exclusive") } if l.InsecureSkipVerify == nil { @@ -263,7 +274,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { } if c.API.Server.ListenURI == "" { - return fmt.Errorf("no listen_uri specified") 
+ return errors.New("no listen_uri specified") } // inherit log level from common, then api->server @@ -350,7 +361,7 @@ func parseCapiWhitelists(fd io.Reader) (*CapiWhitelist, error) { decoder := yaml.NewDecoder(fd) if err := decoder.Decode(&fromCfg); err != nil { if errors.Is(err, io.EOF) { - return nil, fmt.Errorf("empty file") + return nil, errors.New("empty file") } return nil, err @@ -389,7 +400,7 @@ func (s *LocalApiServerCfg) LoadCapiWhitelists() error { fd, err := os.Open(s.CapiWhitelistsPath) if err != nil { - return fmt.Errorf("while opening capi whitelist file: %s", err) + return fmt.Errorf("while opening capi whitelist file: %w", err) } defer fd.Close() @@ -404,7 +415,7 @@ func (s *LocalApiServerCfg) LoadCapiWhitelists() error { func (c *Config) LoadAPIClient() error { if c.API == nil || c.API.Client == nil || c.API.Client.CredentialsFilePath == "" || c.DisableAgent { - return fmt.Errorf("no API client section in configuration") + return errors.New("no API client section in configuration") } if err := c.API.Client.Load(); err != nil { diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index e22c78204e7..b6febd4d450 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -9,7 +9,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -68,6 +68,7 @@ func TestLoadLocalApiClientCfg(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.Load() cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } @@ -125,6 +126,7 @@ func TestLoadOnlineApiClientCfg(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.Load() cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } @@ -147,7 +149,11 @@ func TestLoadAPIServer(t *testing.T) { require.NoError(t, 
err) configData := os.ExpandEnv(string(fcontent)) - err = yaml.UnmarshalStrict([]byte(configData), &config) + + dec := yaml.NewDecoder(strings.NewReader(configData)) + dec.KnownFields(true) + + err = dec.Decode(&config) require.NoError(t, err) tests := []struct { @@ -242,6 +248,7 @@ func TestLoadAPIServer(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadAPIServer(false) cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } @@ -305,6 +312,7 @@ func TestParseCapiWhitelists(t *testing.T) { t.Run(tc.name, func(t *testing.T) { wl, err := parseCapiWhitelists(strings.NewReader(tc.input)) cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index 2dc7ecc7d53..0c960803e04 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -1,14 +1,16 @@ // Package csconfig contains the configuration structures for crowdsec and cscli. - package csconfig import ( + "errors" "fmt" + "io" "os" "path/filepath" + "strings" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -57,10 +59,15 @@ func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool DisableAPI: disableAPI, } - err = yaml.UnmarshalStrict([]byte(configData), &cfg) + dec := yaml.NewDecoder(strings.NewReader(configData)) + dec.KnownFields(true) + + err = dec.Decode(&cfg) if err != nil { - // this is actually the "merged" yaml - return nil, "", fmt.Errorf("%s: %w", configFile, err) + if !errors.Is(err, io.EOF) { + // this is actually the "merged" yaml + return nil, "", fmt.Errorf("%s: %w", configFile, err) + } } if cfg.Prometheus == nil { diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go index 4843c2f70f9..56ecc202373 100644 --- a/pkg/csconfig/config_test.go +++ b/pkg/csconfig/config_test.go @@ -5,7 +5,7 @@ import 
( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/cstest" ) diff --git a/pkg/csconfig/console.go b/pkg/csconfig/console.go index 1e8974154ec..01e74a94db4 100644 --- a/pkg/csconfig/console.go +++ b/pkg/csconfig/console.go @@ -5,7 +5,7 @@ import ( "os" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" ) @@ -41,6 +41,7 @@ func (c *ConsoleConfig) IsPAPIEnabled() bool { if c == nil || c.ConsoleManagement == nil { return false } + return *c.ConsoleManagement } @@ -48,31 +49,36 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error { c.ConsoleConfig = &ConsoleConfig{} if _, err := os.Stat(c.ConsoleConfigPath); err != nil && os.IsNotExist(err) { log.Debugf("no console configuration to load") + c.ConsoleConfig.ShareCustomScenarios = ptr.Of(true) c.ConsoleConfig.ShareTaintedScenarios = ptr.Of(true) c.ConsoleConfig.ShareManualDecisions = ptr.Of(false) c.ConsoleConfig.ConsoleManagement = ptr.Of(false) c.ConsoleConfig.ShareContext = ptr.Of(false) + return nil } yamlFile, err := os.ReadFile(c.ConsoleConfigPath) if err != nil { - return fmt.Errorf("reading console config file '%s': %s", c.ConsoleConfigPath, err) + return fmt.Errorf("reading console config file '%s': %w", c.ConsoleConfigPath, err) } + err = yaml.Unmarshal(yamlFile, c.ConsoleConfig) if err != nil { - return fmt.Errorf("unmarshaling console config file '%s': %s", c.ConsoleConfigPath, err) + return fmt.Errorf("unmarshaling console config file '%s': %w", c.ConsoleConfigPath, err) } if c.ConsoleConfig.ShareCustomScenarios == nil { log.Debugf("no share_custom scenarios found, setting to true") c.ConsoleConfig.ShareCustomScenarios = ptr.Of(true) } + if c.ConsoleConfig.ShareTaintedScenarios == nil { log.Debugf("no share_tainted scenarios found, setting to true") c.ConsoleConfig.ShareTaintedScenarios = ptr.Of(true) } + if 
c.ConsoleConfig.ShareManualDecisions == nil { log.Debugf("no share_manual scenarios found, setting to false") c.ConsoleConfig.ShareManualDecisions = ptr.Of(false) diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 36d38cf7481..7820595b46f 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -6,7 +6,7 @@ import ( "path/filepath" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" ) @@ -133,19 +133,16 @@ func (c *Config) LoadCrowdsec() error { } if err = c.LoadAPIClient(); err != nil { - return fmt.Errorf("loading api client: %s", err) + return fmt.Errorf("loading api client: %w", err) } return nil } func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { - var out []byte - var err error - // XXX: MakeDirs - - if out, err = yaml.Marshal(c.ContextToSend); err != nil { + out, err := yaml.Marshal(c.ContextToSend) + if err != nil { return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) } diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 5149b4ae39e..2df2207859d 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -1,6 +1,7 @@ package csconfig import ( + "errors" "fmt" "time" @@ -45,6 +46,7 @@ type AuthGCCfg struct { type FlushDBCfg struct { MaxItems *int `yaml:"max_items,omitempty"` + // We could unmarshal as time.Duration, but alert filters right now are a map of strings MaxAge *string `yaml:"max_age,omitempty"` BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` @@ -52,7 +54,7 @@ type FlushDBCfg struct { func (c *Config) LoadDBConfig(inCli bool) error { if c.DbConfig == nil { - return fmt.Errorf("no database configuration provided") + return errors.New("no database configuration provided") } if c.Cscli != nil { @@ -86,6 +88,7 @@ func (c *Config) LoadDBConfig(inCli bool) error { func (d 
*DatabaseCfg) ConnectionString() string { connString := "" + switch d.Type { case "sqlite": var sqliteConnectionStringParameters string @@ -94,6 +97,7 @@ func (d *DatabaseCfg) ConnectionString() string { } else { sqliteConnectionStringParameters = "_busy_timeout=100000&_fk=1" } + connString = fmt.Sprintf("file:%s?%s", d.DbPath, sqliteConnectionStringParameters) case "mysql": if d.isSocketConfig() { @@ -108,6 +112,7 @@ func (d *DatabaseCfg) ConnectionString() string { connString = fmt.Sprintf("host=%s port=%d user=%s dbname=%s password=%s sslmode=%s", d.Host, d.Port, d.User, d.DbName, d.Password, d.Sslmode) } } + return connString } @@ -121,8 +126,10 @@ func (d *DatabaseCfg) ConnectionDialect() (string, string, error) { if d.Type != "pgx" { log.Debugf("database type '%s' is deprecated, switching to 'pgx' instead", d.Type) } + return "pgx", dialect.Postgres, nil } + return "", "", fmt.Errorf("unknown database type '%s'", d.Type) } diff --git a/pkg/csconfig/profiles.go b/pkg/csconfig/profiles.go index ad3779ed12f..6fbb8ed8b21 100644 --- a/pkg/csconfig/profiles.go +++ b/pkg/csconfig/profiles.go @@ -6,7 +6,7 @@ import ( "fmt" "io" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/yamlpatch" @@ -23,43 +23,50 @@ import ( type ProfileCfg struct { Name string `yaml:"name,omitempty"` Debug *bool `yaml:"debug,omitempty"` - Filters []string `yaml:"filters,omitempty"` //A list of OR'ed expressions. the models.Alert object + Filters []string `yaml:"filters,omitempty"` // A list of OR'ed expressions. 
the models.Alert object Decisions []models.Decision `yaml:"decisions,omitempty"` DurationExpr string `yaml:"duration_expr,omitempty"` - OnSuccess string `yaml:"on_success,omitempty"` //continue or break - OnFailure string `yaml:"on_failure,omitempty"` //continue or break - OnError string `yaml:"on_error,omitempty"` //continue, break, error, report, apply, ignore + OnSuccess string `yaml:"on_success,omitempty"` // continue or break + OnFailure string `yaml:"on_failure,omitempty"` // continue or break + OnError string `yaml:"on_error,omitempty"` // continue, break, error, report, apply, ignore Notifications []string `yaml:"notifications,omitempty"` } func (c *LocalApiServerCfg) LoadProfiles() error { if c.ProfilesPath == "" { - return fmt.Errorf("empty profiles path") + return errors.New("empty profiles path") } patcher := yamlpatch.NewPatcher(c.ProfilesPath, ".local") + fcontent, err := patcher.PrependedPatchContent() if err != nil { return err } + reader := bytes.NewReader(fcontent) dec := yaml.NewDecoder(reader) - dec.SetStrict(true) + dec.KnownFields(true) + for { t := ProfileCfg{} + err = dec.Decode(&t) if err != nil { if errors.Is(err, io.EOF) { break } + return fmt.Errorf("while decoding %s: %w", c.ProfilesPath, err) } + c.Profiles = append(c.Profiles, &t) } if len(c.Profiles) == 0 { - return fmt.Errorf("zero profiles loaded for LAPI") + return errors.New("zero profiles loaded for LAPI") } + return nil } diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index 0d09aa478ff..bf121ef56f9 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -1,10 +1,13 @@ package csconfig import ( + "bytes" + "errors" "fmt" + "io" "path/filepath" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/yamlpatch" ) @@ -20,37 +23,51 @@ func (s *SimulationConfig) IsSimulated(scenario string) bool { if s.Simulation != nil && *s.Simulation { simulated = true } + for _, excluded := range s.Exclusions { if excluded == 
scenario { simulated = !simulated break } } + return simulated } func (c *Config) LoadSimulation() error { simCfg := SimulationConfig{} + if c.ConfigPaths.SimulationFilePath == "" { c.ConfigPaths.SimulationFilePath = filepath.Clean(c.ConfigPaths.ConfigDir + "/simulation.yaml") } patcher := yamlpatch.NewPatcher(c.ConfigPaths.SimulationFilePath, ".local") + rcfg, err := patcher.MergedPatchContent() if err != nil { return err } - if err := yaml.UnmarshalStrict(rcfg, &simCfg); err != nil { - return fmt.Errorf("while unmarshaling simulation file '%s' : %s", c.ConfigPaths.SimulationFilePath, err) + + dec := yaml.NewDecoder(bytes.NewReader(rcfg)) + dec.KnownFields(true) + + if err := dec.Decode(&simCfg); err != nil { + if !errors.Is(err, io.EOF) { + return fmt.Errorf("while unmarshaling simulation file '%s': %w", c.ConfigPaths.SimulationFilePath, err) + } } + if simCfg.Simulation == nil { simCfg.Simulation = new(bool) } + if c.Crowdsec != nil { c.Crowdsec.SimulationConfig = &simCfg } + if c.Cscli != nil { c.Cscli.SimulationConfig = &simCfg } + return nil } diff --git a/pkg/csconfig/simulation_test.go b/pkg/csconfig/simulation_test.go index 01f05e3975a..71b09ee397c 100644 --- a/pkg/csconfig/simulation_test.go +++ b/pkg/csconfig/simulation_test.go @@ -60,7 +60,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml' : yaml: unmarshal errors", + expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", }, { name: "basic bad file content", @@ -71,7 +71,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml' : yaml: unmarshal errors", + expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", }, } From d8877a71fc1faac18539cd29fa736deccaf4ea92 Mon Sep 17 00:00:00 2001 From: mmetc 
<92726601+mmetc@users.noreply.github.com> Date: Tue, 5 Mar 2024 14:56:14 +0100 Subject: [PATCH 042/581] lp metrics: collect datasources and console options (#2870) --- cmd/crowdsec/crowdsec.go | 31 ++++++++++++++++++------------- cmd/crowdsec/main.go | 19 +++++++++++-------- cmd/crowdsec/serve.go | 14 +++++++------- pkg/csconfig/console.go | 29 +++++++++++++++++++++++++++++ test/bats/01_crowdsec.bats | 2 +- test/bats/01_crowdsec_lapi.bats | 2 +- 6 files changed, 67 insertions(+), 30 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index d4cd2d3cf74..0d7d454edf2 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -23,39 +23,42 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, error) { +// initCrowdsec prepares the log processor service +func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, []acquisition.DataSource, error) { var err error if err = alertcontext.LoadConsoleContext(cConfig, hub); err != nil { - return nil, fmt.Errorf("while loading context: %w", err) + return nil, nil, fmt.Errorf("while loading context: %w", err) } // Start loading configs csParsers := parser.NewParsers(hub) if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil { - return nil, fmt.Errorf("while loading parsers: %w", err) + return nil, nil, fmt.Errorf("while loading parsers: %w", err) } if err := LoadBuckets(cConfig, hub); err != nil { - return nil, fmt.Errorf("while loading scenarios: %w", err) + return nil, nil, fmt.Errorf("while loading scenarios: %w", err) } if err := appsec.LoadAppsecRules(hub); err != nil { - return nil, fmt.Errorf("while loading appsec rules: %w", err) + return nil, nil, fmt.Errorf("while loading appsec rules: %w", err) } - if err := LoadAcquisition(cConfig); err != nil { - return nil, fmt.Errorf("while loading acquisition config: %w", err) + datasources, err := LoadAcquisition(cConfig) + 
if err != nil { + return nil, nil, fmt.Errorf("while loading acquisition config: %w", err) } - return csParsers, nil + return csParsers, datasources, nil } -func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.Hub) error { +// runCrowdsec starts the log processor service +func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.Hub, datasources []acquisition.DataSource) error { inputEventChan = make(chan types.Event) inputLineChan = make(chan types.Event) - //start go-routines for parsing, buckets pour and outputs. + // start go-routines for parsing, buckets pour and outputs. parserWg := &sync.WaitGroup{} parsersTomb.Go(func() error { @@ -65,7 +68,8 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H parsersTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runParse") - if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { //this error will never happen as parser.Parse is not able to return errors + if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { + // this error will never happen as parser.Parse is not able to return errors log.Fatalf("starting parse error : %s", err) return err } @@ -161,7 +165,8 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return nil } -func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub.Hub, agentReady chan bool) { +// serveCrowdsec wraps the log processor service +func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub.Hub, datasources []acquisition.DataSource, agentReady chan bool) { crowdsecTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveCrowdsec") @@ -171,7 +176,7 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub log.Debugf("running agent after %s ms", time.Since(crowdsecT0)) agentReady <- true - if err := runCrowdsec(cConfig, parsers, 
hub); err != nil { + if err := runCrowdsec(cConfig, parsers, hub, datasources); err != nil { log.Fatalf("unable to start crowdsec routines: %s", err) } }() diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 2040141bb3e..7f3070b5f29 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -1,6 +1,7 @@ package main import ( + "errors" "flag" "fmt" _ "net/http/pprof" @@ -10,7 +11,6 @@ import ( "strings" "time" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -95,7 +95,7 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) if err != nil { - return fmt.Errorf("scenario loading failed: %v", err) + return fmt.Errorf("scenario loading failed: %w", err) } if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { @@ -107,7 +107,7 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { return nil } -func LoadAcquisition(cConfig *csconfig.Config) error { +func LoadAcquisition(cConfig *csconfig.Config) ([]acquisition.DataSource, error) { var err error if flags.SingleFileType != "" && flags.OneShotDSN != "" { @@ -116,20 +116,20 @@ func LoadAcquisition(cConfig *csconfig.Config) error { dataSources, err = acquisition.LoadAcquisitionFromDSN(flags.OneShotDSN, flags.Labels, flags.Transform) if err != nil { - return errors.Wrapf(err, "failed to configure datasource for %s", flags.OneShotDSN) + return nil, fmt.Errorf("failed to configure datasource for %s: %w", flags.OneShotDSN, err) } } else { dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec) if err != nil { - return err + return nil, err } } if len(dataSources) == 0 { - return fmt.Errorf("no datasource enabled") + return nil, errors.New("no datasource enabled") } - return nil + return dataSources, nil } var ( @@ -272,7 +272,7 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, 
quiet boo } if cConfig.DisableAPI && cConfig.DisableAgent { - return nil, errors.New("You must run at least the API Server or crowdsec") + return nil, errors.New("you must run at least the API Server or crowdsec") } if flags.OneShotDSN != "" && flags.SingleFileType == "" { @@ -360,11 +360,14 @@ func main() { if err != nil { log.Fatalf("could not create CPU profile: %s", err) } + log.Infof("CPU profile will be written to %s", flags.CpuProfile) + if err := pprof.StartCPUProfile(f); err != nil { f.Close() log.Fatalf("could not start CPU profile: %s", err) } + defer f.Close() defer pprof.StopCPUProfile() } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 22f65b927a0..c8ccd4d5d70 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -86,7 +86,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { return nil, fmt.Errorf("while loading hub index: %w", err) } - csParsers, err := initCrowdsec(cConfig, hub) + csParsers, datasources, err := initCrowdsec(cConfig, hub) if err != nil { return nil, fmt.Errorf("unable to init crowdsec: %w", err) } @@ -103,7 +103,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { } agentReady := make(chan bool, 1) - serveCrowdsec(csParsers, cConfig, hub, agentReady) + serveCrowdsec(csParsers, cConfig, hub, datasources, agentReady) } log.Printf("Reload is finished") @@ -230,7 +230,7 @@ func drainChan(c chan types.Event) { for { select { case _, ok := <-c: - if !ok { //closed + if !ok { // closed return } default: @@ -256,8 +256,8 @@ func HandleSignals(cConfig *csconfig.Config) error { exitChan := make(chan error) - //Always try to stop CPU profiling to avoid passing flags around - //It's a noop if profiling is not enabled + // Always try to stop CPU profiling to avoid passing flags around + // It's a noop if profiling is not enabled defer pprof.StopCPUProfile() go func() { @@ -369,14 +369,14 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { return fmt.Errorf("while loading 
hub index: %w", err) } - csParsers, err := initCrowdsec(cConfig, hub) + csParsers, datasources, err := initCrowdsec(cConfig, hub) if err != nil { return fmt.Errorf("crowdsec init: %w", err) } // if it's just linting, we're done if !flags.TestMode { - serveCrowdsec(csParsers, cConfig, hub, agentReady) + serveCrowdsec(csParsers, cConfig, hub, datasources, agentReady) } else { agentReady <- true } diff --git a/pkg/csconfig/console.go b/pkg/csconfig/console.go index 01e74a94db4..4c14f5f7d49 100644 --- a/pkg/csconfig/console.go +++ b/pkg/csconfig/console.go @@ -37,6 +37,35 @@ type ConsoleConfig struct { ShareContext *bool `yaml:"share_context"` } +func (c *ConsoleConfig) EnabledOptions() []string { + ret := []string{} + if c == nil { + return ret + } + + if c.ShareCustomScenarios != nil && *c.ShareCustomScenarios { + ret = append(ret, SEND_CUSTOM_SCENARIOS) + } + + if c.ShareTaintedScenarios != nil && *c.ShareTaintedScenarios { + ret = append(ret, SEND_TAINTED_SCENARIOS) + } + + if c.ShareManualDecisions != nil && *c.ShareManualDecisions { + ret = append(ret, SEND_MANUAL_SCENARIOS) + } + + if c.ConsoleManagement != nil && *c.ConsoleManagement { + ret = append(ret, CONSOLE_MANAGEMENT) + } + + if c.ShareContext != nil && *c.ShareContext { + ret = append(ret, SEND_CONTEXT) + } + + return ret +} + func (c *ConsoleConfig) IsPAPIEnabled() bool { if c == nil || c.ConsoleManagement == nil { return false diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index a585930e34c..7051b4d33a3 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -38,7 +38,7 @@ teardown() { @test "crowdsec (no api and no agent)" { rune -0 wait-for \ - --err "You must run at least the API Server or crowdsec" \ + --err "you must run at least the API Server or crowdsec" \ "${CROWDSEC}" -no-api -no-cs } diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 4819d724fea..233340e500f 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ 
b/test/bats/01_crowdsec_lapi.bats @@ -28,7 +28,7 @@ teardown() { @test "lapi (.api.server.enable=false)" { rune -0 config_set '.api.server.enable=false' rune -1 "${CROWDSEC}" -no-cs - assert_stderr --partial "You must run at least the API Server or crowdsec" + assert_stderr --partial "you must run at least the API Server or crowdsec" } @test "lapi (no .api.server.listen_uri)" { From 5356ccc6cd138faa0f41fabaa5733bb94d9fb017 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 6 Mar 2024 13:42:57 +0100 Subject: [PATCH 043/581] cron: spread server load when upgrading hub and data files (#2873) --- config/crowdsec.cron.daily | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/config/crowdsec.cron.daily b/config/crowdsec.cron.daily index 1c110df38fc..9c488d29884 100644 --- a/config/crowdsec.cron.daily +++ b/config/crowdsec.cron.daily @@ -2,12 +2,13 @@ test -x /usr/bin/cscli || exit 0 +# splay hub upgrade and crowdsec reload +sleep "$(seq 1 300 | shuf -n 1)" + /usr/bin/cscli --error hub update upgraded=$(/usr/bin/cscli --error hub upgrade) if [ -n "$upgraded" ]; then - # splay initial metrics push - sleep $(seq 1 90 | shuf -n 1) systemctl reload crowdsec fi From e611d01c90f3e57d591f3875c2e6c629cba9c68a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 6 Mar 2024 14:27:05 +0100 Subject: [PATCH 044/581] cscli: hide hashed api keys (#2874) * cscli: hide hashed api keys * lint --- docker/test/tests/test_bouncer.py | 3 --- pkg/database/bouncers.go | 32 +++++++++++++++++++----------- pkg/database/ent/bouncer.go | 5 ++--- pkg/database/ent/schema/bouncer.go | 2 +- 4 files changed, 23 insertions(+), 19 deletions(-) diff --git a/docker/test/tests/test_bouncer.py b/docker/test/tests/test_bouncer.py index 1324c3bd38c..98b86de858c 100644 --- a/docker/test/tests/test_bouncer.py +++ b/docker/test/tests/test_bouncer.py @@ -36,8 +36,6 @@ def test_register_bouncer_env(crowdsec, flavor): 
bouncer1, bouncer2 = j assert bouncer1['name'] == 'bouncer1name' assert bouncer2['name'] == 'bouncer2name' - assert bouncer1['api_key'] == hex512('bouncer1key') - assert bouncer2['api_key'] == hex512('bouncer2key') # add a second bouncer at runtime res = cs.cont.exec_run('cscli bouncers add bouncer3name -k bouncer3key') @@ -48,7 +46,6 @@ def test_register_bouncer_env(crowdsec, flavor): assert len(j) == 3 bouncer3 = j[2] assert bouncer3['name'] == 'bouncer3name' - assert bouncer3['api_key'] == hex512('bouncer3key') # remove all bouncers res = cs.cont.exec_run('cscli bouncers delete bouncer1name bouncer2name bouncer3name') diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 496b9b6cc9c..2cc6b9dcb47 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -33,6 +33,7 @@ func (c *Client) ListBouncers() ([]*ent.Bouncer, error) { if err != nil { return nil, errors.Wrapf(QueryFail, "listing bouncers: %s", err) } + return result, nil } @@ -48,8 +49,10 @@ func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authTy if ent.IsConstraintError(err) { return nil, fmt.Errorf("bouncer %s already exists", name) } - return nil, fmt.Errorf("unable to create bouncer: %s", err) + + return nil, fmt.Errorf("unable to create bouncer: %w", err) } + return bouncer, nil } @@ -63,7 +66,7 @@ func (c *Client) DeleteBouncer(name string) error { } if nbDeleted == 0 { - return fmt.Errorf("bouncer doesn't exist") + return errors.New("bouncer doesn't exist") } return nil @@ -74,36 +77,41 @@ func (c *Client) BulkDeleteBouncers(bouncers []*ent.Bouncer) (int, error) { for i, b := range bouncers { ids[i] = b.ID } + nbDeleted, err := c.Ent.Bouncer.Delete().Where(bouncer.IDIn(ids...)).Exec(c.CTX) if err != nil { - return nbDeleted, fmt.Errorf("unable to delete bouncers: %s", err) + return nbDeleted, fmt.Errorf("unable to delete bouncers: %w", err) } + return nbDeleted, nil } -func (c *Client) UpdateBouncerLastPull(lastPull time.Time, ID int) error { - 
_, err := c.Ent.Bouncer.UpdateOneID(ID). +func (c *Client) UpdateBouncerLastPull(lastPull time.Time, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id). SetLastPull(lastPull). Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine last pull in database: %s", err) + return fmt.Errorf("unable to update machine last pull in database: %w", err) } + return nil } -func (c *Client) UpdateBouncerIP(ipAddr string, ID int) error { - _, err := c.Ent.Bouncer.UpdateOneID(ID).SetIPAddress(ipAddr).Save(c.CTX) +func (c *Client) UpdateBouncerIP(ipAddr string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetIPAddress(ipAddr).Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update bouncer ip address in database: %s", err) + return fmt.Errorf("unable to update bouncer ip address in database: %w", err) } + return nil } -func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, ID int) error { - _, err := c.Ent.Bouncer.UpdateOneID(ID).SetVersion(version).SetType(bType).Save(c.CTX) +func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetVersion(version).SetType(bType).Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update bouncer type and version in database: %s", err) + return fmt.Errorf("unable to update bouncer type and version in database: %w", err) } + return nil } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index fe189c3817e..203f49a432d 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -24,7 +24,7 @@ type Bouncer struct { // Name holds the value of the "name" field. Name string `json:"name"` // APIKey holds the value of the "api_key" field. - APIKey string `json:"api_key"` + APIKey string `json:"-"` // Revoked holds the value of the "revoked" field. Revoked bool `json:"revoked"` // IPAddress holds the value of the "ip_address" field. 
@@ -193,8 +193,7 @@ func (b *Bouncer) String() string { builder.WriteString("name=") builder.WriteString(b.Name) builder.WriteString(", ") - builder.WriteString("api_key=") - builder.WriteString(b.APIKey) + builder.WriteString("api_key=") builder.WriteString(", ") builder.WriteString("revoked=") builder.WriteString(fmt.Sprintf("%v", b.Revoked)) diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index c3081291254..986a1bf3ba8 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -21,7 +21,7 @@ func (Bouncer) Fields() []ent.Field { Default(types.UtcNow). UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`), - field.String("api_key").StructTag(`json:"api_key"`), // hash of api_key + field.String("api_key").Sensitive(), // hash of api_key field.Bool("revoked").StructTag(`json:"revoked"`), field.String("ip_address").Default("").Optional().StructTag(`json:"ip_address"`), field.String("type").Optional().StructTag(`json:"type"`), From 98560d0cf56d9c56366065b9a40a915844238468 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 7 Mar 2024 12:29:10 +0100 Subject: [PATCH 045/581] bin/crowdsec: avoid writing errors twice when log_media=stdout (#2876) * bin/crowdsec: avoid writing errors twice when log_media=stdout simpler, correct hook usage * lint --- cmd/crowdsec/api.go | 2 +- cmd/crowdsec/fatalhook.go | 28 +++++++++++++++++++ cmd/crowdsec/hook.go | 43 ------------------------------ cmd/crowdsec/main.go | 21 ++++++++++----- cmd/crowdsec/metrics.go | 6 ++--- cmd/crowdsec/output.go | 8 +++--- cmd/crowdsec/parse.go | 2 +- cmd/crowdsec/pour.go | 19 ++++++++----- cmd/crowdsec/run_in_svc.go | 4 +-- cmd/crowdsec/run_in_svc_windows.go | 4 +-- 10 files changed, 68 insertions(+), 69 deletions(-) create mode 100644 cmd/crowdsec/fatalhook.go delete mode 100644 cmd/crowdsec/hook.go diff --git 
a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index 4ac5c3ce96f..995345a25e4 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -1,11 +1,11 @@ package main import ( + "errors" "fmt" "runtime" "time" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/trace" diff --git a/cmd/crowdsec/fatalhook.go b/cmd/crowdsec/fatalhook.go new file mode 100644 index 00000000000..84a57406a21 --- /dev/null +++ b/cmd/crowdsec/fatalhook.go @@ -0,0 +1,28 @@ +package main + +import ( + "io" + + log "github.com/sirupsen/logrus" +) + +// FatalHook is used to log fatal messages to stderr when the rest goes to a file +type FatalHook struct { + Writer io.Writer + LogLevels []log.Level +} + +func (hook *FatalHook) Fire(entry *log.Entry) error { + line, err := entry.String() + if err != nil { + return err + } + + _, err = hook.Writer.Write([]byte(line)) + + return err +} + +func (hook *FatalHook) Levels() []log.Level { + return hook.LogLevels +} diff --git a/cmd/crowdsec/hook.go b/cmd/crowdsec/hook.go deleted file mode 100644 index 28515d9e474..00000000000 --- a/cmd/crowdsec/hook.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "io" - "os" - - log "github.com/sirupsen/logrus" -) - -type ConditionalHook struct { - Writer io.Writer - LogLevels []log.Level - Enabled bool -} - -func (hook *ConditionalHook) Fire(entry *log.Entry) error { - if hook.Enabled { - line, err := entry.String() - if err != nil { - return err - } - - _, err = hook.Writer.Write([]byte(line)) - - return err - } - - return nil -} - -func (hook *ConditionalHook) Levels() []log.Level { - return hook.LogLevels -} - -// The primal logging hook is set up before parsing config.yaml. -// Once config.yaml is parsed, the primal hook is disabled if the -// configured logger is writing to stderr. Otherwise it's used to -// report fatal errors and panics to stderr in addition to the log file. 
-var primalHook = &ConditionalHook{ - Writer: os.Stderr, - LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, - Enabled: true, -} diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 7f3070b5f29..70f7d48dce4 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -72,7 +72,7 @@ type Flags struct { DisableCAPI bool Transform string OrderEvent bool - CpuProfile string + CPUProfile string } type labelsMap map[string]string @@ -181,7 +181,7 @@ func (f *Flags) Parse() { } flag.StringVar(&dumpFolder, "dump-data", "", "dump parsers/buckets raw outputs") - flag.StringVar(&f.CpuProfile, "cpu-profile", "", "write cpu profile to file") + flag.StringVar(&f.CPUProfile, "cpu-profile", "", "write cpu profile to file") flag.Parse() } @@ -249,7 +249,12 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo return nil, err } - primalHook.Enabled = (cConfig.Common.LogMedia != "stdout") + if cConfig.Common.LogMedia != "stdout" { + log.AddHook(&FatalHook{ + Writer: os.Stderr, + LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, + }) + } if err := csconfig.LoadFeatureFlagsFile(configFile, log.StandardLogger()); err != nil { return nil, err @@ -323,7 +328,9 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo var crowdsecT0 time.Time func main() { - log.AddHook(primalHook) + // The initial log level is INFO, even if the user provided an -error or -warning flag + // because we need feature flags before parsing cli flags + log.SetFormatter(&log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true}) if err := fflag.RegisterAllFeatures(); err != nil { log.Fatalf("failed to register features: %s", err) @@ -355,13 +362,13 @@ func main() { os.Exit(0) } - if flags.CpuProfile != "" { - f, err := os.Create(flags.CpuProfile) + if flags.CPUProfile != "" { + f, err := os.Create(flags.CPUProfile) if err != nil { log.Fatalf("could not create CPU profile: %s", err) } - log.Infof("CPU profile will be 
written to %s", flags.CpuProfile) + log.Infof("CPU profile will be written to %s", flags.CPUProfile) if err := pprof.StartCPUProfile(f); err != nil { f.Close() diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index 563bb56bfc9..aed43db00c8 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -104,12 +104,12 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // catch panics here because they are not handled by servePrometheus defer trace.CatchPanic("crowdsec/computeDynamicMetrics") - //update cache metrics (stash) + // update cache metrics (stash) cache.UpdateCacheMetrics() - //update cache metrics (regexp) + // update cache metrics (regexp) exprhelpers.UpdateRegexpCacheMetrics() - //decision metrics are only relevant for LAPI + // decision metrics are only relevant for LAPI if dbClient == nil { next.ServeHTTP(w, r) return diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go index c4a2c0b6ac1..ac05b502e52 100644 --- a/cmd/crowdsec/output.go +++ b/cmd/crowdsec/output.go @@ -27,7 +27,7 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { } for k, src := range alert.Sources { - refsrc := *alert.Alert //copy + refsrc := *alert.Alert // copy log.Tracef("source[%s]", k) @@ -81,7 +81,7 @@ LOOP: cacheMutex.Unlock() if err := PushAlerts(cachecopy, client); err != nil { log.Errorf("while pushing to api : %s", err) - //just push back the events to the queue + // just push back the events to the queue cacheMutex.Lock() cache = append(cache, cachecopy...) 
cacheMutex.Unlock() @@ -110,8 +110,8 @@ LOOP: return fmt.Errorf("postoverflow failed: %w", err) } log.Printf("%s", *event.Overflow.Alert.Message) - //if the Alert is nil, it's to signal bucket is ready for GC, don't track this - //dump after postoveflow processing to avoid missing whitelist info + // if the Alert is nil, it's to signal bucket is ready for GC, don't track this + // dump after postoveflow processing to avoid missing whitelist info if dumpStates && event.Overflow.Alert != nil { if bucketOverflows == nil { bucketOverflows = make([]types.Event, 0) diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go index c62eeb5869d..53c9ee65d4f 100644 --- a/cmd/crowdsec/parse.go +++ b/cmd/crowdsec/parse.go @@ -11,7 +11,6 @@ import ( ) func runParse(input chan types.Event, output chan types.Event, parserCTX parser.UnixParserCtx, nodes []parser.Node) error { - LOOP: for { select { @@ -56,5 +55,6 @@ LOOP: output <- parsed } } + return nil } diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 3f717e3975d..388c7a6c1b3 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -4,27 +4,30 @@ import ( "fmt" "time" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" ) func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *leaky.Buckets, cConfig *csconfig.Config) error { count := 0 + for { - //bucket is now ready + // bucket is now ready select { case <-bucketsTomb.Dying(): log.Infof("Bucket routine exiting") return nil case parsed := <-input: startTime := time.Now() + count++ if count%5000 == 0 { log.Infof("%d existing buckets", leaky.LeakyRoutineCount) - //when in forensics mode, garbage collect buckets + // when in forensics mode, garbage collect buckets if 
cConfig.Crowdsec.BucketsGCEnabled { if parsed.MarshaledTime != "" { z := &time.Time{} @@ -32,26 +35,30 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err) } else { log.Warning("Starting buckets garbage collection ...") + if err = leaky.GarbageCollectBuckets(*z, buckets); err != nil { - return fmt.Errorf("failed to start bucket GC : %s", err) + return fmt.Errorf("failed to start bucket GC : %w", err) } } } } } - //here we can bucketify with parsed + // here we can bucketify with parsed poured, err := leaky.PourItemToHolders(parsed, holders, buckets) if err != nil { log.Errorf("bucketify failed for: %v", parsed) continue } + elapsed := time.Since(startTime) globalPourHistogram.With(prometheus.Labels{"type": parsed.Line.Module, "source": parsed.Line.Src}).Observe(elapsed.Seconds()) + if poured { globalBucketPourOk.Inc() } else { globalBucketPourKo.Inc() } + if len(parsed.MarshaledTime) != 0 { if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { log.Warningf("failed to unmarshal time from event : %s", err) diff --git a/cmd/crowdsec/run_in_svc.go b/cmd/crowdsec/run_in_svc.go index 5a8bc9a6cd3..58f4cdf005d 100644 --- a/cmd/crowdsec/run_in_svc.go +++ b/cmd/crowdsec/run_in_svc.go @@ -23,8 +23,8 @@ func StartRunSvc() error { defer trace.CatchPanic("crowdsec/StartRunSvc") - //Always try to stop CPU profiling to avoid passing flags around - //It's a noop if profiling is not enabled + // Always try to stop CPU profiling to avoid passing flags around + // It's a noop if profiling is not enabled defer pprof.StopCPUProfile() if cConfig, err = LoadConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false); err != nil { diff --git a/cmd/crowdsec/run_in_svc_windows.go b/cmd/crowdsec/run_in_svc_windows.go index 7845e9c58b5..c0aa18d7fc6 100644 --- a/cmd/crowdsec/run_in_svc_windows.go +++ 
b/cmd/crowdsec/run_in_svc_windows.go @@ -20,8 +20,8 @@ func StartRunSvc() error { defer trace.CatchPanic("crowdsec/StartRunSvc") - //Always try to stop CPU profiling to avoid passing flags around - //It's a noop if profiling is not enabled + // Always try to stop CPU profiling to avoid passing flags around + // It's a noop if profiling is not enabled defer pprof.StopCPUProfile() isRunninginService, err := svc.IsWindowsService() From 5731491b4e0948e5011e47a378a14f9a86e46b40 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 7 Mar 2024 14:04:50 +0100 Subject: [PATCH 046/581] Auto detect if reading logs or storing sqlite db on a network share (#2241) --- pkg/acquisition/modules/file/file.go | 44 +++++++++-- pkg/csconfig/api_test.go | 1 + pkg/csconfig/database.go | 36 ++++++++- pkg/csconfig/database_test.go | 7 +- pkg/types/getfstype.go | 112 +++++++++++++++++++++++++++ pkg/types/getfstype_windows.go | 53 +++++++++++++ pkg/types/utils.go | 10 +++ 7 files changed, 251 insertions(+), 12 deletions(-) create mode 100644 pkg/types/getfstype.go create mode 100644 pkg/types/getfstype_windows.go diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 4ea9466d457..9ab418a8442 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -38,9 +38,9 @@ type FileConfiguration struct { Filenames []string ExcludeRegexps []string `yaml:"exclude_regexps"` Filename string - ForceInotify bool `yaml:"force_inotify"` - MaxBufferSize int `yaml:"max_buffer_size"` - PollWithoutInotify bool `yaml:"poll_without_inotify"` + ForceInotify bool `yaml:"force_inotify"` + MaxBufferSize int `yaml:"max_buffer_size"` + PollWithoutInotify *bool `yaml:"poll_without_inotify"` configuration.DataSourceCommonCfg `yaml:",inline"` } @@ -330,7 +330,22 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er continue } - tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: 
f.config.PollWithoutInotify, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) + inotifyPoll := true + if f.config.PollWithoutInotify != nil { + inotifyPoll = *f.config.PollWithoutInotify + } else { + networkFS, fsType, err := types.IsNetworkFS(file) + if err != nil { + f.logger.Warningf("Could not get fs type for %s : %s", file, err) + } + f.logger.Debugf("fs for %s is network: %t (%s)", file, networkFS, fsType) + if networkFS { + f.logger.Warnf("Disabling inotify poll on %s as it is on a network share. You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + inotifyPoll = false + } + } + + tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) if err != nil { f.logger.Errorf("Could not start tailing file %s : %s", file, err) continue @@ -413,8 +428,27 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { f.logger.Errorf("unable to close %s : %s", event.Name, err) continue } + + inotifyPoll := true + if f.config.PollWithoutInotify != nil { + inotifyPoll = *f.config.PollWithoutInotify + } else { + if f.config.PollWithoutInotify != nil { + inotifyPoll = *f.config.PollWithoutInotify + } else { + networkFS, fsType, err := types.IsNetworkFS(event.Name) + if err != nil { + f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) + } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) + if networkFS { + inotifyPoll = false + } + } + } + //Slightly different parameters for Location, as we want to read the first lines of the newly created file - tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: f.config.PollWithoutInotify, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) + tail, err := 
tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) if err != nil { logger.Errorf("Could not start tailing file %s : %s", event.Name, err) break diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index b6febd4d450..463b7c1b2ec 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -194,6 +194,7 @@ func TestLoadAPIServer(t *testing.T) { DbPath: "./testdata/test.db", Type: "sqlite", MaxOpenConns: ptr.Of(DEFAULT_MAX_OPEN_CONNS), + UseWal: ptr.Of(true), // autodetected DecisionBulkSize: defaultDecisionBulkSize, }, ConsoleConfigPath: DefaultConfigPath("console.yaml"), diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 2df2207859d..a7bc57eefdc 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -3,12 +3,15 @@ package csconfig import ( "errors" "fmt" + "path/filepath" "time" "entgo.io/ent/dialect" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/ptr" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) const ( @@ -69,6 +72,35 @@ func (c *Config) LoadDBConfig(inCli bool) error { c.DbConfig.MaxOpenConns = ptr.Of(DEFAULT_MAX_OPEN_CONNS) } + if !inCli && c.DbConfig.Type == "sqlite" { + if c.DbConfig.UseWal == nil { + dbDir := filepath.Dir(c.DbConfig.DbPath) + isNetwork, fsType, err := types.IsNetworkFS(dbDir) + if err != nil { + log.Warnf("unable to determine if database is on network filesystem: %s", err) + log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. 
Set explicitly to false to disable this warning.") + return nil + } + if isNetwork { + log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) + c.DbConfig.UseWal = ptr.Of(false) + } else { + log.Debugf("database is on local filesystem (%s), setting useWal to true", fsType) + c.DbConfig.UseWal = ptr.Of(true) + } + } else if *c.DbConfig.UseWal { + dbDir := filepath.Dir(c.DbConfig.DbPath) + isNetwork, fsType, err := types.IsNetworkFS(dbDir) + if err != nil { + log.Warnf("unable to determine if database is on network filesystem: %s", err) + return nil + } + if isNetwork { + log.Warnf("database seems to be stored on a network share (%s), but useWal is set to true. Proceed at your own risk.", fsType) + } + } + } + if c.DbConfig.DecisionBulkSize == 0 { log.Tracef("No decision_bulk_size value provided, using default value of %d", defaultDecisionBulkSize) c.DbConfig.DecisionBulkSize = defaultDecisionBulkSize @@ -79,10 +111,6 @@ func (c *Config) LoadDBConfig(inCli bool) error { c.DbConfig.DecisionBulkSize = maxDecisionBulkSize } - if !inCli && c.DbConfig.Type == "sqlite" && c.DbConfig.UseWal == nil { - log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. 
Set explicitly to false to disable this warning.") - } - return nil } diff --git a/pkg/csconfig/database_test.go b/pkg/csconfig/database_test.go index a946025799d..c7741baf038 100644 --- a/pkg/csconfig/database_test.go +++ b/pkg/csconfig/database_test.go @@ -30,9 +30,10 @@ func TestLoadDBConfig(t *testing.T) { }, }, expected: &DatabaseCfg{ - Type: "sqlite", - DbPath: "./testdata/test.db", - MaxOpenConns: ptr.Of(10), + Type: "sqlite", + DbPath: "./testdata/test.db", + MaxOpenConns: ptr.Of(10), + UseWal: ptr.Of(true), DecisionBulkSize: defaultDecisionBulkSize, }, }, diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go new file mode 100644 index 00000000000..4a54fc9481e --- /dev/null +++ b/pkg/types/getfstype.go @@ -0,0 +1,112 @@ +//go:build !windows + +package types + +import ( + "fmt" + "syscall" +) + +// Generated with `man statfs | grep _MAGIC | awk '{split(tolower($1),a,"_"); print $2 ": \"" a[1] "\","}'` +// ext2/3/4 duplicates removed to just have ext4 +// XIAFS removed as well +var fsTypeMapping map[int]string = map[int]string{ + 0xadf5: "adfs", + 0xadff: "affs", + 0x5346414f: "afs", + 0x09041934: "anon", + 0x0187: "autofs", + 0x62646576: "bdevfs", + 0x42465331: "befs", + 0x1badface: "bfs", + 0x42494e4d: "binfmtfs", + 0xcafe4a11: "bpf", + 0x9123683e: "btrfs", + 0x73727279: "btrfs", + 0x27e0eb: "cgroup", + 0x63677270: "cgroup2", + 0xff534d42: "cifs", + 0x73757245: "coda", + 0x012ff7b7: "coh", + 0x28cd3d45: "cramfs", + 0x64626720: "debugfs", + 0x1373: "devfs", + 0x1cd1: "devpts", + 0xf15f: "ecryptfs", + 0xde5e81e4: "efivarfs", + 0x00414a53: "efs", + 0x137d: "ext", + 0xef51: "ext2", + 0xef53: "ext4", + 0xf2f52010: "f2fs", + 0x65735546: "fuse", + 0xbad1dea: "futexfs", + 0x4244: "hfs", + 0x00c0ffee: "hostfs", + 0xf995e849: "hpfs", + 0x958458f6: "hugetlbfs", + 0x9660: "isofs", + 0x72b6: "jffs2", + 0x3153464a: "jfs", + 0x137f: "minix", + 0x138f: "minix", + 0x2468: "minix2", + 0x2478: "minix2", + 0x4d5a: "minix3", + 0x19800202: "mqueue", + 0x4d44: "msdos", + 
0x11307854: "mtd", + 0x564c: "ncp", + 0x6969: "nfs", + 0x3434: "nilfs", + 0x6e736673: "nsfs", + 0x5346544e: "ntfs", + 0x7461636f: "ocfs2", + 0x9fa1: "openprom", + 0x794c7630: "overlayfs", + 0x50495045: "pipefs", + 0x9fa0: "proc", + 0x6165676c: "pstorefs", + 0x002f: "qnx4", + 0x68191122: "qnx6", + 0x858458f6: "ramfs", + 0x52654973: "reiserfs", + 0x7275: "romfs", + 0x73636673: "securityfs", + 0xf97cff8c: "selinux", + 0x43415d53: "smack", + 0x517b: "smb", + 0xfe534d42: "smb2", + 0x534f434b: "sockfs", + 0x73717368: "squashfs", + 0x62656572: "sysfs", + 0x012ff7b6: "sysv2", + 0x012ff7b5: "sysv4", + 0x01021994: "tmpfs", + 0x74726163: "tracefs", + 0x15013346: "udf", + 0x00011954: "ufs", + 0x9fa2: "usbdevice", + 0x01021997: "v9fs", + 0xa501fcf5: "vxfs", + 0xabba1974: "xenfs", + 0x012ff7b4: "xenix", + 0x58465342: "xfs", +} + +func GetFSType(path string) (string, error) { + var buf syscall.Statfs_t + + err := syscall.Statfs(path, &buf) + + if err != nil { + return "", err + } + + fsType, ok := fsTypeMapping[int(buf.Type)] + if !ok { + return "", fmt.Errorf("unknown fstype %d", buf.Type) + } + + return fsType, nil +} diff --git a/pkg/types/getfstype_windows.go b/pkg/types/getfstype_windows.go new file mode 100644 index 00000000000..03d8fffd48d --- /dev/null +++ b/pkg/types/getfstype_windows.go @@ -0,0 +1,53 @@ +package types + +import ( + "path/filepath" + "syscall" + "unsafe" +) + +func GetFSType(path string) (string, error) { + kernel32, err := syscall.LoadLibrary("kernel32.dll") + if err != nil { + return "", err + } + defer syscall.FreeLibrary(kernel32) + + getVolumeInformation, err := syscall.GetProcAddress(kernel32, "GetVolumeInformationW") + if err != nil { + return "", err + } + + // Convert relative path to absolute path + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + + // Get the root path of the volume + volumeRoot := filepath.VolumeName(absPath) + "\\" + + volumeRootPtr, _ := syscall.UTF16PtrFromString(volumeRoot) + + var ( + 
fileSystemNameBuffer = make([]uint16, 260) + nFileSystemNameSize = uint32(len(fileSystemNameBuffer)) + ) + + ret, _, err := syscall.SyscallN(getVolumeInformation, + uintptr(unsafe.Pointer(volumeRootPtr)), + 0, + 0, + 0, + 0, + 0, + uintptr(unsafe.Pointer(&fileSystemNameBuffer[0])), + uintptr(nFileSystemNameSize), + 0) + + if ret == 0 { + return "", err + } + + return syscall.UTF16ToString(fileSystemNameBuffer), nil +} diff --git a/pkg/types/utils.go b/pkg/types/utils.go index e42c36d8aeb..712d44ba12d 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -3,6 +3,7 @@ package types import ( "fmt" "path/filepath" + "strings" "time" log "github.com/sirupsen/logrus" @@ -67,3 +68,12 @@ func ConfigureLogger(clog *log.Logger) error { func UtcNow() time.Time { return time.Now().UTC() } + +func IsNetworkFS(path string) (bool, string, error) { + fsType, err := GetFSType(path) + if err != nil { + return false, "", err + } + fsType = strings.ToLower(fsType) + return fsType == "nfs" || fsType == "cifs" || fsType == "smb" || fsType == "smb2", fsType, nil +} From 8108e4156d1f564d7acc950e9b49488da5021c17 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 7 Mar 2024 14:25:25 +0100 Subject: [PATCH 047/581] CI: "make generate" target; use ent 0.12.5 (#2871) * CI: "make generate" target; pin tool versions * use ent 0.12.5 * fix make help * fix model generation target; re-run swagger --- Makefile | 4 ++ go.mod | 2 +- go.sum | 6 +- pkg/database/ent/alert_update.go | 16 ++++++ pkg/database/ent/bouncer_update.go | 48 ++++++++++++++++ pkg/database/ent/client.go | 11 +++- pkg/database/ent/configitem_update.go | 32 +++++++++++ pkg/database/ent/decision_update.go | 80 +++++++++++++++++++++++++++ pkg/database/ent/event_update.go | 32 +++++++++++ pkg/database/ent/generate.go | 2 +- pkg/database/ent/lock_update.go | 16 ++++++ pkg/database/ent/machine_update.go | 48 ++++++++++++++++ pkg/database/ent/meta_update.go | 32 +++++++++++ 
pkg/database/ent/runtime/runtime.go | 4 +- pkg/models/add_alerts_request.go | 5 ++ pkg/models/alert.go | 11 ++++ pkg/models/generate.go | 4 ++ pkg/models/get_alerts_response.go | 5 ++ pkg/models/get_decisions_response.go | 5 ++ pkg/models/meta.go | 5 ++ pkg/models/metrics.go | 10 ++++ 21 files changed, 367 insertions(+), 11 deletions(-) create mode 100644 pkg/models/generate.go diff --git a/Makefile b/Makefile index 5d656165fa8..3f271c54ca4 100644 --- a/Makefile +++ b/Makefile @@ -202,6 +202,10 @@ cscli: goversion ## Build cscli crowdsec: goversion ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) +.PHONY: generate +generate: ## Generate code for the database and APIs + $(GO) generate ./pkg/database/ent + $(GO) generate ./pkg/models .PHONY: testclean testclean: bats-clean ## Remove test artifacts diff --git a/go.mod b/go.mod index c2d6ca2c148..06e0275c82b 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ go 1.21 // toolchain go1.21.3 require ( - entgo.io/ent v0.12.4 + entgo.io/ent v0.12.5 github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 diff --git a/go.sum b/go.sum index 7e860300089..35ab5813fca 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 h1:JnYs/y8RJ3+MiIUp+3RgyyeO ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935/go.mod h1:isZrlzJ5cpoCoKFoY9knZug7Lq4pP1cm8g3XciLZ0Pw= bitbucket.org/creachadair/stringset v0.0.9 h1:L4vld9nzPt90UZNrXjNelTshD74ps4P5NGs3Iq6yN3o= bitbucket.org/creachadair/stringset v0.0.9/go.mod h1:t+4WcQ4+PXTa8aQdNKe40ZP6iwesoMFWAxPGd3UGjyY= -entgo.io/ent v0.12.4 h1:LddPnAyxls/O7DTXZvUGDj0NZIdGSu317+aoNLJWbD8= -entgo.io/ent v0.12.4/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= +entgo.io/ent v0.12.5 h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4= +entgo.io/ent v0.12.5/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= github.com/AlecAivazis/survey/v2 v2.3.7 
h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -540,8 +540,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index 0e41ba18109..f8a4d108527 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -62,6 +62,14 @@ func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { return au } +// SetNillableScenario sets the "scenario" field if the given value is not nil. +func (au *AlertUpdate) SetNillableScenario(s *string) *AlertUpdate { + if s != nil { + au.SetScenario(*s) + } + return au +} + // SetBucketId sets the "bucketId" field. func (au *AlertUpdate) SetBucketId(s string) *AlertUpdate { au.mutation.SetBucketId(s) @@ -1029,6 +1037,14 @@ func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { return auo } +// SetNillableScenario sets the "scenario" field if the given value is not nil. 
+func (auo *AlertUpdateOne) SetNillableScenario(s *string) *AlertUpdateOne { + if s != nil { + auo.SetScenario(*s) + } + return auo +} + // SetBucketId sets the "bucketId" field. func (auo *AlertUpdateOne) SetBucketId(s string) *AlertUpdateOne { auo.mutation.SetBucketId(s) diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index f7e71eb315e..b3f5e1a5540 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -58,18 +58,42 @@ func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { return bu } +// SetNillableName sets the "name" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableName(s *string) *BouncerUpdate { + if s != nil { + bu.SetName(*s) + } + return bu +} + // SetAPIKey sets the "api_key" field. func (bu *BouncerUpdate) SetAPIKey(s string) *BouncerUpdate { bu.mutation.SetAPIKey(s) return bu } +// SetNillableAPIKey sets the "api_key" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableAPIKey(s *string) *BouncerUpdate { + if s != nil { + bu.SetAPIKey(*s) + } + return bu +} + // SetRevoked sets the "revoked" field. func (bu *BouncerUpdate) SetRevoked(b bool) *BouncerUpdate { bu.mutation.SetRevoked(b) return bu } +// SetNillableRevoked sets the "revoked" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableRevoked(b *bool) *BouncerUpdate { + if b != nil { + bu.SetRevoked(*b) + } + return bu +} + // SetIPAddress sets the "ip_address" field. func (bu *BouncerUpdate) SetIPAddress(s string) *BouncerUpdate { bu.mutation.SetIPAddress(s) @@ -333,18 +357,42 @@ func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { return buo } +// SetNillableName sets the "name" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableName(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetName(*s) + } + return buo +} + // SetAPIKey sets the "api_key" field. 
func (buo *BouncerUpdateOne) SetAPIKey(s string) *BouncerUpdateOne { buo.mutation.SetAPIKey(s) return buo } +// SetNillableAPIKey sets the "api_key" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableAPIKey(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetAPIKey(*s) + } + return buo +} + // SetRevoked sets the "revoked" field. func (buo *BouncerUpdateOne) SetRevoked(b bool) *BouncerUpdateOne { buo.mutation.SetRevoked(b) return buo } +// SetNillableRevoked sets the "revoked" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableRevoked(b *bool) *BouncerUpdateOne { + if b != nil { + buo.SetRevoked(*b) + } + return buo +} + // SetIPAddress sets the "ip_address" field. func (buo *BouncerUpdateOne) SetIPAddress(s string) *BouncerUpdateOne { buo.mutation.SetIPAddress(s) diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 006d52ef9ba..5318109ed42 100644 --- a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -50,9 +50,7 @@ type Client struct { // NewClient creates a new client configured with the given options. func NewClient(opts ...Option) *Client { - cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} - cfg.options(opts...) - client := &Client{config: cfg} + client := &Client{config: newConfig(opts...)} client.init() return client } @@ -87,6 +85,13 @@ type ( Option func(*config) ) +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + // options applies the options on the config object. 
func (c *config) options(opts ...Option) { for _, opt := range opts { diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index 0db3a0b5233..11fb0755191 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -58,12 +58,28 @@ func (ciu *ConfigItemUpdate) SetName(s string) *ConfigItemUpdate { return ciu } +// SetNillableName sets the "name" field if the given value is not nil. +func (ciu *ConfigItemUpdate) SetNillableName(s *string) *ConfigItemUpdate { + if s != nil { + ciu.SetName(*s) + } + return ciu +} + // SetValue sets the "value" field. func (ciu *ConfigItemUpdate) SetValue(s string) *ConfigItemUpdate { ciu.mutation.SetValue(s) return ciu } +// SetNillableValue sets the "value" field if the given value is not nil. +func (ciu *ConfigItemUpdate) SetNillableValue(s *string) *ConfigItemUpdate { + if s != nil { + ciu.SetValue(*s) + } + return ciu +} + // Mutation returns the ConfigItemMutation object of the builder. func (ciu *ConfigItemUpdate) Mutation() *ConfigItemMutation { return ciu.mutation @@ -186,12 +202,28 @@ func (ciuo *ConfigItemUpdateOne) SetName(s string) *ConfigItemUpdateOne { return ciuo } +// SetNillableName sets the "name" field if the given value is not nil. +func (ciuo *ConfigItemUpdateOne) SetNillableName(s *string) *ConfigItemUpdateOne { + if s != nil { + ciuo.SetName(*s) + } + return ciuo +} + // SetValue sets the "value" field. func (ciuo *ConfigItemUpdateOne) SetValue(s string) *ConfigItemUpdateOne { ciuo.mutation.SetValue(s) return ciuo } +// SetNillableValue sets the "value" field if the given value is not nil. +func (ciuo *ConfigItemUpdateOne) SetNillableValue(s *string) *ConfigItemUpdateOne { + if s != nil { + ciuo.SetValue(*s) + } + return ciuo +} + // Mutation returns the ConfigItemMutation object of the builder. 
func (ciuo *ConfigItemUpdateOne) Mutation() *ConfigItemMutation { return ciuo.mutation diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 1b62cc54c30..182457e9f63 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -79,12 +79,28 @@ func (du *DecisionUpdate) SetScenario(s string) *DecisionUpdate { return du } +// SetNillableScenario sets the "scenario" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableScenario(s *string) *DecisionUpdate { + if s != nil { + du.SetScenario(*s) + } + return du +} + // SetType sets the "type" field. func (du *DecisionUpdate) SetType(s string) *DecisionUpdate { du.mutation.SetType(s) return du } +// SetNillableType sets the "type" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableType(s *string) *DecisionUpdate { + if s != nil { + du.SetType(*s) + } + return du +} + // SetStartIP sets the "start_ip" field. func (du *DecisionUpdate) SetStartIP(i int64) *DecisionUpdate { du.mutation.ResetStartIP() @@ -226,18 +242,42 @@ func (du *DecisionUpdate) SetScope(s string) *DecisionUpdate { return du } +// SetNillableScope sets the "scope" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableScope(s *string) *DecisionUpdate { + if s != nil { + du.SetScope(*s) + } + return du +} + // SetValue sets the "value" field. func (du *DecisionUpdate) SetValue(s string) *DecisionUpdate { du.mutation.SetValue(s) return du } +// SetNillableValue sets the "value" field if the given value is not nil. +func (du *DecisionUpdate) SetNillableValue(s *string) *DecisionUpdate { + if s != nil { + du.SetValue(*s) + } + return du +} + // SetOrigin sets the "origin" field. func (du *DecisionUpdate) SetOrigin(s string) *DecisionUpdate { du.mutation.SetOrigin(s) return du } +// SetNillableOrigin sets the "origin" field if the given value is not nil. 
+func (du *DecisionUpdate) SetNillableOrigin(s *string) *DecisionUpdate { + if s != nil { + du.SetOrigin(*s) + } + return du +} + // SetSimulated sets the "simulated" field. func (du *DecisionUpdate) SetSimulated(b bool) *DecisionUpdate { du.mutation.SetSimulated(b) @@ -557,12 +597,28 @@ func (duo *DecisionUpdateOne) SetScenario(s string) *DecisionUpdateOne { return duo } +// SetNillableScenario sets the "scenario" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableScenario(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetScenario(*s) + } + return duo +} + // SetType sets the "type" field. func (duo *DecisionUpdateOne) SetType(s string) *DecisionUpdateOne { duo.mutation.SetType(s) return duo } +// SetNillableType sets the "type" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableType(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetType(*s) + } + return duo +} + // SetStartIP sets the "start_ip" field. func (duo *DecisionUpdateOne) SetStartIP(i int64) *DecisionUpdateOne { duo.mutation.ResetStartIP() @@ -704,18 +760,42 @@ func (duo *DecisionUpdateOne) SetScope(s string) *DecisionUpdateOne { return duo } +// SetNillableScope sets the "scope" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableScope(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetScope(*s) + } + return duo +} + // SetValue sets the "value" field. func (duo *DecisionUpdateOne) SetValue(s string) *DecisionUpdateOne { duo.mutation.SetValue(s) return duo } +// SetNillableValue sets the "value" field if the given value is not nil. +func (duo *DecisionUpdateOne) SetNillableValue(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetValue(*s) + } + return duo +} + // SetOrigin sets the "origin" field. func (duo *DecisionUpdateOne) SetOrigin(s string) *DecisionUpdateOne { duo.mutation.SetOrigin(s) return duo } +// SetNillableOrigin sets the "origin" field if the given value is not nil. 
+func (duo *DecisionUpdateOne) SetNillableOrigin(s *string) *DecisionUpdateOne { + if s != nil { + duo.SetOrigin(*s) + } + return duo +} + // SetSimulated sets the "simulated" field. func (duo *DecisionUpdateOne) SetSimulated(b bool) *DecisionUpdateOne { duo.mutation.SetSimulated(b) diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index db748101519..a06178f79af 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -59,12 +59,28 @@ func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate { return eu } +// SetNillableTime sets the "time" field if the given value is not nil. +func (eu *EventUpdate) SetNillableTime(t *time.Time) *EventUpdate { + if t != nil { + eu.SetTime(*t) + } + return eu +} + // SetSerialized sets the "serialized" field. func (eu *EventUpdate) SetSerialized(s string) *EventUpdate { eu.mutation.SetSerialized(s) return eu } +// SetNillableSerialized sets the "serialized" field if the given value is not nil. +func (eu *EventUpdate) SetNillableSerialized(s *string) *EventUpdate { + if s != nil { + eu.SetSerialized(*s) + } + return eu +} + // SetAlertEvents sets the "alert_events" field. func (eu *EventUpdate) SetAlertEvents(i int) *EventUpdate { eu.mutation.SetAlertEvents(i) @@ -274,12 +290,28 @@ func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { return euo } +// SetNillableTime sets the "time" field if the given value is not nil. +func (euo *EventUpdateOne) SetNillableTime(t *time.Time) *EventUpdateOne { + if t != nil { + euo.SetTime(*t) + } + return euo +} + // SetSerialized sets the "serialized" field. func (euo *EventUpdateOne) SetSerialized(s string) *EventUpdateOne { euo.mutation.SetSerialized(s) return euo } +// SetNillableSerialized sets the "serialized" field if the given value is not nil. 
+func (euo *EventUpdateOne) SetNillableSerialized(s *string) *EventUpdateOne { + if s != nil { + euo.SetSerialized(*s) + } + return euo +} + // SetAlertEvents sets the "alert_events" field. func (euo *EventUpdateOne) SetAlertEvents(i int) *EventUpdateOne { euo.mutation.SetAlertEvents(i) diff --git a/pkg/database/ent/generate.go b/pkg/database/ent/generate.go index 9f3a916c7a4..5f4b39eec90 100644 --- a/pkg/database/ent/generate.go +++ b/pkg/database/ent/generate.go @@ -1,4 +1,4 @@ package ent -//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate ./schema +//go:generate go run -mod=mod entgo.io/ent/cmd/ent@v0.12.5 generate ./schema diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go index f4deda6e3a8..dc61dfdfde1 100644 --- a/pkg/database/ent/lock_update.go +++ b/pkg/database/ent/lock_update.go @@ -34,6 +34,14 @@ func (lu *LockUpdate) SetName(s string) *LockUpdate { return lu } +// SetNillableName sets the "name" field if the given value is not nil. +func (lu *LockUpdate) SetNillableName(s *string) *LockUpdate { + if s != nil { + lu.SetName(*s) + } + return lu +} + // SetCreatedAt sets the "created_at" field. func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { lu.mutation.SetCreatedAt(t) @@ -121,6 +129,14 @@ func (luo *LockUpdateOne) SetName(s string) *LockUpdateOne { return luo } +// SetNillableName sets the "name" field if the given value is not nil. +func (luo *LockUpdateOne) SetNillableName(s *string) *LockUpdateOne { + if s != nil { + luo.SetName(*s) + } + return luo +} + // SetCreatedAt sets the "created_at" field. 
func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { luo.mutation.SetCreatedAt(t) diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index eb517081174..1f87ac04d6f 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -83,18 +83,42 @@ func (mu *MachineUpdate) SetMachineId(s string) *MachineUpdate { return mu } +// SetNillableMachineId sets the "machineId" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableMachineId(s *string) *MachineUpdate { + if s != nil { + mu.SetMachineId(*s) + } + return mu +} + // SetPassword sets the "password" field. func (mu *MachineUpdate) SetPassword(s string) *MachineUpdate { mu.mutation.SetPassword(s) return mu } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (mu *MachineUpdate) SetNillablePassword(s *string) *MachineUpdate { + if s != nil { + mu.SetPassword(*s) + } + return mu +} + // SetIpAddress sets the "ipAddress" field. func (mu *MachineUpdate) SetIpAddress(s string) *MachineUpdate { mu.mutation.SetIpAddress(s) return mu } +// SetNillableIpAddress sets the "ipAddress" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableIpAddress(s *string) *MachineUpdate { + if s != nil { + mu.SetIpAddress(*s) + } + return mu +} + // SetScenarios sets the "scenarios" field. func (mu *MachineUpdate) SetScenarios(s string) *MachineUpdate { mu.mutation.SetScenarios(s) @@ -470,18 +494,42 @@ func (muo *MachineUpdateOne) SetMachineId(s string) *MachineUpdateOne { return muo } +// SetNillableMachineId sets the "machineId" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableMachineId(s *string) *MachineUpdateOne { + if s != nil { + muo.SetMachineId(*s) + } + return muo +} + // SetPassword sets the "password" field. 
func (muo *MachineUpdateOne) SetPassword(s string) *MachineUpdateOne { muo.mutation.SetPassword(s) return muo } +// SetNillablePassword sets the "password" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillablePassword(s *string) *MachineUpdateOne { + if s != nil { + muo.SetPassword(*s) + } + return muo +} + // SetIpAddress sets the "ipAddress" field. func (muo *MachineUpdateOne) SetIpAddress(s string) *MachineUpdateOne { muo.mutation.SetIpAddress(s) return muo } +// SetNillableIpAddress sets the "ipAddress" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableIpAddress(s *string) *MachineUpdateOne { + if s != nil { + muo.SetIpAddress(*s) + } + return muo +} + // SetScenarios sets the "scenarios" field. func (muo *MachineUpdateOne) SetScenarios(s string) *MachineUpdateOne { muo.mutation.SetScenarios(s) diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index 8071c4f0df5..a1379faa130 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -59,12 +59,28 @@ func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { return mu } +// SetNillableKey sets the "key" field if the given value is not nil. +func (mu *MetaUpdate) SetNillableKey(s *string) *MetaUpdate { + if s != nil { + mu.SetKey(*s) + } + return mu +} + // SetValue sets the "value" field. func (mu *MetaUpdate) SetValue(s string) *MetaUpdate { mu.mutation.SetValue(s) return mu } +// SetNillableValue sets the "value" field if the given value is not nil. +func (mu *MetaUpdate) SetNillableValue(s *string) *MetaUpdate { + if s != nil { + mu.SetValue(*s) + } + return mu +} + // SetAlertMetas sets the "alert_metas" field. func (mu *MetaUpdate) SetAlertMetas(i int) *MetaUpdate { mu.mutation.SetAlertMetas(i) @@ -274,12 +290,28 @@ func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { return muo } +// SetNillableKey sets the "key" field if the given value is not nil. 
+func (muo *MetaUpdateOne) SetNillableKey(s *string) *MetaUpdateOne { + if s != nil { + muo.SetKey(*s) + } + return muo +} + // SetValue sets the "value" field. func (muo *MetaUpdateOne) SetValue(s string) *MetaUpdateOne { muo.mutation.SetValue(s) return muo } +// SetNillableValue sets the "value" field if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableValue(s *string) *MetaUpdateOne { + if s != nil { + muo.SetValue(*s) + } + return muo +} + // SetAlertMetas sets the "alert_metas" field. func (muo *MetaUpdateOne) SetAlertMetas(i int) *MetaUpdateOne { muo.mutation.SetAlertMetas(i) diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go index 2a645f624d7..d10a2fb5459 100644 --- a/pkg/database/ent/runtime/runtime.go +++ b/pkg/database/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go const ( - Version = "v0.12.4" // Version of ent codegen. - Sum = "h1:LddPnAyxls/O7DTXZvUGDj0NZIdGSu317+aoNLJWbD8=" // Sum of ent codegen. + Version = "v0.12.5" // Version of ent codegen. + Sum = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen. 
) diff --git a/pkg/models/add_alerts_request.go b/pkg/models/add_alerts_request.go index fd7246be066..a69934ef770 100644 --- a/pkg/models/add_alerts_request.go +++ b/pkg/models/add_alerts_request.go @@ -54,6 +54,11 @@ func (m AddAlertsRequest) ContextValidate(ctx context.Context, formats strfmt.Re for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/models/alert.go b/pkg/models/alert.go index ec769a1fbb1..895f5ad76e1 100644 --- a/pkg/models/alert.go +++ b/pkg/models/alert.go @@ -399,6 +399,11 @@ func (m *Alert) contextValidateDecisions(ctx context.Context, formats strfmt.Reg for i := 0; i < len(m.Decisions); i++ { if m.Decisions[i] != nil { + + if swag.IsZero(m.Decisions[i]) { // not required + return nil + } + if err := m.Decisions[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("decisions" + "." + strconv.Itoa(i)) @@ -419,6 +424,11 @@ func (m *Alert) contextValidateEvents(ctx context.Context, formats strfmt.Regist for i := 0; i < len(m.Events); i++ { if m.Events[i] != nil { + + if swag.IsZero(m.Events[i]) { // not required + return nil + } + if err := m.Events[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("events" + "." 
+ strconv.Itoa(i)) @@ -469,6 +479,7 @@ func (m *Alert) contextValidateMeta(ctx context.Context, formats strfmt.Registry func (m *Alert) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("source") diff --git a/pkg/models/generate.go b/pkg/models/generate.go new file mode 100644 index 00000000000..ccacc409ab5 --- /dev/null +++ b/pkg/models/generate.go @@ -0,0 +1,4 @@ +package models + +//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 generate model --spec=./localapi_swagger.yaml --target=../ + diff --git a/pkg/models/get_alerts_response.go b/pkg/models/get_alerts_response.go index 41b9d5afdbd..d4ea36e02c5 100644 --- a/pkg/models/get_alerts_response.go +++ b/pkg/models/get_alerts_response.go @@ -54,6 +54,11 @@ func (m GetAlertsResponse) ContextValidate(ctx context.Context, formats strfmt.R for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/models/get_decisions_response.go b/pkg/models/get_decisions_response.go index b65b950fc58..19437dc9b38 100644 --- a/pkg/models/get_decisions_response.go +++ b/pkg/models/get_decisions_response.go @@ -54,6 +54,11 @@ func (m GetDecisionsResponse) ContextValidate(ctx context.Context, formats strfm for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/models/meta.go b/pkg/models/meta.go index 6ad20856d6a..df5ae3c6285 100644 --- a/pkg/models/meta.go +++ b/pkg/models/meta.go @@ -56,6 +56,11 @@ func (m 
Meta) ContextValidate(ctx context.Context, formats strfmt.Registry) erro for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/models/metrics.go b/pkg/models/metrics.go index 573678d1f84..7fbb91c63e4 100644 --- a/pkg/models/metrics.go +++ b/pkg/models/metrics.go @@ -141,6 +141,11 @@ func (m *Metrics) contextValidateBouncers(ctx context.Context, formats strfmt.Re for i := 0; i < len(m.Bouncers); i++ { if m.Bouncers[i] != nil { + + if swag.IsZero(m.Bouncers[i]) { // not required + return nil + } + if err := m.Bouncers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("bouncers" + "." + strconv.Itoa(i)) @@ -161,6 +166,11 @@ func (m *Metrics) contextValidateMachines(ctx context.Context, formats strfmt.Re for i := 0; i < len(m.Machines); i++ { if m.Machines[i] != nil { + + if swag.IsZero(m.Machines[i]) { // not required + return nil + } + if err := m.Machines[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("machines" + "." + strconv.Itoa(i)) From 1eab943ec224e63d15fce31ef55961741f9b4077 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 7 Mar 2024 14:36:28 +0100 Subject: [PATCH 048/581] crowdsec: remove warning if prometheus port is taken during cold logs processing (#2857) i.e. 
remove a "Warning: port is already in use" because it's probably LAPI --- cmd/crowdsec/crowdsec.go | 2 +- cmd/crowdsec/main.go | 4 ++++ cmd/crowdsec/metrics.go | 5 ++++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 0d7d454edf2..37a12b9d3cb 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -280,7 +280,7 @@ func waitOnTomb() { case <-acquisTomb.Dead(): /*if it's acquisition dying it means that we were in "cat" mode. while shutting down, we need to give time for all buckets to process in flight data*/ - log.Warning("Acquisition is finished, shutting down") + log.Info("Acquisition is finished, shutting down") /* While it might make sense to want to shut-down parser/buckets/etc. as soon as acquisition is finished, we might have some pending buckets: buckets that overflowed, but whose LeakRoutine are still alive because they diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 70f7d48dce4..8950790480a 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -75,6 +75,10 @@ type Flags struct { CPUProfile string } +func (f *Flags) haveTimeMachine() bool { + return f.OneShotDSN != "" +} + type labelsMap map[string]string func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index aed43db00c8..682968bb75a 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -196,6 +196,9 @@ func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, log.Debugf("serving metrics after %s ms", time.Since(crowdsecT0)) if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil { - log.Warningf("prometheus: %s", err) + // in time machine, we most likely have the LAPI using the port + if !flags.haveTimeMachine() { + log.Warningf("prometheus: %s", err) + } } } From 6c5e8afde9ab8f751bd4c35c42e66dd114354279 Mon Sep 17 00:00:00 2001 
From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 8 Mar 2024 10:55:30 +0100 Subject: [PATCH 049/581] pkg/cwhub: download data assets to temporary files to avoid partial fetch (#2879) --- pkg/cwhub/dataset.go | 22 ++++++++++++++++++---- pkg/cwhub/dataset_test.go | 8 ++++---- pkg/cwhub/errors.go | 2 +- pkg/cwhub/hub.go | 3 ++- pkg/cwhub/sync.go | 15 ++++++++------- 5 files changed, 33 insertions(+), 17 deletions(-) diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index c900752b8b3..4612f357626 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -6,6 +6,7 @@ import ( "io" "net/http" "os" + "path/filepath" "time" "github.com/sirupsen/logrus" @@ -31,19 +32,32 @@ func downloadFile(url string, destPath string) error { return fmt.Errorf("bad http code %d for %s", resp.StatusCode, url) } - file, err := os.Create(destPath) + tmpFile, err := os.CreateTemp(filepath.Dir(destPath), filepath.Base(destPath)+".*.tmp") if err != nil { return err } - defer file.Close() + + tmpFileName := tmpFile.Name() + defer func() { + tmpFile.Close() + os.Remove(tmpFileName) + }() // avoid reading the whole file in memory - _, err = io.Copy(file, resp.Body) + _, err = io.Copy(tmpFile, resp.Body) if err != nil { return err } - if err = file.Sync(); err != nil { + if err = tmpFile.Sync(); err != nil { + return err + } + + if err = tmpFile.Close(); err != nil { + return err + } + + if err = os.Rename(tmpFileName, destPath); err != nil { return err } diff --git a/pkg/cwhub/dataset_test.go b/pkg/cwhub/dataset_test.go index f23f4878285..93d3e3bf01e 100644 --- a/pkg/cwhub/dataset_test.go +++ b/pkg/cwhub/dataset_test.go @@ -16,7 +16,7 @@ func TestDownloadFile(t *testing.T) { httpmock.Activate() defer httpmock.DeactivateAndReset() - //OK + // OK httpmock.RegisterResponder( "GET", "https://example.com/xx", @@ -36,15 +36,15 @@ func TestDownloadFile(t *testing.T) { assert.Equal(t, "example content oneoneone", string(content)) require.NoError(t, err) - //bad uri + // bad uri 
err = downloadFile("https://zz.com", examplePath) require.Error(t, err) - //404 + // 404 err = downloadFile("https://example.com/x", examplePath) require.Error(t, err) - //bad target + // bad target err = downloadFile("https://example.com/xx", "") require.Error(t, err) } diff --git a/pkg/cwhub/errors.go b/pkg/cwhub/errors.go index 789c2eced7b..f1e779b5476 100644 --- a/pkg/cwhub/errors.go +++ b/pkg/cwhub/errors.go @@ -6,7 +6,7 @@ import ( ) var ( - // ErrNilRemoteHub is returned when the remote hub configuration is not provided to the NewHub constructor. + // ErrNilRemoteHub is returned when trying to download with a local-only configuration. ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") ) diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 21a19bc4526..44e24020d03 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -3,6 +3,7 @@ package cwhub import ( "bytes" "encoding/json" + "errors" "fmt" "io" "os" @@ -34,7 +35,7 @@ func (h *Hub) GetDataDir() string { // All download operations (including updateIndex) return ErrNilRemoteHub if the remote configuration is not set. 
func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, updateIndex bool, logger *logrus.Logger) (*Hub, error) { if local == nil { - return nil, fmt.Errorf("no hub configuration found") + return nil, errors.New("no hub configuration found") } if logger == nil { diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 8ce91dc2193..cb7bf37867c 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -77,9 +77,9 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo if strings.HasPrefix(path, hubDir) { logger.Tracef("in hub dir") - //.../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml - //.../hub/scenarios/crowdsec/ssh_bf.yaml - //.../hub/profiles/crowdsec/linux.yaml + // .../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml + // .../hub/scenarios/crowdsec/ssh_bf.yaml + // .../hub/profiles/crowdsec/linux.yaml if len(subs) < 4 { return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) } @@ -93,13 +93,14 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo } } else if strings.HasPrefix(path, installDir) { // we're in install /etc/crowdsec//... 
logger.Tracef("in install dir") + if len(subs) < 3 { return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) } - ///.../config/parser/stage/file.yaml - ///.../config/postoverflow/stage/file.yaml - ///.../config/scenarios/scenar.yaml - ///.../config/collections/linux.yaml //file is empty + // .../config/parser/stage/file.yaml + // .../config/postoverflow/stage/file.yaml + // .../config/scenarios/scenar.yaml + // .../config/collections/linux.yaml //file is empty ret = &itemFileInfo{ inhub: false, fname: subs[len(subs)-1], From 44ec3b9e01ca992b7806c2359f9a9d50636698ea Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 8 Mar 2024 13:56:59 +0100 Subject: [PATCH 050/581] file acquis: add mutex to protect access to the internal tail map (#2878) --- pkg/acquisition/modules/file/file.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 9ab418a8442..a0c22657445 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -11,6 +11,7 @@ import ( "regexp" "strconv" "strings" + "sync" "time" "github.com/fsnotify/fsnotify" @@ -52,6 +53,7 @@ type FileSource struct { logger *log.Entry files []string exclude_regexps []*regexp.Regexp + tailMapMutex *sync.RWMutex } func (f *FileSource) GetUuid() string { @@ -105,6 +107,7 @@ func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry) error { } f.watchedDirectories = make(map[string]bool) + f.tailMapMutex = &sync.RWMutex{} f.tails = make(map[string]bool) f.watcher, err = fsnotify.NewWatcher() @@ -350,7 +353,9 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er f.logger.Errorf("Could not start tailing file %s : %s", file, err) continue } + f.tailMapMutex.Lock() f.tails[file] = true + f.tailMapMutex.Unlock() t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/file/live/fsnotify") return f.tailFile(out, t, tail) @@ -412,11 +417,14 @@ func (f 
*FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { continue } + f.tailMapMutex.RLock() if f.tails[event.Name] { + f.tailMapMutex.RUnlock() //we already have a tail on it, do not start a new one logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) break } + f.tailMapMutex.RUnlock() //cf. https://github.com/crowdsecurity/crowdsec/issues/1168 //do not rely on stat, reclose file immediately as it's opened by Tail fd, err := os.Open(event.Name) @@ -453,7 +461,9 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { logger.Errorf("Could not start tailing file %s : %s", event.Name, err) break } + f.tailMapMutex.Lock() f.tails[event.Name] = true + f.tailMapMutex.Unlock() t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/tailfile") return f.tailFile(out, t, tail) From a928b4d001937396f7fd1dea134348a5802af8c2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 8 Mar 2024 14:22:23 +0100 Subject: [PATCH 051/581] bump dependencies for geoip db / lookup (#2880) --- go.mod | 4 ++-- go.sum | 10 ++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 06e0275c82b..a2b9c8618c6 100644 --- a/go.mod +++ b/go.mod @@ -63,8 +63,8 @@ require ( github.com/mattn/go-sqlite3 v1.14.16 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/nxadm/tail v1.4.8 - github.com/oschwald/geoip2-golang v1.4.0 - github.com/oschwald/maxminddb-golang v1.8.0 + github.com/oschwald/geoip2-golang v1.9.0 + github.com/oschwald/maxminddb-golang v1.12.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.16.0 diff --git a/go.sum b/go.sum index 35ab5813fca..08a454cf0f0 100644 --- a/go.sum +++ b/go.sum @@ -544,11 +544,10 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/oschwald/geoip2-golang v1.4.0 h1:5RlrjCgRyIGDz/mBmPfnAF4h8k0IAcRv9PvrpOfz+Ug= -github.com/oschwald/geoip2-golang v1.4.0/go.mod h1:8QwxJvRImBH+Zl6Aa6MaIcs5YdlZSTKtzmPGzQqi9ng= -github.com/oschwald/maxminddb-golang v1.6.0/go.mod h1:DUJFucBg2cvqx42YmDa/+xHvb0elJtOm3o4aFQ/nb/w= -github.com/oschwald/maxminddb-golang v1.8.0 h1:Uh/DSnGoxsyp/KYbY1AuP0tYEwfs0sCph9p/UMXK/Hk= -github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis= +github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= +github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= +github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= +github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -821,7 +820,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From e8ff13bc17916c8a1373438aa6e0fb68ac3d3a9f Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 8 Mar 2024 15:04:36 +0100 Subject: [PATCH 052/581] appsec: get the original UA from headers (#2809) --- pkg/appsec/request.go | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 6d472e8afae..0479dea471e 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -17,11 +17,12 @@ import ( ) const ( - URIHeaderName = "X-Crowdsec-Appsec-Uri" - VerbHeaderName = "X-Crowdsec-Appsec-Verb" - HostHeaderName = "X-Crowdsec-Appsec-Host" - IPHeaderName = "X-Crowdsec-Appsec-Ip" - APIKeyHeaderName = "X-Crowdsec-Appsec-Api-Key" + URIHeaderName = "X-Crowdsec-Appsec-Uri" + VerbHeaderName = "X-Crowdsec-Appsec-Verb" + HostHeaderName = "X-Crowdsec-Appsec-Host" + IPHeaderName = "X-Crowdsec-Appsec-Ip" + APIKeyHeaderName = "X-Crowdsec-Appsec-Api-Key" + UserAgentHeaderName = "X-Crowdsec-Appsec-User-Agent" ) type ParsedRequest struct { @@ -311,11 +312,14 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR logger.Debugf("missing '%s' header", HostHeaderName) } + userAgent := r.Header.Get(UserAgentHeaderName) //This one is optional + // delete those headers before coraza process the request delete(r.Header, IPHeaderName) delete(r.Header, HostHeaderName) delete(r.Header, URIHeaderName) delete(r.Header, VerbHeaderName) + delete(r.Header, UserAgentHeaderName) originalHTTPRequest := r.Clone(r.Context()) originalHTTPRequest.Body = io.NopCloser(bytes.NewBuffer(body)) @@ -323,6 +327,13 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) 
(ParsedR originalHTTPRequest.RequestURI = clientURI originalHTTPRequest.Method = clientMethod originalHTTPRequest.Host = clientHost + if userAgent != "" { + originalHTTPRequest.Header.Set("User-Agent", userAgent) + r.Header.Set("User-Agent", userAgent) //Override the UA in the original request, as this is what will be used by the waf engine + } else { + //If we don't have a forwarded UA, delete the one that was set by the bouncer + originalHTTPRequest.Header.Del("User-Agent") + } parsedURL, err := url.Parse(clientURI) if err != nil { From 6daaab1789fb4b02a33768f7835e4fd83217b4a7 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 11 Mar 2024 10:54:40 +0100 Subject: [PATCH 053/581] support both scope and scopes parameter in decisions filter (#2882) --- pkg/database/decisions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index c4ea0bb119e..3175a916ff5 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -49,7 +49,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] if err != nil { return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } - case "scopes": + case "scopes", "scope": //Swagger mentions both of them, let's just support both to make sure we don't break anything scopes := strings.Split(value[0], ",") for i, scope := range scopes { switch strings.ToLower(scope) { From 49e0735b536f8805d08b943b85ae6961dcb3e0dc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 11 Mar 2024 13:14:01 +0100 Subject: [PATCH 054/581] cscli tests + fix bouncer/machine prune (#2883) * func tests: "cscli config feature-flags" * func tests: "cscli bouncers list" * func tests + fix: "cscli bouncers/machines prune" * lint --- cmd/crowdsec-cli/bouncers.go | 2 +- cmd/crowdsec-cli/machines.go | 37 ++++++++++++++++++------------------ test/bats/01_cscli.bats | 21 ++++++++++++++++++++ 
test/bats/10_bouncers.bats | 18 +++++++++++++++++- test/bats/30_machines.bats | 14 ++++++++++++++ 5 files changed, 72 insertions(+), 20 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 35f4320c520..2e0adb9b95f 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -259,7 +259,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { } } - bouncers, err := cli.db.QueryBouncersLastPulltimeLT(time.Now().UTC().Add(duration)) + bouncers, err := cli.db.QueryBouncersLastPulltimeLT(time.Now().UTC().Add(-duration)) if err != nil { return fmt.Errorf("unable to query bouncers: %w", err) } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 7c9b9708c92..df225c06f7f 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -4,6 +4,7 @@ import ( saferand "crypto/rand" "encoding/csv" "encoding/json" + "errors" "fmt" "math/big" "os" @@ -134,7 +135,7 @@ Note: This command requires database direct access, so is intended to be run on } cli.db, err = database.NewClient(cli.cfg().DbConfig) if err != nil { - return fmt.Errorf("unable to create new database client: %s", err) + return fmt.Errorf("unable to create new database client: %w", err) } return nil @@ -155,7 +156,7 @@ func (cli *cliMachines) list() error { machines, err := cli.db.ListMachines() if err != nil { - return fmt.Errorf("unable to list machines: %s", err) + return fmt.Errorf("unable to list machines: %w", err) } switch cli.cfg().Cscli.Output { @@ -166,7 +167,7 @@ func (cli *cliMachines) list() error { enc.SetIndent("", " ") if err := enc.Encode(machines); err != nil { - return fmt.Errorf("failed to marshal") + return errors.New("failed to marshal") } return nil @@ -175,7 +176,7 @@ func (cli *cliMachines) list() error { err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"}) if err != nil { - return 
fmt.Errorf("failed to write header: %s", err) + return fmt.Errorf("failed to write header: %w", err) } for _, m := range machines { @@ -257,12 +258,12 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri // create machineID if not specified by user if len(args) == 0 { if !autoAdd { - return fmt.Errorf("please specify a machine name to add, or use --auto") + return errors.New("please specify a machine name to add, or use --auto") } machineID, err = generateID("") if err != nil { - return fmt.Errorf("unable to generate machine id: %s", err) + return fmt.Errorf("unable to generate machine id: %w", err) } } else { machineID = args[0] @@ -281,20 +282,20 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri case os.IsNotExist(err) || force: dumpFile = credFile case err != nil: - return fmt.Errorf("unable to stat '%s': %s", credFile, err) + return fmt.Errorf("unable to stat '%s': %w", credFile, err) default: return fmt.Errorf(`credentials file '%s' already exists: please remove it, use "--force" or specify a different file with "-f" ("-f -" for standard output)`, credFile) } } if dumpFile == "" { - return fmt.Errorf(`please specify a file to dump credentials to, with -f ("-f -" for standard output)`) + return errors.New(`please specify a file to dump credentials to, with -f ("-f -" for standard output)`) } // create a password if it's not specified by user if machinePassword == "" && !interactive { if !autoAdd { - return fmt.Errorf("please specify a password with --password or use --auto") + return errors.New("please specify a password with --password or use --auto") } machinePassword = generatePassword(passwordLength) @@ -309,7 +310,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri _, err = cli.db.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType) if err != nil { - return fmt.Errorf("unable to create machine: %s", err) + return fmt.Errorf("unable to 
create machine: %w", err) } fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID) @@ -320,7 +321,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri } else if serverCfg != nil && serverCfg.ListenURI != "" { apiURL = "http://" + serverCfg.ListenURI } else { - return fmt.Errorf("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") + return errors.New("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") } } @@ -332,12 +333,12 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %s", err) + return fmt.Errorf("unable to marshal api credentials: %w", err) } if dumpFile != "" && dumpFile != "-" { if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil { - return fmt.Errorf("write api credentials in '%s' failed: %s", dumpFile, err) + return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) } fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) @@ -413,13 +414,13 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b } if !notValidOnly { - if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(duration)); err == nil { + if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(-duration)); err == nil { machines = append(machines, pending...) 
} } if len(machines) == 0 { - fmt.Println("no machines to prune") + fmt.Println("No machines to prune.") return nil } @@ -438,7 +439,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b deleted, err := cli.db.BulkDeleteWatchers(machines) if err != nil { - return fmt.Errorf("unable to prune machines: %s", err) + return fmt.Errorf("unable to prune machines: %w", err) } fmt.Fprintf(os.Stderr, "successfully delete %d machines\n", deleted) @@ -479,7 +480,7 @@ cscli machines prune --not-validated-only --force`, func (cli *cliMachines) validate(machineID string) error { if err := cli.db.ValidateMachine(machineID); err != nil { - return fmt.Errorf("unable to validate machine '%s': %s", machineID, err) + return fmt.Errorf("unable to validate machine '%s': %w", machineID, err) } log.Infof("machine '%s' validated successfully", machineID) @@ -495,7 +496,7 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { Example: `cscli machines validate "machine_name"`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.validate(args[0]) }, } diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 60a65b98d58..03f0132ea63 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -358,3 +358,24 @@ teardown() { rune -0 cscli setup assert_output --partial 'cscli setup [command]' } + +@test "cscli config feature-flags" { + # disabled + rune -0 cscli config feature-flags + assert_line '✗ cscli_setup: Enable cscli setup command (service detection)' + + # enabled in feature.yaml + CONFIG_DIR=$(dirname "$CONFIG_YAML") + echo ' - cscli_setup' >> "$CONFIG_DIR"/feature.yaml + rune -0 cscli config feature-flags + assert_line '✓ cscli_setup: Enable cscli setup command (service detection)' + + # enabled in environment + # shellcheck disable=SC2031 + export CROWDSEC_FEATURE_CSCLI_SETUP="true" + rune -0 cscli config 
feature-flags + assert_line '✓ cscli_setup: Enable cscli setup command (service detection)' + + # there are no retired features + rune -0 cscli config feature-flags --retired +} diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 3f6167ff6f7..1ef39ceb05e 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -25,7 +25,13 @@ teardown() { @test "there are 0 bouncers" { rune -0 cscli bouncers list -o json - assert_output "[]" + assert_json '[]' + + rune -0 cscli bouncers list -o human + assert_output --partial "Name" + + rune -0 cscli bouncers list -o raw + assert_output --partial 'name' } @test "we can add one bouncer, and delete it" { @@ -68,3 +74,13 @@ teardown() { rune -1 cscli bouncers delete ciTestBouncer rune -1 cscli bouncers delete foobarbaz } + +@test "cscli bouncers prune" { + rune -0 cscli bouncers prune + assert_output 'No bouncers to prune.' + rune -0 cscli bouncers add ciTestBouncer + + rune -0 cscli bouncers prune + assert_output 'No bouncers to prune.' +} + diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index f32c376e5b0..2a04cc9bc20 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -90,3 +90,17 @@ teardown() { rune -0 jq '. | length' <(output) assert_output 1 } + +@test "cscli machines prune" { + rune -0 cscli metrics + + rune -0 cscli machines prune + assert_output 'No machines to prune.' + + rune -0 cscli machines list -o json + rune -0 jq -r '.[-1].machineId' <(output) + rune -0 cscli machines delete "$output" + + rune -0 cscli machines prune + assert_output 'No machines to prune.' 
+} From 1a56a0e0b9fb5c56e4cee6d3267e758d2885bf25 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Tue, 12 Mar 2024 14:33:10 +0100 Subject: [PATCH 055/581] armhf fix for getfstype (#2884) * armhf fix for getfstype --- pkg/types/getfstype.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 4a54fc9481e..a7ee249cdf1 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -4,13 +4,13 @@ package types import ( "fmt" - "syscall" + "golang.org/x/sys/unix" ) // Generated with `man statfs | grep _MAGIC | awk '{split(tolower($1),a,"_"); print $2 ": \"" a[1] "\","}'` // ext2/3/4 duplicates removed to just have ext4 // XIAFS removed as well -var fsTypeMapping map[int]string = map[int]string{ +var fsTypeMapping map[int64]string = map[int64]string{ 0xadf5: "adfs", 0xadff: "affs", 0x5346414f: "afs", @@ -95,15 +95,15 @@ var fsTypeMapping map[int]string = map[int]string{ } func GetFSType(path string) (string, error) { - var buf syscall.Statfs_t + var buf unix.Statfs_t - err := syscall.Statfs(path, &buf) + err := unix.Statfs(path, &buf) if err != nil { return "", err } - fsType, ok := fsTypeMapping[int(buf.Type)] + fsType, ok := fsTypeMapping[buf.Type] if !ok { return "", fmt.Errorf("unknown fstype %d", buf.Type) } From bd785ede15dd745518ba7914aae39f5e53d17a10 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Tue, 12 Mar 2024 17:33:22 +0100 Subject: [PATCH 056/581] Fix armhf (#2886) * armhf compile fix --- pkg/types/getfstype.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index a7ee249cdf1..67e018782c1 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -103,7 +103,8 @@ func GetFSType(path string) (string, error) { return "", err } - fsType, ok := fsTypeMapping[buf.Type] + fsType, ok := fsTypeMapping[int64(buf.Type)] //nolint:unconvert + if !ok { return "", fmt.Errorf("unknown fstype %d", buf.Type) } 
From b1c09f75127704fea25dacaad1bccb7cddd008f5 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 13 Mar 2024 14:57:19 +0100 Subject: [PATCH 057/581] acquisition : take prometheus level into account (#2885) * properly take into account the aggregation level of prometheus metrics in acquisition --- cmd/crowdsec/crowdsec.go | 3 +- cmd/crowdsec/main.go | 2 +- cmd/crowdsec/metrics.go | 3 +- pkg/acquisition/acquisition.go | 30 +++++++++++--- pkg/acquisition/acquisition_test.go | 16 ++++---- .../configuration/configuration.go | 11 ++++++ pkg/acquisition/modules/appsec/appsec.go | 5 ++- .../modules/cloudwatch/cloudwatch.go | 22 ++++++++--- .../modules/cloudwatch/cloudwatch_test.go | 5 ++- pkg/acquisition/modules/docker/docker.go | 9 +++-- pkg/acquisition/modules/docker/docker_test.go | 9 +++-- pkg/acquisition/modules/file/file.go | 15 +++++-- pkg/acquisition/modules/file/file_test.go | 9 +++-- .../modules/journalctl/journalctl.go | 16 +++++--- .../modules/journalctl/journalctl_test.go | 15 +++---- pkg/acquisition/modules/kafka/kafka.go | 14 ++++--- pkg/acquisition/modules/kafka/kafka_test.go | 7 ++-- pkg/acquisition/modules/kinesis/kinesis.go | 16 +++++--- .../modules/kinesis/kinesis_test.go | 9 +++-- .../modules/kubernetesaudit/k8s_audit.go | 25 +++++++----- .../modules/kubernetesaudit/k8s_audit_test.go | 5 ++- pkg/acquisition/modules/loki/loki.go | 10 +++-- pkg/acquisition/modules/loki/loki_test.go | 9 +++-- pkg/acquisition/modules/s3/s3.go | 39 ++++++++++++------- pkg/acquisition/modules/s3/s3_test.go | 9 +++-- pkg/acquisition/modules/syslog/syslog.go | 25 +++++++----- pkg/acquisition/modules/syslog/syslog_test.go | 5 ++- .../modules/wineventlog/wineventlog.go | 2 +- .../modules/wineventlog/wineventlog_test.go | 7 ++-- .../wineventlog/wineventlog_windows.go | 18 +++++---- pkg/csconfig/config.go | 3 +- 31 files changed, 245 insertions(+), 128 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 37a12b9d3cb..f604af1dedd 
100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -14,6 +14,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -147,7 +148,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { aggregated := false - if cConfig.Prometheus.Level == "aggregated" { + if cConfig.Prometheus.Level == configuration.CFG_METRICS_AGGREGATE { aggregated = true } diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 8950790480a..5f04e9b99a4 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -123,7 +123,7 @@ func LoadAcquisition(cConfig *csconfig.Config) ([]acquisition.DataSource, error) return nil, fmt.Errorf("failed to configure datasource for %s: %w", flags.OneShotDSN, err) } } else { - dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec) + dataSources, err = acquisition.LoadAcquisitionFromFile(cConfig.Crowdsec, cConfig.Prometheus) if err != nil { return nil, err } diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index 682968bb75a..d670051cea0 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -12,6 +12,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/cache" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -161,7 +162,7 @@ func registerPrometheus(config *csconfig.PrometheusCfg) { // Registering prometheus // If in aggregated mode, do not register events associated with a source, to keep the cardinality low 
- if config.Level == "aggregated" { + if config.Level == configuration.CFG_METRICS_AGGREGATE { log.Infof("Loading aggregated prometheus collectors") prometheus.MustRegister(globalParserHits, globalParserHitsOk, globalParserHitsKo, globalCsInfo, globalParsingHistogram, globalPourHistogram, diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 33602936369..677bf664e31 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -54,7 +54,7 @@ type DataSource interface { GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality) UnmarshalConfig([]byte) error // Decode and pre-validate the YAML datasource - anything that can be checked before runtime - Configure([]byte, *log.Entry) error // Complete the YAML datasource configuration and perform runtime checks. + Configure([]byte, *log.Entry, int) error // Complete the YAML datasource configuration and perform runtime checks. ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource GetMode() string // Get the mode (TAIL, CAT or SERVER) GetName() string // Get the name of the module @@ -94,7 +94,7 @@ func GetDataSourceIface(dataSourceType string) DataSource { // if the configuration is not valid it returns an error. // If the datasource can't be run (eg. journalctl not available), it still returns an error which // can be checked for the appropriate action. 
-func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg) (*DataSource, error) { +func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metricsLevel int) (*DataSource, error) { // we dump it back to []byte, because we want to decode the yaml blob twice: // once to DataSourceCommonCfg, and then later to the dedicated type of the datasource yamlConfig, err := yaml.Marshal(commonConfig) @@ -122,7 +122,7 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg) (*DataS return nil, &DataSourceUnavailableError{Name: commonConfig.Source, Err: err} } /* configure the actual datasource */ - if err := dataSrc.Configure(yamlConfig, subLogger); err != nil { + if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) } @@ -180,10 +180,30 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr return sources, nil } +func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { + if prom == nil { + return configuration.METRICS_FULL + + } + if !prom.Enabled { + return configuration.METRICS_NONE + } + if prom.Level == configuration.CFG_METRICS_AGGREGATE { + return configuration.METRICS_AGGREGATE + } + + if prom.Level == configuration.CFG_METRICS_FULL { + return configuration.METRICS_FULL + } + return configuration.METRICS_FULL + +} + // LoadAcquisitionFromFile unmarshals the configuration item and checks its availability -func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg) ([]DataSource, error) { +func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig.PrometheusCfg) ([]DataSource, error) { var sources []DataSource + metrics_level := GetMetricsLevelFromPromCfg(prom) for _, acquisFile := range config.AcquisitionFiles { log.Infof("loading acquisition file : %s", acquisFile) yamlFile, err := os.Open(acquisFile) @@ -225,7 +245,7 @@ func 
LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg) ([]DataSource, } uniqueId := uuid.NewString() sub.UniqueId = uniqueId - src, err := DataSourceConfigure(sub) + src, err := DataSourceConfigure(sub, metrics_level) if err != nil { var dserr *DataSourceUnavailableError if errors.As(err, &dserr) { diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index 44b3878e1d0..33e4948552a 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -35,7 +35,7 @@ func (f *MockSource) UnmarshalConfig(cfg []byte) error { return nil } -func (f *MockSource) Configure(cfg []byte, logger *log.Entry) error { +func (f *MockSource) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { f.logger = logger if err := f.UnmarshalConfig(cfg); err != nil { return err @@ -182,7 +182,7 @@ wowo: ajsajasjas t.Run(tc.TestName, func(t *testing.T) { common := configuration.DataSourceCommonCfg{} yaml.Unmarshal([]byte(tc.String), &common) - ds, err := DataSourceConfigure(common) + ds, err := DataSourceConfigure(common, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.ExpectedError) if tc.ExpectedError != "" { return @@ -283,7 +283,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { for _, tc := range tests { tc := tc t.Run(tc.TestName, func(t *testing.T) { - dss, err := LoadAcquisitionFromFile(&tc.Config) + dss, err := LoadAcquisitionFromFile(&tc.Config, nil) cstest.RequireErrorContains(t, err, tc.ExpectedError) if tc.ExpectedError != "" { return @@ -305,7 +305,7 @@ type MockCat struct { logger *log.Entry } -func (f *MockCat) Configure(cfg []byte, logger *log.Entry) error { +func (f *MockCat) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { f.logger = logger if f.Mode == "" { f.Mode = configuration.CAT_MODE @@ -349,7 +349,7 @@ type MockTail struct { logger *log.Entry } -func (f *MockTail) Configure(cfg []byte, logger *log.Entry) error { +func (f *MockTail) Configure(cfg 
[]byte, logger *log.Entry, metricsLevel int) error { f.logger = logger if f.Mode == "" { f.Mode = configuration.TAIL_MODE @@ -497,8 +497,10 @@ type MockSourceByDSN struct { logger *log.Entry //nolint: unused } -func (f *MockSourceByDSN) UnmarshalConfig(cfg []byte) error { return nil } -func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry) error { return nil } +func (f *MockSourceByDSN) UnmarshalConfig(cfg []byte) error { return nil } +func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { + return nil +} func (f *MockSourceByDSN) GetMode() string { return f.Mode } func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } func (f *MockSourceByDSN) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } diff --git a/pkg/acquisition/configuration/configuration.go b/pkg/acquisition/configuration/configuration.go index 5ec1a4ac4c3..3e27da1b9e6 100644 --- a/pkg/acquisition/configuration/configuration.go +++ b/pkg/acquisition/configuration/configuration.go @@ -19,3 +19,14 @@ type DataSourceCommonCfg struct { var TAIL_MODE = "tail" var CAT_MODE = "cat" var SERVER_MODE = "server" // No difference with tail, just a bit more verbose + +const ( + METRICS_NONE = iota + METRICS_AGGREGATE + METRICS_FULL +) + +const ( + CFG_METRICS_AGGREGATE = "aggregated" + CFG_METRICS_FULL = "full" +) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index a3c8c7dd8ee..f97905406ce 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -49,6 +49,7 @@ type AppsecSourceConfig struct { // runtime structure of AppsecSourceConfig type AppsecSource struct { + metricsLevel int config AppsecSourceConfig logger *log.Entry mux *http.ServeMux @@ -149,13 +150,13 @@ func (w *AppsecSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{AppsecReqCounter, AppsecBlockCounter, 
AppsecRuleHits, AppsecOutbandParsingHistogram, AppsecInbandParsingHistogram, AppsecGlobalParsingHistogram} } -func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { err := w.UnmarshalConfig(yamlConfig) if err != nil { return errors.Wrap(err, "unable to parse appsec configuration") } w.logger = logger - + w.metricsLevel = MetricsLevel w.logger.Tracef("Appsec configuration: %+v", w.config) if w.config.AuthCacheDuration == nil { diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 89887bef0b8..1ac1465d390 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -43,7 +43,8 @@ var linesRead = prometheus.NewCounterVec( // CloudwatchSource is the runtime instance keeping track of N streams within 1 cloudwatch group type CloudwatchSource struct { - Config CloudwatchSourceConfiguration + metricsLevel int + Config CloudwatchSourceConfiguration /*runtime stuff*/ logger *log.Entry t *tomb.Tomb @@ -152,11 +153,12 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { err := cw.UnmarshalConfig(yamlConfig) if err != nil { return err } + cw.metricsLevel = MetricsLevel cw.logger = logger.WithField("group", cw.Config.GroupName) @@ -385,7 +387,9 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha if !stream.t.Alive() { cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName) cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...) 
- openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec() + if cw.metricsLevel != configuration.METRICS_NONE { + openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec() + } break } shouldCreate = false @@ -395,7 +399,9 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha //let's start watching this stream if shouldCreate { - openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() + if cw.metricsLevel != configuration.METRICS_NONE { + openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() + } newStream.t = tomb.Tomb{} newStream.logger = cw.logger.WithFields(log.Fields{"stream": newStream.StreamName}) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) @@ -409,7 +415,9 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha for idx, stream := range cw.monitoredStreams { if !cw.monitoredStreams[idx].t.Alive() { cw.logger.Debugf("remove dead stream %s", stream.StreamName) - openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec() + if cw.metricsLevel != configuration.METRICS_NONE { + openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec() + } } else { newMonitoredStreams = append(newMonitoredStreams, stream) } @@ -485,7 +493,9 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan cfg.logger.Warningf("cwLogToEvent error, discarded event : %s", err) } else { cfg.logger.Debugf("pushing message : %s", evt.Line.Raw) - linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc() + if cw.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc() + } outChan <- evt } } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 5d64755e2e9..11842e61ff2 100644 --- 
a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" @@ -427,7 +428,7 @@ stream_name: test_stream`), dbgLogger.Logger.SetLevel(log.DebugLevel) dbgLogger.Infof("starting test") cw := CloudwatchSource{} - err := cw.Configure(tc.config, dbgLogger) + err := cw.Configure(tc.config, dbgLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { @@ -559,7 +560,7 @@ stream_name: test_stream`), dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) cw := CloudwatchSource{} - err := cw.Configure(tc.config, dbgLogger) + err := cw.Configure(tc.config, dbgLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { return diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 60f1100b35a..9f1febf2cb7 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -46,6 +46,7 @@ type DockerConfiguration struct { } type DockerSource struct { + metricsLevel int Config DockerConfiguration runningContainerState map[string]*ContainerConfig compiledContainerName []*regexp.Regexp @@ -128,9 +129,9 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { d.logger = logger - + d.metricsLevel = MetricsLevel err := d.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -325,7 +326,9 @@ func (d 
*DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er l.Src = containerConfig.Name l.Process = true l.Module = d.GetName() - linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() + if d.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() + } evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} out <- evt d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index c4d23168a37..6c010f895d3 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -13,6 +13,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" dockerTypes "github.com/docker/docker/api/types" dockerContainer "github.com/docker/docker/api/types/container" @@ -60,7 +61,7 @@ container_name: for _, test := range tests { f := DockerSource{} - err := f.Configure([]byte(test.config), subLogger) + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -162,7 +163,7 @@ container_name_regexp: for _, ts := range tests { var ( - logger *log.Logger + logger *log.Logger subLogger *log.Entry ) @@ -182,7 +183,7 @@ container_name_regexp: out := make(chan types.Event) dockerSource := DockerSource{} - err := dockerSource.Configure([]byte(ts.config), subLogger) + err := dockerSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -304,7 +305,7 @@ func TestOneShot(t *testing.T) { for _, ts := range tests { var ( subLogger *log.Entry - logger *log.Logger + logger *log.Logger ) if ts.expectedOutput != "" { diff --git 
a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index a0c22657445..efc89715296 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -46,6 +46,7 @@ type FileConfiguration struct { } type FileSource struct { + metricsLevel int config FileConfiguration watcher *fsnotify.Watcher watchedDirectories map[string]bool @@ -98,8 +99,9 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { f.logger = logger + f.metricsLevel = MetricsLevel err := f.UnmarshalConfig(yamlConfig) if err != nil { @@ -517,12 +519,19 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai if line.Text == "" { //skip empty lines continue } - linesRead.With(prometheus.Labels{"source": tail.Filename}).Inc() + if f.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": tail.Filename}).Inc() + } + src := tail.Filename + if f.metricsLevel == configuration.METRICS_AGGREGATE { + src = filepath.Base(tail.Filename) + } + l := types.Line{ Raw: trimLine(line.Text), Labels: f.config.Labels, Time: line.Time, - Src: tail.Filename, + Src: src, Process: true, Module: f.GetName(), } diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 410beb4bc85..ad5fe8bfabd 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -56,7 +57,7 @@ exclude_regexps: ["as[a-$d"]`, tc := tc t.Run(tc.name, func(t *testing.T) { f := 
fileacquisition.FileSource{} - err := f.Configure([]byte(tc.config), subLogger) + err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } @@ -222,7 +223,7 @@ filename: test_files/test_delete.log`, tc.setup() } - err := f.Configure([]byte(tc.config), subLogger) + err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedConfigErr) if tc.expectedConfigErr != "" { return @@ -384,7 +385,7 @@ force_inotify: true`, testPattern), tc.setup() } - err := f.Configure([]byte(tc.config), subLogger) + err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) if tc.afterConfigure != nil { @@ -455,7 +456,7 @@ exclude_regexps: ["\\.gz$"]` }) f := fileacquisition.FileSource{} - if err := f.Configure([]byte(config), subLogger); err != nil { + if err := f.Configure([]byte(config), subLogger, configuration.METRICS_NONE); err != nil { subLogger.Fatalf("unexpected error: %s", err) } diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 55091a7b5eb..e8bb5a3edd5 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -26,10 +26,11 @@ type JournalCtlConfiguration struct { } type JournalCtlSource struct { - config JournalCtlConfiguration - logger *log.Entry - src string - args []string + metricsLevel int + config JournalCtlConfiguration + logger *log.Entry + src string + args []string } const journalctlCmd string = "journalctl" @@ -131,7 +132,9 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err l.Src = j.src l.Process = true l.Module = j.GetName() - linesRead.With(prometheus.Labels{"source": j.src}).Inc() + if j.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": j.src}).Inc() + } var evt types.Event if 
!j.config.UseTimeMachine { evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} @@ -194,8 +197,9 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { j.logger = logger + j.metricsLevel = MetricsLevel err := j.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index a91fba31b34..9d1f1bb7e0e 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -10,6 +10,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" @@ -52,7 +53,7 @@ journalctl_filter: for _, test := range tests { f := JournalCtlSource{} - err := f.Configure([]byte(test.config), subLogger) + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -144,9 +145,9 @@ journalctl_filter: } for _, ts := range tests { var ( - logger *log.Logger + logger *log.Logger subLogger *log.Entry - hook *test.Hook + hook *test.Hook ) if ts.expectedOutput != "" { @@ -165,7 +166,7 @@ journalctl_filter: out := make(chan types.Event, 100) j := JournalCtlSource{} - err := j.Configure([]byte(ts.config), subLogger) + err := j.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -218,9 +219,9 @@ journalctl_filter: } for _, ts := range tests { var ( - logger *log.Logger + logger *log.Logger subLogger *log.Entry - hook *test.Hook + hook *test.Hook ) if 
ts.expectedOutput != "" { @@ -239,7 +240,7 @@ journalctl_filter: out := make(chan types.Event) j := JournalCtlSource{} - err := j.Configure([]byte(ts.config), subLogger) + err := j.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index 5b6e8fc0d41..f64bb1df306 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -52,9 +52,10 @@ type TLSConfig struct { } type KafkaSource struct { - Config KafkaConfiguration - logger *log.Entry - Reader *kafka.Reader + metricsLevel int + Config KafkaConfiguration + logger *log.Entry + Reader *kafka.Reader } func (k *KafkaSource) GetUuid() string { @@ -86,8 +87,9 @@ func (k *KafkaSource) UnmarshalConfig(yamlConfig []byte) error { return err } -func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { k.logger = logger + k.metricsLevel = MetricsLevel k.logger.Debugf("start configuring %s source", dataSourceName) @@ -170,7 +172,9 @@ func (k *KafkaSource) ReadMessage(out chan types.Event) error { Module: k.GetName(), } k.logger.Tracef("line with message read from topic '%s': %+v", k.Config.Topic, l) - linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() + if k.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() + } var evt types.Event if !k.Config.UseTimeMachine { diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 92ccd4c7a3f..6eda37a377e 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" 
"github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -75,7 +76,7 @@ group_id: crowdsec`, }) for _, test := range tests { k := KafkaSource{} - err := k.Configure([]byte(test.config), subLogger) + err := k.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -169,7 +170,7 @@ func TestStreamingAcquisition(t *testing.T) { source: kafka brokers: - localhost:9092 -topic: crowdsecplaintext`), subLogger) +topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("could not configure kafka source : %s", err) } @@ -245,7 +246,7 @@ tls: client_cert: ./testdata/kafkaClient.certificate.pem client_key: ./testdata/kafkaClient.key ca_cert: ./testdata/snakeoil-ca-1.crt - `), subLogger) + `), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("could not configure kafka source : %s", err) } diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index e2cc7996349..a86816244f6 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -38,6 +38,7 @@ type KinesisConfiguration struct { } type KinesisSource struct { + metricsLevel int Config KinesisConfiguration logger *log.Entry kClient *kinesis.Kinesis @@ -149,8 +150,9 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { k.logger = logger + k.metricsLevel = MetricsLevel err := k.UnmarshalConfig(yamlConfig) if err != nil { @@ -283,11 +285,15 @@ func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutpu func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan types.Event, logger *log.Entry, shardId string) { for _, record := range records { if k.Config.StreamARN != "" { - 
linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamARN, "shard": shardId}).Inc() - linesRead.With(prometheus.Labels{"stream": k.Config.StreamARN}).Inc() + if k.metricsLevel != configuration.METRICS_NONE { + linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamARN, "shard": shardId}).Inc() + linesRead.With(prometheus.Labels{"stream": k.Config.StreamARN}).Inc() + } } else { - linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamName, "shard": shardId}).Inc() - linesRead.With(prometheus.Labels{"stream": k.Config.StreamName}).Inc() + if k.metricsLevel != configuration.METRICS_NONE { + linesReadShards.With(prometheus.Labels{"stream": k.Config.StreamName, "shard": shardId}).Inc() + linesRead.With(prometheus.Labels{"stream": k.Config.StreamName}).Inc() + } } var data []CloudwatchSubscriptionLogEvent var err error diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 662d6040e0f..a4e4f2f7378 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -143,7 +144,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, }) for _, test := range tests { f := KinesisSource{} - err := f.Configure([]byte(test.config), subLogger) + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -172,7 +173,7 @@ stream_name: stream-1-shard`, config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithFields(log.Fields{ "type": "kinesis", - })) + }), configuration.METRICS_NONE) if err != 
nil { t.Fatalf("Error configuring source: %s", err) } @@ -218,7 +219,7 @@ stream_name: stream-2-shards`, config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithFields(log.Fields{ "type": "kinesis", - })) + }), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } @@ -267,7 +268,7 @@ from_subscription: true`, config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithFields(log.Fields{ "type": "kinesis", - })) + }), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index ee44bd01ae2..7d27f9e0390 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -28,12 +28,13 @@ type KubernetesAuditConfiguration struct { } type KubernetesAuditSource struct { - config KubernetesAuditConfiguration - logger *log.Entry - mux *http.ServeMux - server *http.Server - outChan chan types.Event - addr string + metricsLevel int + config KubernetesAuditConfiguration + logger *log.Entry + mux *http.ServeMux + server *http.Server + outChan chan types.Event + addr string } var eventCount = prometheus.NewCounterVec( @@ -93,8 +94,9 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry) error { +func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { ka.logger = logger + ka.metricsLevel = MetricsLevel err := ka.UnmarshalConfig(config) if err != nil { @@ -161,7 +163,10 @@ func (ka *KubernetesAuditSource) Dump() interface{} { } func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.Request) { - requestCount.WithLabelValues(ka.addr).Inc() + + if ka.metricsLevel != 
configuration.METRICS_NONE { + requestCount.WithLabelValues(ka.addr).Inc() + } if r.Method != http.MethodPost { w.WriteHeader(http.StatusMethodNotAllowed) return @@ -185,7 +190,9 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R remoteIP := strings.Split(r.RemoteAddr, ":")[0] for _, auditEvent := range auditEvents.Items { - eventCount.WithLabelValues(ka.addr).Inc() + if ka.metricsLevel != configuration.METRICS_NONE { + eventCount.WithLabelValues(ka.addr).Inc() + } bytesEvent, err := json.Marshal(auditEvent) if err != nil { ka.logger.Errorf("Error marshaling audit event: %s", err) diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index c3502c95685..331822ecf5b 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -81,7 +82,7 @@ webhook_path: /k8s-audit`, require.NoError(t, err) - err = f.Configure([]byte(test.config), subLogger) + err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) f.StreamingAcquisition(out, tb) @@ -253,7 +254,7 @@ webhook_path: /k8s-audit`, f := KubernetesAuditSource{} err := f.UnmarshalConfig([]byte(test.config)) require.NoError(t, err) - err = f.Configure([]byte(test.config), subLogger) + err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 555deefe25a..3625c689516 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -57,7 +57,8 @@ type LokiConfiguration struct { } type LokiSource struct { - 
Config LokiConfiguration + metricsLevel int + Config LokiConfiguration Client *lokiclient.LokiClient @@ -118,9 +119,10 @@ func (l *LokiSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (l *LokiSource) Configure(config []byte, logger *log.Entry) error { +func (l *LokiSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { l.Config = LokiConfiguration{} l.logger = logger + l.metricsLevel = MetricsLevel err := l.UnmarshalConfig(config) if err != nil { return err @@ -302,7 +304,9 @@ func (l *LokiSource) readOneEntry(entry lokiclient.Entry, labels map[string]stri ll.Process = true ll.Module = l.GetName() - linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() + if l.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() + } expectMode := types.LIVE if l.Config.UseTimeMachine { expectMode = types.TIMEMACHINE diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 8511d5445af..9ac3ccbd321 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -130,7 +131,7 @@ query: > for _, test := range tests { t.Run(test.testName, func(t *testing.T) { lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(test.config), subLogger) + err := lokiSource.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) if test.password != "" { @@ -346,7 +347,7 @@ since: 1h "type": "loki", }) lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(ts.config), subLogger) + err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) 
if err != nil { t.Fatalf("Unexpected error : %s", err) @@ -436,7 +437,7 @@ query: > lokiTomb := tomb.Tomb{} lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(ts.config), subLogger) + err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -514,7 +515,7 @@ query: > title := time.Now().String() lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(config), subLogger) + err := lokiSource.Configure([]byte(config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("Unexpected error : %s", err) } diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 651d40d3d50..98b2e685cd0 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -47,15 +47,16 @@ type S3Configuration struct { } type S3Source struct { - Config S3Configuration - logger *log.Entry - s3Client s3iface.S3API - sqsClient sqsiface.SQSAPI - readerChan chan S3Object - t *tomb.Tomb - out chan types.Event - ctx aws.Context - cancel context.CancelFunc + MetricsLevel int + Config S3Configuration + logger *log.Entry + s3Client s3iface.S3API + sqsClient sqsiface.SQSAPI + readerChan chan S3Object + t *tomb.Tomb + out chan types.Event + ctx aws.Context + cancel context.CancelFunc } type S3Object struct { @@ -345,7 +346,9 @@ func (s *S3Source) sqsPoll() error { logger.Tracef("SQS output: %v", out) logger.Debugf("Received %d messages from SQS", len(out.Messages)) for _, message := range out.Messages { - sqsMessagesReceived.WithLabelValues(s.Config.SQSName).Inc() + if s.MetricsLevel != configuration.METRICS_NONE { + sqsMessagesReceived.WithLabelValues(s.Config.SQSName).Inc() + } bucket, key, err := s.extractBucketAndPrefix(message.Body) if err != nil { logger.Errorf("Error while parsing SQS message: %s", err) @@ -426,14 +429,20 @@ func (s *S3Source) readFile(bucket string, key string) error { default: text := scanner.Text() 
logger.Tracef("Read line %s", text) - linesRead.WithLabelValues(bucket).Inc() + if s.MetricsLevel != configuration.METRICS_NONE { + linesRead.WithLabelValues(bucket).Inc() + } l := types.Line{} l.Raw = text l.Labels = s.Config.Labels l.Time = time.Now().UTC() l.Process = true l.Module = s.GetName() - l.Src = bucket + "/" + key + if s.MetricsLevel == configuration.METRICS_FULL { + l.Src = bucket + "/" + key + } else if s.MetricsLevel == configuration.METRICS_AGGREGATE { + l.Src = bucket + } var evt types.Event if !s.Config.UseTimeMachine { evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} @@ -446,7 +455,9 @@ func (s *S3Source) readFile(bucket string, key string) error { if err := scanner.Err(); err != nil { return fmt.Errorf("failed to read object %s/%s: %s", bucket, key, err) } - objectsRead.WithLabelValues(bucket).Inc() + if s.MetricsLevel != configuration.METRICS_NONE { + objectsRead.WithLabelValues(bucket).Inc() + } return nil } @@ -505,7 +516,7 @@ func (s *S3Source) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (s *S3Source) Configure(yamlConfig []byte, logger *log.Entry) error { +func (s *S3Source) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := s.UnmarshalConfig(yamlConfig) if err != nil { return err diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index 02423b1392c..e94521d187f 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/sqs/sqsiface" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -66,7 +67,7 @@ sqs_name: foobar for _, test := range tests { t.Run(test.name, func(t *testing.T) { f := S3Source{} - err := 
f.Configure([]byte(test.config), nil) + err := f.Configure([]byte(test.config), nil, configuration.METRICS_NONE) if err == nil { t.Fatalf("expected error, got none") } @@ -111,7 +112,7 @@ polling_method: list t.Run(test.name, func(t *testing.T) { f := S3Source{} logger := log.NewEntry(log.New()) - err := f.Configure([]byte(test.config), logger) + err := f.Configure([]byte(test.config), logger, configuration.METRICS_NONE) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -306,7 +307,7 @@ prefix: foo/ f := S3Source{} logger := log.NewEntry(log.New()) logger.Logger.SetLevel(log.TraceLevel) - err := f.Configure([]byte(test.config), logger) + err := f.Configure([]byte(test.config), logger, configuration.METRICS_NONE) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -381,7 +382,7 @@ sqs_name: test linesRead := 0 f := S3Source{} logger := log.NewEntry(log.New()) - err := f.Configure([]byte(test.config), logger) + err := f.Configure([]byte(test.config), logger, configuration.METRICS_NONE) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 8aed2836816..47940c3294c 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -29,10 +29,11 @@ type SyslogConfiguration struct { } type SyslogSource struct { - config SyslogConfiguration - logger *log.Entry - server *syslogserver.SyslogServer - serverTomb *tomb.Tomb + metricsLevel int + config SyslogConfiguration + logger *log.Entry + server *syslogserver.SyslogServer + serverTomb *tomb.Tomb } var linesReceived = prometheus.NewCounterVec( @@ -121,10 +122,10 @@ func (s *SyslogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { s.logger = logger 
s.logger.Infof("Starting syslog datasource configuration") - + s.metricsLevel = MetricsLevel err := s.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -198,7 +199,9 @@ func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c cha logger := s.logger.WithField("client", syslogLine.Client) logger.Tracef("raw: %s", syslogLine) - linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc() + if s.metricsLevel != configuration.METRICS_NONE { + linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc() + } p := rfc3164.NewRFC3164Parser(rfc3164.WithCurrentYear()) err := p.Parse(syslogLine.Message) if err != nil { @@ -211,10 +214,14 @@ func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c cha continue } line = s.buildLogFromSyslog(p2.Timestamp, p2.Hostname, p2.Tag, p2.PID, p2.Message) - linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc5424"}).Inc() + if s.metricsLevel != configuration.METRICS_NONE { + linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc5424"}).Inc() + } } else { line = s.buildLogFromSyslog(p.Timestamp, p.Hostname, p.Tag, p.PID, p.Message) - linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc3164"}).Inc() + if s.metricsLevel != configuration.METRICS_NONE { + linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc3164"}).Inc() + } } line = strings.TrimSuffix(line, "\n") diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 1d2ba3fb648..ba14c7db053 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -9,6 +9,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -56,7 +57,7 @@ listen_addr: 10.0.0`, }) 
for _, test := range tests { s := SyslogSource{} - err := s.Configure([]byte(test.config), subLogger) + err := s.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) cstest.AssertErrorContains(t, err, test.expectedErr) } } @@ -137,7 +138,7 @@ listen_addr: 127.0.0.1`, "type": "syslog", }) s := SyslogSource{} - err := s.Configure([]byte(ts.config), subLogger) + err := s.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("could not configure syslog source : %s", err) } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog.go b/pkg/acquisition/modules/wineventlog/wineventlog.go index f0eca5d13d7..44035d0a708 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog.go @@ -23,7 +23,7 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_test.go index 053ba88b52d..c937ceba825 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" @@ -58,7 +59,7 @@ xpath_query: test`, }) for _, test := range tests { f := WinEventLogSource{} - err := f.Configure([]byte(test.config), subLogger) + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) assert.Contains(t, err.Error(), test.expectedErr) } } @@ -117,7 +118,7 @@ event_level: bla`, }) for _, test := range tests { f := 
WinEventLogSource{} - f.Configure([]byte(test.config), subLogger) + f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) q, err := f.buildXpathQuery() if test.expectedErr != "" { if err == nil { @@ -194,7 +195,7 @@ event_ids: to := &tomb.Tomb{} c := make(chan types.Event) f := WinEventLogSource{} - f.Configure([]byte(test.config), subLogger) + f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) f.StreamingAcquisition(c, to) time.Sleep(time.Second) lines := test.expectedLines diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index ee69dc35cdd..8adbf1e53c5 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -34,11 +34,12 @@ type WinEventLogConfiguration struct { } type WinEventLogSource struct { - config WinEventLogConfiguration - logger *log.Entry - evtConfig *winlog.SubscribeConfig - query string - name string + metricsLevel int + config WinEventLogConfiguration + logger *log.Entry + evtConfig *winlog.SubscribeConfig + query string + name string } type QueryList struct { @@ -188,7 +189,9 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error continue } for _, event := range renderedEvents { - linesRead.With(prometheus.Labels{"source": w.name}).Inc() + if w.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": w.name}).Inc() + } l := types.Line{} l.Raw = event l.Module = w.GetName() @@ -270,8 +273,9 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry) error { +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { w.logger = logger + w.metricsLevel = MetricsLevel err := w.UnmarshalConfig(yamlConfig) if err != nil { diff --git 
a/pkg/csconfig/config.go b/pkg/csconfig/config.go index 0c960803e04..e007e042bd5 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/yamlpatch" @@ -118,7 +119,7 @@ func NewDefaultConfig() *Config { } prometheus := PrometheusCfg{ Enabled: true, - Level: "full", + Level: configuration.CFG_METRICS_FULL, } configPaths := ConfigurationPaths{ ConfigDir: DefaultConfigPath("."), From 2a7e8383c88674481102fe87ac9b86b1e87dbcb5 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 13 Mar 2024 17:20:06 +0100 Subject: [PATCH 058/581] fix #2889 (#2892) * fix #2889 --- pkg/appsec/query_utils.go | 78 +++++++++++++ pkg/appsec/query_utils_test.go | 207 +++++++++++++++++++++++++++++++++ pkg/appsec/request.go | 2 +- 3 files changed, 286 insertions(+), 1 deletion(-) create mode 100644 pkg/appsec/query_utils.go create mode 100644 pkg/appsec/query_utils_test.go diff --git a/pkg/appsec/query_utils.go b/pkg/appsec/query_utils.go new file mode 100644 index 00000000000..0c886e0ea51 --- /dev/null +++ b/pkg/appsec/query_utils.go @@ -0,0 +1,78 @@ +package appsec + +// This file is mostly stolen from net/url package, but with some modifications to allow less strict parsing of query strings + +import ( + "net/url" + "strings" +) + +// parseQuery and parseQuery are copied net/url package, but allow semicolon in values +func ParseQuery(query string) url.Values { + m := make(url.Values) + parseQuery(m, query) + return m +} + +func parseQuery(m url.Values, query string) { + for query != "" { + var key string + key, query, _ = strings.Cut(query, "&") + + if key == "" { + continue + } + key, value, _ := strings.Cut(key, "=") + //for now we'll just ignore the errors, but ideally we want to fire some "internal" 
rules when we see invalid query strings + key = unescape(key) + value = unescape(value) + m[key] = append(m[key], value) + } +} + +func hexDigitToByte(digit byte) (byte, bool) { + switch { + case digit >= '0' && digit <= '9': + return digit - '0', true + case digit >= 'a' && digit <= 'f': + return digit - 'a' + 10, true + case digit >= 'A' && digit <= 'F': + return digit - 'A' + 10, true + default: + return 0, false + } +} + +func unescape(input string) string { + ilen := len(input) + res := strings.Builder{} + res.Grow(ilen) + for i := 0; i < ilen; i++ { + ci := input[i] + if ci == '+' { + res.WriteByte(' ') + continue + } + if ci == '%' { + if i+2 >= ilen { + res.WriteByte(ci) + continue + } + hi, ok := hexDigitToByte(input[i+1]) + if !ok { + res.WriteByte(ci) + continue + } + lo, ok := hexDigitToByte(input[i+2]) + if !ok { + res.WriteByte(ci) + continue + } + res.WriteByte(hi<<4 | lo) + i += 2 + continue + } + res.WriteByte(ci) + } + return res.String() +} diff --git a/pkg/appsec/query_utils_test.go b/pkg/appsec/query_utils_test.go new file mode 100644 index 00000000000..2ad7927968d --- /dev/null +++ b/pkg/appsec/query_utils_test.go @@ -0,0 +1,207 @@ +package appsec + +import ( + "net/url" + "reflect" + "testing" +) + +func TestParseQuery(t *testing.T) { + tests := []struct { + name string + query string + expected url.Values + }{ + { + name: "Simple query", + query: "foo=bar", + expected: url.Values{ + "foo": []string{"bar"}, + }, + }, + { + name: "Multiple values", + query: "foo=bar&foo=baz", + expected: url.Values{ + "foo": []string{"bar", "baz"}, + }, + }, + { + name: "Empty value", + query: "foo=", + expected: url.Values{ + "foo": []string{""}, + }, + }, + { + name: "Empty key", + query: "=bar", + expected: url.Values{ + "": []string{"bar"}, + }, + }, + { + name: "Empty query", + query: "", + expected: url.Values{}, + }, + { + name: "Multiple keys", + query: "foo=bar&baz=qux", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + }, 
+ }, + { + name: "Multiple keys with empty value", + query: "foo=bar&baz=qux&quux=", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + "quux": []string{""}, + }, + }, + { + name: "Multiple keys with empty value and empty key", + query: "foo=bar&baz=qux&quux=&=quuz", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz", + expected: url.Values{ + "foo": []string{"bar", "baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand and equals", + query: 
"foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz&foo=bar%3Dbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz", "bar=baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand and equals and question mark", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz&foo=bar%3Dbaz&foo=bar%3Fbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz", "bar=baz", "bar?baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "keys with escaped characters", + query: "foo=ba;r&baz=qu;;x&quux=x\\&ww&xx=qu?uz&", + expected: url.Values{ + "foo": []string{"ba;r"}, + "baz": []string{"qu;;x"}, + "quux": []string{"x\\"}, + "ww": []string{""}, + "xx": []string{"qu?uz"}, + }, + }, + { + name: "hexadecimal characters", + query: "foo=bar%20baz", + expected: url.Values{ + "foo": []string{"bar baz"}, + }, + }, + { + name: "hexadecimal characters upper and lower case", + query: "foo=Ba%42%42&bar=w%2f%2F", + expected: url.Values{ + "foo": []string{"BaBB"}, + "bar": []string{"w//"}, + }, + }, + { + name: "hexadecimal characters with invalid characters", + query: "foo=bar%20baz%2", + expected: url.Values{ + "foo": []string{"bar baz%2"}, + }, + }, + { + name: "hexadecimal characters with invalid hex characters", + query: "foo=bar%xx", + expected: url.Values{ + "foo": []string{"bar%xx"}, + }, + }, + { + name: "hexadecimal characters with invalid 2nd hex character", + query: "foo=bar%2x", + expected: url.Values{ + "foo": []string{"bar%2x"}, + }, + }, + { + name: "url +", + query: "foo=bar+x", + expected: url.Values{ + "foo": []string{"bar x"}, + }, + }, + { + name: "url &&", + query: "foo=bar&&lol=bur", + expected: url.Values{ + "foo": []string{"bar"}, + 
"lol": []string{"bur"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res := ParseQuery(test.query) + if !reflect.DeepEqual(res, test.expected) { + t.Fatalf("unexpected result: %v", res) + } + }) + } +} diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 0479dea471e..effb1828307 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -367,7 +367,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR URL: r.URL, Proto: r.Proto, Body: body, - Args: parsedURL.Query(), //TODO: Check if there's not potential bypass as it excludes malformed args + Args: ParseQuery(parsedURL.RawQuery), TransferEncoding: r.TransferEncoding, ResponseChannel: make(chan AppsecTempResponse), RemoteAddrNormalized: remoteAddrNormalized, From 6c042f18f0d2760018eb84ba7dc0a86dc38ca386 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 14 Mar 2024 10:43:02 +0100 Subject: [PATCH 059/581] LAPI: local api unix socket support (#2770) --- .gitignore | 5 +- cmd/crowdsec-cli/config_show.go | 1 + cmd/crowdsec-cli/lapi.go | 52 ++++---- cmd/crowdsec-cli/lapi_test.go | 49 ++++++++ cmd/crowdsec-cli/machines.go | 4 +- docker/test/tests/test_tls.py | 5 +- pkg/apiclient/auth_jwt.go | 11 +- pkg/apiclient/client.go | 92 ++++++++++---- pkg/apiclient/client_test.go | 154 ++++++++++++++++++++++- pkg/apiserver/apiserver.go | 119 +++++++++++++----- pkg/apiserver/controllers/v1/alerts.go | 7 +- pkg/apiserver/controllers/v1/utils.go | 20 ++- pkg/apiserver/middlewares/v1/api_key.go | 28 +++-- pkg/apiserver/middlewares/v1/jwt.go | 29 +++-- pkg/csconfig/api.go | 55 +++++++-- test/bats/01_crowdsec_lapi.bats | 10 +- test/bats/01_cscli.bats | 5 +- test/bats/09_socket.bats | 158 ++++++++++++++++++++++++ test/bats/30_machines_tls.bats | 45 ++++++- test/lib/config/config-global | 1 + test/lib/config/config-local | 5 +- 21 files changed, 717 insertions(+), 138 deletions(-) create mode 100644 
cmd/crowdsec-cli/lapi_test.go create mode 100644 test/bats/09_socket.bats diff --git a/.gitignore b/.gitignore index 3054e9eb3c2..6e6624fd282 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,10 @@ *.dylib *~ .pc + +# IDEs .vscode +.idea # If vendor is included, allow prebuilt (wasm?) libraries. !vendor/**/*.so @@ -34,7 +37,7 @@ test/coverage/* *.swo # Dependencies are not vendored by default, but a tarball is created by "make vendor" -# and provided in the release. Used by freebsd, gentoo, etc. +# and provided in the release. Used by gentoo, etc. vendor/ vendor.tgz diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index 634ca77410e..c277173c387 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -100,6 +100,7 @@ API Client: {{- if .API.Server }} Local API Server{{if and .API.Server.Enable (not (ValueBool .API.Server.Enable))}} (disabled){{end}}: - Listen URL : {{.API.Server.ListenURI}} + - Listen Socket : {{.API.Server.ListenSocket}} - Profile File : {{.API.Server.ProfilesPath}} {{- if .API.Server.TLS }} diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 0bb4a31b72a..13a9d8d7e77 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -44,7 +44,9 @@ func (cli *cliLapi) status() error { password := strfmt.Password(cfg.API.Client.Credentials.Password) login := cfg.API.Client.Credentials.Login - apiurl, err := url.Parse(cfg.API.Client.Credentials.URL) + origURL := cfg.API.Client.Credentials.URL + + apiURL, err := url.Parse(origURL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } @@ -59,7 +61,7 @@ func (cli *cliLapi) status() error { return fmt.Errorf("failed to get scenarios: %w", err) } - Client, err = apiclient.NewDefaultClient(apiurl, + Client, err = apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, fmt.Sprintf("crowdsec/%s", version.String()), nil) @@ -74,7 +76,8 @@ func (cli *cliLapi) status() error { } log.Infof("Loaded credentials from %s", 
cfg.API.Client.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", login, apiurl) + // use the original string because apiURL would print 'http://unix/' + log.Infof("Trying to authenticate with username %s on %s", login, origURL) _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { @@ -101,23 +104,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e password := strfmt.Password(generatePassword(passwordLength)) - if apiURL == "" { - if cfg.API.Client == nil || cfg.API.Client.Credentials == nil || cfg.API.Client.Credentials.URL == "" { - return fmt.Errorf("no Local API URL. Please provide it in your configuration or with the -u parameter") - } - - apiURL = cfg.API.Client.Credentials.URL - } - /*URL needs to end with /, but user doesn't care*/ - if !strings.HasSuffix(apiURL, "/") { - apiURL += "/" - } - /*URL needs to start with http://, but user doesn't care*/ - if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") { - apiURL = "http://" + apiURL - } - - apiurl, err := url.Parse(apiURL) + apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } @@ -173,13 +160,36 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e return nil } +// prepareAPIURL checks/fixes a LAPI connection url (http, https or socket) and returns an URL struct +func prepareAPIURL(clientCfg *csconfig.LocalApiClientCfg, apiURL string) (*url.URL, error) { + if apiURL == "" { + if clientCfg == nil || clientCfg.Credentials == nil || clientCfg.Credentials.URL == "" { + return nil, errors.New("no Local API URL. 
Please provide it in your configuration or with the -u parameter") + } + + apiURL = clientCfg.Credentials.URL + } + + // URL needs to end with /, but user doesn't care + if !strings.HasSuffix(apiURL, "/") { + apiURL += "/" + } + + // URL needs to start with http://, but user doesn't care + if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") && !strings.HasPrefix(apiURL, "/") { + apiURL = "http://" + apiURL + } + + return url.Parse(apiURL) +} + func (cli *cliLapi) newStatusCmd() *cobra.Command { cmdLapiStatus := &cobra.Command{ Use: "status", Short: "Check authentication to Local API (LAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, _ []string) error { return cli.status() }, } diff --git a/cmd/crowdsec-cli/lapi_test.go b/cmd/crowdsec-cli/lapi_test.go new file mode 100644 index 00000000000..018ecad8118 --- /dev/null +++ b/cmd/crowdsec-cli/lapi_test.go @@ -0,0 +1,49 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" +) + +func TestPrepareAPIURL_NoProtocol(t *testing.T) { + url, err := prepareAPIURL(nil, "localhost:81") + require.NoError(t, err) + assert.Equal(t, "http://localhost:81/", url.String()) +} + +func TestPrepareAPIURL_Http(t *testing.T) { + url, err := prepareAPIURL(nil, "http://localhost:81") + require.NoError(t, err) + assert.Equal(t, "http://localhost:81/", url.String()) +} + +func TestPrepareAPIURL_Https(t *testing.T) { + url, err := prepareAPIURL(nil, "https://localhost:81") + require.NoError(t, err) + assert.Equal(t, "https://localhost:81/", url.String()) +} + +func TestPrepareAPIURL_UnixSocket(t *testing.T) { + url, err := prepareAPIURL(nil, "/path/socket") + require.NoError(t, err) + assert.Equal(t, "/path/socket/", url.String()) +} + +func TestPrepareAPIURL_Empty(t *testing.T) { + _, err := 
prepareAPIURL(nil, "") + require.Error(t, err) +} + +func TestPrepareAPIURL_Empty_ConfigOverride(t *testing.T) { + url, err := prepareAPIURL(&csconfig.LocalApiClientCfg{ + Credentials: &csconfig.ApiCredentialsCfg{ + URL: "localhost:80", + }, + }, "") + require.NoError(t, err) + assert.Equal(t, "http://localhost:80/", url.String()) +} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index df225c06f7f..1457fb5a0cc 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -318,8 +318,8 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri if apiURL == "" { if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" { apiURL = clientCfg.Credentials.URL - } else if serverCfg != nil && serverCfg.ListenURI != "" { - apiURL = "http://" + serverCfg.ListenURI + } else if serverCfg.ClientURL() != "" { + apiURL = serverCfg.ClientURL() } else { return errors.New("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") } diff --git a/docker/test/tests/test_tls.py b/docker/test/tests/test_tls.py index 591afe0d303..fe899b000af 100644 --- a/docker/test/tests/test_tls.py +++ b/docker/test/tests/test_tls.py @@ -22,8 +22,7 @@ def test_missing_key_file(crowdsec, flavor): } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: - # XXX: this message appears twice, is that normal? 
- cs.wait_for_log("*while starting API server: missing TLS key file*") + cs.wait_for_log("*local API server stopped with error: missing TLS key file*") def test_missing_cert_file(crowdsec, flavor): @@ -35,7 +34,7 @@ def test_missing_cert_file(crowdsec, flavor): } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: - cs.wait_for_log("*while starting API server: missing TLS cert file*") + cs.wait_for_log("*local API server stopped with error: missing TLS cert file*") def test_tls_missing_ca(crowdsec, flavor, certs_dir): diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 2ead10cf6da..6ee17fa5e72 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -70,9 +70,14 @@ func (t *JWTTransport) refreshJwtToken() error { req.Header.Add("Content-Type", "application/json") + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + client := &http.Client{ Transport: &retryRoundTripper{ - next: http.DefaultTransport, + next: transport, maxAttempts: 5, withBackOff: true, retryStatusCodes: []int{http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout, http.StatusInternalServerError}, @@ -153,7 +158,7 @@ func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) req.Header.Add("User-Agent", t.UserAgent) } - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", t.Token)) + req.Header.Add("Authorization", "Bearer "+t.Token) return req, nil } @@ -166,7 +171,7 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { } if log.GetLevel() >= log.TraceLevel { - //requestToDump := cloneRequest(req) + // requestToDump := cloneRequest(req) dump, _ := httputil.DumpRequest(req, true) log.Tracef("req-jwt: %s", string(dump)) } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index b487f68a698..e0e521d6a6f 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -5,8 +5,10 @@ import ( 
"crypto/tls" "crypto/x509" "fmt" + "net" "net/http" "net/url" + "strings" "github.com/golang-jwt/jwt/v4" @@ -67,12 +69,18 @@ func NewClient(config *Config) (*ApiClient, error) { MachineID: &config.MachineID, Password: &config.Password, Scenarios: config.Scenarios, - URL: config.URL, UserAgent: config.UserAgent, VersionPrefix: config.VersionPrefix, UpdateScenario: config.UpdateScenario, } + transport, baseURL := createTransport(config.URL) + if transport != nil { + t.Transport = transport + } + + t.URL = baseURL + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} tlsconfig.RootCAs = CaCertPool @@ -84,7 +92,7 @@ func NewClient(config *Config) (*ApiClient, error) { ht.TLSClientConfig = &tlsconfig } - c := &ApiClient{client: t.Client(), BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} + c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -98,23 +106,29 @@ func NewClient(config *Config) (*ApiClient, error) { } func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { + transport, baseURL := createTransport(URL) + if client == nil { client = &http.Client{} - if ht, ok := http.DefaultTransport.(*http.Transport); ok { - tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} - tlsconfig.RootCAs = CaCertPool + if transport != nil { + client.Transport = transport + } else { + if ht, ok := http.DefaultTransport.(*http.Transport); ok { + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} + tlsconfig.RootCAs = CaCertPool - if Cert != nil { - tlsconfig.Certificates = []tls.Certificate{*Cert} - } + if Cert != nil { + tlsconfig.Certificates = []tls.Certificate{*Cert} + } - ht.TLSClientConfig = &tlsconfig - client.Transport = ht + 
ht.TLSClientConfig = &tlsconfig + client.Transport = ht + } } } - c := &ApiClient{client: client, BaseURL: URL, UserAgent: userAgent, URLPrefix: prefix} + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: userAgent, URLPrefix: prefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -128,18 +142,26 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt } func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { + transport, baseURL := createTransport(config.URL) + if client == nil { client = &http.Client{} - } + if transport != nil { + client.Transport = transport + } else { + tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} + if Cert != nil { + tlsconfig.RootCAs = CaCertPool + tlsconfig.Certificates = []tls.Certificate{*Cert} + } - tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} - if Cert != nil { - tlsconfig.RootCAs = CaCertPool - tlsconfig.Certificates = []tls.Certificate{*Cert} + http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig + } + } else if client.Transport == nil && transport != nil { + client.Transport = transport } - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig - c := &ApiClient{client: client, BaseURL: config.URL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -158,11 +180,31 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { return c, nil } +func createTransport(url *url.URL) (*http.Transport, *url.URL) { + urlString := url.String() + + // TCP transport + if !strings.HasPrefix(urlString, "/") { + return nil, url + } + + // Unix transport + url.Path = "/" + url.Host = "unix" + url.Scheme = "http" + + 
return &http.Transport{ + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", strings.TrimSuffix(urlString, "/")) + }, + }, url +} + type Response struct { Response *http.Response - //add our pagination stuff - //NextPage int - //... + // add our pagination stuff + // NextPage int + // ... } func newResponse(r *http.Response) *Response { @@ -170,14 +212,14 @@ func newResponse(r *http.Response) *Response { } type ListOpts struct { - //Page int - //PerPage int + // Page int + // PerPage int } type DeleteOpts struct { - //?? + // ?? } type AddOpts struct { - //?? + // ?? } diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index dc6eae16926..d3296c4b67f 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -3,10 +3,13 @@ package apiclient import ( "context" "fmt" + "net" "net/http" "net/http/httptest" "net/url" + "path" "runtime" + "strings" "testing" log "github.com/sirupsen/logrus" @@ -34,12 +37,50 @@ func setupWithPrefix(urlPrefix string) (*http.ServeMux, string, func()) { apiHandler := http.NewServeMux() apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux)) - // server is a test HTTP server used to provide mock API responses. server := httptest.NewServer(apiHandler) return mux, server.URL, server.Close } +// toUNCPath converts a Windows file path to a UNC path. +// This is necessary because the Go http package does not support Windows file paths. 
+func toUNCPath(path string) (string, error) { + colonIdx := strings.Index(path, ":") + if colonIdx == -1 { + return "", fmt.Errorf("invalid path format, missing drive letter: %s", path) + } + + // URL parsing does not like backslashes + remaining := strings.ReplaceAll(path[colonIdx+1:], "\\", "/") + uncPath := "//localhost/" + path[:colonIdx] + "$" + remaining + + return uncPath, nil +} + +func setupUnixSocketWithPrefix(socket string, urlPrefix string) (mux *http.ServeMux, serverURL string, teardown func()) { + var err error + if runtime.GOOS == "windows" { + socket, err = toUNCPath(socket) + if err != nil { + log.Fatalf("converting to UNC path: %s", err) + } + } + + mux = http.NewServeMux() + baseURLPath := "/" + urlPrefix + + apiHandler := http.NewServeMux() + apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux)) + + server := httptest.NewUnstartedServer(apiHandler) + l, _ := net.Listen("unix", socket) + _ = server.Listener.Close() + server.Listener = l + server.Start() + + return mux, socket, server.Close +} + func testMethod(t *testing.T, r *http.Request, want string) { t.Helper() assert.Equal(t, want, r.Method) @@ -77,6 +118,49 @@ func TestNewClientOk(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) } +func TestNewClientOk_UnixSocket(t *testing.T) { + tmpDir := t.TempDir() + socket := path.Join(tmpDir, "socket") + + mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + defer teardown() + + apiURL, err := url.Parse(urlx) + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + client, err := NewClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + t.Fatalf("new api client: %s", err) + } + /*mock login*/ + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": 
"2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "GET") + w.WriteHeader(http.StatusOK) + }) + + _, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) + if err != nil { + t.Fatalf("test Unable to list alerts : %+v", err) + } + + if resp.Response.StatusCode != http.StatusOK { + t.Fatalf("Alerts.List returned status: %d, want %d", resp.Response.StatusCode, http.StatusCreated) + } +} + func TestNewClientKo(t *testing.T) { mux, urlx, teardown := setup() defer teardown() @@ -131,6 +215,33 @@ func TestNewDefaultClient(t *testing.T) { log.Printf("err-> %s", err) } +func TestNewDefaultClient_UnixSocket(t *testing.T) { + tmpDir := t.TempDir() + socket := path.Join(tmpDir, "socket") + + mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + defer teardown() + + apiURL, err := url.Parse(urlx) + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + client, err := NewDefaultClient(apiURL, "/v1", "", nil) + if err != nil { + t.Fatalf("new api client: %s", err) + } + + mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + }) + + _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + assert.Contains(t, err.Error(), `performing request: API error: brr`) + log.Printf("err-> %s", err) +} + func TestNewClientRegisterKO(t *testing.T) { apiURL, err := url.Parse("http://127.0.0.1:4242/") require.NoError(t, err) @@ -143,10 +254,10 @@ func TestNewClientRegisterKO(t *testing.T) { VersionPrefix: "v1", }, &http.Client{}) - if runtime.GOOS != "windows" { - cstest.RequireErrorContains(t, err, "dial tcp 127.0.0.1:4242: connect: connection refused") - } else { + if runtime.GOOS == "windows" { cstest.RequireErrorContains(t, err, " No connection could be made because the target machine actively refused it.") + } else { + 
cstest.RequireErrorContains(t, err, "dial tcp 127.0.0.1:4242: connect: connection refused") } } @@ -178,6 +289,41 @@ func TestNewClientRegisterOK(t *testing.T) { log.Printf("->%T", client) } +func TestNewClientRegisterOK_UnixSocket(t *testing.T) { + log.SetLevel(log.TraceLevel) + + tmpDir := t.TempDir() + socket := path.Join(tmpDir, "socket") + + mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + defer teardown() + + /*mock login*/ + mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { + testMethod(t, r, "POST") + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + }) + + apiURL, err := url.Parse(urlx) + if err != nil { + t.Fatalf("parsing api url: %s", apiURL) + } + + client, err := RegisterClient(&Config{ + MachineID: "test_login", + Password: "test_password", + UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + URL: apiURL, + VersionPrefix: "v1", + }, &http.Client{}) + if err != nil { + t.Fatalf("while registering client : %s", err) + } + + log.Printf("->%T", client) +} + func TestNewClientBadAnswer(t *testing.T) { log.SetLevel(log.TraceLevel) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 19a0085d2dc..e42ad9a98fa 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -32,6 +32,7 @@ const keyLength = 32 type APIServer struct { URL string + UnixSocket string TLS *csconfig.TLSCfg dbClient *database.Client logFile string @@ -66,7 +67,7 @@ func recoverFromPanic(c *gin.Context) { // because of https://github.com/golang/net/blob/39120d07d75e76f0079fe5d27480bcb965a21e4c/http2/server.go // and because it seems gin doesn't handle those neither, we need to "hand define" some errors to properly catch them if strErr, ok := err.(error); ok { - //stolen from http2/server.go in x/net + // stolen from http2/server.go in x/net var ( errClientDisconnected = errors.New("client disconnected") errClosedBody = errors.New("body 
closed by handler") @@ -124,10 +125,10 @@ func newGinLogger(config *csconfig.LocalApiServerCfg) (*log.Logger, string, erro logger := &lumberjack.Logger{ Filename: logFile, - MaxSize: 500, //megabytes + MaxSize: 500, // megabytes MaxBackups: 3, - MaxAge: 28, //days - Compress: true, //disabled by default + MaxAge: 28, // days + Compress: true, // disabled by default } if config.LogMaxSize != 0 { @@ -176,6 +177,13 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { router.ForwardedByClientIP = false + // set the remore address of the request to 127.0.0.1 if it comes from a unix socket + router.Use(func(c *gin.Context) { + if c.Request.RemoteAddr == "@" { + c.Request.RemoteAddr = "127.0.0.1:65535" + } + }) + if config.TrustedProxies != nil && config.UseForwardedForHeaders { if err = router.SetTrustedProxies(*config.TrustedProxies); err != nil { return nil, fmt.Errorf("while setting trusted_proxies: %w", err) @@ -223,8 +231,8 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { } var ( - apiClient *apic - papiClient *Papi + apiClient *apic + papiClient *Papi ) controller.AlertsAddChan = nil @@ -267,6 +275,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { return &APIServer{ URL: config.ListenURI, + UnixSocket: config.ListenSocket, TLS: config.TLS, logFile: logFile, dbClient: dbClient, @@ -317,11 +326,11 @@ func (s *APIServer) Run(apiReady chan bool) error { return nil }) - //csConfig.API.Server.ConsoleConfig.ShareCustomScenarios + // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios if s.apic.apiClient.IsEnrolled() { if s.consoleConfig.IsPAPIEnabled() { if s.papi.URL != "" { - log.Infof("Starting PAPI decision receiver") + log.Info("Starting PAPI decision receiver") s.papi.pullTomb.Go(func() error { if err := s.papi.Pull(); err != nil { log.Errorf("papi pull: %s", err) @@ -353,29 +362,31 @@ func (s *APIServer) Run(apiReady chan bool) error { }) } - s.httpServerTomb.Go(func() error { 
s.listenAndServeURL(apiReady); return nil }) + s.httpServerTomb.Go(func() error { + return s.listenAndServeLAPI(apiReady) + }) + + if err := s.httpServerTomb.Wait(); err != nil { + return fmt.Errorf("local API server stopped with error: %w", err) + } return nil } -// listenAndServeURL starts the http server and blocks until it's closed +// listenAndServeLAPI starts the http server and blocks until it's closed // it also updates the URL field with the actual address the server is listening on // it's meant to be run in a separate goroutine -func (s *APIServer) listenAndServeURL(apiReady chan bool) { - serverError := make(chan error, 1) - - go func() { - listener, err := net.Listen("tcp", s.URL) - if err != nil { - serverError <- fmt.Errorf("listening on %s: %w", s.URL, err) - return - } - - s.URL = listener.Addr().String() - log.Infof("CrowdSec Local API listening on %s", s.URL) - apiReady <- true +func (s *APIServer) listenAndServeLAPI(apiReady chan bool) error { + var ( + tcpListener net.Listener + unixListener net.Listener + err error + serverError = make(chan error, 2) + listenerClosed = make(chan struct{}) + ) - if s.TLS != nil && (s.TLS.CertFilePath != "" || s.TLS.KeyFilePath != "") { + startServer := func(listener net.Listener, canTLS bool) { + if canTLS && s.TLS != nil && (s.TLS.CertFilePath != "" || s.TLS.KeyFilePath != "") { if s.TLS.KeyFilePath == "" { serverError <- errors.New("missing TLS key file") return @@ -391,25 +402,71 @@ func (s *APIServer) listenAndServeURL(apiReady chan bool) { err = s.httpServer.Serve(listener) } - if err != nil && err != http.ErrServerClosed { - serverError <- fmt.Errorf("while serving local API: %w", err) + switch { + case errors.Is(err, http.ErrServerClosed): + break + case err != nil: + serverError <- err + } + } + + // Starting TCP listener + go func() { + if s.URL == "" { + return + } + + tcpListener, err = net.Listen("tcp", s.URL) + if err != nil { + serverError <- fmt.Errorf("listening on %s: %w", s.URL, err) + return 
+ } + + log.Infof("CrowdSec Local API listening on %s", s.URL) + startServer(tcpListener, true) + }() + + // Starting Unix socket listener + go func() { + if s.UnixSocket == "" { + return + } + + _ = os.RemoveAll(s.UnixSocket) + + unixListener, err = net.Listen("unix", s.UnixSocket) + if err != nil { + serverError <- fmt.Errorf("while creating unix listener: %w", err) return } + + log.Infof("CrowdSec Local API listening on Unix socket %s", s.UnixSocket) + startServer(unixListener, false) }() + apiReady <- true + select { case err := <-serverError: - log.Fatalf("while starting API server: %s", err) + return err case <-s.httpServerTomb.Dying(): - log.Infof("Shutting down API server") - // do we need a graceful shutdown here? + log.Info("Shutting down API server") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if err := s.httpServer.Shutdown(ctx); err != nil { - log.Errorf("while shutting down http server: %s", err) + log.Errorf("while shutting down http server: %v", err) + } + + close(listenerClosed) + case <-listenerClosed: + if s.UnixSocket != "" { + _ = os.RemoveAll(s.UnixSocket) } } + + return nil } func (s *APIServer) Close() { @@ -437,7 +494,7 @@ func (s *APIServer) Shutdown() error { } } - //close io.writer logger given to gin + // close io.writer logger given to gin if pipe, ok := gin.DefaultErrorWriter.(*io.PipeWriter); ok { pipe.Close() } diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index ad183e4ba80..19dbf8d0cfa 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -174,7 +174,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { // if coming from cscli, alert already has decisions if len(alert.Decisions) != 0 { - //alert already has a decision (cscli decisions add etc.), generate uuid here + // alert already has a decision (cscli decisions add etc.), generate uuid here for _, decision := range alert.Decisions { 
decision.UUID = uuid.NewString() } @@ -323,12 +323,13 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { var err error incomingIP := gctx.ClientIP() - if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) { + if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) return } decisionIDStr := gctx.Param("alert_id") + decisionID, err := strconv.Atoi(decisionIDStr) if err != nil { gctx.JSON(http.StatusBadRequest, gin.H{"message": "alert_id must be valid integer"}) @@ -349,7 +350,7 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { // DeleteAlerts deletes alerts from the database based on the specified filter func (c *Controller) DeleteAlerts(gctx *gin.Context) { incomingIP := gctx.ClientIP() - if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) { + if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) return } diff --git a/pkg/apiserver/controllers/v1/utils.go b/pkg/apiserver/controllers/v1/utils.go index 6f14dd9204e..2fcf8099e67 100644 --- a/pkg/apiserver/controllers/v1/utils.go +++ b/pkg/apiserver/controllers/v1/utils.go @@ -2,7 +2,9 @@ package v1 import ( "errors" + "net" "net/http" + "strings" jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" @@ -25,6 +27,14 @@ func getBouncerFromContext(ctx *gin.Context) (*ent.Bouncer, error) { return bouncerInfo, nil } +func isUnixSocket(c *gin.Context) bool { + if localAddr, ok := c.Request.Context().Value(http.LocalAddrContextKey).(net.Addr); ok { + return strings.HasPrefix(localAddr.Network(), "unix") + } + + return false +} + func 
getMachineIDFromContext(ctx *gin.Context) (string, error) { claims := jwt.ExtractClaims(ctx) if claims == nil { @@ -47,8 +57,16 @@ func getMachineIDFromContext(ctx *gin.Context) (string, error) { func (c *Controller) AbortRemoteIf(option bool) gin.HandlerFunc { return func(gctx *gin.Context) { + if !option { + return + } + + if isUnixSocket(gctx) { + return + } + incomingIP := gctx.ClientIP() - if option && incomingIP != "127.0.0.1" && incomingIP != "::1" { + if incomingIP != "127.0.0.1" && incomingIP != "::1" { gctx.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) gctx.Abort() } diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 4e273371bfe..4561b8f7789 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -19,7 +19,7 @@ import ( const ( APIKeyHeader = "X-Api-Key" BouncerContextKey = "bouncer_info" - dummyAPIKeySize = 54 + dummyAPIKeySize = 54 // max allowed by bcrypt 72 = 54 bytes in base64 ) @@ -82,10 +82,10 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) bouncer, err := a.DbClient.SelectBouncerByName(bouncerName) - //This is likely not the proper way, but isNotFound does not seem to work + // This is likely not the proper way, but isNotFound does not seem to work if err != nil && strings.Contains(err.Error(), "bouncer not found") { - //Because we have a valid cert, automatically create the bouncer in the database if it does not exist - //Set a random API key, but it will never be used + // Because we have a valid cert, automatically create the bouncer in the database if it does not exist + // Set a random API key, but it will never be used apiKey, err := GenerateAPIKey(dummyAPIKeySize) if err != nil { logger.Errorf("error generating mock api key: %s", err) @@ -100,11 +100,11 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { return 
nil } } else if err != nil { - //error while selecting bouncer + // error while selecting bouncer logger.Errorf("while selecting bouncers: %s", err) return nil } else if bouncer.AuthType != types.TlsAuthType { - //bouncer was found in DB + // bouncer was found in DB logger.Errorf("bouncer isn't allowed to auth by TLS") return nil } @@ -139,8 +139,10 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return func(c *gin.Context) { var bouncer *ent.Bouncer + clientIP := c.ClientIP() + logger := log.WithFields(log.Fields{ - "ip": c.ClientIP(), + "ip": clientIP, }) if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { @@ -152,6 +154,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { if bouncer == nil { c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() + return } @@ -160,7 +163,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { }) if bouncer.IPAddress == "" { - if err := a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() @@ -169,11 +172,11 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } } - //Don't update IP on HEAD request, as it's used by the appsec to check the validity of the API key provided - if bouncer.IPAddress != c.ClientIP() && bouncer.IPAddress != "" && c.Request.Method != http.MethodHead { - log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, c.ClientIP(), bouncer.IPAddress) + // Don't update IP on HEAD request, as it's used by the appsec to check the validity of the API key provided + if bouncer.IPAddress != clientIP && bouncer.IPAddress != "" && c.Request.Method != http.MethodHead { + log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, clientIP, bouncer.IPAddress) - if err := 
a.DbClient.UpdateBouncerIP(c.ClientIP(), bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() @@ -199,6 +202,5 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } c.Set(BouncerContextKey, bouncer) - c.Next() } } diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 6fe053713bc..735c5f058cb 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -61,6 +61,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { if j.TlsAuth == nil { c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() + return nil, errors.New("TLS auth is not configured") } @@ -76,7 +77,8 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { if !validCert { c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() - return nil, fmt.Errorf("failed cert authentication") + + return nil, errors.New("failed cert authentication") } ret.machineID = fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) @@ -85,9 +87,9 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { Where(machine.MachineId(ret.machineID)). 
First(j.DbClient.CTX) if ent.IsNotFound(err) { - //Machine was not found, let's create it + // Machine was not found, let's create it log.Infof("machine %s not found, create it", ret.machineID) - //let's use an apikey as the password, doesn't matter in this case (generatePassword is only available in cscli) + // let's use an apikey as the password, doesn't matter in this case (generatePassword is only available in cscli) pwd, err := GenerateAPIKey(dummyAPIKeySize) if err != nil { log.WithFields(log.Fields{ @@ -95,7 +97,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { "cn": extractedCN, }).Errorf("error generating password: %s", err) - return nil, fmt.Errorf("error generating password") + return nil, errors.New("error generating password") } password := strfmt.Password(pwd) @@ -110,6 +112,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { if ret.clientMachine.AuthType != types.TlsAuthType { return nil, fmt.Errorf("machine %s attempted to auth with TLS cert but it is configured to use %s", ret.machineID, ret.clientMachine.AuthType) } + ret.machineID = ret.clientMachine.MachineId } @@ -213,18 +216,20 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { } } + clientIP := c.ClientIP() + if auth.clientMachine.IpAddress == "" { - err = j.DbClient.UpdateMachineIP(c.ClientIP(), auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.machineID, err) return nil, jwt.ErrFailedAuthentication } } - if auth.clientMachine.IpAddress != c.ClientIP() && auth.clientMachine.IpAddress != "" { - log.Warningf("new IP address detected for machine '%s': %s (old: %s)", auth.clientMachine.MachineId, c.ClientIP(), auth.clientMachine.IpAddress) + if auth.clientMachine.IpAddress != clientIP && auth.clientMachine.IpAddress != "" { + log.Warningf("new IP address detected for machine '%s': %s (old: %s)", auth.clientMachine.MachineId, 
clientIP, auth.clientMachine.IpAddress) - err = j.DbClient.UpdateMachineIP(c.ClientIP(), auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.clientMachine.MachineId, err) return nil, jwt.ErrFailedAuthentication @@ -233,13 +238,14 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { useragent := strings.Split(c.Request.UserAgent(), "/") if len(useragent) != 2 { - log.Warningf("bad user agent '%s' from '%s'", c.Request.UserAgent(), c.ClientIP()) + log.Warningf("bad user agent '%s' from '%s'", c.Request.UserAgent(), clientIP) return nil, jwt.ErrFailedAuthentication } if err := j.DbClient.UpdateMachineVersion(useragent[1], auth.clientMachine.ID); err != nil { log.Errorf("unable to update machine '%s' version '%s': %s", auth.clientMachine.MachineId, useragent[1], err) - log.Errorf("bad user agent from : %s", c.ClientIP()) + log.Errorf("bad user agent from : %s", clientIP) + return nil, jwt.ErrFailedAuthentication } @@ -323,8 +329,9 @@ func NewJWT(dbClient *database.Client) (*JWT, error) { errInit := ret.MiddlewareInit() if errInit != nil { - return &JWT{}, fmt.Errorf("authMiddleware.MiddlewareInit() Error:" + errInit.Error()) + return &JWT{}, errors.New("authMiddleware.MiddlewareInit() Error:" + errInit.Error()) } + jwtMiddleware.Middleware = ret return jwtMiddleware, nil diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 7fd1f588897..4d10690735d 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -141,12 +141,25 @@ func (l *LocalApiClientCfg) Load() error { } if l.Credentials != nil && l.Credentials.URL != "" { - if !strings.HasSuffix(l.Credentials.URL, "/") { + // don't append a trailing slash if the URL is a unix socket + if strings.HasPrefix(l.Credentials.URL, "http") && !strings.HasSuffix(l.Credentials.URL, "/") { l.Credentials.URL += "/" } } - if l.Credentials.Login != "" && (l.Credentials.CertPath != "" || 
l.Credentials.KeyPath != "") { + // is the configuration asking for client authentication via TLS? + credTLSClientAuth := l.Credentials.CertPath != "" || l.Credentials.KeyPath != "" + + // is the configuration asking for TLS encryption and server authentication? + credTLS := credTLSClientAuth || l.Credentials.CACertPath != "" + + credSocket := strings.HasPrefix(l.Credentials.URL, "/") + + if credTLS && credSocket { + return errors.New("cannot use TLS with a unix socket") + } + + if credTLSClientAuth && l.Credentials.Login != "" { return errors.New("user/password authentication and TLS authentication are mutually exclusive") } @@ -187,10 +200,10 @@ func (l *LocalApiClientCfg) Load() error { return nil } -func (lapiCfg *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) { +func (c *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) { trustedIPs := make([]net.IPNet, 0) - for _, ip := range lapiCfg.TrustedIPs { + for _, ip := range c.TrustedIPs { cidr := toValidCIDR(ip) _, ipNet, err := net.ParseCIDR(cidr) @@ -225,6 +238,7 @@ type CapiWhitelist struct { type LocalApiServerCfg struct { Enable *bool `yaml:"enable"` ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 + ListenSocket string `yaml:"listen_socket,omitempty"` TLS *TLSCfg `yaml:"tls"` DbConfig *DatabaseCfg `yaml:"-"` LogDir string `yaml:"-"` @@ -248,6 +262,22 @@ type LocalApiServerCfg struct { CapiWhitelists *CapiWhitelist `yaml:"-"` } +func (c *LocalApiServerCfg) ClientURL() string { + if c == nil { + return "" + } + + if c.ListenSocket != "" { + return c.ListenSocket + } + + if c.ListenURI != "" { + return "http://" + c.ListenURI + } + + return "" +} + func (c *Config) LoadAPIServer(inCli bool) error { if c.DisableAPI { log.Warning("crowdsec local API is disabled from flag") @@ -255,7 +285,9 @@ func (c *Config) LoadAPIServer(inCli bool) error { if c.API.Server == nil { log.Warning("crowdsec local API is disabled") + c.DisableAPI = true + return nil } @@ -266,6 +298,7 @@ func (c *Config) 
LoadAPIServer(inCli bool) error { if !*c.API.Server.Enable { log.Warning("crowdsec local API is disabled because 'enable' is set to false") + c.DisableAPI = true } @@ -273,8 +306,8 @@ func (c *Config) LoadAPIServer(inCli bool) error { return nil } - if c.API.Server.ListenURI == "" { - return errors.New("no listen_uri specified") + if c.API.Server.ListenURI == "" && c.API.Server.ListenSocket == "" { + return errors.New("no listen_uri or listen_socket specified") } // inherit log level from common, then api->server @@ -393,21 +426,21 @@ func parseCapiWhitelists(fd io.Reader) (*CapiWhitelist, error) { return ret, nil } -func (s *LocalApiServerCfg) LoadCapiWhitelists() error { - if s.CapiWhitelistsPath == "" { +func (c *LocalApiServerCfg) LoadCapiWhitelists() error { + if c.CapiWhitelistsPath == "" { return nil } - fd, err := os.Open(s.CapiWhitelistsPath) + fd, err := os.Open(c.CapiWhitelistsPath) if err != nil { return fmt.Errorf("while opening capi whitelist file: %w", err) } defer fd.Close() - s.CapiWhitelists, err = parseCapiWhitelists(fd) + c.CapiWhitelists, err = parseCapiWhitelists(fd) if err != nil { - return fmt.Errorf("while parsing capi whitelist file '%s': %w", s.CapiWhitelistsPath, err) + return fmt.Errorf("while parsing capi whitelist file '%s': %w", c.CapiWhitelistsPath, err) } return nil diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 233340e500f..1b7940615ed 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ b/test/bats/01_crowdsec_lapi.bats @@ -32,20 +32,20 @@ teardown() { } @test "lapi (no .api.server.listen_uri)" { - rune -0 config_set 'del(.api.server.listen_uri)' + rune -0 config_set 'del(.api.server.listen_socket) | del(.api.server.listen_uri)' rune -1 "${CROWDSEC}" -no-cs - assert_stderr --partial "no listen_uri specified" + assert_stderr --partial "no listen_uri or listen_socket specified" } @test "lapi (bad .api.server.listen_uri)" { - rune -0 config_set '.api.server.listen_uri="127.0.0.1:-80"' + rune -0 
config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:-80"' rune -1 "${CROWDSEC}" -no-cs - assert_stderr --partial "while starting API server: listening on 127.0.0.1:-80: listen tcp: address -80: invalid port" + assert_stderr --partial "local API server stopped with error: listening on 127.0.0.1:-80: listen tcp: address -80: invalid port" } @test "lapi (listen on random port)" { config_set '.common.log_media="stdout"' - rune -0 config_set '.api.server.listen_uri="127.0.0.1:0"' + rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:0"' rune -0 wait-for --err "CrowdSec Local API listening on 127.0.0.1:" "${CROWDSEC}" -no-cs } diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 03f0132ea63..4c7ce7fbc2c 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -100,10 +100,14 @@ teardown() { # check that LAPI configuration is loaded (human and json, not shows in raw) + sock=$(config_get '.api.server.listen_socket') + rune -0 cscli config show -o human assert_line --regexp ".*- URL +: http://127.0.0.1:8080/" assert_line --regexp ".*- Login +: githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})?" 
assert_line --regexp ".*- Credentials File +: .*/local_api_credentials.yaml" + assert_line --regexp ".*- Listen URL +: 127.0.0.1:8080" + assert_line --regexp ".*- Listen Socket +: $sock" rune -0 cscli config show -o json rune -0 jq -c '.API.Client.Credentials | [.url,.login[0:32]]' <(output) @@ -212,7 +216,6 @@ teardown() { assert_stderr --partial "Loaded credentials from" assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial " on http://127.0.0.1:8080/" assert_stderr --partial "You can successfully interact with Local API (LAPI)" } diff --git a/test/bats/09_socket.bats b/test/bats/09_socket.bats new file mode 100644 index 00000000000..f770abaad2e --- /dev/null +++ b/test/bats/09_socket.bats @@ -0,0 +1,158 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" + sockdir=$(TMPDIR="$BATS_FILE_TMPDIR" mktemp -u) + export sockdir + mkdir -p "$sockdir" + socket="$sockdir/crowdsec_api.sock" + export socket + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + export LOCAL_API_CREDENTIALS +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + config_set ".api.server.listen_socket=strenv(socket)" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli - connects from existing machine with socket" { + config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" + + ./instance-crowdsec start + + rune -0 cscli lapi status + assert_stderr --regexp "Trying to authenticate with username .* on $socket" + assert_stderr --partial "You can successfully interact with Local API (LAPI)" +} + +@test "crowdsec - listen on both socket and TCP" { + ./instance-crowdsec start + + rune -0 cscli lapi status + assert_stderr --regexp "Trying to authenticate with username .* on http://127.0.0.1:8080/" + assert_stderr --partial "You can successfully 
interact with Local API (LAPI)" + + config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" + + rune -0 cscli lapi status + assert_stderr --regexp "Trying to authenticate with username .* on $socket" + assert_stderr --partial "You can successfully interact with Local API (LAPI)" +} + +@test "cscli - authenticate new machine with socket" { + # verify that if a listen_uri and a socket are set, the socket is used + # by default when creating a local machine. + + rune -0 cscli machines delete "$(cscli machines list -o json | jq -r '.[].machineId')" + + # this one should be using the socket + rune -0 cscli machines add --auto --force + + using=$(config_get "$LOCAL_API_CREDENTIALS" ".url") + + assert [ "$using" = "$socket" ] + + # disable the agent because it counts as a first authentication + config_disable_agent + ./instance-crowdsec start + + # the machine does not have an IP yet + + rune -0 cscli machines list -o json + rune -0 jq -r '.[].ipAddress' <(output) + assert_output null + + # upon first authentication, it's assigned to localhost + + rune -0 cscli lapi status + + rune -0 cscli machines list -o json + rune -0 jq -r '.[].ipAddress' <(output) + assert_output 127.0.0.1 +} + +bouncer_http() { + URI="$1" + curl -fs -H "X-Api-Key: $API_KEY" "http://localhost:8080$URI" +} + +bouncer_socket() { + URI="$1" + curl -fs -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://localhost$URI" +} + +@test "lapi - connects from existing bouncer with socket" { + ./instance-crowdsec start + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + # the bouncer does not have an IP yet + + rune -0 cscli bouncers list -o json + rune -0 jq -r '.[].ip_address' <(output) + assert_output "" + + # upon first authentication, it's assigned to localhost + + rune -0 bouncer_socket '/v1/decisions' + assert_output 'null' + refute_stderr + + rune -0 cscli bouncers list -o json + rune -0 jq -r '.[].ip_address' <(output) + assert_output "127.0.0.1" + + # we can still use TCP of 
course + + rune -0 bouncer_http '/v1/decisions' + assert_output 'null' + refute_stderr +} + +@test "lapi - listen on socket only" { + config_set "del(.api.server.listen_uri)" + + mkdir -p "$sockdir" + + # agent is not able to connect right now + config_disable_agent + ./instance-crowdsec start + + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + # now we can't + + rune -1 cscli lapi status + assert_stderr --partial "connection refused" + + rune -7 bouncer_http '/v1/decisions' + refute_output + refute_stderr + + # here we can + + config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" + + rune -0 cscli lapi status + + rune -0 bouncer_socket '/v1/decisions' + assert_output 'null' + refute_stderr +} diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 311293ca70c..6909c89cb1f 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -120,7 +120,50 @@ teardown() { rune -0 jq -c '[. | length, .[0].machineId[0:32], .[0].isValidated, .[0].ipAddress, .[0].auth_type]' <(output) assert_output '[1,"localhost@127.0.0.1",true,"127.0.0.1","tls"]' - cscli machines delete localhost@127.0.0.1 + rune -0 cscli machines delete localhost@127.0.0.1 +} + +@test "a machine can still connect with a unix socket, no TLS" { + sock=$(config_get '.api.server.listen_socket') + export sock + + # an agent is a machine too + config_disable_agent + ./instance-crowdsec start + + rune -0 cscli machines add with-socket --auto --force + rune -0 cscli lapi status + + rune -0 cscli machines list -o json + rune -0 jq -c '[. 
| length, .[0].machineId[0:32], .[0].isValidated, .[0].ipAddress, .[0].auth_type]' <(output) + assert_output '[1,"with-socket",true,"127.0.0.1","password"]' + + # TLS cannot be used with a unix socket + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" + ' + + rune -1 cscli lapi status + assert_stderr --partial "loading api client: cannot use TLS with a unix socket" + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + del(.ca_cert_path) | + .key_path=strenv(tmpdir) + "/agent-key.pem" + ' + + rune -1 cscli lapi status + assert_stderr --partial "loading api client: cannot use TLS with a unix socket" + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + del(.key_path) | + .cert_path=strenv(tmpdir) + "/agent.pem" + ' + + rune -1 cscli lapi status + assert_stderr --partial "loading api client: cannot use TLS with a unix socket" + + rune -0 cscli machines delete with-socket } @test "invalid cert for agent" { diff --git a/test/lib/config/config-global b/test/lib/config/config-global index 68346c18875..0caf0591f7d 100755 --- a/test/lib/config/config-global +++ b/test/lib/config/config-global @@ -58,6 +58,7 @@ config_prepare() { # remove trailing slash from CONFIG_DIR # since it's assumed to be missing during the tests yq e -i ' + .api.server.listen_socket="/run/crowdsec.sock" | .config_paths.config_dir |= sub("/$", "") ' "${CONFIG_DIR}/config.yaml" } diff --git a/test/lib/config/config-local b/test/lib/config/config-local index e3b7bc685d4..e5cfaf997be 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -57,7 +57,6 @@ config_generate() { cp ../config/profiles.yaml \ ../config/simulation.yaml \ - ../config/local_api_credentials.yaml \ ../config/online_api_credentials.yaml \ "${CONFIG_DIR}/" @@ -95,6 +94,7 @@ config_generate() { .db_config.db_path=strenv(DATA_DIR)+"/crowdsec.db" | .db_config.use_wal=true | 
.api.client.credentials_path=strenv(CONFIG_DIR)+"/local_api_credentials.yaml" | + .api.server.listen_socket=strenv(DATA_DIR)+"/crowdsec.sock" | .api.server.profiles_path=strenv(CONFIG_DIR)+"/profiles.yaml" | .api.server.console_path=strenv(CONFIG_DIR)+"/console.yaml" | del(.api.server.online_client) @@ -119,7 +119,8 @@ make_init_data() { ./bin/preload-hub-items - "$CSCLI" --warning machines add githubciXXXXXXXXXXXXXXXXXXXXXXXX --auto --force + # force TCP, the default would be unix socket + "$CSCLI" --warning machines add githubciXXXXXXXXXXXXXXXXXXXXXXXX --url http://127.0.0.1:8080 --auto --force mkdir -p "$LOCAL_INIT_DIR" From 742f5e8cdab3057ae04f68b445d0f9222bbbea40 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 14 Mar 2024 14:00:39 +0100 Subject: [PATCH 060/581] [appsec] delete api key header before processing the request (#2890) --- pkg/appsec/request.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index effb1828307..5b77f2285a3 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -320,6 +320,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR delete(r.Header, URIHeaderName) delete(r.Header, VerbHeaderName) delete(r.Header, UserAgentHeaderName) + delete(r.Header, APIKeyHeaderName) originalHTTPRequest := r.Clone(r.Context()) originalHTTPRequest.Body = io.NopCloser(bytes.NewBuffer(body)) From 06bebdeac78184c771fc077fccd9890e3493af63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:01:09 +0100 Subject: [PATCH 061/581] Bump google.golang.org/protobuf from 1.31.0 to 1.33.0 (#2893) Bumps google.golang.org/protobuf from 1.31.0 to 1.33.0. 
--- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a2b9c8618c6..22d52be1ca8 100644 --- a/go.mod +++ b/go.mod @@ -86,7 +86,7 @@ require ( golang.org/x/sys v0.15.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 - google.golang.org/protobuf v1.31.0 + google.golang.org/protobuf v1.33.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 08a454cf0f0..07bddaf8967 100644 --- a/go.sum +++ b/go.sum @@ -908,8 +908,8 @@ google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 7dd86e2b95d73e70d3799223bf4e33dd0cb18036 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 14 Mar 2024 14:02:53 +0100 Subject: [PATCH 062/581] add cron as a suggested package (#2799) --- debian/control | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/control b/debian/control index 4673284e7b4..0ee08b71f85 100644 --- a/debian/control +++ b/debian/control @@ -8,3 +8,4 @@ Package: crowdsec 
Architecture: any Description: Crowdsec - An open-source, lightweight agent to detect and respond to bad behaviors. It also automatically benefits from our global community-wide IP reputation database Depends: coreutils +Suggests: cron From caca4032d18ab60a3b5c1437c81c29a3792cded9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:03:43 +0100 Subject: [PATCH 063/581] lapi: log error "can't sinchronize with console" only if papi is enabled (#2896) --- pkg/apiserver/apiserver.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index e42ad9a98fa..7989cfc1d97 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -250,8 +250,8 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { controller.AlertsAddChan = apiClient.AlertsAddChan - if apiClient.apiClient.IsEnrolled() { - if config.ConsoleConfig.IsPAPIEnabled() { + if config.ConsoleConfig.IsPAPIEnabled() { + if apiClient.apiClient.IsEnrolled() { log.Info("Machine is enrolled in the console, Loading PAPI Client") papiClient, err = NewPAPI(apiClient, dbClient, config.ConsoleConfig, *config.PapiLogLevel) @@ -260,9 +260,9 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { } controller.DecisionDeleteChan = papiClient.Channels.DeleteDecisionChannel + } else { + log.Error("Machine is not enrolled in the console, can't synchronize with the console") } - } else { - log.Errorf("Machine is not enrolled in the console, can't synchronize with the console") } } From a6b0e5838086530294adc640ef4fe731c31b6edb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 14 Mar 2024 14:04:45 +0100 Subject: [PATCH 064/581] CI: bump github actions (#2895) --- .github/workflows/bats-hub.yml | 4 ++-- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- 
.github/workflows/ci_release-drafter.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 4 ++-- .github/workflows/go-tests.yml | 4 ++-- .github/workflows/governance-bot.yaml | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 075480485ff..816234adf61 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -53,7 +53,7 @@ jobs: run: ./test/bin/collect-hub-coverage >> $GITHUB_ENV - name: "Create Parsers badge" - uses: schneegans/dynamic-badges-action@v1.6.0 + uses: schneegans/dynamic-badges-action@v1.7.0 if: ${{ github.ref == 'refs/heads/master' && github.repository_owner == 'crowdsecurity' }} with: auth: ${{ secrets.GIST_BADGES_SECRET }} @@ -64,7 +64,7 @@ jobs: color: ${{ env.SCENARIO_BADGE_COLOR }} - name: "Create Scenarios badge" - uses: schneegans/dynamic-badges-action@v1.6.0 + uses: schneegans/dynamic-badges-action@v1.7.0 if: ${{ github.ref == 'refs/heads/master' && github.repository_owner == 'crowdsecurity' }} with: auth: ${{ secrets.GIST_BADGES_SECRET }} diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 436eb0f04a4..8e890e0f8b9 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -77,7 +77,7 @@ jobs: if: ${{ always() }} - name: Upload crowdsec coverage to codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: ./coverage-bats.out flags: bats diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 7c6a6621de4..e116e0d2e24 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -40,7 +40,7 @@ jobs: - name: Build run: make windows_installer BUILD_RE2_WASM=1 - name: Upload MSI - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: path: 
crowdsec*msi name: crowdsec.msi diff --git a/.github/workflows/ci_release-drafter.yml b/.github/workflows/ci_release-drafter.yml index 2ccb6977cfd..0b8c9b386e6 100644 --- a/.github/workflows/ci_release-drafter.yml +++ b/.github/workflows/ci_release-drafter.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: # Drafts your next Release notes as Pull Requests are merged into "master" - - uses: release-drafter/release-drafter@v5 + - uses: release-drafter/release-drafter@v6 with: config-name: release-drafter.yml # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index bdc16e650f6..7c4e749ece7 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -68,7 +68,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) # - name: Autobuild - # uses: github/codeql-action/autobuild@v2 + # uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index efe16ed66d9..9dfcc089a63 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -48,13 +48,13 @@ jobs: cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - name: Upload unit coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: coverage.out flags: unit-windows - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: v1.55 args: --issues-exit-code=1 --timeout 10m diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 865b2782a63..61dadfc0b78 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -149,13 +149,13 @@ jobs: make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - name: Upload unit coverage to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: files: coverage.out flags: unit-linux - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: v1.55 args: --issues-exit-code=1 --timeout 10m diff --git a/.github/workflows/governance-bot.yaml b/.github/workflows/governance-bot.yaml index 5c08cabf5d1..c9e73e7811a 100644 --- a/.github/workflows/governance-bot.yaml +++ b/.github/workflows/governance-bot.yaml @@ -23,7 +23,7 @@ jobs: runs-on: ubuntu-latest steps: # Semantic versioning, lock to different version: v2, v2.0 or a commit hash. 
- - uses: BirthdayResearch/oss-governance-bot@v3 + - uses: BirthdayResearch/oss-governance-bot@v4 with: # You can use a PAT to post a comment/label/status so that it shows up as a user instead of github-actions github-token: ${{secrets.GITHUB_TOKEN}} # optional, default to '${{ github.token }}' From e9b0f3c54ed76cafa0a453c1417efa5846c99f5a Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Thu, 14 Mar 2024 15:36:47 +0000 Subject: [PATCH 065/581] wip: fix unix socket error (#2897) --- pkg/appsec/request.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 5b77f2285a3..a9eb0d372a4 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -342,6 +342,10 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR } var remoteAddrNormalized string + if r.RemoteAddr == "@" { + r.RemoteAddr = "127.0.0.1:65535" + } + // TODO we need to implement forwrded headers host, _, err := net.SplitHostPort(r.RemoteAddr) if err != nil { log.Errorf("Invalid appsec remote IP source %v: %s", r.RemoteAddr, err.Error()) From fd2bb8927cc8288bb6494f945abcdd67883f0905 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Fri, 15 Mar 2024 14:36:34 +0100 Subject: [PATCH 066/581] Fix rpm build (#2894) * fix rpm build --- rpm/SOURCES/crowdsec.unit.patch | 15 --------------- rpm/SPECS/crowdsec.spec | 6 ++---- 2 files changed, 2 insertions(+), 19 deletions(-) delete mode 100644 rpm/SOURCES/crowdsec.unit.patch diff --git a/rpm/SOURCES/crowdsec.unit.patch b/rpm/SOURCES/crowdsec.unit.patch deleted file mode 100644 index 5173beb2710..00000000000 --- a/rpm/SOURCES/crowdsec.unit.patch +++ /dev/null @@ -1,15 +0,0 @@ ---- config/crowdsec.service-orig 2022-03-24 09:46:16.581681532 +0000 -+++ config/crowdsec.service 2022-03-24 09:46:28.761681532 +0000 -@@ -5,8 +5,8 @@ - [Service] - Type=notify - Environment=LC_ALL=C LANG=C --ExecStartPre=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error 
--ExecStart=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -+ExecStartPre=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error -+ExecStart=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml - #ExecStartPost=/bin/sleep 0.1 --ExecReload=/usr/local/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error -+ExecReload=/usr/bin/crowdsec -c /etc/crowdsec/config.yaml -t -error - ExecReload=/bin/kill -HUP $MAINPID - Restart=always diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index f14df932590..60884dfd4e6 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -8,8 +8,7 @@ License: MIT URL: https://crowdsec.net Source0: https://github.com/crowdsecurity/%{name}/archive/v%(echo $VERSION).tar.gz Source1: 80-%{name}.preset -Patch0: crowdsec.unit.patch -Patch1: user.patch +Patch0: user.patch BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildRequires: systemd @@ -32,7 +31,6 @@ Requires: crontabs %setup -q -T -b 0 %patch0 -%patch1 %build sed -i "s#/usr/local/lib/crowdsec/plugins/#%{_libdir}/%{name}/plugins/#g" config/config.yaml @@ -53,7 +51,7 @@ mkdir -p %{buildroot}%{_libdir}/%{name}/plugins/ install -m 755 -D cmd/crowdsec/crowdsec %{buildroot}%{_bindir}/%{name} install -m 755 -D cmd/crowdsec-cli/cscli %{buildroot}%{_bindir}/cscli install -m 755 -D wizard.sh %{buildroot}/usr/share/crowdsec/wizard.sh -install -m 644 -D config/crowdsec.service %{buildroot}%{_unitdir}/%{name}.service +install -m 644 -D debian/crowdsec.service %{buildroot}%{_unitdir}/%{name}.service install -m 644 -D config/patterns/* -t %{buildroot}%{_sysconfdir}/crowdsec/patterns install -m 600 -D config/config.yaml %{buildroot}%{_sysconfdir}/crowdsec install -m 644 -D config/simulation.yaml %{buildroot}%{_sysconfdir}/crowdsec From 2f490881637c28325e7567eb2f840a921137e921 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 18 Mar 2024 11:25:45 +0100 Subject: [PATCH 067/581] file acquisition: don't bubble error when 
tailed file disappears (#2903) * file acquisition: don't bubble error when tailed file disappears * don't call t.Kill() * lint (whitespace) --- pkg/acquisition/modules/file/file.go | 119 +++++++++++++++++++++------ 1 file changed, 96 insertions(+), 23 deletions(-) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index efc89715296..ee3c844a556 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -3,6 +3,7 @@ package fileacquisition import ( "bufio" "compress/gzip" + "errors" "fmt" "io" "net/url" @@ -16,7 +17,6 @@ import ( "github.com/fsnotify/fsnotify" "github.com/nxadm/tail" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -63,6 +63,7 @@ func (f *FileSource) GetUuid() string { func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { f.config = FileConfiguration{} + err := yaml.UnmarshalStrict(yamlConfig, &f.config) if err != nil { return fmt.Errorf("cannot parse FileAcquisition configuration: %w", err) @@ -77,7 +78,7 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { } if len(f.config.Filenames) == 0 { - return fmt.Errorf("no filename or filenames configuration provided") + return errors.New("no filename or filenames configuration provided") } if f.config.Mode == "" { @@ -93,6 +94,7 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { if err != nil { return fmt.Errorf("could not compile regexp %s: %w", exclude, err) } + f.exclude_regexps = append(f.exclude_regexps, re) } @@ -123,56 +125,68 @@ func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLeve if f.config.ForceInotify { directory := filepath.Dir(pattern) f.logger.Infof("Force add watch on %s", directory) + if !f.watchedDirectories[directory] { err = f.watcher.Add(directory) if err != nil { f.logger.Errorf("Could not create watch on directory %s : %s", directory, err) continue } + 
f.watchedDirectories[directory] = true } } + files, err := filepath.Glob(pattern) if err != nil { return fmt.Errorf("glob failure: %w", err) } + if len(files) == 0 { f.logger.Warnf("No matching files for pattern %s", pattern) continue } - for _, file := range files { - //check if file is excluded + for _, file := range files { + // check if file is excluded excluded := false + for _, pattern := range f.exclude_regexps { if pattern.MatchString(file) { excluded = true + f.logger.Infof("Skipping file %s as it matches exclude pattern %s", file, pattern) + break } } + if excluded { continue } - if files[0] != pattern && f.config.Mode == configuration.TAIL_MODE { //we have a glob pattern + + if files[0] != pattern && f.config.Mode == configuration.TAIL_MODE { // we have a glob pattern directory := filepath.Dir(file) f.logger.Debugf("Will add watch to directory: %s", directory) - if !f.watchedDirectories[directory] { + if !f.watchedDirectories[directory] { err = f.watcher.Add(directory) if err != nil { f.logger.Errorf("Could not create watch on directory %s : %s", directory, err) continue } + f.watchedDirectories[directory] = true } else { f.logger.Debugf("Watch for directory %s already exists", directory) } } + f.logger.Infof("Adding file %s to datasources", file) f.files = append(f.files, file) } } + return nil } @@ -189,7 +203,7 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger args := strings.Split(dsn, "?") if len(args[0]) == 0 { - return fmt.Errorf("empty file:// DSN") + return errors.New("empty file:// DSN") } if len(args) == 2 && len(args[1]) != 0 { @@ -197,25 +211,30 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger if err != nil { return fmt.Errorf("could not parse file args: %w", err) } + for key, value := range params { switch key { case "log_level": if len(value) != 1 { return errors.New("expected zero or one value for 'log_level'") } + lvl, err := log.ParseLevel(value[0]) if err != nil { 
return fmt.Errorf("unknown level %s: %w", value[0], err) } + f.logger.Logger.SetLevel(lvl) case "max_buffer_size": if len(value) != 1 { return errors.New("expected zero or one value for 'max_buffer_size'") } + maxBufferSize, err := strconv.Atoi(value[0]) if err != nil { return fmt.Errorf("could not parse max_buffer_size %s: %w", value[0], err) } + f.config.MaxBufferSize = maxBufferSize default: return fmt.Errorf("unknown parameter %s", key) @@ -228,6 +247,7 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger f.config.UniqueId = uuid f.logger.Debugf("Will try pattern %s", args[0]) + files, err := filepath.Glob(args[0]) if err != nil { return fmt.Errorf("glob failure: %w", err) @@ -245,6 +265,7 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger f.logger.Infof("Adding file %s to filelist", file) f.files = append(f.files, file) } + return nil } @@ -260,22 +281,26 @@ func (f *FileSource) SupportedModes() []string { // OneShotAcquisition reads a set of file and returns when done func (f *FileSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { f.logger.Debug("In oneshot") + for _, file := range f.files { fi, err := os.Stat(file) if err != nil { return fmt.Errorf("could not stat file %s : %w", file, err) } + if fi.IsDir() { f.logger.Warnf("%s is a directory, ignoring it.", file) continue } + f.logger.Infof("reading %s at once", file) + err = f.readFile(file, out, t) if err != nil { return err } - } + return nil } @@ -300,27 +325,33 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er t.Go(func() error { return f.monitorNewFiles(out, t) }) + for _, file := range f.files { - //before opening the file, check if we need to specifically avoid it. (XXX) + // before opening the file, check if we need to specifically avoid it. 
(XXX) skip := false + for _, pattern := range f.exclude_regexps { if pattern.MatchString(file) { f.logger.Infof("file %s matches exclusion pattern %s, skipping", file, pattern.String()) + skip = true + break } } + if skip { continue } - //cf. https://github.com/crowdsecurity/crowdsec/issues/1168 - //do not rely on stat, reclose file immediately as it's opened by Tail + // cf. https://github.com/crowdsecurity/crowdsec/issues/1168 + // do not rely on stat, reclose file immediately as it's opened by Tail fd, err := os.Open(file) if err != nil { f.logger.Errorf("unable to read %s : %s", file, err) continue } + if err := fd.Close(); err != nil { f.logger.Errorf("unable to close %s : %s", file, err) continue @@ -330,6 +361,7 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er if err != nil { return fmt.Errorf("could not stat file %s : %w", file, err) } + if fi.IsDir() { f.logger.Warnf("%s is a directory, ignoring it.", file) continue @@ -343,9 +375,12 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er if err != nil { f.logger.Warningf("Could not get fs type for %s : %s", file, err) } + f.logger.Debugf("fs for %s is network: %t (%s)", file, networkFS, fsType) + if networkFS { f.logger.Warnf("Disabling inotify poll on %s as it is on a network share. 
You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + inotifyPoll = false } } @@ -355,6 +390,7 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er f.logger.Errorf("Could not start tailing file %s : %s", file, err) continue } + f.tailMapMutex.Lock() f.tails[file] = true f.tailMapMutex.Unlock() @@ -363,6 +399,7 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er return f.tailFile(out, t, tail) }) } + return nil } @@ -372,6 +409,7 @@ func (f *FileSource) Dump() interface{} { func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { logger := f.logger.WithField("goroutine", "inotify") + for { select { case event, ok := <-f.watcher.Events: @@ -385,36 +423,47 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) continue } + if fi.IsDir() { continue } + logger.Debugf("Detected new file %s", event.Name) + matched := false + for _, pattern := range f.config.Filenames { logger.Debugf("Matching %s with %s", pattern, event.Name) + matched, err = filepath.Match(pattern, event.Name) if err != nil { logger.Errorf("Could not match pattern : %s", err) continue } + if matched { logger.Debugf("Matched %s with %s", pattern, event.Name) break } } + if !matched { continue } - //before opening the file, check if we need to specifically avoid it. (XXX) + // before opening the file, check if we need to specifically avoid it. 
(XXX) skip := false + for _, pattern := range f.exclude_regexps { if pattern.MatchString(event.Name) { f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) + skip = true + break } } + if skip { continue } @@ -422,13 +471,14 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { f.tailMapMutex.RLock() if f.tails[event.Name] { f.tailMapMutex.RUnlock() - //we already have a tail on it, do not start a new one + // we already have a tail on it, do not start a new one logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) + break } f.tailMapMutex.RUnlock() - //cf. https://github.com/crowdsecurity/crowdsec/issues/1168 - //do not rely on stat, reclose file immediately as it's opened by Tail + // cf. https://github.com/crowdsecurity/crowdsec/issues/1168 + // do not rely on stat, reclose file immediately as it's opened by Tail fd, err := os.Open(event.Name) if err != nil { f.logger.Errorf("unable to read %s : %s", event.Name, err) @@ -450,19 +500,22 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { if err != nil { f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) + if networkFS { inotifyPoll = false } } } - //Slightly different parameters for Location, as we want to read the first lines of the newly created file + // Slightly different parameters for Location, as we want to read the first lines of the newly created file tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) if err != nil { logger.Errorf("Could not start tailing file %s : %s", event.Name, err) break } + f.tailMapMutex.Lock() f.tails[event.Name] = true f.tailMapMutex.Unlock() @@ -475,12 +528,14 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { if 
!ok { return nil } + logger.Errorf("Error while monitoring folder: %s", err) case <-t.Dying(): err := f.watcher.Close() if err != nil { return fmt.Errorf("could not remove all inotify watches: %w", err) } + return nil } } @@ -489,39 +544,47 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tail) error { logger := f.logger.WithField("tail", tail.Filename) logger.Debugf("-> Starting tail of %s", tail.Filename) + for { select { case <-t.Dying(): logger.Infof("File datasource %s stopping", tail.Filename) + if err := tail.Stop(); err != nil { f.logger.Errorf("error in stop : %s", err) return err } + return nil - case <-tail.Dying(): //our tailer is dying + case <-tail.Dying(): // our tailer is dying err := tail.Err() errMsg := fmt.Sprintf("file reader of %s died", tail.Filename) if err != nil { errMsg = fmt.Sprintf(errMsg+" : %s", err) } + logger.Warningf(errMsg) - t.Kill(fmt.Errorf(errMsg)) - return fmt.Errorf(errMsg) + + return nil case line := <-tail.Lines: if line == nil { logger.Warningf("tail for %s is empty", tail.Filename) continue } + if line.Err != nil { logger.Warningf("fetch error : %v", line.Err) return line.Err } - if line.Text == "" { //skip empty lines + + if line.Text == "" { // skip empty lines continue } + if f.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": tail.Filename}).Inc() } + src := tail.Filename if f.metricsLevel == configuration.METRICS_AGGREGATE { src = filepath.Base(tail.Filename) @@ -535,7 +598,7 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai Process: true, Module: f.GetName(), } - //we're tailing, it must be real time logs + // we're tailing, it must be real time logs logger.Debugf("pushing %+v", l) expectMode := types.LIVE @@ -549,12 +612,14 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai func (f *FileSource) 
readFile(filename string, out chan types.Event, t *tomb.Tomb) error { var scanner *bufio.Scanner + logger := f.logger.WithField("oneshot", filename) fd, err := os.Open(filename) if err != nil { return fmt.Errorf("failed opening %s: %w", filename, err) } + defer fd.Close() if strings.HasSuffix(filename, ".gz") { @@ -563,17 +628,20 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom logger.Errorf("Failed to read gz file: %s", err) return fmt.Errorf("failed to read gz %s: %w", filename, err) } + defer gz.Close() scanner = bufio.NewScanner(gz) - } else { scanner = bufio.NewScanner(fd) } + scanner.Split(bufio.ScanLines) + if f.config.MaxBufferSize > 0 { buf := make([]byte, 0, 64*1024) scanner.Buffer(buf, f.config.MaxBufferSize) } + for scanner.Scan() { select { case <-t.Dying(): @@ -583,6 +651,7 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom if scanner.Text() == "" { continue } + l := types.Line{ Raw: scanner.Text(), Time: time.Now().UTC(), @@ -594,15 +663,19 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom logger.Debugf("line %s", l.Raw) linesRead.With(prometheus.Labels{"source": filename}).Inc() - //we're reading logs at once, it must be time-machine buckets + // we're reading logs at once, it must be time-machine buckets out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} } } + if err := scanner.Err(); err != nil { logger.Errorf("Error while reading file: %s", err) t.Kill(err) + return err } + t.Kill(nil) + return nil } From b411782648a958fab35d307fda27803b9751a8f6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:03:54 +0100 Subject: [PATCH 068/581] CI: use go 1.21.8 (#2906) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- 
.github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 816234adf61..bd84389b011 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 5c019933304..cc90961bfd6 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 0f3c69ccefa..18cbb50a12e 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 8e890e0f8b9..6780727e968 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index e116e0d2e24..ae3a475774d 100644 --- 
a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 7c4e749ece7..68c95ed6446 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 9dfcc089a63..6629250c592 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 61dadfc0b78..e0603e0dec3 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index d251677fd46..6fd582da853 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.7" + go-version: "1.21.8" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index 420c521fa58..ed1ac5e28c2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.7-alpine3.18 
AS build +FROM golang:1.21.8-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 48753e7acdb..fd40bd475bb 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.7-bookworm AS build +FROM golang:1.21.8-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 791f41f50ba..6151fe22274 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,7 +27,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.21.7' + version: '1.21.8' - pwsh: | choco install -y make From 6de62a1468fef07a74690b878e8d5a451df9f022 Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 19 Mar 2024 10:22:43 +0100 Subject: [PATCH 069/581] warn if user is using inotify to tail a symlink (#2881) --- pkg/acquisition/modules/file/file.go | 61 +++++++++++++++++----------- 1 file changed, 38 insertions(+), 23 deletions(-) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index ee3c844a556..c36672507db 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -367,9 +367,9 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er continue } - inotifyPoll := true + pollFile := false if f.config.PollWithoutInotify != nil { - inotifyPoll = *f.config.PollWithoutInotify + pollFile = *f.config.PollWithoutInotify } else { networkFS, fsType, err := types.IsNetworkFS(file) if err != nil { @@ -379,13 +379,23 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er f.logger.Debugf("fs for %s is network: %t (%s)", file, networkFS, fsType) if networkFS { - f.logger.Warnf("Disabling inotify poll on %s as it is on a network share. 
You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) - - inotifyPoll = false + f.logger.Warnf("Disabling inotify polling on %s as it is on a network share. You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + pollFile = true } } - tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) + filink, err := os.Lstat(file) + + if err != nil { + f.logger.Errorf("Could not lstat() new file %s, ignoring it : %s", file, err) + continue + } + + if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { + f.logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", file) + } + + tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) if err != nil { f.logger.Errorf("Could not start tailing file %s : %s", file, err) continue @@ -489,28 +499,33 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { continue } - inotifyPoll := true + pollFile := false if f.config.PollWithoutInotify != nil { - inotifyPoll = *f.config.PollWithoutInotify + pollFile = *f.config.PollWithoutInotify } else { - if f.config.PollWithoutInotify != nil { - inotifyPoll = *f.config.PollWithoutInotify - } else { - networkFS, fsType, err := types.IsNetworkFS(event.Name) - if err != nil { - f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) - } - - f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) - - if networkFS { - inotifyPoll = false - } + networkFS, fsType, err := 
types.IsNetworkFS(event.Name) + if err != nil { + f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) + if networkFS { + pollFile = true + } + } + + filink, err := os.Lstat(event.Name) + + if err != nil { + logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) + continue + } + + if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { + logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", event.Name) } - // Slightly different parameters for Location, as we want to read the first lines of the newly created file - tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: inotifyPoll, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) + //Slightly different parameters for Location, as we want to read the first lines of the newly created file + tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) if err != nil { logger.Errorf("Could not start tailing file %s : %s", event.Name, err) break From b63e64ee9f164531ab9ba98ead10a76d21b87c1e Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Tue, 19 Mar 2024 10:29:16 +0100 Subject: [PATCH 070/581] Fix locking logic for HA + add list unsubscribe for PAPI (#2904) * add list unsubscribe operation for papi * fix the locking logic for HA --- pkg/apiserver/apic.go | 13 ++++++++----- pkg/apiserver/papi_cmd.go | 37 +++++++++++++++++++++++++++++++++---- pkg/database/lock.go | 26 +++++++++++++++++++------- 3 files changed, 60 insertions(+), 16 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index f57ae685e45..2136edc8b8e 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -639,6 
+639,14 @@ func (a *apic) PullTop(forcePull bool) error { return nil } + /*defer lock release*/ + defer func() { + log.Debug("Releasing lock for pullCAPI") + if err := a.dbClient.ReleasePullCAPILock(); err != nil { + log.Errorf("while releasing lock: %v", err) + } + }() + log.Infof("Starting community-blocklist update") data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup}) @@ -690,11 +698,6 @@ func (a *apic) PullTop(forcePull bool) error { return fmt.Errorf("while updating blocklists: %w", err) } - log.Debug("Releasing lock for pullCAPI") - if err := a.dbClient.ReleasePullCAPILock(); err != nil { - return fmt.Errorf("while releasing lock: %w", err) - } - return nil } diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index ba02034882c..fb76223b9a0 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -37,6 +37,10 @@ type forcePull struct { Blocklist *blocklistLink `json:"blocklist,omitempty"` } +type listUnsubscribe struct { + Name string `json:"name"` +} + func DecisionCmd(message *Message, p *Papi, sync bool) error { switch message.Header.OperationCmd { case "delete": @@ -163,13 +167,38 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { func ManagementCmd(message *Message, p *Papi, sync bool) error { if sync { - log.Infof("Ignoring management command from PAPI in sync mode") + p.Logger.Infof("Ignoring management command from PAPI in sync mode") return nil } switch message.Header.OperationCmd { + + case "blocklist_unsubscribe": + data, err := json.Marshal(message.Data) + if err != nil { + return err + } + unsubscribeMsg := listUnsubscribe{} + if err := json.Unmarshal(data, &unsubscribeMsg); err != nil { + return fmt.Errorf("message for '%s' contains bad data format: %s", message.Header.OperationType, err) + } + if unsubscribeMsg.Name == "" { + return fmt.Errorf("message for '%s' contains bad data format: missing blocklist name", 
message.Header.OperationType) + } + p.Logger.Infof("Received blocklist_unsubscribe command from PAPI, unsubscribing from blocklist %s", unsubscribeMsg.Name) + + filter := make(map[string][]string) + filter["origin"] = []string{types.ListOrigin} + filter["scenario"] = []string{unsubscribeMsg.Name} + + _, deletedDecisions, err := p.DBClient.SoftDeleteDecisionsWithFilter(filter) + if err != nil { + return fmt.Errorf("unable to delete decisions for list %s : %w", unsubscribeMsg.Name, err) + } + p.Logger.Infof("deleted %d decisions for list %s", len(deletedDecisions), unsubscribeMsg.Name) + case "reauth": - log.Infof("Received reauth command from PAPI, resetting token") + p.Logger.Infof("Received reauth command from PAPI, resetting token") p.apiClient.GetClient().Transport.(*apiclient.JWTTransport).ResetToken() case "force_pull": data, err := json.Marshal(message.Data) @@ -182,13 +211,13 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { } if forcePullMsg.Blocklist == nil { - log.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") + p.Logger.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") err = p.apic.PullTop(true) if err != nil { return fmt.Errorf("failed to force pull operation: %s", err) } } else { - log.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) + p.Logger.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) err = p.apic.PullBlocklist(&modelscapi.BlocklistLink{ Name: &forcePullMsg.Blocklist.Name, URL: &forcePullMsg.Blocklist.Url, diff --git a/pkg/database/lock.go b/pkg/database/lock.go index 339226e8592..d25b71870f0 100644 --- a/pkg/database/lock.go +++ b/pkg/database/lock.go @@ -12,10 +12,12 @@ import ( ) const ( - CAPIPullLockTimeout = 120 + CAPIPullLockTimeout = 10 + CapiPullLockName = "pullCAPI" ) func (c *Client) AcquireLock(name string) error { + 
log.Debugf("acquiring lock %s", name) _, err := c.Ent.Lock.Create(). SetName(name). SetCreatedAt(types.UtcNow()). @@ -30,6 +32,7 @@ func (c *Client) AcquireLock(name string) error { } func (c *Client) ReleaseLock(name string) error { + log.Debugf("releasing lock %s", name) _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(c.CTX) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) @@ -38,11 +41,12 @@ func (c *Client) ReleaseLock(name string) error { } func (c *Client) ReleaseLockWithTimeout(name string, timeout int) error { - log.Debugf("(%s) releasing orphin locks", name) + log.Debugf("releasing lock %s with timeout of %d minutes", name, timeout) _, err := c.Ent.Lock.Delete().Where( lock.NameEQ(name), - lock.CreatedAtLT(time.Now().Add(-time.Duration(timeout)*time.Minute)), + lock.CreatedAtLT(time.Now().UTC().Add(-time.Duration(timeout)*time.Minute)), ).Exec(c.CTX) + if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } @@ -54,14 +58,22 @@ func (c *Client) IsLocked(err error) bool { } func (c *Client) AcquirePullCAPILock() error { - lockName := "pullCAPI" - err := c.ReleaseLockWithTimeout(lockName, CAPIPullLockTimeout) + + /*delete orphan "old" lock if present*/ + err := c.ReleaseLockWithTimeout(CapiPullLockName, CAPIPullLockTimeout) if err != nil { log.Errorf("unable to release pullCAPI lock: %s", err) } - return c.AcquireLock(lockName) + return c.AcquireLock(CapiPullLockName) } func (c *Client) ReleasePullCAPILock() error { - return c.ReleaseLockWithTimeout("pullCAPI", CAPIPullLockTimeout) + log.Debugf("deleting lock %s", CapiPullLockName) + _, err := c.Ent.Lock.Delete().Where( + lock.NameEQ(CapiPullLockName), + ).Exec(c.CTX) + if err != nil { + return errors.Wrapf(DeleteFail, "delete lock: %s", err) + } + return nil } From dd71f0a8663ac6615fdedf3c56f59c55853df858 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 19 Mar 2024 10:48:49 +0100 Subject: [PATCH 071/581] CI: bump 
lint version and update configuration (#2901) * bump golangci-lint to 1.56 * lint (testifylint) * update lint configuration * windows test: remove stale code --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 42 +++++++++++++++----------- pkg/acquisition/modules/loki/loki.go | 2 +- pkg/apiserver/alerts_test.go | 2 +- pkg/csplugin/utils_windows.go | 4 --- pkg/exprhelpers/crowdsec_cti_test.go | 8 ++--- 7 files changed, 33 insertions(+), 29 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 6629250c592..643cb9b39c1 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -56,7 +56,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.55 + version: v1.56 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index e0603e0dec3..918cefb26ed 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -157,7 +157,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.55 + version: v1.56 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.golangci.yml b/.golangci.yml index 396da2141f1..758327e40fd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,7 +4,6 @@ run: skip-dirs: - pkg/time/rate skip-files: - - pkg/database/ent/generate.go - pkg/yamlpatch/merge.go - pkg/yamlpatch/merge_test.go @@ -32,17 +31,31 @@ linters-settings: # Checks the number of lines in a function. # If lower than 0, disable the check. # Default: 60 - lines: -1 + # lower this after refactoring + lines: 437 # Checks the number of statements in a function. # If lower than 0, disable the check. 
# Default: 40 - statements: -1 + # lower this after refactoring + statements: 122 govet: check-shadowing: true + enable: + - atomicalign + - deepequalerrors + # TODO: - fieldalignment + - findcall + - nilness + # TODO: - reflectvaluecompare + - shadow + - sortslice + - timeformat + - unusedwrite lll: - line-length: 140 + # lower this after refactoring + line-length: 2607 maintidx: # raise this after refactoring @@ -68,10 +81,16 @@ linters-settings: depguard: rules: - main: + wrap: deny: - pkg: "github.com/pkg/errors" desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" + files: + - "!**/pkg/database/*.go" + - "!**/pkg/exprhelpers/*.go" + - "!**/pkg/acquisition/modules/appsec/appsec.go" + - "!**/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go" + - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: - "!**/cmd/crowdsec-cli/alerts.go" @@ -105,7 +124,6 @@ linters-settings: - "!**/pkg/appsec/loader.go" - "!**/pkg/csplugin/broker.go" - "!**/pkg/csplugin/broker_test.go" - - "!**/pkg/dumps/bucker_dump.go" - "!**/pkg/dumps/bucket_dump.go" - "!**/pkg/dumps/parser_dump.go" - "!**/pkg/hubtest/coverage.go" @@ -177,6 +195,7 @@ linters: # - importas # Enforces consistent import aliases # - ineffassign # Detects when assignments to existing variables are not used # - interfacebloat # A linter that checks the number of methods inside an interface. + # - lll # Reports long lines # - logrlint # Check logr arguments. # - maintidx # maintidx measures the maintainability index of each function. # - makezero # Finds slice declarations with non-zero initial length @@ -214,7 +233,6 @@ linters: - goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt. 
- gosec # (gas): Inspects source code for security problems - inamedparam # reports interfaces with unnamed method parameters - - lll # Reports long lines - musttag # enforce field tags in (un)marshaled structs - promlinter # Check Prometheus metrics naming via promlint - protogetter # Reports direct reads from proto message fields when getters should be used @@ -363,13 +381,3 @@ issues: - linters: - nonamedreturns text: "named return .* with type .* found" - - # - # Will fix, might be trickier - # - - # https://github.com/pkg/errors/issues/245 - - linters: - - depguard - text: "import 'github.com/pkg/errors' is not allowed .*" - diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 3625c689516..52b87eb95d3 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -6,13 +6,13 @@ https://grafana.com/docs/loki/latest/api/#get-lokiapiv1tail import ( "context" + "errors" "fmt" "net/url" "strconv" "strings" "time" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" tomb "gopkg.in/tomb.v2" diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 5365058176d..812e33ae13b 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -151,7 +151,7 @@ func TestCreateAlertChannels(t *testing.T) { wg.Done() }() - go lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") + lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") wg.Wait() assert.Len(t, pd.Alert.Decisions, 1) apiServer.Close() diff --git a/pkg/csplugin/utils_windows.go b/pkg/csplugin/utils_windows.go index dfb11aff548..2870a2addb5 100644 --- a/pkg/csplugin/utils_windows.go +++ b/pkg/csplugin/utils_windows.go @@ -100,10 +100,6 @@ func CheckPerms(path string) error { return fmt.Errorf("no DACL found on plugin, meaning fully permissive access on plugin %s", path) } - if err != nil { - return fmt.Errorf("while looking up current user sid: 
%w", err) - } - rs := reflect.ValueOf(dacl).Elem() /* diff --git a/pkg/exprhelpers/crowdsec_cti_test.go b/pkg/exprhelpers/crowdsec_cti_test.go index fc3a236c561..84a4b245720 100644 --- a/pkg/exprhelpers/crowdsec_cti_test.go +++ b/pkg/exprhelpers/crowdsec_cti_test.go @@ -118,7 +118,7 @@ func TestNillClient(t *testing.T) { item, err := CrowdsecCTI("1.2.3.4") assert.Equal(t, err, cticlient.ErrDisabled) - assert.Equal(t, item, &cticlient.SmokeItem{}) + assert.Equal(t, &cticlient.SmokeItem{}, item) } func TestInvalidAuth(t *testing.T) { @@ -133,7 +133,7 @@ func TestInvalidAuth(t *testing.T) { })) item, err := CrowdsecCTI("1.2.3.4") - assert.Equal(t, item, &cticlient.SmokeItem{}) + assert.Equal(t, &cticlient.SmokeItem{}, item) assert.False(t, CTIApiEnabled) assert.Equal(t, err, cticlient.ErrUnauthorized) @@ -143,7 +143,7 @@ func TestInvalidAuth(t *testing.T) { })) item, err = CrowdsecCTI("1.2.3.4") - assert.Equal(t, item, &cticlient.SmokeItem{}) + assert.Equal(t, &cticlient.SmokeItem{}, item) assert.False(t, CTIApiEnabled) assert.Equal(t, err, cticlient.ErrDisabled) } @@ -159,7 +159,7 @@ func TestNoKey(t *testing.T) { })) item, err := CrowdsecCTI("1.2.3.4") - assert.Equal(t, item, &cticlient.SmokeItem{}) + assert.Equal(t, &cticlient.SmokeItem{}, item) assert.False(t, CTIApiEnabled) assert.Equal(t, err, cticlient.ErrDisabled) } From c76325b91b64137ee3df3f02b473270819f8d241 Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 19 Mar 2024 17:42:08 +0100 Subject: [PATCH 072/581] Update windows pipeline (#2909) --- azure-pipelines.yml | 154 ++++++++++++++++++++++++++++++-------------- make_chocolatey.ps1 | 4 +- make_installer.ps1 | 2 +- 3 files changed, 111 insertions(+), 49 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 6151fe22274..f90af6f1718 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -15,15 +15,9 @@ pool: stages: - stage: Build jobs: - - job: + - job: Build displayName: "Build" steps: - - task: DotNetCoreCLI@2 - displayName: 
"Install SignClient" - inputs: - command: 'custom' - custom: 'tool' - arguments: 'install --global SignClient --version 1.3.155' - task: GoTool@0 displayName: "Install Go" inputs: @@ -39,24 +33,14 @@ stages: #we are not calling make windows_installer because we want to sign the binaries before they are added to the MSI script: | make build BUILD_RE2_WASM=1 - - task: AzureKeyVault@2 - inputs: - azureSubscription: 'Azure subscription 1(8a93ab40-7e99-445e-ad47-0f6a3e2ef546)' - KeyVaultName: 'CodeSigningSecrets' - SecretsFilter: 'CodeSigningUser,CodeSigningPassword' - RunAsPreJob: false - - - task: DownloadSecureFile@1 - inputs: - secureFile: appsettings.json - - - pwsh: | - SignClient.exe Sign --name "crowdsec-binaries" ` - --input "**/*.exe" --config (Join-Path -Path $(Agent.TempDirectory) -ChildPath "appsettings.json") ` - --user $(CodeSigningUser) --secret '$(CodeSigningPassword)' - displayName: "Sign Crowdsec binaries + plugins" + - pwsh: | $build_version=$env:BUILD_SOURCEBRANCHNAME + #Override the version if it's set in the pipeline + if ( ${env:USERBUILDVERSION} -ne "") + { + $build_version = ${env:USERBUILDVERSION} + } if ($build_version.StartsWith("v")) { $build_version = $build_version.Substring(1) @@ -69,35 +53,112 @@ stages: displayName: GetCrowdsecVersion name: GetCrowdsecVersion - pwsh: | - .\make_installer.ps1 -version '$(GetCrowdsecVersion.BuildVersion)' + Get-ChildItem -Path .\cmd -Directory | ForEach-Object { + $dirName = $_.Name + Get-ChildItem -Path .\cmd\$dirName -File -Filter '*.exe' | ForEach-Object { + $fileName = $_.Name + $destDir = Join-Path $(Build.ArtifactStagingDirectory) cmd\$dirName + New-Item -ItemType Directory -Path $destDir -Force + Copy-Item -Path .\cmd\$dirName\$fileName -Destination $destDir + } + } + displayName: "Copy binaries to staging directory" + - task: PublishPipelineArtifact@1 + inputs: + targetPath: '$(Build.ArtifactStagingDirectory)' + artifact: 'unsigned_binaries' + displayName: "Upload binaries artifact" + + - stage: 
Sign + dependsOn: Build + variables: + - group: 'FOSS Build Variables' + - name: BuildVersion + value: $[ stageDependencies.Build.Build.outputs['GetCrowdsecVersion.BuildVersion'] ] + condition: succeeded() + jobs: + - job: Sign + displayName: "Sign" + steps: + - download: current + artifact: unsigned_binaries + displayName: "Download binaries artifact" + - task: CopyFiles@2 + inputs: + SourceFolder: '$(Pipeline.Workspace)/unsigned_binaries' + TargetFolder: '$(Build.SourcesDirectory)' + displayName: "Copy binaries to workspace" + - task: DotNetCoreCLI@2 + displayName: "Install SignTool tool" + inputs: + command: 'custom' + custom: 'tool' + arguments: install --global sign --version 0.9.0-beta.23127.3 + - task: AzureKeyVault@2 + displayName: "Get signing parameters" + inputs: + azureSubscription: "Azure subscription" + KeyVaultName: "$(KeyVaultName)" + SecretsFilter: "TenantId,ClientId,ClientSecret,Certificate,KeyVaultUrl" + - pwsh: | + sign code azure-key-vault ` + "**/*.exe" ` + --base-directory "$(Build.SourcesDirectory)/cmd/" ` + --publisher-name "CrowdSec" ` + --description "CrowdSec" ` + --description-url "https://github.com/crowdsecurity/crowdsec" ` + --azure-key-vault-tenant-id "$(TenantId)" ` + --azure-key-vault-client-id "$(ClientId)" ` + --azure-key-vault-client-secret "$(ClientSecret)" ` + --azure-key-vault-certificate "$(Certificate)" ` + --azure-key-vault-url "$(KeyVaultUrl)" + displayName: "Sign crowdsec binaries" + - pwsh: | + .\make_installer.ps1 -version '$(BuildVersion)' displayName: "Build Crowdsec MSI" name: BuildMSI - - pwsh: | - .\make_chocolatey.ps1 -version '$(GetCrowdsecVersion.BuildVersion)' + .\make_chocolatey.ps1 -version '$(BuildVersion)' displayName: "Build Chocolatey nupkg" - - pwsh: | - SignClient.exe Sign --name "crowdsec-msi" ` - --input "*.msi" --config (Join-Path -Path $(Agent.TempDirectory) -ChildPath "appsettings.json") ` - --user $(CodeSigningUser) --secret '$(CodeSigningPassword)' - displayName: "Sign Crowdsec MSI" - - - task: 
PublishBuildArtifacts@1 + sign code azure-key-vault ` + "*.msi" ` + --base-directory "$(Build.SourcesDirectory)" ` + --publisher-name "CrowdSec" ` + --description "CrowdSec" ` + --description-url "https://github.com/crowdsecurity/crowdsec" ` + --azure-key-vault-tenant-id "$(TenantId)" ` + --azure-key-vault-client-id "$(ClientId)" ` + --azure-key-vault-client-secret "$(ClientSecret)" ` + --azure-key-vault-certificate "$(Certificate)" ` + --azure-key-vault-url "$(KeyVaultUrl)" + displayName: "Sign MSI package" + - pwsh: | + sign code azure-key-vault ` + "*.nupkg" ` + --base-directory "$(Build.SourcesDirectory)" ` + --publisher-name "CrowdSec" ` + --description "CrowdSec" ` + --description-url "https://github.com/crowdsecurity/crowdsec" ` + --azure-key-vault-tenant-id "$(TenantId)" ` + --azure-key-vault-client-id "$(ClientId)" ` + --azure-key-vault-client-secret "$(ClientSecret)" ` + --azure-key-vault-certificate "$(Certificate)" ` + --azure-key-vault-url "$(KeyVaultUrl)" + displayName: "Sign nuget package" + - task: PublishPipelineArtifact@1 inputs: - PathtoPublish: '$(Build.Repository.LocalPath)\\crowdsec_$(GetCrowdsecVersion.BuildVersion).msi' - ArtifactName: 'crowdsec.msi' - publishLocation: 'Container' - displayName: "Upload MSI artifact" - - - task: PublishBuildArtifacts@1 + targetPath: '$(Build.SourcesDirectory)/crowdsec_$(BuildVersion).msi' + artifact: 'signed_msi_package' + displayName: "Upload signed MSI artifact" + - task: PublishPipelineArtifact@1 inputs: - PathtoPublish: '$(Build.Repository.LocalPath)\\windows\\Chocolatey\\crowdsec\\crowdsec.$(GetCrowdsecVersion.BuildVersion).nupkg' - ArtifactName: 'crowdsec.nupkg' - publishLocation: 'Container' - displayName: "Upload nupkg artifact" + targetPath: '$(Build.SourcesDirectory)/crowdsec.$(BuildVersion).nupkg' + artifact: 'signed_nuget_package' + displayName: "Upload signed nuget artifact" + - stage: Publish - dependsOn: Build + dependsOn: Sign jobs: - deployment: "Publish" displayName: "Publish to GitHub" @@ 
-119,8 +180,7 @@ stages: assetUploadMode: 'replace' addChangeLog: false isPreRelease: true #we force prerelease because the pipeline is invoked on tag creation, which happens when we do a prerelease - #the .. is an ugly hack, but I can't find the var that gives D:\a\1 ... assets: | - $(Build.ArtifactStagingDirectory)\..\crowdsec.msi/*.msi - $(Build.ArtifactStagingDirectory)\..\crowdsec.nupkg/*.nupkg + $(Pipeline.Workspace)/signed_msi_package/*.msi + $(Pipeline.Workspace)/signed_nuget_package/*.nupkg condition: ne(variables['GetLatestPrelease.LatestPreRelease'], '') diff --git a/make_chocolatey.ps1 b/make_chocolatey.ps1 index 67f85c33d89..cceed28402f 100644 --- a/make_chocolatey.ps1 +++ b/make_chocolatey.ps1 @@ -15,4 +15,6 @@ if ($version.Contains("-")) Set-Location .\windows\Chocolatey\crowdsec Copy-Item ..\..\..\crowdsec_$version.msi tools\crowdsec.msi -choco pack --version $version \ No newline at end of file +choco pack --version $version + +Copy-Item crowdsec.$version.nupkg ..\..\..\ \ No newline at end of file diff --git a/make_installer.ps1 b/make_installer.ps1 index a20ffaf55b5..c927452ff72 100644 --- a/make_installer.ps1 +++ b/make_installer.ps1 @@ -1,7 +1,7 @@ param ( $version ) -$env:Path += ";C:\Program Files (x86)\WiX Toolset v3.11\bin" +$env:Path += ";C:\Program Files (x86)\WiX Toolset v3.14\bin" if ($version.StartsWith("v")) { $version = $version.Substring(1) From d9f2a22ee5b5a982bbf7a2f970b5929944ad7f2b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 20 Mar 2024 13:27:28 +0100 Subject: [PATCH 073/581] cscli metrics -> sort table order (#2908) --- cmd/crowdsec-cli/metrics.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 0f92343868d..ad2b9ee79d8 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -272,9 +272,7 @@ func (ms metricStore) Format(out io.Writer, sections []string, formatType string 
// if no sections are specified, we want all of them if len(sections) == 0 { - for section := range ms { - sections = append(sections, section) - } + sections = maptools.SortedKeys(ms) } for _, section := range sections { @@ -283,7 +281,7 @@ func (ms metricStore) Format(out io.Writer, sections []string, formatType string switch formatType { case "human": - for section := range want { + for _, section := range maptools.SortedKeys(want) { want[section].Table(out, noUnit, showEmpty) } case "json": @@ -376,7 +374,7 @@ cscli metrics list`, } // expandAlias returns a list of sections. The input can be a list of sections or alias. -func (cli *cliMetrics) expandSectionGroups(args []string) []string { +func (cli *cliMetrics) expandAlias(args []string) []string { ret := []string{} for _, section := range args { @@ -422,7 +420,7 @@ cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { - args = cli.expandSectionGroups(args) + args = cli.expandAlias(args) return cli.show(args, url, noUnit) }, } From 75a50c0c9d5b31cc6200e55f13c7b94ae1d5a41c Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 20 Mar 2024 14:02:29 +0100 Subject: [PATCH 074/581] improve a bit cscli examples when it comes to list mgmt (#2911) --- cmd/crowdsec-cli/alerts.go | 12 +++++++----- cmd/crowdsec-cli/decisions.go | 3 ++- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index ce304bcc777..908466f9eb2 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -177,9 +177,9 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return nil } -type cliAlerts struct{ +type cliAlerts struct { client *apiclient.ApiClient - cfg configGetter + cfg configGetter } func NewCLIAlerts(getconfig configGetter) *cliAlerts { @@ -253,8 +253,10 @@ func (cli *cliAlerts) NewListCmd() 
*cobra.Command { Example: `cscli alerts list cscli alerts list --ip 1.2.3.4 cscli alerts list --range 1.2.3.0/24 +cscli alerts list --origin lists cscli alerts list -s crowdsecurity/ssh-bf cscli alerts list --type ban`, + Long: `List alerts with optional filters`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, @@ -358,7 +360,7 @@ func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { var ( ActiveDecision *bool AlertDeleteAll bool - delAlertByID string + delAlertByID string ) var alertDeleteFilter = apiclient.AlertsDeleteOpts{ @@ -449,7 +451,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, } - + flags := cmd.Flags() flags.SortFlags = false flags.StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") @@ -520,7 +522,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { func (cli *cliAlerts) NewFlushCmd() *cobra.Command { var ( maxItems int - maxAge string + maxAge string ) cmd := &cobra.Command{ diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index d7165367898..a97536ddc14 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -195,7 +195,7 @@ func (cli *cliDecisions) newListCmd() *cobra.Command { Example: `cscli decisions list -i 1.2.3.4 cscli decisions list -r 1.2.3.0/24 cscli decisions list -s crowdsecurity/ssh-bf -cscli decisions list -t ban +cscli decisions list --origin lists --scenario list_name `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, @@ -436,6 +436,7 @@ func (cli *cliDecisions) newDeleteCmd() *cobra.Command { cscli decisions delete -i 1.2.3.4 cscli decisions delete --id 42 cscli decisions delete --type captcha +cscli decisions delete --origin lists --scenario list_name `, /*TBD : refaire le Long/Example*/ PreRunE: func(cmd *cobra.Command, _ []string) error { From 7779c7ff0c010537d9a1dc3a73abdb0e2c750c33 Mon Sep 17 00:00:00 2001 
From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:46:14 +0100 Subject: [PATCH 075/581] hub update: reload crowdsec if only data files have changed (#2912) --- pkg/cwhub/dataset.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index 4612f357626..97fd9c5a0ff 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -32,6 +32,9 @@ func downloadFile(url string, destPath string) error { return fmt.Errorf("bad http code %d for %s", resp.StatusCode, url) } + // Download to a temporary location to avoid corrupting files + // that are currently in use or memory mapped. + tmpFile, err := os.CreateTemp(filepath.Dir(destPath), filepath.Base(destPath)+".*.tmp") if err != nil { return err @@ -57,6 +60,11 @@ func downloadFile(url string, destPath string) error { return err } + // a check on stdout is used while scripting to know if the hub has been upgraded + // and a configuration reload is required + // TODO: use a better way to communicate this + fmt.Printf("updated %s\n", filepath.Base(destPath)) + if err = os.Rename(tmpFileName, destPath); err != nil { return err } From 52f86c2d10f51a238b9e23961e65075a44c85fc9 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 21 Mar 2024 11:39:37 +0100 Subject: [PATCH 076/581] add libinjection expr helpers (#2914) --- pkg/exprhelpers/expr_lib.go | 14 +++++++ pkg/exprhelpers/libinjection.go | 17 ++++++++ pkg/exprhelpers/libinjection_test.go | 60 ++++++++++++++++++++++++++++ 3 files changed, 91 insertions(+) create mode 100644 pkg/exprhelpers/libinjection.go create mode 100644 pkg/exprhelpers/libinjection_test.go diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index db191b84a8d..520799287eb 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -441,6 +441,20 @@ var exprFuncs = []exprCustomFunc{ new(func(float64, float64) bool), }, }, + { + name: "LibInjectionIsSQLI", + function: LibInjectionIsSQLI, + 
signature: []interface{}{ + new(func(string) bool), + }, + }, + { + name: "LibInjectionIsXSS", + function: LibInjectionIsXSS, + signature: []interface{}{ + new(func(string) bool), + }, + }, } //go 1.20 "CutPrefix": strings.CutPrefix, diff --git a/pkg/exprhelpers/libinjection.go b/pkg/exprhelpers/libinjection.go new file mode 100644 index 00000000000..e9f33e4f459 --- /dev/null +++ b/pkg/exprhelpers/libinjection.go @@ -0,0 +1,17 @@ +package exprhelpers + +import "github.com/corazawaf/libinjection-go" + +func LibInjectionIsSQLI(params ...any) (any, error) { + str := params[0].(string) + + ret, _ := libinjection.IsSQLi(str) + return ret, nil +} + +func LibInjectionIsXSS(params ...any) (any, error) { + str := params[0].(string) + + ret := libinjection.IsXSS(str) + return ret, nil +} diff --git a/pkg/exprhelpers/libinjection_test.go b/pkg/exprhelpers/libinjection_test.go new file mode 100644 index 00000000000..7b4ab825db9 --- /dev/null +++ b/pkg/exprhelpers/libinjection_test.go @@ -0,0 +1,60 @@ +package exprhelpers + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLibinjectionHelpers(t *testing.T) { + tests := []struct { + name string + function func(params ...any) (any, error) + params []any + expectResult any + }{ + { + name: "LibInjectionIsSQLI", + function: LibInjectionIsSQLI, + params: []any{"?__f__73=73&&__f__75=75&delivery=1&max=24.9&min=15.9&n=12&o=2&p=(select(0)from(select(sleep(15)))v)/*'%2B(select(0)from(select(sleep(15)))v)%2B'\x22%2B(select(0)from(select(sleep(15)))v)%2B\x22*/&rating=4"}, + expectResult: true, + }, + { + name: "LibInjectionIsSQLI - no match", + function: LibInjectionIsSQLI, + params: []any{"?bla=42&foo=bar"}, + expectResult: false, + }, + { + name: "LibInjectionIsSQLI - no match 2", + function: LibInjectionIsSQLI, + params: []any{"https://foo.com/asdkfj?bla=42&foo=bar"}, + expectResult: false, + }, + { + name: "LibInjectionIsXSS", + function: LibInjectionIsXSS, + params: []any{""}, + expectResult: true, + }, + { 
+ name: "LibInjectionIsXSS - no match", + function: LibInjectionIsXSS, + params: []any{"?bla=42&foo=bar"}, + expectResult: false, + }, + { + name: "LibInjectionIsXSS - no match 2", + function: LibInjectionIsXSS, + params: []any{"https://foo.com/asdkfj?bla=42&foo[]=bar&foo"}, + expectResult: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result, _ := test.function(test.params...) + assert.Equal(t, test.expectResult, result) + }) + } +} From 2e1ddec107ed01937809d4f53b4ab8fa2e00f7e4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 25 Mar 2024 10:40:41 +0100 Subject: [PATCH 077/581] cscli: Add user-agent to all hub requests (#2915) * cscli: Add user-agent to all hub requests * fix unit test and avoid httpmock * fix windows test --- pkg/cwhub/cwhub.go | 14 +++++++++ pkg/cwhub/dataset_test.go | 62 +++++++++++++++++++++------------------ 2 files changed, 48 insertions(+), 28 deletions(-) diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 9ce091fad39..a7864d4c076 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -7,10 +7,24 @@ import ( "sort" "strings" "time" + + "github.com/crowdsecurity/go-cs-lib/version" ) +// hubTransport wraps a Transport to set a custom User-Agent. +type hubTransport struct { + http.RoundTripper +} + +func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", "crowdsec/"+version.String()) + return t.RoundTripper.RoundTrip(req) +} + +// hubClient is the HTTP client used to communicate with the CrowdSec Hub. var hubClient = &http.Client{ Timeout: 120 * time.Second, + Transport: &hubTransport{http.DefaultTransport}, } // safePath returns a joined path and ensures that it does not escape the base directory. 
diff --git a/pkg/cwhub/dataset_test.go b/pkg/cwhub/dataset_test.go index 93d3e3bf01e..e48202e4821 100644 --- a/pkg/cwhub/dataset_test.go +++ b/pkg/cwhub/dataset_test.go @@ -1,50 +1,56 @@ package cwhub import ( + "io" + "net/http" + "net/http/httptest" "os" + "path/filepath" "testing" - "github.com/jarcoal/httpmock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/cstest" ) func TestDownloadFile(t *testing.T) { - examplePath := "./example.txt" - defer os.Remove(examplePath) - - httpmock.Activate() - defer httpmock.DeactivateAndReset() - - // OK - httpmock.RegisterResponder( - "GET", - "https://example.com/xx", - httpmock.NewStringResponder(200, "example content oneoneone"), - ) - - httpmock.RegisterResponder( - "GET", - "https://example.com/x", - httpmock.NewStringResponder(404, "not found"), - ) - - err := downloadFile("https://example.com/xx", examplePath) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/xx": + w.WriteHeader(http.StatusOK) + _, _ = io.WriteString(w, "example content oneoneone") + default: + w.WriteHeader(http.StatusNotFound) + _, _ = io.WriteString(w, "not found") + } + })) + defer ts.Close() + + dest := filepath.Join(t.TempDir(), "example.txt") + defer os.Remove(dest) + + err := downloadFile(ts.URL+"/xx", dest) require.NoError(t, err) - content, err := os.ReadFile(examplePath) + content, err := os.ReadFile(dest) assert.Equal(t, "example content oneoneone", string(content)) require.NoError(t, err) // bad uri - err = downloadFile("https://zz.com", examplePath) - require.Error(t, err) + err = downloadFile("https://zz.com", dest) + cstest.RequireErrorContains(t, err, "lookup zz.com") + cstest.RequireErrorContains(t, err, "no such host") // 404 - err = downloadFile("https://example.com/x", examplePath) - require.Error(t, err) + err = downloadFile(ts.URL+"/x", dest) + cstest.RequireErrorContains(t, err, "bad http 
code 404") // bad target - err = downloadFile("https://example.com/xx", "") - require.Error(t, err) + err = downloadFile(ts.URL+"/xx", "") + cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage) + + // destination directory does not exist + err = downloadFile(ts.URL+"/xx", filepath.Join(t.TempDir(), "missing/example.txt")) + cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage) } From f6bb8412c55426c7f459a85fa4f1b08fec4f487f Mon Sep 17 00:00:00 2001 From: Christian Kampka Date: Mon, 25 Mar 2024 16:20:16 +0100 Subject: [PATCH 078/581] Add patterns_dir configuration option (#2868) * Add patterns_dir configuration option * Update config.yaml --------- Co-authored-by: mmetc <92726601+mmetc@users.noreply.github.com> --- pkg/csconfig/config_paths.go | 6 ++++++ pkg/parser/unix_parser.go | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go index 71e3bacdaac..3de05ee0292 100644 --- a/pkg/csconfig/config_paths.go +++ b/pkg/csconfig/config_paths.go @@ -13,6 +13,7 @@ type ConfigurationPaths struct { HubDir string `yaml:"hub_dir,omitempty"` PluginDir string `yaml:"plugin_dir,omitempty"` NotificationDir string `yaml:"notification_dir,omitempty"` + PatternDir string `yaml:"pattern_dir,omitempty"` } func (c *Config) loadConfigurationPaths() error { @@ -33,6 +34,10 @@ func (c *Config) loadConfigurationPaths() error { c.ConfigPaths.HubIndexFile = filepath.Clean(c.ConfigPaths.HubDir + "/.index.json") } + if c.ConfigPaths.PatternDir == "" { + c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns/") + } + var configPathsCleanup = []*string{ &c.ConfigPaths.HubDir, &c.ConfigPaths.HubIndexFile, @@ -41,6 +46,7 @@ func (c *Config) loadConfigurationPaths() error { &c.ConfigPaths.SimulationFilePath, &c.ConfigPaths.PluginDir, &c.ConfigPaths.NotificationDir, + &c.ConfigPaths.PatternDir, } for _, k := range configPathsCleanup { if *k == "" { diff --git a/pkg/parser/unix_parser.go 
b/pkg/parser/unix_parser.go index 617e46189f3..720bac3d1fe 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -98,7 +98,7 @@ func NewParsers(hub *cwhub.Hub) *Parsers { func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { var err error - patternsDir := filepath.Join(cConfig.ConfigPaths.ConfigDir, "patterns/") + patternsDir := cConfig.ConfigPaths.PatternDir log.Infof("Loading grok library %s", patternsDir) /* load base regexps for two grok parsers */ parsers.Ctx, err = Init(map[string]interface{}{"patterns": patternsDir, From 368d22ec305c483c45ec459baafff34e8308e8de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 09:12:57 +0100 Subject: [PATCH 079/581] Bump github.com/jackc/pgx/v4 from 4.14.1 to 4.18.2 (#2887) Bumps [github.com/jackc/pgx/v4](https://github.com/jackc/pgx) from 4.14.1 to 4.18.2. - [Changelog](https://github.com/jackc/pgx/blob/v4.18.2/CHANGELOG.md) - [Commits](https://github.com/jackc/pgx/compare/v4.14.1...v4.18.2) --- updated-dependencies: - dependency-name: github.com/jackc/pgx/v4 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 18 +++++++++--------- go.sum | 36 ++++++++++++++++++------------------ 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 22d52be1ca8..604fef762b8 100644 --- a/go.mod +++ b/go.mod @@ -55,7 +55,7 @@ require ( github.com/hashicorp/go-version v1.2.1 github.com/hexops/gotextdiff v1.0.3 github.com/ivanpirog/coloredcobra v1.0.1 - github.com/jackc/pgx/v4 v4.14.1 + github.com/jackc/pgx/v4 v4.18.2 github.com/jarcoal/httpmock v1.1.0 github.com/jszwec/csvutil v1.5.1 github.com/lithammer/dedent v1.1.0 @@ -81,9 +81,9 @@ require ( github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.3.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.17.0 + golang.org/x/crypto v0.20.0 golang.org/x/mod v0.11.0 - golang.org/x/sys v0.15.0 + golang.org/x/sys v0.17.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 google.golang.org/protobuf v1.33.0 @@ -137,12 +137,12 @@ require ( github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.10.1 // indirect + github.com/jackc/pgconn v1.14.3 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.2.0 // indirect - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.9.1 // indirect + github.com/jackc/pgproto3/v2 v2.3.3 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgtype v1.14.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -198,9 +198,9 @@ require ( github.com/zclconf/go-cty v1.8.0 // indirect 
go.mongodb.org/mongo-driver v1.9.4 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/net v0.19.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/term v0.17.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect diff --git a/go.sum b/go.sum index 07bddaf8967..17a83cd6637 100644 --- a/go.sum +++ b/go.sum @@ -368,8 +368,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.10.1 h1:DzdIHIjG1AxGwoEEqS+mGsURyjt4enSmqzACXvVzOT8= -github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -385,26 +385,26 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.2.0 
h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= -github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.9.1 h1:MJc2s0MFS8C3ok1wQTdQxWuXQcB6+HwAm5x1CzW7mf0= -github.com/jackc/pgtype v1.9.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 
v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.14.1 h1:71oo1KAGI6mXhLiTMn6iDFcp3e7+zon/capWjl2OEFU= -github.com/jackc/pgx/v4 v4.14.1/go.mod h1:RgDuE4Z34o7XE92RpLsvFiOEfrAUT0Xt2KxvX73W06M= +github.com/jackc/pgx/v4 v4.18.2 h1:xVpYkNR5pk5bMCZGfClbO962UIqVABcAGt7ha1s/FeU= +github.com/jackc/pgx/v4 v4.18.2/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.2.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v1.1.0 h1:F47ChZj1Y2zFsCXxNkBPwNNKnAyOATcdQibk0qEdVCE= github.com/jarcoal/httpmock v1.1.0/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= @@ -757,8 +757,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= +golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -791,8 +791,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -841,8 +841,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -850,8 +850,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From df13f4315620ab601a16a077936b0185c3a584d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 09:13:16 +0100 Subject: [PATCH 080/581] Bump github.com/docker/docker (#2913) Bumps [github.com/docker/docker](https://github.com/docker/docker) from 24.0.7+incompatible to 24.0.9+incompatible. - [Release notes](https://github.com/docker/docker/releases) - [Commits](https://github.com/docker/docker/compare/v24.0.7...v24.0.9) --- updated-dependencies: - dependency-name: github.com/docker/docker dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 604fef762b8..50b88f7d425 100644 --- a/go.mod +++ b/go.mod @@ -31,7 +31,7 @@ require ( github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/dghubble/sling v1.3.0 - github.com/docker/docker v24.0.7+incompatible + github.com/docker/docker v24.0.9+incompatible github.com/docker/go-connections v0.4.0 github.com/fatih/color v1.15.0 github.com/fsnotify/fsnotify v1.6.0 diff --git a/go.sum b/go.sum index 17a83cd6637..8f91bd31ff6 100644 --- a/go.sum +++ b/go.sum @@ -116,8 +116,8 @@ github.com/dghubble/sling v1.3.0 h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU= github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= -github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= +github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= From be97466809af04775f82d481371f459498536fd8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 26 Mar 2024 09:30:32 +0100 Subject: [PATCH 081/581] CI: use golangci-lint 1.57 
(#2916) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 24 +++++++++++++----------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 643cb9b39c1..6b2f1132a82 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -56,7 +56,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.56 + version: v1.57 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 918cefb26ed..d76315462f3 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -157,7 +157,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.56 + version: v1.57 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.golangci.yml b/.golangci.yml index 758327e40fd..df0cb67d1a8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,12 +1,5 @@ # https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml -run: - skip-dirs: - - pkg/time/rate - skip-files: - - pkg/yamlpatch/merge.go - - pkg/yamlpatch/merge_test.go - linters-settings: cyclop: # lower this after refactoring @@ -19,6 +12,10 @@ linters-settings: - prefix(github.com/crowdsecurity) - prefix(github.com/crowdsecurity/crowdsec) + gomoddirectives: + replace-allow-list: + - golang.org/x/time/rate + gocognit: # lower this after refactoring min-complexity: 145 @@ -40,7 +37,6 @@ linters-settings: statements: 122 govet: - check-shadowing: true enable: - atomicalign - deepequalerrors @@ -295,15 +291,21 @@ issues: # “Look, that’s why there’s rules, understand? 
So that you think before you # break ‘em.” ― Terry Pratchett + exclude-dirs: + - pkg/time/rate + + exclude-files: + - pkg/yamlpatch/merge.go + - pkg/yamlpatch/merge_test.go + + exclude-generated-strict: true + max-issues-per-linter: 0 max-same-issues: 0 exclude-rules: # Won't fix: - - path: go.mod - text: "replacement are not allowed: golang.org/x/time/rate" - # `err` is often shadowed, we may continue to do it - linters: - govet From 63bd31b471e765dd85a2d7760483782111e0fb68 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Fri, 29 Mar 2024 17:57:54 +0100 Subject: [PATCH 082/581] Fix REQUEST_URI behavior + fix #2891 (#2917) * fix our behavior to comply more with modsec, REQUEST_URI should be: path+query string * fix #2891 as well * add new transforms * add transform tests --- .../modules/appsec/appsec_others_test.go | 74 +++++++ pkg/acquisition/modules/appsec/appsec_test.go | 200 ++++++++++++++++++ .../modules/appsec/appsec_win_test.go | 46 ++++ pkg/appsec/appsec_rule/modsecurity.go | 13 +- pkg/appsec/request.go | 6 +- 5 files changed, 333 insertions(+), 6 deletions(-) create mode 100644 pkg/acquisition/modules/appsec/appsec_others_test.go create mode 100644 pkg/acquisition/modules/appsec/appsec_win_test.go diff --git a/pkg/acquisition/modules/appsec/appsec_others_test.go b/pkg/acquisition/modules/appsec/appsec_others_test.go new file mode 100644 index 00000000000..93edc9d9ec3 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_others_test.go @@ -0,0 +1,74 @@ +//go:build !windows +// +build !windows + +package appsecacquisition + +import ( + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestAppsecRuleTransformsOthers(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + { + name: "normalizepath", + 
expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "b/c"}, + Transform: []string{"normalizepath"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=a/../b/c", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "normalizepath #2", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "b/c/"}, + Transform: []string{"normalizepath"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=a/../b/c/////././././", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 25aea0c78ea..d98215bf2c7 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -1284,6 +1284,206 @@ func TestAppsecRuleMatches(t *testing.T) { } } +func TestAppsecRuleTransforms(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + 
{ + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "lowercase", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/TOTO", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "uppercase", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/TOTO"}, + Transform: []string{"uppercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", 
events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "b64decode", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + Transform: []string{"b64decode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=dG90bw", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "b64decode with extra padding", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + Transform: []string{"b64decode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=dG90bw===", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "length", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "gte", Value: "3"}, + Transform: []string{"length"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse 
appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "urldecode", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, + Transform: []string{"urldecode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=%42%42%2F%41", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "trim", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, + Transform: []string{"urldecode", "trim"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=%20%20%42%42%2F%41%20%20", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + func loadAppSecEngine(test appsecRuleTest, t *testing.T) { if testing.Verbose() { log.SetLevel(log.TraceLevel) diff --git 
a/pkg/acquisition/modules/appsec/appsec_win_test.go b/pkg/acquisition/modules/appsec/appsec_win_test.go new file mode 100644 index 00000000000..e85d75df251 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_win_test.go @@ -0,0 +1,46 @@ +//go:build windows +// +build windows + +package appsecacquisition + +import ( + "testing" + + log "github.com/sirupsen/logrus" +) + +func TestAppsecRuleTransformsWindows(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + // { + // name: "normalizepath", + // expected_load_ok: true, + // inband_rules: []appsec_rule.CustomRule{ + // { + // Name: "rule1", + // Zones: []string{"ARGS"}, + // Variables: []string{"foo"}, + // Match: appsec_rule.Match{Type: "equals", Value: "b/c"}, + // Transform: []string{"normalizepath"}, + // }, + // }, + // input_request: appsec.ParsedRequest{ + // RemoteAddr: "1.2.3.4", + // Method: "GET", + // URI: "/?foo=a/../b/c", + // }, + // output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + // require.Len(t, events, 2) + // require.Equal(t, types.APPSEC, events[0].Type) + // require.Equal(t, types.LOG, events[1].Type) + // require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + // }, + // }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/appsec/appsec_rule/modsecurity.go b/pkg/appsec/appsec_rule/modsecurity.go index 0b117cd773d..a269384ccb9 100644 --- a/pkg/appsec/appsec_rule/modsecurity.go +++ b/pkg/appsec/appsec_rule/modsecurity.go @@ -19,7 +19,8 @@ var zonesMap map[string]string = map[string]string{ "HEADERS": "REQUEST_HEADERS", "METHOD": "REQUEST_METHOD", "PROTOCOL": "REQUEST_PROTOCOL", - "URI": "REQUEST_URI", + "URI": "REQUEST_FILENAME", + "URI_FULL": "REQUEST_URI", "RAW_BODY": "REQUEST_BODY", "FILENAMES": "FILES", } @@ -28,8 +29,14 @@ var transformMap map[string]string = map[string]string{ 
"lowercase": "t:lowercase", "uppercase": "t:uppercase", "b64decode": "t:base64Decode", - "hexdecode": "t:hexDecode", - "length": "t:length", + //"hexdecode": "t:hexDecode", -> not supported by coraza + "length": "t:length", + "urldecode": "t:urlDecode", + "trim": "t:trim", + "normalize_path": "t:normalizePath", + "normalizepath": "t:normalizePath", + "htmlentitydecode": "t:htmlEntityDecode", + "html_entity_decode": "t:htmlEntityDecode", } var matchMap map[string]string = map[string]string{ diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index a9eb0d372a4..66b5d797fd7 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -365,11 +365,11 @@ func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedR UUID: uuid.New().String(), ClientHost: clientHost, ClientIP: clientIP, - URI: parsedURL.Path, + URI: clientURI, Method: clientMethod, - Host: r.Host, + Host: clientHost, Headers: r.Header, - URL: r.URL, + URL: parsedURL, Proto: r.Proto, Body: body, Args: ParseQuery(parsedURL.RawQuery), From 26bcd0912aa432dbf525359ab0fd8426fd24745a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 3 Apr 2024 13:34:35 +0200 Subject: [PATCH 083/581] docker: distribute geoip db in slim image (#2920) --- Dockerfile | 11 ++--------- docker/test/default.env | 2 +- docker/test/tests/test_flavors.py | 2 +- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index ed1ac5e28c2..53a6cd04512 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,11 +43,12 @@ COPY --from=build /go/bin/yq /usr/local/bin/crowdsec /usr/local/bin/cscli /usr/l COPY --from=build /etc/crowdsec /staging/etc/crowdsec COPY --from=build /go/src/crowdsec/docker/docker_start.sh / COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/config.yaml +COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin 
/staging/etc/crowdsec/local_api_credentials.yaml ENTRYPOINT /bin/bash /docker_start.sh -FROM slim as plugins +FROM slim as full # Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp # The files are here for reference, as users will need to mount a new version to be actually able to use notifications @@ -60,11 +61,3 @@ COPY --from=build \ /staging/etc/crowdsec/notifications/ COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins - -FROM slim as geoip - -COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec - -FROM plugins as full - -COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec diff --git a/docker/test/default.env b/docker/test/default.env index c46fdab7f1d..9607c8aaa5b 100644 --- a/docker/test/default.env +++ b/docker/test/default.env @@ -6,7 +6,7 @@ CROWDSEC_TEST_VERSION="dev" # All of the following flavors will be tested when using the "flavor" fixture CROWDSEC_TEST_FLAVORS="full" # CROWDSEC_TEST_FLAVORS="full,slim,debian" -# CROWDSEC_TEST_FLAVORS="full,slim,debian,geoip,plugins-debian-slim,debian-geoip,debian-plugins" +# CROWDSEC_TEST_FLAVORS="full,slim,debian,debian-slim" # network to use CROWDSEC_TEST_NETWORK="net-test" diff --git a/docker/test/tests/test_flavors.py b/docker/test/tests/test_flavors.py index 223cf995cba..7e78b8d681b 100644 --- a/docker/test/tests/test_flavors.py +++ b/docker/test/tests/test_flavors.py @@ -42,7 +42,7 @@ def test_flavor_content(crowdsec, flavor): x = cs.cont.exec_run( 'ls -1 /usr/local/lib/crowdsec/plugins/') stdout = x.output.decode() - if 'slim' in flavor or 'geoip' in flavor: + if 'slim' in flavor: # the exact return code and full message depend # on the 'ls' implementation (busybox vs coreutils) assert x.exit_code != 0 From 912c4bca707cbe9434f5ee62f1d1b92d34395ccd Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Wed, 3 Apr 2024 17:49:05 +0200 Subject: [PATCH 084/581] split & reorganize 
tests a bit. Add tests on existing zones (#2925) --- .../modules/appsec/appsec_hooks_test.go | 714 ++++++++ ...psec_others_test.go => appsec_lnx_test.go} | 0 .../modules/appsec/appsec_remediation_test.go | 320 ++++ .../modules/appsec/appsec_rules_test.go | 733 +++++++++ pkg/acquisition/modules/appsec/appsec_test.go | 1454 ----------------- 5 files changed, 1767 insertions(+), 1454 deletions(-) create mode 100644 pkg/acquisition/modules/appsec/appsec_hooks_test.go rename pkg/acquisition/modules/appsec/{appsec_others_test.go => appsec_lnx_test.go} (100%) create mode 100644 pkg/acquisition/modules/appsec/appsec_remediation_test.go create mode 100644 pkg/acquisition/modules/appsec/appsec_rules_test.go diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go new file mode 100644 index 00000000000..3cb2fcfde29 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -0,0 +1,714 @@ +package appsecacquisition + +import ( + "net/http" + "net/url" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestAppsecOnMatchHooks(t *testing.T) { + tests := []appsecRuleTest{ + { + name: "no rule : check return code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 
2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + require.Equal(t, 403, responses[0].UserHTTPResponseCode) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + + }, + }, + { + name: "on_match: change return code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(413)"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + require.Equal(t, 413, responses[0].UserHTTPResponseCode) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + { + name: "on_match: change action to a non standard one (log)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('log')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses 
[]appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + require.Equal(t, "log", responses[0].Action) + require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) + require.Equal(t, 403, responses[0].UserHTTPResponseCode) + }, + }, + { + name: "on_match: change action to another standard one (allow)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + }, + }, + { + name: "on_match: change action to another standard one (ban)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses 
[]appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, responses, 1) + //note: SetAction normalizes deny, ban and block to ban + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + { + name: "on_match: change action to another standard one (captcha)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, responses, 1) + //note: SetAction normalizes deny, ban and block to ban + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + }, + }, + { + name: "on_match: change action to a non standard one", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('foobar')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Len(t, responses, 1) + 
require.Equal(t, "foobar", responses[0].Action) + }, + }, + { + name: "on_match: cancel alert", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true && LogInfo('XX -> %s', evt.Appsec.MatchedRules.GetName())", Apply: []string{"CancelAlert()"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.LOG, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + { + name: "on_match: cancel event", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"CancelEvent()"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.APPSEC, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecPreEvalHooks(t *testing.T) { + + tests := 
[]appsecRuleTest{ + { + name: "Basic on_load hook to disable inband rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "1 == 1", Apply: []string{"RemoveInBandRuleByName('rule1')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "Basic on_load fails to disable rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "1 ==2", Apply: []string{"RemoveInBandRuleByName('rule1')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + + }, + }, + { + name: "on_load : 
disable inband by tag", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"RemoveInBandRuleByTag('crowdsec-rulez')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "on_load : disable inband by ID", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"RemoveInBandRuleByID(1516470898)"}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "on_load : disable inband by name", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"RemoveInBandRuleByName('rulez')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "on_load : outofband default behavior", + expected_load_ok: true, + outofband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, 
types.LOG, events[0].Type) + require.True(t, events[0].Appsec.HasOutBandMatches) + require.False(t, events[0].Appsec.HasInBandMatches) + require.Len(t, events[0].Appsec.MatchedRules, 1) + require.Equal(t, "rulez", events[0].Appsec.MatchedRules[0]["msg"]) + //maybe surprising, but response won't mention OOB event, as it's sent as soon as the inband phase is over. + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "on_load : set remediation by tag", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"SetRemediationByTag('crowdsec-rulez', 'foobar')"}}, //rule ID is generated at runtime. If you change rule, it will break the test (: + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar", responses[0].Action) + }, + }, + { + name: "on_load : set remediation by name", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"SetRemediationByName('rulez', 'foobar')"}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar", responses[0].Action) + }, + }, + { + name: "on_load : set remediation by ID", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Apply: []string{"SetRemediationByID(1516470898, 'foobar')"}}, //rule ID is generated at runtime. If you change rule, it will break the test (: + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar", responses[0].Action) + require.Equal(t, "foobar", appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRemediationConfigHooks(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + 
RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "SetRemediation", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{{Apply: []string{"SetRemediation('captcha')"}}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: + + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "SetRemediation", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{{Apply: []string{"SetReturnCode(418)"}}}, //rule ID is generated at runtime. If you change rule, it will break the test (: + + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} +func TestOnMatchRemediationHooks(t *testing.T) { + tests := []appsecRuleTest{ + { + name: "set remediation to allow with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", 
+ Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "set remediation to captcha + custom user code with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusForbidden, statusCode) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_others_test.go b/pkg/acquisition/modules/appsec/appsec_lnx_test.go similarity index 100% rename from pkg/acquisition/modules/appsec/appsec_others_test.go rename to pkg/acquisition/modules/appsec/appsec_lnx_test.go diff --git 
a/pkg/acquisition/modules/appsec/appsec_remediation_test.go b/pkg/acquisition/modules/appsec/appsec_remediation_test.go new file mode 100644 index 00000000000..a7f117389b3 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_remediation_test.go @@ -0,0 +1,320 @@ +package appsecacquisition + +import ( + "net/http" + "net/url" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/stretchr/testify/require" +) + +func TestAppsecDefaultPassRemediation(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic non-matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassAction: pass", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + DefaultPassAction: "allow", + output_asserts: func(events []types.Event, responses 
[]appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassAction: captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + DefaultPassAction: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) //@tko: body is captcha, but as it's 200, captcha won't be showed to user + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassHTTPCode: 200", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + UserPassedHTTPCode: 200, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, 
appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "DefaultPassHTTPCode: 200", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Args: url.Values{"foo": []string{"tutu"}}, + }, + UserPassedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecDefaultRemediation(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + 
}, + { + name: "default remediation to ban (default)", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "ban", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to allow", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + 
Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "custom user HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + { + name: "custom remediation + HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + DefaultRemediation: "foobar", + 
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, "foobar", responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, "foobar", appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go new file mode 100644 index 00000000000..3c48c50fabb --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -0,0 +1,733 @@ +package appsecacquisition + +import ( + "net/http" + "net/url" + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestAppsecRuleMatches(t *testing.T) { + + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", 
events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic non-matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"tutu"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Len(t, responses, 1) + require.False(t, responses[0].InBandInterrupt) + require.False(t, responses[0].OutOfBandInterrupt) + }, + }, + { + name: "default remediation to allow", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "allow", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + }, + }, + { + name: "default remediation to captcha", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + 
Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: "captcha", + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) + }, + }, + { + name: "no default remediation / custom user HTTP code", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + UserBlockedHTTPCode: 418, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.BanRemediation, responses[0].Action) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + }, + }, + { + name: "no match but try to set remediation to captcha with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + 
RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + { + name: "no match but try to set user HTTP code with on_match hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + { + name: "no match but try to set remediation with pre_eval hook", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"bla"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + 
require.Empty(t, events) + require.Equal(t, http.StatusOK, statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRuleTransforms(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + { + name: "Basic matching rule", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "lowercase", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/TOTO", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "uppercase", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/TOTO"}, + Transform: []string{"uppercase"}, + }, + }, + input_request: 
appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "b64decode", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + Transform: []string{"b64decode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=dG90bw", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "b64decode with extra padding", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + Transform: []string{"b64decode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=dG90bw===", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "length", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: 
"rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "gte", Value: "3"}, + Transform: []string{"length"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=toto", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "urldecode", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, + Transform: []string{"urldecode"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=%42%42%2F%41", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "trim", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, + Transform: []string{"urldecode", "trim"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/?foo=%20%20%42%42%2F%41%20%20", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, 
events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRuleZones(t *testing.T) { + + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + { + name: "rule: ARGS", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/foobar?something=toto&foobar=smth", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: ARGS_NAMES", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"ARGS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/foobar?something=toto&foobar=smth", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule2", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: BODY_ARGS", + expected_load_ok: true, + inband_rules: 
[]appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"BODY_ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"BODY_ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Body: []byte("smth=toto&foobar=other"), + Headers: http.Header{"Content-Type": []string{"application/x-www-form-urlencoded"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: BODY_ARGS_NAMES", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"BODY_ARGS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"BODY_ARGS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Body: []byte("smth=toto&foobar=other"), + Headers: http.Header{"Content-Type": []string{"application/x-www-form-urlencoded"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule2", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: HEADERS", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"HEADERS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + 
Zones: []string{"HEADERS"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Headers: http.Header{"foobar": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: HEADERS_NAMES", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"HEADERS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"HEADERS_NAMES"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Headers: http.Header{"foobar": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule2", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: METHOD", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"METHOD"}, + Match: appsec_rule.Match{Type: "equals", Value: "GET"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, 
events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: PROTOCOL", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"PROTOCOL"}, + Match: appsec_rule.Match{Type: "contains", Value: "3.1"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Proto: "HTTP/3.1", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: URI", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI"}, + Match: appsec_rule.Match{Type: "equals", Value: "/foobar"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/foobar", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: URI_FULL", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"URI_FULL"}, + Match: appsec_rule.Match{Type: "equals", Value: "/foobar?a=b"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/foobar?a=b", + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, 
types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + { + name: "rule: RAW_BODY", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"RAW_BODY"}, + Match: appsec_rule.Match{Type: "equals", Value: "foobar=42421"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/", + Body: []byte("foobar=42421"), + Headers: http.Header{"Content-Type": []string{"application/x-www-form-urlencoded"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + require.Equal(t, types.LOG, events[1].Type) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index d98215bf2c7..5fe4cfe236c 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -1,8 +1,6 @@ package appsecacquisition import ( - "net/http" - "net/url" "testing" "time" @@ -12,15 +10,8 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/google/uuid" log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" ) -/* -Missing tests (wip): - - GenerateResponse - - evt.Appsec and it's subobjects and methods -*/ - type appsecRuleTest struct { name string expected_load_ok bool @@ -39,1451 +30,6 @@ type appsecRuleTest struct { output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) } -func TestAppsecOnMatchHooks(t *testing.T) { - tests := []appsecRuleTest{ - { - name: "no rule : check return code", - expected_load_ok: true, 
- inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) - require.Equal(t, 403, responses[0].UserHTTPResponseCode) - require.Equal(t, appsec.BanRemediation, responses[0].Action) - - }, - }, - { - name: "on_match: change return code", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(413)"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) - require.Equal(t, 413, responses[0].UserHTTPResponseCode) - require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, - }, - { - name: "on_match: change action to a non standard one (log)", - expected_load_ok: true, - 
inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('log')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, "log", responses[0].Action) - }, - }, - { - name: "on_match: change action to another standard one (allow)", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - }, - }, - { - name: "on_match: change action to another standard one (ban)", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: 
appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, responses, 1) - //note: SetAction normalizes deny, ban and block to ban - require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, - }, - { - name: "on_match: change action to another standard one (captcha)", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, responses, 1) - //note: SetAction normalizes deny, ban and block to ban - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - }, - }, - { - name: "on_match: change action to a non standard one", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('foobar')"}}, - }, - input_request: appsec.ParsedRequest{ - 
RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Len(t, responses, 1) - require.Equal(t, "foobar", responses[0].Action) - }, - }, - { - name: "on_match: cancel alert", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true && LogInfo('XX -> %s', evt.Appsec.MatchedRules.GetName())", Apply: []string{"CancelAlert()"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 1) - require.Equal(t, types.LOG, events[0].Type) - require.Len(t, responses, 1) - require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, - }, - { - name: "on_match: cancel event", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"CancelEvent()"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, 
appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 1) - require.Equal(t, types.APPSEC, events[0].Type) - require.Len(t, responses, 1) - require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecPreEvalHooks(t *testing.T) { - /* - [x] basic working hook - [x] basic failing hook - [ ] test the "OnSuccess" feature - [ ] test multiple competing hooks - [ ] test the variety of helpers - */ - tests := []appsecRuleTest{ - { - name: "Basic on_load hook to disable inband rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Filter: "1 == 1", Apply: []string{"RemoveInBandRuleByName('rule1')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "Basic on_load fails to disable rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Filter: "1 ==2", Apply: []string{"RemoveInBandRuleByName('rule1')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": 
[]string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - - require.Equal(t, types.LOG, events[1].Type) - require.True(t, events[1].Appsec.HasInBandMatches) - require.Len(t, events[1].Appsec.MatchedRules, 1) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - - require.Len(t, responses, 1) - require.True(t, responses[0].InBandInterrupt) - - }, - }, - { - name: "on_load : disable inband by tag", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"RemoveInBandRuleByTag('crowdsec-rulez')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "on_load : disable inband by ID", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"RemoveInBandRuleByID(1516470898)"}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "on_load : disable inband by name", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"RemoveInBandRuleByName('rulez')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "on_load : outofband default behavior", - expected_load_ok: true, - outofband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 1) - require.Equal(t, 
types.LOG, events[0].Type) - require.True(t, events[0].Appsec.HasOutBandMatches) - require.False(t, events[0].Appsec.HasInBandMatches) - require.Len(t, events[0].Appsec.MatchedRules, 1) - require.Equal(t, "rulez", events[0].Appsec.MatchedRules[0]["msg"]) - //maybe surprising, but response won't mention OOB event, as it's sent as soon as the inband phase is over. - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "on_load : set remediation by tag", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"SetRemediationByTag('crowdsec-rulez', 'foobar')"}}, //rule ID is generated at runtime. If you change rule, it will break the test (: - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Len(t, responses, 1) - require.Equal(t, "foobar", responses[0].Action) - }, - }, - { - name: "on_load : set remediation by name", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"SetRemediationByName('rulez', 'foobar')"}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Len(t, responses, 1) - require.Equal(t, "foobar", responses[0].Action) - }, - }, - { - name: "on_load : set remediation by ID", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rulez", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Apply: []string{"SetRemediationByID(1516470898, 'foobar')"}}, //rule ID is generated at runtime. If you change rule, it will break the test (: - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Len(t, responses, 1) - require.Equal(t, "foobar", responses[0].Action) - require.Equal(t, "foobar", appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecRemediationConfigHooks(t *testing.T) { - - tests := []appsecRuleTest{ - { - name: "Basic matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - 
RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "SetRemediation", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - on_match: []appsec.Hook{{Apply: []string{"SetRemediation('captcha')"}}}, //rule ID is generated at runtime. 
If you change rule, it will break the test (: - - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "SetRemediation", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - on_match: []appsec.Hook{{Apply: []string{"SetReturnCode(418)"}}}, //rule ID is generated at runtime. If you change rule, it will break the test (: - - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} -func TestOnMatchRemediationHooks(t *testing.T) { - tests := []appsecRuleTest{ - { - name: "set remediation to allow with on_match hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", 
- Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('allow')"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "set remediation to captcha + custom user code with on_match hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: appsec.AllowRemediation, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - spew.Dump(responses) - spew.Dump(appsecResponse) - - log.Errorf("http status : %d", statusCode) - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - require.Equal(t, http.StatusForbidden, statusCode) - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecDefaultPassRemediation(t *testing.T) { - - tests := []appsecRuleTest{ - { - name: "Basic non-matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: 
"^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "DefaultPassAction: pass", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - DefaultPassAction: "allow", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "DefaultPassAction: captcha", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - DefaultPassAction: "captcha", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, 
appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) //@tko: body is captcha, but as it's 200, captcha won't be showed to user - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "DefaultPassHTTPCode: 200", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - UserPassedHTTPCode: 200, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "DefaultPassHTTPCode: 200", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/", - Args: url.Values{"foo": []string{"tutu"}}, - }, - UserPassedHTTPCode: 418, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, 
appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecDefaultRemediation(t *testing.T) { - - tests := []appsecRuleTest{ - { - name: "Basic matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "default remediation to ban (default)", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "ban", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - 
}, - }, - { - name: "default remediation to allow", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "allow", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "default remediation to captcha", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "captcha", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "custom user HTTP code", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, 
- Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - UserBlockedHTTPCode: 418, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - { - name: "custom remediation + HTTP code", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - UserBlockedHTTPCode: 418, - DefaultRemediation: "foobar", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, "foobar", responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, "foobar", appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecRuleMatches(t *testing.T) { - - /* - [x] basic matching rule - [x] basic non-matching rule - [ ] test the transformation - [ ] ? 
- */ - tests := []appsecRuleTest{ - { - name: "Basic matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - - require.Equal(t, types.LOG, events[1].Type) - require.True(t, events[1].Appsec.HasInBandMatches) - require.Len(t, events[1].Appsec.MatchedRules, 1) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - - require.Len(t, responses, 1) - require.True(t, responses[0].InBandInterrupt) - }, - }, - { - name: "Basic non-matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"tutu"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Len(t, responses, 1) - require.False(t, responses[0].InBandInterrupt) - require.False(t, responses[0].OutOfBandInterrupt) - }, - }, - { - name: "default remediation to allow", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - 
Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "allow", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.AllowRemediation, responses[0].Action) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) - }, - }, - { - name: "default remediation to captcha", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - DefaultRemediation: "captcha", - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) - require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) - }, - }, - { - name: "no default remediation / custom user HTTP code", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"toto"}}, - }, - UserBlockedHTTPCode: 418, - 
output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Equal(t, appsec.BanRemediation, responses[0].Action) - require.Equal(t, http.StatusForbidden, statusCode) - require.Equal(t, appsec.BanRemediation, appsecResponse.Action) - require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) - }, - }, - { - name: "no match but try to set remediation to captcha with on_match hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"bla"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - }, - }, - { - name: "no match but try to set user HTTP code with on_match hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - on_match: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"bla"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - 
require.Empty(t, events) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - }, - }, - { - name: "no match but try to set remediation with pre_eval hook", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule42", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, - Transform: []string{"lowercase"}, - }, - }, - pre_eval: []appsec.Hook{ - {Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}}, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/urllll", - Args: url.Values{"foo": []string{"bla"}}, - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Empty(t, events) - require.Equal(t, http.StatusOK, statusCode) - require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - -func TestAppsecRuleTransforms(t *testing.T) { - - log.SetLevel(log.TraceLevel) - tests := []appsecRuleTest{ - { - name: "Basic matching rule", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"URI"}, - Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/toto", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "lowercase", - expected_load_ok: true, - inband_rules: 
[]appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"URI"}, - Match: appsec_rule.Match{Type: "equals", Value: "/toto"}, - Transform: []string{"lowercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/TOTO", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "uppercase", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"URI"}, - Match: appsec_rule.Match{Type: "equals", Value: "/TOTO"}, - Transform: []string{"uppercase"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/toto", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "b64decode", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "equals", Value: "toto"}, - Transform: []string{"b64decode"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=dG90bw", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", 
events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "b64decode with extra padding", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "equals", Value: "toto"}, - Transform: []string{"b64decode"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=dG90bw===", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "length", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "gte", Value: "3"}, - Transform: []string{"length"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=toto", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "urldecode", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, - Transform: []string{"urldecode"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=%42%42%2F%41", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse 
appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - { - name: "trim", - expected_load_ok: true, - inband_rules: []appsec_rule.CustomRule{ - { - Name: "rule1", - Zones: []string{"ARGS"}, - Variables: []string{"foo"}, - Match: appsec_rule.Match{Type: "equals", Value: "BB/A"}, - Transform: []string{"urldecode", "trim"}, - }, - }, - input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", - Method: "GET", - URI: "/?foo=%20%20%42%42%2F%41%20%20", - }, - output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { - require.Len(t, events, 2) - require.Equal(t, types.APPSEC, events[0].Type) - require.Equal(t, types.LOG, events[1].Type) - require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) - }, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - loadAppSecEngine(test, t) - }) - } -} - func loadAppSecEngine(test appsecRuleTest, t *testing.T) { if testing.Verbose() { log.SetLevel(log.TraceLevel) From 2682f801dfe90a85a7bf4d3ec2a51136280a2dac Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 5 Apr 2024 14:57:33 +0200 Subject: [PATCH 085/581] windows: fix data file update (remove before rename) (#2930) --- pkg/cwhub/dataset.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index 97fd9c5a0ff..921361e3fcf 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -4,9 +4,11 @@ import ( "errors" "fmt" "io" + "io/fs" "net/http" "os" "path/filepath" + "runtime" "time" "github.com/sirupsen/logrus" @@ -65,6 +67,18 @@ func downloadFile(url string, destPath string) error { // TODO: use a better way to communicate this fmt.Printf("updated %s\n", filepath.Base(destPath)) + 
if runtime.GOOS == "windows" { + // On Windows, rename will fail if the destination file already exists + // so we remove it first. + err = os.Remove(destPath) + switch { + case errors.Is(err, fs.ErrNotExist): + break + case err != nil: + return err + } + } + if err = os.Rename(tmpFileName, destPath); err != nil { return err } From 990dd5e08e6efaa608910fdc258958c9db279518 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 5 Apr 2024 15:11:11 +0200 Subject: [PATCH 086/581] use go 1.21.9; update dependencies (#2931) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- go.mod | 10 +++++----- go.sum | 16 ++++++++-------- 14 files changed, 25 insertions(+), 25 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index bd84389b011..4e977201c5a 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index cc90961bfd6..9e320b1b3de 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 18cbb50a12e..2cd09b494a0 100644 --- 
a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 6780727e968..d56d69f28b2 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index ae3a475774d..278426d778c 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 68c95ed6446..1f27a8c7c2a 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 6b2f1132a82..781f2a4a930 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index d76315462f3..67f73d81a45 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 6fd582da853..6cdf111a4ba 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.8" + go-version: "1.21.9" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index 53a6cd04512..1e311bfa89c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.8-alpine3.18 AS build +FROM golang:1.21.9-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index fd40bd475bb..ee0fa32bbd9 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.8-bookworm AS build +FROM golang:1.21.9-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f90af6f1718..b662a809ad7 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.21.8' + version: '1.21.9' - pwsh: | choco install -y make diff --git a/go.mod b/go.mod index 50b88f7d425..04f34e6485e 
100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.3 github.com/cespare/xxhash/v2 v2.2.0 + github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 github.com/crowdsecurity/go-cs-lib v0.0.6 @@ -81,9 +82,9 @@ require ( github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.3.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.20.0 + golang.org/x/crypto v0.22.0 golang.org/x/mod v0.11.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.19.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 google.golang.org/protobuf v1.33.0 @@ -104,7 +105,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bytedance/sonic v1.9.1 // indirect github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect - github.com/corazawaf/libinjection-go v0.1.2 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/creack/pty v1.1.18 // indirect @@ -198,9 +198,9 @@ require ( github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect golang.org/x/arch v0.3.0 // indirect - golang.org/x/net v0.21.0 // indirect + golang.org/x/net v0.24.0 // indirect golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.17.0 // indirect + golang.org/x/term v0.19.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect diff --git a/go.sum b/go.sum index 8f91bd31ff6..29e23f02ab9 100644 --- a/go.sum +++ b/go.sum @@ -757,8 +757,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg= -golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -791,8 +791,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -841,8 +841,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -850,8 +850,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From 0e8a1c681b0c72dc45509aa14f6c5a6c9df83ab0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 8 Apr 2024 14:53:12 +0200 Subject: [PATCH 087/581] docker: pre-download all hub items and data, opt-in hub update/upgrade 
(#2933) * docker: pre-download all hub items and data, opt-in hub update/upgrade * docker/bars: don't purge anything before pre-downloading hub * Docker: README update --- Dockerfile | 1 + docker/README.md | 11 +++++++++-- docker/docker_start.sh | 5 ++--- docker/preload-hub-items | 22 ++++++++++++++++++++++ test/bin/preload-hub-items | 19 ++----------------- 5 files changed, 36 insertions(+), 22 deletions(-) create mode 100755 docker/preload-hub-items diff --git a/Dockerfile b/Dockerfile index 1e311bfa89c..d2b01ed7753 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,6 +25,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ + ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/docker/README.md b/docker/README.md index 5e39838a175..2fea57a6161 100644 --- a/docker/README.md +++ b/docker/README.md @@ -134,7 +134,6 @@ labels: type: apache2 ``` - ## Recommended configuration ### Volumes @@ -146,6 +145,14 @@ to avoid losing credentials and decision data in case of container destruction a * Acquisition: `/etc/crowdsec/acquis.d` and/or `/etc/crowdsec.acquis.yaml` (yes, they can be nested in `/etc/crowdsec`) * Database when using SQLite (default): `/var/lib/crowdsec/data` +### Hub updates + +To ensure you have the latest version of the collections, scenarios, parsers, etc., you can set the variable `DO_HUB_UPGRADE` to true. +This will perform an update/upgrade of the hub every time the container is started. + +Be aware that if your container is misbehaving and caught in a restart loop, the CrowdSec hub may ban your IP for some time and your containers +will run with the version of the hub that is cached in the container's image. If you enable `DO_HUB_UPGRADE`, do it when your infrastructure is running +correctly and make sure you have some monitoring in place. 
## Start a Crowdsec instance @@ -316,7 +323,7 @@ config.yaml) each time the container is run. | `BOUNCERS_ALLOWED_OU` | bouncer-ou | OU values allowed for bouncers, separated by comma | | | | | | __Hub management__ | | | -| `NO_HUB_UPGRADE` | false | Skip hub update / upgrade when the container starts | +| `DO_HUB_UPGRADE` | false | Force hub update / upgrade when the container starts. If for some reason the container restarts too often, it may lead to a temporary ban from hub updates. | | `COLLECTIONS` | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` | | `PARSERS` | | Parsers to install, separated by space | | `SCENARIOS` | | Scenarios to install, separated by space | diff --git a/docker/docker_start.sh b/docker/docker_start.sh index dd96184ccbc..26c5b0eeedc 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -304,9 +304,8 @@ conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)' ## Install hub items -cscli hub update || true - -if isfalse "$NO_HUB_UPGRADE"; then +if istrue "$DO_HUB_UPGRADE"; then + cscli hub update || true cscli hub upgrade || true fi diff --git a/docker/preload-hub-items b/docker/preload-hub-items new file mode 100755 index 00000000000..d02b094851b --- /dev/null +++ b/docker/preload-hub-items @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -eu + +# pre-download everything but don't install anything + +echo "Pre-downloading Hub content..." + +types=$(cscli hub types -o raw) + +for itemtype in $types; do + ALL_ITEMS=$(cscli "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') + if [[ -n "${ALL_ITEMS}" ]]; then + #shellcheck disable=SC2086 + cscli "$itemtype" install \ + $ALL_ITEMS \ + --download-only \ + --error + fi +done + +echo " done." 
diff --git a/test/bin/preload-hub-items b/test/bin/preload-hub-items index 14e9cff998c..ddf7fecbabc 100755 --- a/test/bin/preload-hub-items +++ b/test/bin/preload-hub-items @@ -9,20 +9,12 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # pre-download everything but don't install anything -echo -n "Purging existing hub..." +echo "Pre-downloading Hub content..." types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - "$CSCLI" "${itemtype}" delete --all --error --purge --force -done - -echo " done." - -echo -n "Pre-downloading Hub content..." - -for itemtype in $types; do - ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | jq --arg itemtype "$itemtype" -r '.[$itemtype][].name') + ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') if [[ -n "${ALL_ITEMS}" ]]; then #shellcheck disable=SC2086 "$CSCLI" "$itemtype" install \ @@ -32,11 +24,4 @@ for itemtype in $types; do fi done -# XXX: download-only works only for collections, not for parsers, scenarios, postoverflows. -# so we have to delete the links manually, and leave the downloaded files in place - -for itemtype in $types; do - "$CSCLI" "$itemtype" delete --all --error -done - echo " done." 
From 2291a232cb7188862bd1976615a19835e0f94ad2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:00:45 +0200 Subject: [PATCH 088/581] docker: pre-download hub items (debian image) (#2934) --- Dockerfile.debian | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile.debian b/Dockerfile.debian index ee0fa32bbd9..0d094f8dfa5 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -30,6 +30,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ + ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists From 0746e0c091b8426791a4a495b25a186efd899c9d Mon Sep 17 00:00:00 2001 From: AlteredCoder <64792091+AlteredCoder@users.noreply.github.com> Date: Thu, 11 Apr 2024 11:23:19 +0200 Subject: [PATCH 089/581] Rename bouncers to Remediation component in openAPI (#2936) * Rename bouncers to Remediation component in openAPI --- pkg/models/localapi_swagger.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 66132e5e36e..d167da9b6af 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -26,10 +26,10 @@ produces: paths: /decisions/stream: get: - description: Returns a list of new/expired decisions. Intended for bouncers that need to "stream" decisions + description: Returns a list of new/expired decisions. 
Intended for remediation component that need to "stream" decisions summary: getDecisionsStream tags: - - bouncers + - Remediation component operationId: getDecisionsStream deprecated: false produces: @@ -39,7 +39,7 @@ paths: in: query required: false type: boolean - description: 'If true, means that the bouncers is starting and a full list must be provided' + description: 'If true, means that the remediation component is starting and a full list must be provided' - name: scopes in: query required: false @@ -73,10 +73,10 @@ paths: security: - APIKeyAuthorizer: [] head: - description: Returns a list of new/expired decisions. Intended for bouncers that need to "stream" decisions + description: Returns a list of new/expired decisions. Intended for remediation component that need to "stream" decisions summary: GetDecisionsStream tags: - - bouncers + - Remediation component operationId: headDecisionsStream deprecated: false produces: @@ -100,7 +100,7 @@ paths: description: Returns information about existing decisions summary: getDecisions tags: - - bouncers + - Remediation component operationId: getDecisions deprecated: false produces: @@ -164,7 +164,7 @@ paths: description: Returns information about existing decisions summary: GetDecisions tags: - - bouncers + - Remediation component operationId: headDecisions deprecated: false produces: @@ -1008,7 +1008,7 @@ definitions: title: "error response" description: "error response return by the API" tags: - - name: bouncers + - name: Remediation component description: 'Operations about decisions : bans, captcha, rate-limit etc.' 
- name: watchers description: 'Operations about watchers : cscli & crowdsec' From c6e40191ddf7a87b8c0d946c36083c084677e5f1 Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 18 Apr 2024 15:33:51 +0200 Subject: [PATCH 090/581] =?UTF-8?q?Revert=20"docker:=20pre-download=20all?= =?UTF-8?q?=20hub=20items=20and=20data,=20opt-in=20hub=20updat=E2=80=A6=20?= =?UTF-8?q?(#2947)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/docker-tests.yml | 14 +++++++------- Dockerfile | 1 - Dockerfile.debian | 1 - docker/README.md | 11 ++--------- docker/docker_start.sh | 5 +++-- docker/preload-hub-items | 22 ---------------------- test/bin/preload-hub-items | 19 +++++++++++++++++-- 7 files changed, 29 insertions(+), 44 deletions(-) delete mode 100755 docker/preload-hub-items diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index d3ae4f90d79..3e87d3ba4f1 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -59,15 +59,15 @@ jobs: cd docker/test python -m pip install --upgrade pipenv wheel - - name: "Cache virtualenvs" - id: cache-pipenv - uses: actions/cache@v4 - with: - path: ~/.local/share/virtualenvs - key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} + #- name: "Cache virtualenvs" + # id: cache-pipenv + # uses: actions/cache@v4 + # with: + # path: ~/.local/share/virtualenvs + # key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} - name: "Install dependencies" - if: steps.cache-pipenv.outputs.cache-hit != 'true' + #if: steps.cache-pipenv.outputs.cache-hit != 'true' run: | cd docker/test pipenv install --deploy diff --git a/Dockerfile b/Dockerfile index d2b01ed7753..1e311bfa89c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,7 +25,6 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ - ./docker/preload-hub-items && \ cscli collections 
install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/Dockerfile.debian b/Dockerfile.debian index 0d094f8dfa5..ee0fa32bbd9 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -30,7 +30,6 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ - ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/docker/README.md b/docker/README.md index 2fea57a6161..5e39838a175 100644 --- a/docker/README.md +++ b/docker/README.md @@ -134,6 +134,7 @@ labels: type: apache2 ``` + ## Recommended configuration ### Volumes @@ -145,14 +146,6 @@ to avoid losing credentials and decision data in case of container destruction a * Acquisition: `/etc/crowdsec/acquis.d` and/or `/etc/crowdsec.acquis.yaml` (yes, they can be nested in `/etc/crowdsec`) * Database when using SQLite (default): `/var/lib/crowdsec/data` -### Hub updates - -To ensure you have the latest version of the collections, scenarios, parsers, etc., you can set the variable `DO_HUB_UPGRADE` to true. -This will perform an update/upgrade of the hub every time the container is started. - -Be aware that if your container is misbehaving and caught in a restart loop, the CrowdSec hub may ban your IP for some time and your containers -will run with the version of the hub that is cached in the container's image. If you enable `DO_HUB_UPGRADE`, do it when your infrastructure is running -correctly and make sure you have some monitoring in place. ## Start a Crowdsec instance @@ -323,7 +316,7 @@ config.yaml) each time the container is run. | `BOUNCERS_ALLOWED_OU` | bouncer-ou | OU values allowed for bouncers, separated by comma | | | | | | __Hub management__ | | | -| `DO_HUB_UPGRADE` | false | Force hub update / upgrade when the container starts. 
If for some reason the container restarts too often, it may lead to a temporary ban from hub updates. | +| `NO_HUB_UPGRADE` | false | Skip hub update / upgrade when the container starts | | `COLLECTIONS` | | Collections to install, separated by space: `-e COLLECTIONS="crowdsecurity/linux crowdsecurity/apache2"` | | `PARSERS` | | Parsers to install, separated by space | | `SCENARIOS` | | Scenarios to install, separated by space | diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 26c5b0eeedc..dd96184ccbc 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -304,8 +304,9 @@ conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)' ## Install hub items -if istrue "$DO_HUB_UPGRADE"; then - cscli hub update || true +cscli hub update || true + +if isfalse "$NO_HUB_UPGRADE"; then cscli hub upgrade || true fi diff --git a/docker/preload-hub-items b/docker/preload-hub-items deleted file mode 100755 index d02b094851b..00000000000 --- a/docker/preload-hub-items +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -set -eu - -# pre-download everything but don't install anything - -echo "Pre-downloading Hub content..." - -types=$(cscli hub types -o raw) - -for itemtype in $types; do - ALL_ITEMS=$(cscli "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') - if [[ -n "${ALL_ITEMS}" ]]; then - #shellcheck disable=SC2086 - cscli "$itemtype" install \ - $ALL_ITEMS \ - --download-only \ - --error - fi -done - -echo " done." diff --git a/test/bin/preload-hub-items b/test/bin/preload-hub-items index ddf7fecbabc..14e9cff998c 100755 --- a/test/bin/preload-hub-items +++ b/test/bin/preload-hub-items @@ -9,12 +9,20 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # pre-download everything but don't install anything -echo "Pre-downloading Hub content..." +echo -n "Purging existing hub..." 
types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') + "$CSCLI" "${itemtype}" delete --all --error --purge --force +done + +echo " done." + +echo -n "Pre-downloading Hub content..." + +for itemtype in $types; do + ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | jq --arg itemtype "$itemtype" -r '.[$itemtype][].name') if [[ -n "${ALL_ITEMS}" ]]; then #shellcheck disable=SC2086 "$CSCLI" "$itemtype" install \ @@ -24,4 +32,11 @@ for itemtype in $types; do fi done +# XXX: download-only works only for collections, not for parsers, scenarios, postoverflows. +# so we have to delete the links manually, and leave the downloaded files in place + +for itemtype in $types; do + "$CSCLI" "$itemtype" delete --all --error +done + echo " done." From 95f27677e4c27d66cbf387c58c77a58491c5b378 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 22 Apr 2024 14:18:34 +0100 Subject: [PATCH 091/581] enhance: add refactoring to governance (#2955) --- .github/governance.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/governance.yml b/.github/governance.yml index c57fd362600..0457a24c7b0 100644 --- a/.github/governance.yml +++ b/.github/governance.yml @@ -42,7 +42,7 @@ issue: 3. Check [Releases](https://github.com/crowdsecurity/crowdsec/releases/latest) to make sure your agent is on the latest version. - prefix: kind - list: ['feature', 'bug', 'packaging', 'enhancement'] + list: ['feature', 'bug', 'packaging', 'enhancement', 'refactoring'] multiple: false author_association: author: true @@ -54,6 +54,7 @@ issue: @$AUTHOR: There are no 'kind' label on this issue. You need a 'kind' label to start the triage process. 
* `/kind feature` * `/kind enhancement` + * `/kind refactoring` * `/kind bug` * `/kind packaging` @@ -65,12 +66,13 @@ pull_request: labels: - prefix: kind multiple: false - list: [ 'feature', 'enhancement', 'fix', 'chore', 'dependencies'] + list: [ 'feature', 'enhancement', 'fix', 'chore', 'dependencies', 'refactoring'] needs: comment: | @$AUTHOR: There are no 'kind' label on this PR. You need a 'kind' label to generate the release automatically. * `/kind feature` * `/kind enhancement` + * `/kind refactoring` * `/kind fix` * `/kind chore` * `/kind dependencies` From 630cbf0c70658dff679bb1b95e7b54ba43dd4c0a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Apr 2024 17:18:11 +0200 Subject: [PATCH 092/581] update linter list and descriptions (#2951) --- .golangci.yml | 77 +++++++++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 33 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index df0cb67d1a8..f38fa337a8d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -147,37 +147,40 @@ linters: # # DEPRECATED by golangi-lint # - - deadcode # The owner seems to have abandoned the linter. Replaced by unused. - - exhaustivestruct # The owner seems to have abandoned the linter. Replaced by exhaustruct. - - golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes - - ifshort # Checks that your code uses short syntax for if-statements whenever possible - - interfacer # Linter that suggests narrower interface types - - maligned # Tool to detect Go structs that would take less memory if their fields were sorted - - nosnakecase # nosnakecase is a linter that detects snake case of variable naming and function name. - - scopelint # Scopelint checks for unpinned variables in go programs - - structcheck # The owner seems to have abandoned the linter. Replaced by unused. - - varcheck # The owner seems to have abandoned the linter. Replaced by unused. 
+ - deadcode + - exhaustivestruct + - golint + - ifshort + - interfacer + - maligned + - nosnakecase + - scopelint + - structcheck + - varcheck # # Enabled # # - asasalint # check for pass []any as any in variadic func(...any) - # - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name # - bidichk # Checks for dangerous unicode character sequences # - bodyclose # checks whether HTTP response body is closed successfully + # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied # - cyclop # checks function and package cyclomatic complexity # - decorder # check declaration order and count of types, constants, variables and functions # - depguard # Go linter that checks if package imports are in a list of acceptable packages # - dupword # checks for duplicate words in the source code # - durationcheck # check for two durations multiplied together - # - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases + # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. # - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds # - exportloopref # checks for pointers to enclosing loop variables # - funlen # Tool for detection of long functions # - ginkgolinter # enforces standards of using ginkgo and gomega + # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. 
# - gochecknoinits # Checks that no init functions are present in Go code + # - gochecksumtype # Run exhaustiveness checks on Go "sum types" # - gocognit # Computes and checks the cognitive complexity of functions # - gocritic # Provides diagnostics that check for bugs, performance and style issues. # - gocyclo # Computes and checks the cyclomatic complexity of functions @@ -185,48 +188,56 @@ linters: # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. # - goprintffuncname # Checks that printf-like functions are named with `f` at the end - # - gosimple # (megacheck): Linter for Go source code that specializes in simplifying a code - # - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - # - grouper # An analyzer to analyze expression groups. + # - gosimple # (megacheck): Linter for Go source code that specializes in simplifying code + # - gosmopolitan # Report certain i18n/l10n anti-patterns in your Go codebase + # - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs. It is roughly the same as 'go vet' and uses its passes. + # - grouper # Analyze expression groups. # - importas # Enforces consistent import aliases # - ineffassign # Detects when assignments to existing variables are not used # - interfacebloat # A linter that checks the number of methods inside an interface. + # - intrange # intrange is a linter to find places where for loops could make use of an integer range. # - lll # Reports long lines + # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). # - logrlint # Check logr arguments. 
# - maintidx # maintidx measures the maintainability index of each function. # - makezero # Finds slice declarations with non-zero initial length - # - misspell # Finds commonly misspelled English words in comments - # - nakedret # Finds naked returns in functions greater than a specified function length + # - mirror # reports wrong mirror patterns of bytes/strings usage + # - misspell # Finds commonly misspelled English words + # - nakedret # Checks that functions with naked returns are not longer than a maximum size (can be zero). # - nestif # Reports deeply nested if statements # - nilerr # Finds the code that returns nil even if it checks that the error is not nil. # - nolintlint # Reports ill-formed or insufficient nolint directives # - nonamedreturns # Reports all named returns # - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL. + # - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative. # - predeclared # find code that shadows one of Go's predeclared identifiers # - reassign # Checks that package variables are not reassigned - # - rowserrcheck # checks whether Err of rows is checked successfully - # - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. - # - staticcheck # (megacheck): Staticcheck is a go vet on steroids, applying a ton of static analysis checks - # - testableexamples # linter checks if examples are testable (have an expected output) + # - rowserrcheck # checks whether Rows.Err of rows is checked successfully + # - sloglint # ensure consistent code style when using log/slog + # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. + # - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed. + # - staticcheck # (megacheck): It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint. 
# - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 + # - testableexamples # linter checks if examples are testable (have an expected output) + # - testifylint # Checks usage of github.com/stretchr/testify. # - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes - # - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code # - unconvert # Remove unnecessary type conversions # - unused # (megacheck): Checks Go code for unused constants, variables, functions and types # - usestdlibvars # A linter that detect the possibility to use variables/constants from the Go standard library. - # - wastedassign # wastedassign finds wasted assignment statements. + # - wastedassign # Finds wasted assignment statements + # - zerologlint # Detects the wrong usage of `zerolog` that a user forgets to dispatch with `Send` or `Msg` # # Recommended? (easy) # - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) - - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and optionally reports occations, where the check for the returned error can be omitted. + - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and reports occations, where the check for the returned error can be omitted. - exhaustive # check exhaustiveness of enum switch statements - gci # Gci control golang package import order and make it always deterministic. - godot # Check if comments end in a period - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification - - goimports # In addition to fixing imports, goimports also formats your code in the same style as gofmt. + - goimports # Check import statements are formatted according to the 'goimport' command. Reformat imports in autofix mode. 
- gosec # (gas): Inspects source code for security problems - inamedparam # reports interfaces with unnamed method parameters - musttag # enforce field tags in (un)marshaled structs @@ -234,7 +245,7 @@ linters: - protogetter # Reports direct reads from proto message fields when getters should be used - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - tagalign # check that struct tags are well aligned - - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - thelper # thelper detects tests helpers which is not start with t.Helper() method. - wrapcheck # Checks that errors returned from external packages are wrapped # @@ -242,12 +253,12 @@ linters: # - containedctx # containedctx is a linter that detects struct contained context.Context field - - contextcheck # check the function whether use a non-inherited context + - contextcheck # check whether the function uses a non-inherited context - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - gomnd # An analyzer to detect magic numbers. - ireturn # Accept Interfaces, Return Concrete Types - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. - - noctx # noctx finds sending http request without context.Context + - noctx # Finds sending http request without context.Context - unparam # Reports unused function parameters # @@ -256,8 +267,8 @@ linters: - gofumpt # Gofumpt checks whether code was gofumpt-ed. - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity - - whitespace # Tool for detection of leading and trailing whitespace - - wsl # Whitespace Linter - Forces you to use empty lines! + - whitespace # Whitespace is a linter that checks for unnecessary newlines at the start and end of functions, if, for, etc. 
+ - wsl # add or remove empty lines # # Well intended, but not ready for this @@ -265,8 +276,8 @@ linters: - dupl # Tool for code clone detection - forcetypeassert # finds forced type assertions - godox # Tool for detection of FIXME, TODO and other comment keywords - - goerr113 # Golang linter to check the errors handling expressions - - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test + - goerr113 # Go linter to check the errors handling expressions + - paralleltest # Detects missing usage of t.Parallel() method in your Go test - testpackage # linter that makes you use a separate _test package # @@ -274,7 +285,7 @@ linters: # - exhaustruct # Checks if all structure fields are initialized - forbidigo # Forbids identifiers - - gochecknoglobals # check that no global variables exist + - gochecknoglobals # Check that no global variables exist. - goconst # Finds repeated strings that could be replaced by a constant - stylecheck # Stylecheck is a replacement for golint - tagliatelle # Checks the struct tags. From fb393f1c57cd72298d6684884bbb7419365a798e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Apr 2024 17:19:00 +0200 Subject: [PATCH 093/581] tests: bump yq, cfssl (#2952) --- Dockerfile | 2 +- Dockerfile.debian | 2 +- test/bats.mk | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1e311bfa89c..69de0f9df8f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,7 +16,7 @@ RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold core cd re2-${RE2_VERSION} && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.40.4 + go install github.com/mikefarah/yq/v4@v4.43.1 COPY . . 
diff --git a/Dockerfile.debian b/Dockerfile.debian index ee0fa32bbd9..9bcb517bb2d 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -21,7 +21,7 @@ RUN apt-get update && \ make && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.40.4 + go install github.com/mikefarah/yq/v4@v4.43.1 COPY . . diff --git a/test/bats.mk b/test/bats.mk index 0cc5deb9b7a..8f507cb659b 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -66,11 +66,11 @@ bats-check-requirements: ## Check dependencies for functional tests @$(TEST_DIR)/bin/check-requirements bats-update-tools: ## Install/update tools required for functional tests - # yq v4.40.4 - GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@1c3d55106075bd37df197b4bc03cb4a413fdb903 - # cfssl v1.6.4 - GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@b4d0d877cac528f63db39dfb62d5c96cd3a32a0b - GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@b4d0d877cac528f63db39dfb62d5c96cd3a32a0b + # yq v4.43.1 + GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@c35ec752e38ea0c096d3c44e13cfc0797ac394d8 + # cfssl v1.6.5 + GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda + GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda # Build and installs crowdsec in a local directory. Rebuilds if already exists. 
bats-build: bats-environment ## Build binaries for functional tests From b48b7283178b3f44bf0a60aa37cf28377cc46189 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Apr 2024 23:54:51 +0200 Subject: [PATCH 094/581] cscli support: include stack traces (#2935) --- .golangci.yml | 15 +++------- cmd/crowdsec-cli/main.go | 17 +++++++++-- cmd/crowdsec-cli/support.go | 60 +++++++++++++++++++++++++++---------- cmd/crowdsec/main.go | 9 +++++- cmd/crowdsec/serve.go | 2 +- go.mod | 2 +- go.sum | 4 +-- pkg/apiserver/apiserver.go | 11 +++++-- 8 files changed, 82 insertions(+), 38 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f38fa337a8d..cf13d9b6d8d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -37,17 +37,10 @@ linters-settings: statements: 122 govet: - enable: - - atomicalign - - deepequalerrors - # TODO: - fieldalignment - - findcall - - nilness - # TODO: - reflectvaluecompare - - shadow - - sortslice - - timeformat - - unusedwrite + enable-all: true + disable: + - reflectvaluecompare + - fieldalignment lll: # lower this after refactoring diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 446901e4aa9..9e721f1fac6 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -1,7 +1,9 @@ package main import ( + "fmt" "os" + "path/filepath" "slices" "time" @@ -10,14 +12,18 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) -var ConfigFilePath string -var csConfig *csconfig.Config -var dbClient *database.Client +var ( + ConfigFilePath string + csConfig *csconfig.Config + dbClient *database.Client +) type configGetter func() *csconfig.Config @@ -82,6 +88,11 @@ func loadConfigFor(command string) (*csconfig.Config, string, error) { return nil, "", err } + // set up directory for trace 
files + if err := trace.Init(filepath.Join(config.ConfigPaths.DataDir, "trace")); err != nil { + return nil, "", fmt.Errorf("while setting up trace directory: %w", err) + } + return config, merged, nil } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 661950fa8f6..8b2481b4cf2 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -4,6 +4,7 @@ import ( "archive/zip" "bytes" "context" + "errors" "fmt" "io" "net/http" @@ -12,12 +13,14 @@ import ( "path/filepath" "regexp" "strings" + "time" "github.com/blackfireio/osinfo" "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" @@ -47,6 +50,7 @@ const ( SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" SUPPORT_ACQUISITION_CONFIG_BASE_PATH = "config/acquis/" SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" + SUPPORT_CRASH_PATH = "crash/" ) // from https://github.com/acarl005/stripansi @@ -62,7 +66,7 @@ func collectMetrics() ([]byte, []byte, error) { if csConfig.Cscli.PrometheusUrl == "" { log.Warn("No Prometheus URL configured, metrics will not be collected") - return nil, nil, fmt.Errorf("prometheus_uri is not set") + return nil, nil, errors.New("prometheus_uri is not set") } humanMetrics := bytes.NewBuffer(nil) @@ -70,7 +74,7 @@ func collectMetrics() ([]byte, []byte, error) { ms := NewMetricStore() if err := ms.Fetch(csConfig.Cscli.PrometheusUrl); err != nil { - return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %s", err) + return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %w", err) } if err := ms.Format(humanMetrics, nil, "human", false); err != nil { @@ -79,21 +83,21 @@ func collectMetrics() ([]byte, []byte, error) { req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl, nil) if err != nil { - return nil, nil, fmt.Errorf("could not 
create requests to prometheus endpoint: %s", err) + return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %w", err) } client := &http.Client{} resp, err := client.Do(req) if err != nil { - return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %s", err) + return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %w", err) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %s", err) + return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %w", err) } return humanMetrics.Bytes(), body, nil @@ -121,19 +125,18 @@ func collectOSInfo() ([]byte, error) { log.Info("Collecting OS info") info, err := osinfo.GetOSInfo() - if err != nil { return nil, err } w := bytes.NewBuffer(nil) - w.WriteString(fmt.Sprintf("Architecture: %s\n", info.Architecture)) - w.WriteString(fmt.Sprintf("Family: %s\n", info.Family)) - w.WriteString(fmt.Sprintf("ID: %s\n", info.ID)) - w.WriteString(fmt.Sprintf("Name: %s\n", info.Name)) - w.WriteString(fmt.Sprintf("Codename: %s\n", info.Codename)) - w.WriteString(fmt.Sprintf("Version: %s\n", info.Version)) - w.WriteString(fmt.Sprintf("Build: %s\n", info.Build)) + fmt.Fprintf(w, "Architecture: %s\n", info.Architecture) + fmt.Fprintf(w, "Family: %s\n", info.Family) + fmt.Fprintf(w, "ID: %s\n", info.ID) + fmt.Fprintf(w, "Name: %s\n", info.Name) + fmt.Fprintf(w, "Codename: %s\n", info.Codename) + fmt.Fprintf(w, "Version: %s\n", info.Version) + fmt.Fprintf(w, "Build: %s\n", info.Build) return w.Bytes(), nil } @@ -163,7 +166,7 @@ func collectBouncers(dbClient *database.Client) ([]byte, error) { bouncers, err := dbClient.ListBouncers() if err != nil { - return nil, fmt.Errorf("unable to list bouncers: %s", err) + return nil, fmt.Errorf("unable to list bouncers: %w", err) } getBouncersTable(out, bouncers) @@ -176,7 +179,7 @@ func collectAgents(dbClient *database.Client) 
([]byte, error) { machines, err := dbClient.ListMachines() if err != nil { - return nil, fmt.Errorf("unable to list machines: %s", err) + return nil, fmt.Errorf("unable to list machines: %w", err) } getAgentsTable(out, machines) @@ -264,6 +267,11 @@ func collectAcquisitionConfig() map[string][]byte { return ret } +func collectCrash() ([]string, error) { + log.Info("Collecting crash dumps") + return trace.List() +} + type cliSupport struct{} func NewCLISupport() *cliSupport { @@ -431,11 +439,31 @@ cscli support dump -f /tmp/crowdsec-support.zip } } + crash, err := collectCrash() + if err != nil { + log.Errorf("could not collect crash dumps: %s", err) + } + + for _, filename := range crash { + content, err := os.ReadFile(filename) + if err != nil { + log.Errorf("could not read crash dump %s: %s", filename, err) + } + + infos[SUPPORT_CRASH_PATH+filepath.Base(filename)] = content + } + w := bytes.NewBuffer(nil) zipWriter := zip.NewWriter(w) for filename, data := range infos { - fw, err := zipWriter.Create(filename) + header := &zip.FileHeader{ + Name: filename, + Method: zip.Deflate, + // TODO: retain mtime where possible (esp. 
trace) + Modified: time.Now(), + } + fw, err := zipWriter.CreateHeader(header) if err != nil { log.Errorf("Could not add zip entry for %s: %s", filename, err) continue diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 5f04e9b99a4..0d96692ba5f 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -6,6 +6,7 @@ import ( "fmt" _ "net/http/pprof" "os" + "path/filepath" "runtime" "runtime/pprof" "strings" @@ -14,6 +15,8 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" @@ -96,8 +99,8 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { buckets = leakybucket.NewBuckets() log.Infof("Loading %d scenario files", len(files)) - holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) + holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) if err != nil { return fmt.Errorf("scenario loading failed: %w", err) } @@ -230,6 +233,10 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo return nil, fmt.Errorf("while loading configuration file: %w", err) } + if err := trace.Init(filepath.Join(cConfig.ConfigPaths.DataDir, "trace")); err != nil { + return nil, fmt.Errorf("while setting up trace directory: %w", err) + } + cConfig.Common.LogLevel = newLogLevel(cConfig.Common.LogLevel, flags) if dumpFolder != "" { diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index c8ccd4d5d70..9da3d80106a 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -391,7 +391,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if cConfig.Common != nil && cConfig.Common.Daemonize { - csdaemon.NotifySystemd(log.StandardLogger()) + 
csdaemon.Notify(csdaemon.Ready, log.StandardLogger()) // wait for signals return HandleSignals(cConfig) } diff --git a/go.mod b/go.mod index 04f34e6485e..70d819a4059 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.6 + github.com/crowdsecurity/go-cs-lib v0.0.10 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 29e23f02ab9..750439e4f0e 100644 --- a/go.sum +++ b/go.sum @@ -102,8 +102,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.6 h1:Ef6MylXe0GaJE9vrfvxEdbHb31+JUP1os+murPz7Pos= -github.com/crowdsecurity/go-cs-lib v0.0.6/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.10 h1:Twt/y/rYCUspGY1zxDnGurL2svRSREAz+2+puLepd9c= +github.com/crowdsecurity/go-cs-lib v0.0.10/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 7989cfc1d97..6592c8bbf76 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -84,11 
+84,16 @@ func recoverFromPanic(c *gin.Context) { } if brokenPipe { - log.Warningf("client %s disconnected : %s", c.ClientIP(), err) + log.Warningf("client %s disconnected: %s", c.ClientIP(), err) c.Abort() } else { - filename := trace.WriteStackTrace(err) - log.Warningf("client %s error : %s", c.ClientIP(), err) + log.Warningf("client %s error: %s", c.ClientIP(), err) + + filename, err := trace.WriteStackTrace(err) + if err != nil { + log.Errorf("also while writing stacktrace: %s", err) + } + log.Warningf("stacktrace written to %s, please join to your issue", filename) c.AbortWithStatus(http.StatusInternalServerError) } From 718d1c54b2950653c3ab3bf20873c89f416be3ac Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 23 Apr 2024 11:15:27 +0200 Subject: [PATCH 095/581] pkg/database/decisiosn: remove filter parameter, which is always passed empty (#2954) --- .golangci.yml | 2 +- cmd/crowdsec/metrics.go | 12 ++-- pkg/database/decisions.go | 121 ++++++++++++++++++++++---------------- 3 files changed, 76 insertions(+), 59 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index cf13d9b6d8d..ff46ef1c02a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -58,7 +58,7 @@ linters-settings: min-complexity: 28 nlreturn: - block-size: 4 + block-size: 5 nolintlint: allow-unused: false # report any unused nolint directives diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index d670051cea0..cc0c118b436 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -3,7 +3,6 @@ package main import ( "fmt" "net/http" - "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -22,7 +21,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/parser" ) -/*prometheus*/ +// Prometheus + var globalParserHits = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_parser_hits_total", @@ -30,6 +30,7 @@ var globalParserHits = prometheus.NewCounterVec( }, 
[]string{"source", "type"}, ) + var globalParserHitsOk = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_parser_hits_ok_total", @@ -37,6 +38,7 @@ var globalParserHitsOk = prometheus.NewCounterVec( }, []string{"source", "type"}, ) + var globalParserHitsKo = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_parser_hits_ko_total", @@ -116,9 +118,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha return } - decisionsFilters := make(map[string][]string, 0) - - decisions, err := dbClient.QueryDecisionCountByScenario(decisionsFilters) + decisions, err := dbClient.QueryDecisionCountByScenario() if err != nil { log.Errorf("Error querying decisions for metrics: %v", err) next.ServeHTTP(w, r) @@ -139,7 +139,6 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha } alerts, err := dbClient.AlertsCountPerScenario(alertsFilter) - if err != nil { log.Errorf("Error querying alerts for metrics: %v", err) next.ServeHTTP(w, r) @@ -194,7 +193,6 @@ func servePrometheus(config *csconfig.PrometheusCfg, dbClient *database.Client, defer trace.CatchPanic("crowdsec/servePrometheus") http.Handle("/metrics", computeDynamicMetrics(promhttp.Handler(), dbClient)) - log.Debugf("serving metrics after %s ms", time.Since(crowdsecT0)) if err := http.ListenAndServe(fmt.Sprintf("%s:%d", config.ListenAddr, config.ListenPort), nil); err != nil { // in time machine, we most likely have the LAPI using the port diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 3175a916ff5..20a49c79143 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -37,6 +37,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] if v[0] == "false" { query = query.Where(decision.SimulatedEQ(false)) } + delete(filter, "simulated") } else { query = query.Where(decision.SimulatedEQ(false)) @@ -49,7 +50,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter 
map[string] if err != nil { return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } - case "scopes", "scope": //Swagger mentions both of them, let's just support both to make sure we don't break anything + case "scopes", "scope": // Swagger mentions both of them, let's just support both to make sure we don't break anything scopes := strings.Split(value[0], ",") for i, scope := range scopes { switch strings.ToLower(scope) { @@ -63,6 +64,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] scopes[i] = types.AS } } + query = query.Where(decision.ScopeIn(scopes...)) case "value": query = query.Where(decision.ValueEQ(value[0])) @@ -164,11 +166,11 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( return data, nil } -func (c *Client) QueryDecisionCountByScenario(filters map[string][]string) ([]*DecisionsByScenario, error) { +func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) - query, err := BuildDecisionRequestWithFilter(query, filters) + query, err := BuildDecisionRequestWithFilter(query, make(map[string][]string)) if err != nil { c.Log.Warningf("QueryDecisionCountByScenario : %s", err) @@ -277,10 +279,12 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[ decision.CreatedAtGT(since), decision.UntilGT(time.Now().UTC()), ) - //Allow a bouncer to ask for non-deduplicated results + + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) } + query, err := BuildDecisionRequestWithFilter(query, filters) if err != nil { c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) @@ -294,17 +298,20 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[ c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) 
return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String()) } + return data, nil } -func (c *Client) DeleteDecisionById(decisionId int) ([]*ent.Decision, error) { - toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionId)).All(c.CTX) +func (c *Client) DeleteDecisionById(decisionID int) ([]*ent.Decision, error) { + toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) if err != nil { c.Log.Warningf("DeleteDecisionById : %s", err) - return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionId) + return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) } + count, err := c.BulkDeleteDecisions(toDelete, false) c.Log.Debugf("deleted %d decisions", count) + return toDelete, err } @@ -317,6 +324,7 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, else, return bans that are *contained* by the given value (value is the outer) */ decisions := c.Ent.Decision.Query() + for param, value := range filter { switch param { case "contains": @@ -359,48 +367,48 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, } else if ip_sz == 16 { if contains { /*decision contains {start_ip,end_ip}*/ decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip < query.start_ip + // decision.start_ip < query.start_ip decision.StartIPLT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix <= query.start_suffix + // decision.start_suffix <= query.start_suffix decision.StartSuffixLTE(start_sfx), )), decision.Or( - //decision.end_ip > query.end_ip + // decision.end_ip > query.end_ip decision.EndIPGT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip 
decision.EndIPEQ(end_ip), - //decision.end_suffix >= query.end_suffix + // decision.end_suffix >= query.end_suffix decision.EndSuffixGTE(end_sfx), ), ), )) } else { decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip > query.start_ip + // decision.start_ip > query.start_ip decision.StartIPGT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix >= query.start_suffix + // decision.start_suffix >= query.start_suffix decision.StartSuffixGTE(start_sfx), )), decision.Or( - //decision.end_ip < query.end_ip + // decision.end_ip < query.end_ip decision.EndIPLT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix <= query.end_suffix + // decision.end_suffix <= query.end_suffix decision.EndSuffixLTE(end_sfx), ), ), @@ -415,11 +423,13 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, c.Log.Warningf("DeleteDecisionsWithFilter : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } + count, err := c.BulkDeleteDecisions(toDelete, false) if err != nil { c.Log.Warningf("While deleting decisions : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } + return strconv.Itoa(count), toDelete, nil } @@ -432,6 +442,7 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ decisions := c.Ent.Decision.Query().Where(decision.UntilGT(time.Now().UTC())) + for param, value := range filter { switch param { case "contains": @@ -480,24 +491,24 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter 
map[string][]string) (stri /*decision contains {start_ip,end_ip}*/ if contains { decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip < query.start_ip + // decision.start_ip < query.start_ip decision.StartIPLT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix <= query.start_suffix + // decision.start_suffix <= query.start_suffix decision.StartSuffixLTE(start_sfx), )), decision.Or( - //decision.end_ip > query.end_ip + // decision.end_ip > query.end_ip decision.EndIPGT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix >= query.end_suffix + // decision.end_suffix >= query.end_suffix decision.EndSuffixGTE(end_sfx), ), ), @@ -505,24 +516,24 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri } else { /*decision is contained within {start_ip,end_ip}*/ decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip > query.start_ip + // decision.start_ip > query.start_ip decision.StartIPGT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix >= query.start_suffix + // decision.start_suffix >= query.start_suffix decision.StartSuffixGTE(start_sfx), )), decision.Or( - //decision.end_ip < query.end_ip + // decision.end_ip < query.end_ip decision.EndIPLT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix <= query.end_suffix + // decision.end_suffix <= query.end_suffix decision.EndSuffixLTE(end_sfx), ), ), @@ -531,6 +542,7 @@ func (c *Client) 
SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri } else if ip_sz != 0 { return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } + DecisionsToDelete, err := decisions.All(c.CTX) if err != nil { c.Log.Warningf("SoftDeleteDecisionsWithFilter : %s", err) @@ -541,13 +553,14 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri if err != nil { return "0", nil, errors.Wrapf(DeleteFail, "soft delete decisions with provided filter : %s", err) } + return strconv.Itoa(count), DecisionsToDelete, err } -// BulkDeleteDecisions set the expiration of a bulk of decisions to now() or hard deletes them. +// BulkDeleteDecisions sets the expiration of a bulk of decisions to now() or hard deletes them. // We are doing it this way so we can return impacted decisions for sync with CAPI/PAPI func (c *Client) BulkDeleteDecisions(decisionsToDelete []*ent.Decision, softDelete bool) (int, error) { - const bulkSize = 256 //scientifically proven to be the best value for bulk delete + const bulkSize = 256 // scientifically proven to be the best value for bulk delete var ( nbUpdates int @@ -576,6 +589,7 @@ func (c *Client) BulkDeleteDecisions(decisionsToDelete []*ent.Decision, softDele return totalUpdates, fmt.Errorf("hard delete decisions with provided filter: %w", err) } } + totalUpdates += nbUpdates } @@ -612,6 +626,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { contains := true decisions := c.Ent.Decision.Query() + decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") @@ -667,6 +682,7 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz decision.IPSizeEQ(int64(ip_sz)), )) } + return decisions, nil } @@ -674,24 +690,24 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz /*decision contains 
{start_ip,end_ip}*/ if contains { decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip < query.start_ip + // decision.start_ip < query.start_ip decision.StartIPLT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix <= query.start_suffix + // decision.start_suffix <= query.start_suffix decision.StartSuffixLTE(start_sfx), )), decision.Or( - //decision.end_ip > query.end_ip + // decision.end_ip > query.end_ip decision.EndIPGT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix >= query.end_suffix + // decision.end_suffix >= query.end_suffix decision.EndSuffixGTE(end_sfx), ), ), @@ -699,29 +715,30 @@ func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz } else { /*decision is contained within {start_ip,end_ip}*/ decisions = decisions.Where(decision.And( - //matching addr size + // matching addr size decision.IPSizeEQ(int64(ip_sz)), decision.Or( - //decision.start_ip > query.start_ip + // decision.start_ip > query.start_ip decision.StartIPGT(start_ip), decision.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip decision.StartIPEQ(start_ip), - //decision.start_suffix >= query.start_suffix + // decision.start_suffix >= query.start_suffix decision.StartSuffixGTE(start_sfx), )), decision.Or( - //decision.end_ip < query.end_ip + // decision.end_ip < query.end_ip decision.EndIPLT(end_ip), decision.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip decision.EndIPEQ(end_ip), - //decision.end_suffix <= query.end_suffix + // decision.end_suffix <= query.end_suffix decision.EndSuffixLTE(end_sfx), ), ), )) } + return decisions, nil } @@ -735,8 +752,10 @@ func applyStartIpEndIpFilter(decisions 
*ent.DecisionQuery, contains bool, ip_sz func decisionPredicatesFromStr(s string, predicateFunc func(string) predicate.Decision) []predicate.Decision { words := strings.Split(s, ",") predicates := make([]predicate.Decision, len(words)) + for i, word := range words { predicates[i] = predicateFunc(word) } + return predicates } From c64332d30abf725e0233bbd89874cec4bc8b0419 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 23 Apr 2024 12:28:38 +0200 Subject: [PATCH 096/581] cscli config show: avoid globals, use yaml v3 (#2863) * cscli config show: avoid globals, use yaml v3 * lint (whitespace/errors) --- cmd/crowdsec-cli/alerts.go | 11 ++++--- cmd/crowdsec-cli/config_show.go | 46 +++++++++++++++------------- cmd/crowdsec-cli/console.go | 16 +++++----- cmd/crowdsec-cli/copyfile.go | 3 +- cmd/crowdsec-cli/decisions.go | 9 +++--- cmd/crowdsec-cli/decisions_import.go | 12 ++++---- cmd/crowdsec-cli/doc.go | 2 ++ cmd/crowdsec-cli/explain.go | 8 +++-- 8 files changed, 60 insertions(+), 47 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 908466f9eb2..59dff809458 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -4,6 +4,7 @@ import ( "context" "encoding/csv" "encoding/json" + "errors" "fmt" "net/url" "os" @@ -204,6 +205,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { if err != nil { return fmt.Errorf("parsing api url %s: %w", apiURL, err) } + cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), @@ -211,7 +213,6 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { URL: apiURL, VersionPrefix: "v1", }) - if err != nil { return fmt.Errorf("new api client: %w", err) } @@ -229,7 +230,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { } func (cli *cliAlerts) NewListCmd() *cobra.Command { - var alertListFilter = apiclient.AlertsListOpts{ + alertListFilter 
:= apiclient.AlertsListOpts{ ScopeEquals: new(string), ValueEquals: new(string), ScenarioEquals: new(string), @@ -363,7 +364,7 @@ func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { delAlertByID string ) - var alertDeleteFilter = apiclient.AlertsDeleteOpts{ + alertDeleteFilter := apiclient.AlertsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), ScenarioEquals: new(string), @@ -391,7 +392,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, *alertDeleteFilter.ScenarioEquals == "" && *alertDeleteFilter.IPEquals == "" && *alertDeleteFilter.RangeEquals == "" && delAlertByID == "" { _ = cmd.Usage() - return fmt.Errorf("at least one filter or --all must be specified") + return errors.New("at least one filter or --all must be specified") } return nil @@ -478,7 +479,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { cfg := cli.cfg() if len(args) == 0 { printHelp(cmd) - return fmt.Errorf("missing alert_id") + return errors.New("missing alert_id") } for _, alertID := range args { id, err := strconv.Atoi(alertID) diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index c277173c387..c7138c98e33 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -10,13 +10,15 @@ import ( "github.com/sanity-io/litter" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" ) -func showConfigKey(key string) error { +func (cli *cliConfig) showKey(key string) error { + cfg := cli.cfg() + type Env struct { Config *csconfig.Config } @@ -30,15 +32,15 @@ func showConfigKey(key string) error { return err } - output, err := expr.Run(program, Env{Config: csConfig}) + output, err := expr.Run(program, Env{Config: cfg}) if err != nil { return err } - switch csConfig.Cscli.Output { + switch cfg.Cscli.Output { case "human", "raw": // Don't use litter for strings, it adds quotes - // 
that we didn't have before + // that would break compatibility with previous versions switch output.(type) { case string: fmt.Println(output) @@ -51,13 +53,14 @@ func showConfigKey(key string) error { return fmt.Errorf("failed to marshal configuration: %w", err) } - fmt.Printf("%s\n", string(data)) + fmt.Println(string(data)) } return nil } -var configShowTemplate = `Global: +func (cli *cliConfig) template() string { + return `Global: {{- if .ConfigPaths }} - Configuration Folder : {{.ConfigPaths.ConfigDir}} @@ -182,19 +185,11 @@ Central API: {{- end }} {{- end }} ` +} -func (cli *cliConfig) show(key string) error { +func (cli *cliConfig) show() error { cfg := cli.cfg() - if err := cfg.LoadAPIClient(); err != nil { - log.Errorf("failed to load API client configuration: %s", err) - // don't return, we can still show the configuration - } - - if key != "" { - return showConfigKey(key) - } - switch cfg.Cscli.Output { case "human": // The tests on .Enable look funny because the option has a true default which has @@ -205,7 +200,7 @@ func (cli *cliConfig) show(key string) error { "ValueBool": func(b *bool) bool { return b != nil && *b }, } - tmp, err := template.New("config").Funcs(funcs).Parse(configShowTemplate) + tmp, err := template.New("config").Funcs(funcs).Parse(cli.template()) if err != nil { return err } @@ -220,14 +215,14 @@ func (cli *cliConfig) show(key string) error { return fmt.Errorf("failed to marshal configuration: %w", err) } - fmt.Printf("%s\n", string(data)) + fmt.Println(string(data)) case "raw": data, err := yaml.Marshal(cfg) if err != nil { return fmt.Errorf("failed to marshal configuration: %w", err) } - fmt.Printf("%s\n", string(data)) + fmt.Println(string(data)) } return nil @@ -243,7 +238,16 @@ func (cli *cliConfig) newShowCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.show(key) + if err := cli.cfg().LoadAPIClient(); err != nil { + log.Errorf("failed to 
load API client configuration: %s", err) + // don't return, we can still show the configuration + } + + if key != "" { + return cli.showKey(key) + } + + return cli.show() }, } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index b1912825c06..9e881a43f53 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -4,9 +4,11 @@ import ( "context" "encoding/csv" "encoding/json" + "errors" "fmt" "net/url" "os" + "strconv" "strings" "github.com/fatih/color" @@ -36,7 +38,7 @@ func NewCLIConsole(cfg configGetter) *cliConsole { } func (cli *cliConsole) NewCommand() *cobra.Command { - var cmd = &cobra.Command{ + cmd := &cobra.Command{ Use: "console [action]", Short: "Manage interaction with Crowdsec console (https://app.crowdsec.net)", Args: cobra.MinimumNArgs(1), @@ -203,7 +205,7 @@ Enable given information push to the central API. Allows to empower the console` log.Infof("All features have been enabled successfully") } else { if len(args) == 0 { - return fmt.Errorf("you must specify at least one feature to enable") + return errors.New("you must specify at least one feature to enable") } if err := cli.setConsoleOpts(args, true); err != nil { return err @@ -288,11 +290,11 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command { } rows := [][]string{ - {csconfig.SEND_MANUAL_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareManualDecisions)}, - {csconfig.SEND_CUSTOM_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareCustomScenarios)}, - {csconfig.SEND_TAINTED_SCENARIOS, fmt.Sprintf("%t", *consoleCfg.ShareTaintedScenarios)}, - {csconfig.SEND_CONTEXT, fmt.Sprintf("%t", *consoleCfg.ShareContext)}, - {csconfig.CONSOLE_MANAGEMENT, fmt.Sprintf("%t", *consoleCfg.ConsoleManagement)}, + {csconfig.SEND_MANUAL_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareManualDecisions)}, + {csconfig.SEND_CUSTOM_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareCustomScenarios)}, + {csconfig.SEND_TAINTED_SCENARIOS, strconv.FormatBool(*consoleCfg.ShareTaintedScenarios)}, 
+ {csconfig.SEND_CONTEXT, strconv.FormatBool(*consoleCfg.ShareContext)}, + {csconfig.CONSOLE_MANAGEMENT, strconv.FormatBool(*consoleCfg.ConsoleManagement)}, } for _, row := range rows { err = csvwriter.Write(row) diff --git a/cmd/crowdsec-cli/copyfile.go b/cmd/crowdsec-cli/copyfile.go index 332f744be80..272fb3f7851 100644 --- a/cmd/crowdsec-cli/copyfile.go +++ b/cmd/crowdsec-cli/copyfile.go @@ -9,7 +9,6 @@ import ( log "github.com/sirupsen/logrus" ) - /*help to copy the file, ioutil doesn't offer the feature*/ func copyFileContents(src, dst string) (err error) { @@ -69,6 +68,7 @@ func CopyFile(sourceSymLink, destinationFile string) error { if !(destinationFileStat.Mode().IsRegular()) { return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) } + if os.SameFile(sourceFileStat, destinationFileStat) { return err } @@ -80,4 +80,3 @@ func CopyFile(sourceSymLink, destinationFile string) error { return err } - diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index a97536ddc14..3fb790633b5 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -4,6 +4,7 @@ import ( "context" "encoding/csv" "encoding/json" + "errors" "fmt" "net/url" "os" @@ -346,7 +347,7 @@ cscli decisions add --scope username --value foobar addScope = types.Range } else if addValue == "" { printHelp(cmd) - return fmt.Errorf("missing arguments, a value is required (--ip, --range or --scope and --value)") + return errors.New("missing arguments, a value is required (--ip, --range or --scope and --value)") } if addReason == "" { @@ -371,7 +372,7 @@ cscli decisions add --scope username --value foobar Scenario: &addReason, ScenarioVersion: &empty, Simulated: &simulated, - //setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes + // setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes 
Source: &models.Source{ AsName: empty, AsNumber: empty, @@ -411,7 +412,7 @@ cscli decisions add --scope username --value foobar } func (cli *cliDecisions) newDeleteCmd() *cobra.Command { - var delFilter = apiclient.DecisionsDeleteOpts{ + delFilter := apiclient.DecisionsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), TypeEquals: new(string), @@ -448,7 +449,7 @@ cscli decisions delete --origin lists --scenario list_name *delFilter.RangeEquals == "" && *delFilter.ScenarioEquals == "" && *delFilter.OriginEquals == "" && delDecisionID == "" { cmd.Usage() - return fmt.Errorf("at least one filter or --all must be specified") + return errors.New("at least one filter or --all must be specified") } return nil diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/decisions_import.go index 45d1841a603..8c36bd5dc92 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/decisions_import.go @@ -5,6 +5,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "os" @@ -81,7 +82,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if defaultDuration == "" { - return fmt.Errorf("--duration cannot be empty") + return errors.New("--duration cannot be empty") } defaultScope, err := flags.GetString("scope") @@ -90,7 +91,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if defaultScope == "" { - return fmt.Errorf("--scope cannot be empty") + return errors.New("--scope cannot be empty") } defaultReason, err := flags.GetString("reason") @@ -99,7 +100,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if defaultReason == "" { - return fmt.Errorf("--reason cannot be empty") + return errors.New("--reason cannot be empty") } defaultType, err := flags.GetString("type") @@ -108,7 +109,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if defaultType == "" { - return fmt.Errorf("--type cannot be empty") + 
return errors.New("--type cannot be empty") } batchSize, err := flags.GetInt("batch") @@ -136,7 +137,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if format == "" { - return fmt.Errorf("unable to guess format from file extension, please provide a format with --format flag") + return errors.New("unable to guess format from file extension, please provide a format with --format flag") } if input == "-" { @@ -235,7 +236,6 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { return nil } - func (cli *cliDecisions) newImportCmd() *cobra.Command { cmd := &cobra.Command{ Use: "import [options]", diff --git a/cmd/crowdsec-cli/doc.go b/cmd/crowdsec-cli/doc.go index a4896f3da30..4b1d50d1583 100644 --- a/cmd/crowdsec-cli/doc.go +++ b/cmd/crowdsec-cli/doc.go @@ -39,8 +39,10 @@ id: %s title: %s --- ` + name := filepath.Base(filename) base := strings.TrimSuffix(name, filepath.Ext(name)) + return fmt.Sprintf(header, base, strings.ReplaceAll(base, "_", " ")) } diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/explain.go index ce323fd0ce1..c322cce47fe 100644 --- a/cmd/crowdsec-cli/explain.go +++ b/cmd/crowdsec-cli/explain.go @@ -83,7 +83,7 @@ tail -n 5 myfile.log | cscli explain --type nginx -f - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { fileInfo, _ := os.Stdin.Stat() if cli.flags.logFile == "-" && ((fileInfo.Mode() & os.ModeCharDevice) == os.ModeCharDevice) { - return fmt.Errorf("the option -f - is intended to work with pipes") + return errors.New("the option -f - is intended to work with pipes") } return nil @@ -160,18 +160,22 @@ func (cli *cliExplain) run() error { } else if logFile == "-" { reader := bufio.NewReader(os.Stdin) errCount := 0 + for { input, err := reader.ReadBytes('\n') if err != nil && errors.Is(err, io.EOF) { break } + if len(input) > 1 { _, err = f.Write(input) } + if err != nil || len(input) <= 1 { errCount++ } } + if errCount > 0 { log.Warnf("Failed to write %d 
lines to %s", errCount, tmpFile) } @@ -207,7 +211,7 @@ func (cli *cliExplain) run() error { } if dsn == "" { - return fmt.Errorf("no acquisition (--file or --dsn) provided, can't run cscli test") + return errors.New("no acquisition (--file or --dsn) provided, can't run cscli test") } cmdArgs := []string{"-c", ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} From 725cae1fa88fd533045b902e0ff4372936dcf8cf Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 23 Apr 2024 12:41:50 +0200 Subject: [PATCH 097/581] CI: upload coverage with token (#2958) --- .github/workflows/bats-sqlite-coverage.yml | 1 + .github/workflows/go-tests-windows.yml | 1 + .github/workflows/go-tests.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index d56d69f28b2..742d1ee65ff 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -81,3 +81,4 @@ jobs: with: files: ./coverage-bats.out flags: bats + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 781f2a4a930..9d5c1739745 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -52,6 +52,7 @@ jobs: with: files: coverage.out flags: unit-windows + token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint uses: golangci/golangci-lint-action@v4 diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 67f73d81a45..4eac3777df9 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -153,6 +153,7 @@ jobs: with: files: coverage.out flags: unit-linux + token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint uses: golangci/golangci-lint-action@v4 From 97e6588a45f23931c64f83b584a9af7b3e58db80 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> 
Date: Wed, 24 Apr 2024 10:05:55 +0200 Subject: [PATCH 098/581] cscli hub items: avoid global (#2960) * cscli hub items: avoid global * lint (whitespace, errors) * lint --- cmd/crowdsec-cli/hubappsec.go | 12 ++++---- cmd/crowdsec-cli/hubcollection.go | 3 +- cmd/crowdsec-cli/hubcontext.go | 3 +- cmd/crowdsec-cli/hubparser.go | 3 +- cmd/crowdsec-cli/hubpostoverflow.go | 3 +- cmd/crowdsec-cli/hubscenario.go | 3 +- cmd/crowdsec-cli/hubtest.go | 3 +- cmd/crowdsec-cli/item_metrics.go | 30 +++++++++++++++++++ cmd/crowdsec-cli/itemcli.go | 46 ++++++++++++++++++----------- cmd/crowdsec-cli/items.go | 8 ++--- cmd/crowdsec-cli/lapi.go | 3 +- cmd/crowdsec-cli/main.go | 14 ++++----- 12 files changed, 90 insertions(+), 41 deletions(-) diff --git a/cmd/crowdsec-cli/hubappsec.go b/cmd/crowdsec-cli/hubappsec.go index ff41ad5f9ad..7ee578edc2f 100644 --- a/cmd/crowdsec-cli/hubappsec.go +++ b/cmd/crowdsec-cli/hubappsec.go @@ -13,8 +13,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIAppsecConfig() *cliItem { +func NewCLIAppsecConfig(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.APPSEC_CONFIGS, singular: "appsec-config", oneOrMore: "appsec-config(s)", @@ -46,7 +47,7 @@ cscli appsec-configs list crowdsecurity/vpatch`, } } -func NewCLIAppsecRule() *cliItem { +func NewCLIAppsecRule(cfg configGetter) *cliItem { inspectDetail := func(item *cwhub.Item) error { // Only show the converted rules in human mode if csConfig.Cscli.Output != "human" { @@ -57,11 +58,11 @@ func NewCLIAppsecRule() *cliItem { yamlContent, err := os.ReadFile(item.State.LocalPath) if err != nil { - return fmt.Errorf("unable to read file %s : %s", item.State.LocalPath, err) + return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err) } if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { - return fmt.Errorf("unable to unmarshal yaml file %s : %s", item.State.LocalPath, err) + return fmt.Errorf("unable to unmarshal yaml file %s: %w", 
item.State.LocalPath, err) } for _, ruleType := range appsec_rule.SupportedTypes() { @@ -70,7 +71,7 @@ func NewCLIAppsecRule() *cliItem { for _, rule := range appsecRule.Rules { convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) if err != nil { - return fmt.Errorf("unable to convert rule %s : %s", rule.Name, err) + return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) } fmt.Println(convertedRule) @@ -88,6 +89,7 @@ func NewCLIAppsecRule() *cliItem { } return &cliItem{ + cfg: cfg, name: "appsec-rules", singular: "appsec-rule", oneOrMore: "appsec-rule(s)", diff --git a/cmd/crowdsec-cli/hubcollection.go b/cmd/crowdsec-cli/hubcollection.go index dee9a0b9e66..655b36eb1b8 100644 --- a/cmd/crowdsec-cli/hubcollection.go +++ b/cmd/crowdsec-cli/hubcollection.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLICollection() *cliItem { +func NewCLICollection(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.COLLECTIONS, singular: "collection", oneOrMore: "collection(s)", diff --git a/cmd/crowdsec-cli/hubcontext.go b/cmd/crowdsec-cli/hubcontext.go index 630dbb2f7b6..2a777327379 100644 --- a/cmd/crowdsec-cli/hubcontext.go +++ b/cmd/crowdsec-cli/hubcontext.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIContext() *cliItem { +func NewCLIContext(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.CONTEXTS, singular: "context", oneOrMore: "context(s)", diff --git a/cmd/crowdsec-cli/hubparser.go b/cmd/crowdsec-cli/hubparser.go index 0b224c8a7f6..cc856cbedb9 100644 --- a/cmd/crowdsec-cli/hubparser.go +++ b/cmd/crowdsec-cli/hubparser.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIParser() *cliItem { +func NewCLIParser(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.PARSERS, singular: "parser", oneOrMore: "parser(s)", diff --git a/cmd/crowdsec-cli/hubpostoverflow.go 
b/cmd/crowdsec-cli/hubpostoverflow.go index 908ccbea0fd..3fd45fd113d 100644 --- a/cmd/crowdsec-cli/hubpostoverflow.go +++ b/cmd/crowdsec-cli/hubpostoverflow.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIPostOverflow() *cliItem { +func NewCLIPostOverflow(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.POSTOVERFLOWS, singular: "postoverflow", oneOrMore: "postoverflow(s)", diff --git a/cmd/crowdsec-cli/hubscenario.go b/cmd/crowdsec-cli/hubscenario.go index 1de2182bfc5..4434b9a2c45 100644 --- a/cmd/crowdsec-cli/hubscenario.go +++ b/cmd/crowdsec-cli/hubscenario.go @@ -4,8 +4,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIScenario() *cliItem { +func NewCLIScenario(cfg configGetter) *cliItem { return &cliItem{ + cfg: cfg, name: cwhub.SCENARIOS, singular: "scenario", oneOrMore: "scenario(s)", diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index d6ed4560056..51735ce19ce 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -135,6 +135,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios // create empty nuclei template file nucleiFileName := fmt.Sprintf("%s.yaml", testName) nucleiFilePath := filepath.Join(testPath, nucleiFileName) + nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0755) if err != nil { return err @@ -405,7 +406,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { } func (cli *cliHubTest) NewCleanCmd() *cobra.Command { - var cmd = &cobra.Command{ + cmd := &cobra.Command{ Use: "clean", Short: "clean [test_name]", Args: cobra.MinimumNArgs(1), diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/item_metrics.go index e6f27ae5d0d..b571fb1c5ed 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/item_metrics.go @@ -37,6 +37,7 @@ func ShowMetrics(hubItem *cwhub.Item) error { appsecMetricsTable(color.Output, hubItem.Name, metrics) default: // no 
metrics for this item type } + return nil } @@ -49,21 +50,27 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { if !strings.HasPrefix(fam.Name, "cs_") { continue } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { metric, ok := m.(prom2json.Metric) if !ok { log.Debugf("failed to convert metric to prom2json.Metric") continue } + name, ok := metric.Labels["name"] if !ok { log.Debugf("no name in Metric %v", metric.Labels) } + if name != itemName { continue } + source, ok := metric.Labels["source"] + if !ok { log.Debugf("no source in Metric %v", metric.Labels) } else { @@ -71,12 +78,15 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { source = srctype + ":" + source } } + value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) continue } + ival := int(fval) switch fam.Name { @@ -119,6 +129,7 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { } } } + return stats } @@ -136,26 +147,34 @@ func GetScenarioMetric(url string, itemName string) map[string]int { if !strings.HasPrefix(fam.Name, "cs_") { continue } + log.Tracef("round %d", idx) + for _, m := range fam.Metrics { metric, ok := m.(prom2json.Metric) if !ok { log.Debugf("failed to convert metric to prom2json.Metric") continue } + name, ok := metric.Labels["name"] + if !ok { log.Debugf("no name in Metric %v", metric.Labels) } + if name != itemName { continue } + value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) continue } + ival := int(fval) switch fam.Name { @@ -174,6 +193,7 @@ func GetScenarioMetric(url string, itemName string) map[string]int { } } } + return stats } @@ -188,17 +208,22 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { if !strings.HasPrefix(fam.Name, "cs_") { continue } + 
log.Tracef("round %d", idx) + for _, m := range fam.Metrics { metric, ok := m.(prom2json.Metric) if !ok { log.Debugf("failed to convert metric to prom2json.Metric") continue } + name, ok := metric.Labels["rule_name"] + if !ok { log.Debugf("no rule_name in Metric %v", metric.Labels) } + if name != itemName { continue } @@ -209,11 +234,13 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { } value := m.(prom2json.Metric).Value + fval, err := strconv.ParseFloat(value, 32) if err != nil { log.Errorf("Unexpected int value %s : %s", value, err) continue } + ival := int(fval) switch fam.Name { @@ -231,6 +258,7 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { } } } + return stats } @@ -247,6 +275,7 @@ func GetPrometheusMetric(url string) []*prom2json.Family { go func() { defer trace.CatchPanic("crowdsec/GetPrometheusMetric") + err := prom2json.FetchMetricFamilies(url, mfChan, transport) if err != nil { log.Fatalf("failed to fetch prometheus metrics : %v", err) @@ -257,6 +286,7 @@ func GetPrometheusMetric(url string) []*prom2json.Family { for mf := range mfChan { result = append(result, prom2json.NewFamily(mf)) } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) return result diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 4f3dc40ae04..c2614068fd7 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "os" "strings" @@ -28,6 +29,7 @@ type cliHelp struct { } type cliItem struct { + cfg configGetter name string // plural, as used in the hub index singular string oneOrMore string // parenthetical pluralizaion: "parser(s)" @@ -61,7 +63,9 @@ func (cli cliItem) NewCommand() *cobra.Command { } func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreError bool) error { - hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) + cfg := cli.cfg() + + hub, err 
:= require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger()) if err != nil { return err } @@ -71,7 +75,7 @@ func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreE if item == nil { msg := suggestNearestMessage(hub, cli.name, name) if !ignoreError { - return fmt.Errorf(msg) + return errors.New(msg) } log.Errorf(msg) @@ -107,10 +111,10 @@ func (cli cliItem) newInstallCmd() *cobra.Command { Example: cli.installHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compAllItems(cli.name, args, toComplete) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.install(args, downloadOnly, force, ignoreError) }, } @@ -137,7 +141,7 @@ func istalledParentNames(item *cwhub.Item) []string { } func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error { - hub, err := require.Hub(csConfig, nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) if err != nil { return err } @@ -163,6 +167,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error if didRemove { log.Infof("Removed %s", item.Name) + removed++ } } @@ -204,6 +209,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error if didRemove { log.Infof("Removed %s", item.Name) + removed++ } } @@ -231,10 +237,10 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { Example: cli.removeHelp.example, Aliases: []string{"delete"}, DisableAutoGenTag: true, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, 
cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.remove(args, purge, force, all) }, } @@ -248,7 +254,9 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { } func (cli cliItem) upgrade(args []string, force bool, all bool) error { - hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger()) if err != nil { return err } @@ -300,6 +308,7 @@ func (cli cliItem) upgrade(args []string, force bool, all bool) error { if didUpdate { log.Infof("Updated %s", item.Name) + updated++ } } @@ -323,10 +332,10 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { Long: coalesce.String(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), Example: cli.upgradeHelp.example, DisableAutoGenTag: true, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.upgrade(args, force, all) }, } @@ -339,21 +348,23 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { } func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMetrics bool) error { + cfg := cli.cfg() + if rev && !diff { - return fmt.Errorf("--rev can only be used with --diff") + return errors.New("--rev can only be used with --diff") } if url != "" { - csConfig.Cscli.PrometheusUrl = url + cfg.Cscli.PrometheusUrl = url } remote := (*cwhub.RemoteHubCfg)(nil) if diff { - remote = require.RemoteHub(csConfig) + remote = require.RemoteHub(cfg) } - 
hub, err := require.Hub(csConfig, remote, log.StandardLogger()) + hub, err := require.Hub(cfg, remote, log.StandardLogger()) if err != nil { return err } @@ -399,10 +410,10 @@ func (cli cliItem) newInspectCmd() *cobra.Command { Example: cli.inspectHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete) }, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.inspect(args, url, diff, rev, noMetrics) }, } @@ -417,7 +428,7 @@ func (cli cliItem) newInspectCmd() *cobra.Command { } func (cli cliItem) list(args []string, all bool) error { - hub, err := require.Hub(csConfig, nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) if err != nil { return err } @@ -526,6 +537,7 @@ func (cli cliItem) whyTainted(hub *cwhub.Hub, item *cwhub.Item, reverse bool) st // hack: avoid message "item is tainted by itself" continue } + ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList)) } } diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index ea6d8a25631..b8c83809dec 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -116,7 +116,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item } if err := csvwriter.Write(header); err != nil { - return fmt.Errorf("failed to write header: %s", err) + return fmt.Errorf("failed to write header: %w", err) } for _, itemType := range itemTypes { @@ -132,7 +132,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item } if err := csvwriter.Write(row); err != nil { - return fmt.Errorf("failed to write raw output: %s", err) + return 
fmt.Errorf("failed to write raw output: %w", err) } } } @@ -150,12 +150,12 @@ func inspectItem(item *cwhub.Item, showMetrics bool) error { enc.SetIndent(2) if err := enc.Encode(item); err != nil { - return fmt.Errorf("unable to encode item: %s", err) + return fmt.Errorf("unable to encode item: %w", err) } case "json": b, err := json.MarshalIndent(*item, "", " ") if err != nil { - return fmt.Errorf("unable to marshal item: %s", err) + return fmt.Errorf("unable to marshal item: %w", err) } fmt.Print(string(b)) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 13a9d8d7e77..51f372cc2d1 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -116,7 +116,6 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e URL: apiurl, VersionPrefix: LAPIURLPrefix, }, nil) - if err != nil { return fmt.Errorf("api client register: %w", err) } @@ -585,7 +584,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { } func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { - var ret = make([]string, 0) + ret := make([]string, 0) for _, subnode := range node.LeavesNodes { if subnode.Grok.RunTimeRegexp != nil { diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 9e721f1fac6..0705faa4065 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -260,13 +260,13 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport().NewCommand()) cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICollection().NewCommand()) - cmd.AddCommand(NewCLIParser().NewCommand()) - cmd.AddCommand(NewCLIScenario().NewCommand()) - cmd.AddCommand(NewCLIPostOverflow().NewCommand()) - cmd.AddCommand(NewCLIContext().NewCommand()) - cmd.AddCommand(NewCLIAppsecConfig().NewCommand()) - cmd.AddCommand(NewCLIAppsecRule().NewCommand()) + 
cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIScenario(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIPostOverflow(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIContext(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIAppsecConfig(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) if fflag.CscliSetup.IsEnabled() { cmd.AddCommand(NewSetupCmd()) From 0f942a95f112bc0ad4efdcb18b33c0d1d20e44fb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Apr 2024 11:09:37 +0200 Subject: [PATCH 099/581] pkg/cwhub - rename methods for clarity (#2961) * pkg/cwhub - rename methods for clarity * lint --- cmd/crowdsec-cli/capi.go | 2 +- cmd/crowdsec-cli/console.go | 2 +- cmd/crowdsec-cli/hub.go | 4 ++-- cmd/crowdsec-cli/item_suggest.go | 2 +- cmd/crowdsec-cli/itemcli.go | 6 +++--- cmd/crowdsec-cli/items.go | 2 +- cmd/crowdsec-cli/lapi.go | 2 +- cmd/crowdsec-cli/notifications.go | 13 +++++++------ cmd/crowdsec-cli/papi.go | 20 ++++++++------------ cmd/crowdsec-cli/require/require.go | 11 ++++++----- cmd/crowdsec-cli/setup.go | 9 ++++++++- cmd/crowdsec-cli/simulation.go | 23 +++++++++++++---------- cmd/crowdsec-cli/support.go | 2 +- cmd/crowdsec/lapiclient.go | 8 ++++---- pkg/alertcontext/config.go | 2 +- pkg/cwhub/hub.go | 18 +++++++++--------- 16 files changed, 67 insertions(+), 59 deletions(-) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index e56a8a74707..ec11acbbec4 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -175,7 +175,7 @@ func (cli *cliCapi) status() error { return err } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return fmt.Errorf("failed to get scenarios: %w", err) } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 9e881a43f53..4aba689aa05 100644 --- 
a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -103,7 +103,7 @@ After running this command your will need to validate the enrollment in the weba return err } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return fmt.Errorf("failed to get installed scenarios: %w", err) } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 600e56889f7..71347a5a57b 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliHub struct { +type cliHub struct{ cfg configGetter } @@ -137,7 +137,7 @@ func (cli *cliHub) upgrade(force bool) error { } for _, itemType := range cwhub.ItemTypes { - items, err := hub.GetInstalledItems(itemType) + items, err := hub.GetInstalledItemsByType(itemType) if err != nil { return err } diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/item_suggest.go index d3beee72100..9272abedd50 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ b/cmd/crowdsec-cli/item_suggest.go @@ -61,7 +61,7 @@ func compInstalledItems(itemType string, args []string, toComplete string) ([]st return nil, cobra.ShellCompDirectiveDefault } - items, err := hub.GetInstalledItemNames(itemType) + items, err := hub.GetInstalledNamesByType(itemType) if err != nil { cobra.CompDebugln(fmt.Sprintf("list installed %s err: %s", itemType, err), true) return nil, cobra.ShellCompDirectiveDefault diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index c2614068fd7..cdaf7877054 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -147,9 +147,9 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error } if all { - getter := hub.GetInstalledItems + getter := hub.GetInstalledItemsByType if purge { - getter = hub.GetAllItems + getter = hub.GetItemsByType } items, err := getter(cli.name) @@ -262,7 +262,7 
@@ func (cli cliItem) upgrade(args []string, force bool, all bool) error { } if all { - items, err := hub.GetInstalledItems(cli.name) + items, err := hub.GetInstalledItemsByType(cli.name) if err != nil { return err } diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index b8c83809dec..4ae643151f2 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -17,7 +17,7 @@ import ( // selectItems returns a slice of items of a given type, selected by name and sorted by case-insensitive name func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { - itemNames := hub.GetItemNames(itemType) + itemNames := hub.GetNamesByType(itemType) notExist := []string{} diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 51f372cc2d1..99c3ee45a13 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -56,7 +56,7 @@ func (cli *cliLapi) status() error { return err } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return fmt.Errorf("failed to get scenarios: %w", err) } diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index f12333a3942..0b5ee537806 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -4,6 +4,7 @@ import ( "context" "encoding/csv" "encoding/json" + "errors" "fmt" "io/fs" "net/url" @@ -88,7 +89,7 @@ func (cli *cliNotifications) getPluginConfigs() (map[string]csplugin.PluginConfi return fmt.Errorf("error while traversing directory %s: %w", path, err) } - name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) //Avoid calling info.Name() twice + name := filepath.Join(cfg.ConfigPaths.NotificationDir, info.Name()) // Avoid calling info.Name() twice if (strings.HasSuffix(name, "yaml") || strings.HasSuffix(name, "yml")) && !(info.IsDir()) { ts, err := csplugin.ParsePluginConfigFile(name) 
if err != nil { @@ -266,7 +267,7 @@ func (cli *cliNotifications) NewTestCmd() *cobra.Command { if !ok { return fmt.Errorf("plugin name: '%s' does not exist", args[0]) } - //Create a single profile with plugin name as notification name + // Create a single profile with plugin name as notification name return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{ { Notifications: []string{ @@ -320,8 +321,8 @@ func (cli *cliNotifications) NewTestCmd() *cobra.Command { Alert: alert, } - //time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent - pluginTomb.Kill(fmt.Errorf("terminating")) + // time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent + pluginTomb.Kill(errors.New("terminating")) pluginTomb.Wait() return nil @@ -416,8 +417,8 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not break } } - //time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent - pluginTomb.Kill(fmt.Errorf("terminating")) + // time.Sleep(2 * time.Second) // There's no mechanism to ensure notification has been sent + pluginTomb.Kill(errors.New("terminating")) pluginTomb.Wait() return nil diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index e18af94d4bb..5808fcce5f6 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -64,25 +64,22 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { cfg := cli.cfg() dbClient, err = database.NewClient(cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to initialize database client: %s", err) + return fmt.Errorf("unable to initialize database client: %w", err) } apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) - if err != nil { - return fmt.Errorf("unable to initialize API client: %s", err) + return fmt.Errorf("unable to initialize API client: %w", err) } papi, err := apiserver.NewPAPI(apic, dbClient, 
cfg.API.Server.ConsoleConfig, log.GetLevel()) - if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %s", err) + return fmt.Errorf("unable to initialize PAPI client: %w", err) } perms, err := papi.GetPermissions() - if err != nil { - return fmt.Errorf("unable to get PAPI permissions: %s", err) + return fmt.Errorf("unable to get PAPI permissions: %w", err) } var lastTimestampStr *string lastTimestampStr, err = dbClient.GetConfigItem(apiserver.PapiPullKey) @@ -118,27 +115,26 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command { dbClient, err = database.NewClient(cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to initialize database client: %s", err) + return fmt.Errorf("unable to initialize database client: %w", err) } apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { - return fmt.Errorf("unable to initialize API client: %s", err) + return fmt.Errorf("unable to initialize API client: %w", err) } t.Go(apic.Push) papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %s", err) + return fmt.Errorf("unable to initialize PAPI client: %w", err) } t.Go(papi.SyncDecisions) err = papi.PullOnce(time.Time{}, true) - if err != nil { - return fmt.Errorf("unable to sync decisions: %s", err) + return fmt.Errorf("unable to sync decisions: %w", err) } log.Infof("Sending acknowledgements to CAPI") diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 0f5ce182d9a..708b2d1c7a2 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -1,6 +1,7 @@ package require import ( + "errors" "fmt" "io" @@ -16,7 +17,7 @@ func LAPI(c *csconfig.Config) error { } if c.DisableAPI { - return fmt.Errorf("local API is disabled -- this command must be run on the local API machine") + return 
errors.New("local API is disabled -- this command must be run on the local API machine") } return nil @@ -32,7 +33,7 @@ func CAPI(c *csconfig.Config) error { func PAPI(c *csconfig.Config) error { if c.API.Server.OnlineClient.Credentials.PapiURL == "" { - return fmt.Errorf("no PAPI URL in configuration") + return errors.New("no PAPI URL in configuration") } return nil @@ -40,7 +41,7 @@ func PAPI(c *csconfig.Config) error { func CAPIRegistered(c *csconfig.Config) error { if c.API.Server.OnlineClient.Credentials == nil { - return fmt.Errorf("the Central API (CAPI) must be configured with 'cscli capi register'") + return errors.New("the Central API (CAPI) must be configured with 'cscli capi register'") } return nil @@ -56,7 +57,7 @@ func DB(c *csconfig.Config) error { func Notifications(c *csconfig.Config) error { if c.ConfigPaths.NotificationDir == "" { - return fmt.Errorf("config_paths.notification_dir is not set in crowdsec config") + return errors.New("config_paths.notification_dir is not set in crowdsec config") } return nil @@ -82,7 +83,7 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) local := c.Hub if local == nil { - return nil, fmt.Errorf("you must configure cli before interacting with hub") + return nil, errors.New("you must configure cli before interacting with hub") } if logger == nil { diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 48dcee08905..ba3670848d8 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -2,6 +2,7 @@ package main import ( "bytes" + "errors" "fmt" "os" "os/exec" @@ -118,9 +119,11 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { switch detectConfigFile { case "-": log.Tracef("Reading detection rules from stdin") + detectReader = os.Stdin default: log.Tracef("Reading detection rules: %s", detectConfigFile) + detectReader, err = os.Open(detectConfigFile) if err != nil { return err @@ -171,6 +174,7 @@ func runSetupDetect(cmd *cobra.Command, 
args []string) error { _, err := exec.LookPath("systemctl") if err != nil { log.Debug("systemctl not available: snubbing systemd") + snubSystemd = true } } @@ -182,6 +186,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { if forcedOSFamily == "" && forcedOSID != "" { log.Debug("force-os-id is set: force-os-family defaults to 'linux'") + forcedOSFamily = "linux" } @@ -219,6 +224,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { if err != nil { return err } + fmt.Println(setup) return nil @@ -318,6 +324,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error { func runSetupValidate(cmd *cobra.Command, args []string) error { fromFile := args[0] + input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading stdin: %w", err) @@ -325,7 +332,7 @@ func runSetupValidate(cmd *cobra.Command, args []string) error { if err = setup.Validate(input); err != nil { fmt.Printf("%v\n", err) - return fmt.Errorf("invalid setup file") + return errors.New("invalid setup file") } return nil diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 6ccac761727..f3c1a6273c1 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -1,6 +1,7 @@ package main import ( + "errors" "fmt" "os" "slices" @@ -36,7 +37,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, return err } if cli.cfg().Cscli.SimulationConfig == nil { - return fmt.Errorf("no simulation configured") + return errors.New("no simulation configured") } return nil @@ -99,11 +100,11 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { log.Printf("simulation mode for '%s' enabled", scenario) } if err := cli.dumpSimulationFile(); err != nil { - return fmt.Errorf("simulation enable: %s", err) + return fmt.Errorf("simulation enable: %w", err) } } else if forceGlobalSimulation { if err := cli.enableGlobalSimulation(); err != nil { - return fmt.Errorf("unable to enable global simulation mode: %s", err) + 
return fmt.Errorf("unable to enable global simulation mode: %w", err) } } else { printHelp(cmd) @@ -146,11 +147,11 @@ func (cli *cliSimulation) NewDisableCmd() *cobra.Command { log.Printf("simulation mode for '%s' disabled", scenario) } if err := cli.dumpSimulationFile(); err != nil { - return fmt.Errorf("simulation disable: %s", err) + return fmt.Errorf("simulation disable: %w", err) } } else if forceGlobalSimulation { if err := cli.disableGlobalSimulation(); err != nil { - return fmt.Errorf("unable to disable global simulation mode: %s", err) + return fmt.Errorf("unable to disable global simulation mode: %w", err) } } else { printHelp(cmd) @@ -202,7 +203,7 @@ func (cli *cliSimulation) enableGlobalSimulation() error { cfg.Cscli.SimulationConfig.Exclusions = []string{} if err := cli.dumpSimulationFile(); err != nil { - return fmt.Errorf("unable to dump simulation file: %s", err) + return fmt.Errorf("unable to dump simulation file: %w", err) } log.Printf("global simulation: enabled") @@ -215,12 +216,12 @@ func (cli *cliSimulation) dumpSimulationFile() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal simulation configuration: %s", err) + return fmt.Errorf("unable to marshal simulation configuration: %w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("write simulation config in '%s' failed: %s", cfg.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("write simulation config in '%s' failed: %w", cfg.ConfigPaths.SimulationFilePath, err) } log.Debugf("updated simulation file %s", cfg.ConfigPaths.SimulationFilePath) @@ -237,12 +238,12 @@ func (cli *cliSimulation) disableGlobalSimulation() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal new simulation configuration: %s", err) + return fmt.Errorf("unable to marshal new simulation configuration: 
%w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) if err != nil { - return fmt.Errorf("unable to write new simulation config in '%s': %s", cfg.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("unable to write new simulation config in '%s': %w", cfg.ConfigPaths.SimulationFilePath, err) } log.Printf("global simulation: disabled") @@ -269,8 +270,10 @@ func (cli *cliSimulation) status() { } } else { log.Println("global simulation: disabled") + if len(cfg.Cscli.SimulationConfig.Exclusions) > 0 { log.Println("Scenarios in simulation mode :") + for _, scenario := range cfg.Cscli.SimulationConfig.Exclusions { log.Printf(" - %s", scenario) } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 8b2481b4cf2..a48edeeeb9f 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -199,7 +199,7 @@ func collectAPIStatus(login string, password string, endpoint string, prefix str return []byte(fmt.Sprintf("cannot parse API URL: %s", err)) } - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return []byte(fmt.Sprintf("could not collect scenarios: %s", err)) } diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index fd29aa9d99b..f12aea5ac0d 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -17,12 +17,12 @@ import ( ) func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err) } - appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) + appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) if err != nil { return nil, fmt.Errorf("loading list of installed hub appsec 
rules: %w", err) } @@ -52,11 +52,11 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. PapiURL: papiURL, VersionPrefix: "v1", UpdateScenario: func() ([]string, error) { - scenarios, err := hub.GetInstalledItemNames(cwhub.SCENARIOS) + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return nil, err } - appsecRules, err := hub.GetInstalledItemNames(cwhub.APPSEC_RULES) + appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) if err != nil { return nil, err } diff --git a/pkg/alertcontext/config.go b/pkg/alertcontext/config.go index 74ca1523a7d..21d16db3972 100644 --- a/pkg/alertcontext/config.go +++ b/pkg/alertcontext/config.go @@ -104,7 +104,7 @@ func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error { c.Crowdsec.ContextToSend = make(map[string][]string, 0) if hub != nil { - items, err := hub.GetInstalledItems(cwhub.CONTEXTS) + items, err := hub.GetInstalledItemsByType(cwhub.CONTEXTS) if err != nil { return err } diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 44e24020d03..6b9f56b2e17 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -214,9 +214,9 @@ func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) { return i, nil } -// GetItemNames returns a slice of (full) item names for a given type +// GetNamesByType returns a slice of (full) item names for a given type // (eg. for collections: crowdsecurity/apache2 crowdsecurity/nginx). -func (h *Hub) GetItemNames(itemType string) []string { +func (h *Hub) GetNamesByType(itemType string) []string { m := h.GetItemMap(itemType) if m == nil { return nil @@ -230,8 +230,8 @@ func (h *Hub) GetItemNames(itemType string) []string { return names } -// GetAllItems returns a slice of all the items of a given type, installed or not. -func (h *Hub) GetAllItems(itemType string) ([]*Item, error) { +// GetItemsByType returns a slice of all the items of a given type, installed or not. 
+func (h *Hub) GetItemsByType(itemType string) ([]*Item, error) { if !slices.Contains(ItemTypes, itemType) { return nil, fmt.Errorf("invalid item type %s", itemType) } @@ -250,8 +250,8 @@ func (h *Hub) GetAllItems(itemType string) ([]*Item, error) { return ret, nil } -// GetInstalledItems returns a slice of the installed items of a given type. -func (h *Hub) GetInstalledItems(itemType string) ([]*Item, error) { +// GetInstalledItemsByType returns a slice of the installed items of a given type. +func (h *Hub) GetInstalledItemsByType(itemType string) ([]*Item, error) { if !slices.Contains(ItemTypes, itemType) { return nil, fmt.Errorf("invalid item type %s", itemType) } @@ -269,9 +269,9 @@ func (h *Hub) GetInstalledItems(itemType string) ([]*Item, error) { return retItems, nil } -// GetInstalledItemNames returns the names of the installed items of a given type. -func (h *Hub) GetInstalledItemNames(itemType string) ([]string, error) { - items, err := h.GetInstalledItems(itemType) +// GetInstalledNamesByType returns the names of the installed items of a given type. 
+func (h *Hub) GetInstalledNamesByType(itemType string) ([]string, error) { + items, err := h.GetInstalledItemsByType(itemType) if err != nil { return nil, err } From 60431804d8440b68e7c253be8f44137c832bf4b4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 11:11:57 +0200 Subject: [PATCH 100/581] db config: don't exit setup if can't detect fs, improve detection for freebsd (#2963) --- pkg/csconfig/database.go | 16 +++++++--------- pkg/types/getfstype.go | 2 +- pkg/types/getfstype_freebsd.go | 25 +++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 10 deletions(-) create mode 100644 pkg/types/getfstype_freebsd.go diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index a7bc57eefdc..9a9ed9a9f7f 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -76,26 +76,24 @@ func (c *Config) LoadDBConfig(inCli bool) error { if c.DbConfig.UseWal == nil { dbDir := filepath.Dir(c.DbConfig.DbPath) isNetwork, fsType, err := types.IsNetworkFS(dbDir) - if err != nil { + switch { + case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. 
Set explicitly to false to disable this warning.") - return nil - } - if isNetwork { + case isNetwork: log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) c.DbConfig.UseWal = ptr.Of(false) - } else { + default: log.Debugf("database is on local filesystem (%s), setting useWal to true", fsType) c.DbConfig.UseWal = ptr.Of(true) } } else if *c.DbConfig.UseWal { dbDir := filepath.Dir(c.DbConfig.DbPath) isNetwork, fsType, err := types.IsNetworkFS(dbDir) - if err != nil { + switch { + case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) - return nil - } - if isNetwork { + case isNetwork: log.Warnf("database seems to be stored on a network share (%s), but useWal is set to true. Proceed at your own risk.", fsType) } } diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 67e018782c1..25790ecb6c5 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -1,4 +1,4 @@ -//go:build !windows +//go:build !windows && !freebsd package types diff --git a/pkg/types/getfstype_freebsd.go b/pkg/types/getfstype_freebsd.go new file mode 100644 index 00000000000..8fbe3dd7cc4 --- /dev/null +++ b/pkg/types/getfstype_freebsd.go @@ -0,0 +1,25 @@ +//go:build freebsd + +package types + +import ( + "fmt" + "syscall" +) + +func GetFSType(path string) (string, error) { + var fsStat syscall.Statfs_t + + if err := syscall.Statfs(path, &fsStat); err != nil { + return "", fmt.Errorf("failed to get filesystem type: %w", err) + } + + bs := fsStat.Fstypename + + b := make([]byte, len(bs)) + for i, v := range bs { + b[i] = byte(v) + } + + return string(b), nil +} From f4ed7b35205e87001e715ee7b0ac223f2f23e7da Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Thu, 25 Apr 2024 13:43:38 +0200 Subject: [PATCH 101/581] Truncate meta data (#2966) * truncate meta-data if they are too big --- pkg/database/alerts.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git 
a/pkg/database/alerts.go b/pkg/database/alerts.go index 0502c25312d..5559cbb3972 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -636,14 +636,24 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ if len(alertItem.Meta) > 0 { metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta)) for i, metaItem := range alertItem.Meta { + key := metaItem.Key + value := metaItem.Value + if len(metaItem.Value) > 4095 { + c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) + value = value[:4095] + } + if len(metaItem.Key) > 255 { + c.Log.Warningf("truncated meta %s : key too long", metaItem.Key) + key = key[:255] + } metaBulk[i] = c.Ent.Meta.Create(). - SetKey(metaItem.Key). - SetValue(metaItem.Value) + SetKey(key). + SetValue(value) } metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX) if err != nil { - return nil, errors.Wrapf(BulkError, "creating alert meta: %s", err) + c.Log.Warningf("error creating alert meta: %s", err) } } From 845d4542bbc88586e8cad61df0d893ec6b1c03a7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 14:41:02 +0200 Subject: [PATCH 102/581] cscli: use yaml.v3 (#2965) * cscli: use yaml.v3 * lint --- .golangci.yml | 7 ------- cmd/crowdsec-cli/alerts.go | 2 +- cmd/crowdsec-cli/capi.go | 3 +-- cmd/crowdsec-cli/hubtest.go | 4 ++-- cmd/crowdsec-cli/lapi.go | 2 +- cmd/crowdsec-cli/simulation.go | 4 ++-- cmd/crowdsec/crowdsec.go | 8 ++++---- 7 files changed, 11 insertions(+), 19 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ff46ef1c02a..b1aa22da52f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -82,13 +82,6 @@ linters-settings: - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: - - "!**/cmd/crowdsec-cli/alerts.go" - - "!**/cmd/crowdsec-cli/capi.go" - - "!**/cmd/crowdsec-cli/config_show.go" - - "!**/cmd/crowdsec-cli/hubtest.go" - - "!**/cmd/crowdsec-cli/lapi.go" - - "!**/cmd/crowdsec-cli/simulation.go" - - 
"!**/cmd/crowdsec/crowdsec.go" - "!**/cmd/notification-dummy/main.go" - "!**/cmd/notification-email/main.go" - "!**/cmd/notification-http/main.go" diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 59dff809458..d31c99e1b12 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -17,7 +17,7 @@ import ( "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/version" diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index ec11acbbec4..b5180d0505a 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -10,7 +10,7 @@ import ( "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/version" @@ -85,7 +85,6 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { URL: apiurl, VersionPrefix: CAPIURLPrefix, }, nil) - if err != nil { return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) } diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 51735ce19ce..458de672349 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -14,7 +14,7 @@ import ( "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/emoji" @@ -136,7 +136,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios nucleiFileName := fmt.Sprintf("%s.yaml", testName) nucleiFilePath := filepath.Join(testPath, nucleiFileName) - nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0755) + nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755) if err != nil { return err } diff --git a/cmd/crowdsec-cli/lapi.go 
b/cmd/crowdsec-cli/lapi.go index 99c3ee45a13..369de5b426b 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -13,7 +13,7 @@ import ( "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/version" diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index f3c1a6273c1..3301c4b797e 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -8,7 +8,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -74,7 +74,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { if len(args) > 0 { for _, scenario := range args { - var item = hub.GetItem(cwhub.SCENARIOS, scenario) + item := hub.GetItem(cwhub.SCENARIOS, scenario) if item == nil { log.Errorf("'%s' doesn't exist or is not a scenario", scenario) continue diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index f604af1dedd..8f07d165f6b 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -9,7 +9,7 @@ import ( "time" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/trace" @@ -207,7 +207,7 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub } func dumpBucketsPour() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) if err != nil { log.Fatalf("open: %s", err) } @@ -230,7 +230,7 @@ func dumpBucketsPour() { } func dumpParserState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 
0666) + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) if err != nil { log.Fatalf("open: %s", err) } @@ -253,7 +253,7 @@ func dumpParserState() { } func dumpOverflowState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) if err != nil { log.Fatalf("open: %s", err) } From ceb4479ec41e6afd10e9ad4c7a5e5f45e612a03c Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 25 Apr 2024 15:05:11 +0200 Subject: [PATCH 103/581] add zfs magic for GetFSType (#2950) --- pkg/types/getfstype.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 25790ecb6c5..aac12c7fc94 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -4,6 +4,7 @@ package types import ( "fmt" + "golang.org/x/sys/unix" ) @@ -92,6 +93,7 @@ var fsTypeMapping map[int64]string = map[int64]string{ 0xabba1974: "xenfs", 0x012ff7b4: "xenix", 0x58465342: "xfs", + 0x2fc12fc1: "zfs", } func GetFSType(path string) (string, error) { From 2abc078e535b807f0753131bb9a6aa429a075cb5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 15:11:08 +0200 Subject: [PATCH 104/581] use go 1.22.2 (#2826) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- .golangci.yml | 9 +++++++-- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- go.mod | 2 +- 14 files changed, 20 insertions(+), 15 deletions(-) 
diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 4e977201c5a..72694276ac1 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 9e320b1b3de..e52c4759254 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 2cd09b494a0..c603e468a7b 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 742d1ee65ff..345f8761868 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 278426d778c..0ea60a31dd5 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 
1f27a8c7c2a..e4c078f3762 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 9d5c1739745..d8ec22e6a02 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 4eac3777df9..3674358ff28 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 6cdf111a4ba..d2a5df0535c 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.21.9" + go-version: "1.22.2" - name: Build the binaries run: | diff --git a/.golangci.yml b/.golangci.yml index b1aa22da52f..f27c5d863e5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -144,6 +144,13 @@ linters: - structcheck - varcheck + # + # Disabled until fixed for go 1.22 + # + + - copyloopvar # copyloopvar is a linter detects places where loop variables are copied + - intrange # intrange is a linter to find places where for loops could make use of an integer range. 
+ # # Enabled # @@ -152,7 +159,6 @@ linters: # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name # - bidichk # Checks for dangerous unicode character sequences # - bodyclose # checks whether HTTP response body is closed successfully - # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied # - cyclop # checks function and package cyclomatic complexity # - decorder # check declaration order and count of types, constants, variables and functions # - depguard # Go linter that checks if package imports are in a list of acceptable packages @@ -181,7 +187,6 @@ linters: # - importas # Enforces consistent import aliases # - ineffassign # Detects when assignments to existing variables are not used # - interfacebloat # A linter that checks the number of methods inside an interface. - # - intrange # intrange is a linter to find places where for loops could make use of an integer range. # - lll # Reports long lines # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). # - logrlint # Check logr arguments. 
diff --git a/Dockerfile b/Dockerfile index 69de0f9df8f..d67d534fe3d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.9-alpine3.18 AS build +FROM golang:1.22.2-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 9bcb517bb2d..4fc3a923823 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.21.9-bookworm AS build +FROM golang:1.22.2-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b662a809ad7..99909bbb467 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.21.9' + version: '1.22.2' - pwsh: | choco install -y make diff --git a/go.mod b/go.mod index 70d819a4059..8afc2b8a095 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.21 +go 1.22 // Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). 
From d2c4bc55fc7a77be2f5cc697eaee71a9634d2d02 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:34:49 +0200 Subject: [PATCH 105/581] plugins: use yaml.v3 (#2969) * plugins: use yaml.v3 * lint --- .golangci.yml | 5 ----- cmd/notification-dummy/main.go | 14 ++++++++++---- cmd/notification-email/main.go | 22 ++++++++++++++++------ cmd/notification-http/main.go | 23 +++++++++++++++++++---- cmd/notification-slack/main.go | 14 ++++++++++---- cmd/notification-splunk/main.go | 18 +++++++++++++----- 6 files changed, 68 insertions(+), 28 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f27c5d863e5..cc6551310c6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -82,11 +82,6 @@ linters-settings: - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: - - "!**/cmd/notification-dummy/main.go" - - "!**/cmd/notification-email/main.go" - - "!**/cmd/notification-http/main.go" - - "!**/cmd/notification-slack/main.go" - - "!**/cmd/notification-splunk/main.go" - "!**/pkg/acquisition/acquisition.go" - "!**/pkg/acquisition/acquisition_test.go" - "!**/pkg/acquisition/modules/appsec/appsec.go" diff --git a/cmd/notification-dummy/main.go b/cmd/notification-dummy/main.go index ef8d29ffa44..024a1eb81ba 100644 --- a/cmd/notification-dummy/main.go +++ b/cmd/notification-dummy/main.go @@ -5,10 +5,11 @@ import ( "fmt" "os" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) type PluginConfig struct { @@ -32,6 +33,7 @@ func (s *DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notifi if _, ok := s.PluginConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } + cfg := s.PluginConfigByName[notification.Name] if cfg.LogLevel != nil && *cfg.LogLevel != "" { @@ -42,19 +44,22 @@ func (s 
*DummyPlugin) Notify(ctx context.Context, notification *protobufs.Notifi logger.Debug(notification.Text) if cfg.OutputFile != nil && *cfg.OutputFile != "" { - f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + f, err := os.OpenFile(*cfg.OutputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { logger.Error(fmt.Sprintf("Cannot open notification file: %s", err)) } + if _, err := f.WriteString(notification.Text + "\n"); err != nil { f.Close() logger.Error(fmt.Sprintf("Cannot write notification to file: %s", err)) } + err = f.Close() if err != nil { logger.Error(fmt.Sprintf("Cannot close notification file: %s", err)) } } + fmt.Println(notification.Text) return &protobufs.Empty{}, nil @@ -64,11 +69,12 @@ func (s *DummyPlugin) Configure(ctx context.Context, config *protobufs.Config) ( d := PluginConfig{} err := yaml.Unmarshal(config.Config, &d) s.PluginConfigByName[d.Name] = d + return &protobufs.Empty{}, err } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 789740156fe..3b535ae7ffa 100644 --- a/cmd/notification-email/main.go +++ b/cmd/notification-email/main.go @@ -2,15 +2,17 @@ package main import ( "context" + "errors" "fmt" "os" "time" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" mail "github.com/xhit/go-simple-mail/v2" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) var baseLogger hclog.Logger = hclog.New(&hclog.LoggerOptions{ @@ -72,19 +74,20 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) ( } if d.Name == "" { - return nil, fmt.Errorf("name is required") + return nil, errors.New("name is required") } if d.SMTPHost == "" { 
- return nil, fmt.Errorf("SMTP host is not set") + return nil, errors.New("SMTP host is not set") } if d.ReceiverEmails == nil || len(d.ReceiverEmails) == 0 { - return nil, fmt.Errorf("receiver emails are not set") + return nil, errors.New("receiver emails are not set") } n.ConfigByName[d.Name] = d baseLogger.Debug(fmt.Sprintf("Email plugin '%s' use SMTP host '%s:%d'", d.Name, d.SMTPHost, d.SMTPPort)) + return &protobufs.Empty{}, nil } @@ -92,6 +95,7 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi if _, ok := n.ConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } + cfg := n.ConfigByName[notification.Name] logger := baseLogger.Named(cfg.Name) @@ -117,6 +121,7 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi server.ConnectTimeout, err = time.ParseDuration(cfg.ConnectTimeout) if err != nil { logger.Warn(fmt.Sprintf("invalid connect timeout '%s', using default '10s'", cfg.ConnectTimeout)) + server.ConnectTimeout = 10 * time.Second } } @@ -125,15 +130,18 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi server.SendTimeout, err = time.ParseDuration(cfg.SendTimeout) if err != nil { logger.Warn(fmt.Sprintf("invalid send timeout '%s', using default '10s'", cfg.SendTimeout)) + server.SendTimeout = 10 * time.Second } } logger.Debug("making smtp connection") + smtpClient, err := server.Connect() if err != nil { return &protobufs.Empty{}, err } + logger.Debug("smtp connection done") email := mail.NewMSG() @@ -146,12 +154,14 @@ func (n *EmailPlugin) Notify(ctx context.Context, notification *protobufs.Notifi if err != nil { return &protobufs.Empty{}, err } + logger.Info(fmt.Sprintf("sent email to %v", cfg.ReceiverEmails)) + return &protobufs.Empty{}, nil } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", 
MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-http/main.go b/cmd/notification-http/main.go index 382f30fea53..6b11a78ef86 100644 --- a/cmd/notification-http/main.go +++ b/cmd/notification-http/main.go @@ -12,10 +12,11 @@ import ( "os" "strings" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) type PluginConfig struct { @@ -90,18 +91,23 @@ func getTLSClient(c *PluginConfig) error { tlsConfig.Certificates = []tls.Certificate{cert} } + transport := &http.Transport{ TLSClientConfig: tlsConfig, } + if c.UnixSocket != "" { logger.Info(fmt.Sprintf("Using socket '%s'", c.UnixSocket)) + transport.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { return net.Dial("unix", strings.TrimSuffix(c.UnixSocket, "/")) } } + c.Client = &http.Client{ Transport: transport, } + return nil } @@ -109,6 +115,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific if _, ok := s.PluginConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } + cfg := s.PluginConfigByName[notification.Name] if cfg.LogLevel != nil && *cfg.LogLevel != "" { @@ -121,11 +128,14 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific if err != nil { return nil, err } + for headerName, headerValue := range cfg.Headers { logger.Debug(fmt.Sprintf("adding header %s: %s", headerName, headerValue)) request.Header.Add(headerName, headerValue) } + logger.Debug(fmt.Sprintf("making HTTP %s call to %s with body %s", cfg.Method, cfg.URL, notification.Text)) + resp, err := cfg.Client.Do(request.WithContext(ctx)) if err != nil { logger.Error(fmt.Sprintf("Failed to make HTTP request : %s", err)) @@ -135,7 +145,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification 
*protobufs.Notific respData, err := io.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("failed to read response body got error %s", err) + return nil, fmt.Errorf("failed to read response body got error %w", err) } logger.Debug(fmt.Sprintf("got response %s", string(respData))) @@ -143,6 +153,7 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific if resp.StatusCode < 200 || resp.StatusCode >= 300 { logger.Warn(fmt.Sprintf("HTTP server returned non 200 status code: %d", resp.StatusCode)) logger.Debug(fmt.Sprintf("HTTP server returned body: %s", string(respData))) + return &protobufs.Empty{}, nil } @@ -151,21 +162,25 @@ func (s *HTTPPlugin) Notify(ctx context.Context, notification *protobufs.Notific func (s *HTTPPlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { d := PluginConfig{} + err := yaml.Unmarshal(config.Config, &d) if err != nil { return nil, err } + err = getTLSClient(&d) if err != nil { return nil, err } + s.PluginConfigByName[d.Name] = d logger.Debug(fmt.Sprintf("HTTP plugin '%s' use URL '%s'", d.Name, d.URL)) + return &protobufs.Empty{}, err } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-slack/main.go b/cmd/notification-slack/main.go index 373cd9527ab..1e73d005fd8 100644 --- a/cmd/notification-slack/main.go +++ b/cmd/notification-slack/main.go @@ -5,12 +5,12 @@ import ( "fmt" "os" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" - "github.com/slack-go/slack" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) type PluginConfig struct { @@ -33,13 +33,16 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio if _, ok := 
n.ConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } + cfg := n.ConfigByName[notification.Name] if cfg.LogLevel != nil && *cfg.LogLevel != "" { logger.SetLevel(hclog.LevelFromString(*cfg.LogLevel)) } + logger.Info(fmt.Sprintf("found notify signal for %s config", notification.Name)) logger.Debug(fmt.Sprintf("posting to %s webhook, message %s", cfg.Webhook, notification.Text)) + err := slack.PostWebhookContext(ctx, n.ConfigByName[notification.Name].Webhook, &slack.WebhookMessage{ Text: notification.Text, }) @@ -52,16 +55,19 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio func (n *Notify) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { d := PluginConfig{} + if err := yaml.Unmarshal(config.Config, &d); err != nil { return nil, err } + n.ConfigByName[d.Name] = d logger.Debug(fmt.Sprintf("Slack plugin '%s' use URL '%s'", d.Name, d.Webhook)) + return &protobufs.Empty{}, nil } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-splunk/main.go b/cmd/notification-splunk/main.go index b24aa538f9a..26190c58a89 100644 --- a/cmd/notification-splunk/main.go +++ b/cmd/notification-splunk/main.go @@ -10,11 +10,11 @@ import ( "os" "strings" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" + "gopkg.in/yaml.v3" - "gopkg.in/yaml.v2" + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ @@ -44,6 +44,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio if _, ok := s.PluginConfigByName[notification.Name]; !ok { return &protobufs.Empty{}, fmt.Errorf("splunk invalid config name %s", notification.Name) } + cfg := 
s.PluginConfigByName[notification.Name] if cfg.LogLevel != nil && *cfg.LogLevel != "" { @@ -53,6 +54,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio logger.Info(fmt.Sprintf("received notify signal for %s config", notification.Name)) p := Payload{Event: notification.Text} + data, err := json.Marshal(p) if err != nil { return &protobufs.Empty{}, err @@ -65,6 +67,7 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio req.Header.Add("Authorization", fmt.Sprintf("Splunk %s", cfg.Token)) logger.Debug(fmt.Sprintf("posting event %s to %s", string(data), req.URL)) + resp, err := s.Client.Do(req.WithContext(ctx)) if err != nil { return &protobufs.Empty{}, err @@ -73,15 +76,19 @@ func (s *Splunk) Notify(ctx context.Context, notification *protobufs.Notificatio if resp.StatusCode != http.StatusOK { content, err := io.ReadAll(resp.Body) if err != nil { - return &protobufs.Empty{}, fmt.Errorf("got non 200 response and failed to read error %s", err) + return &protobufs.Empty{}, fmt.Errorf("got non 200 response and failed to read error %w", err) } + return &protobufs.Empty{}, fmt.Errorf("got non 200 response %s", string(content)) } + respData, err := io.ReadAll(resp.Body) if err != nil { - return &protobufs.Empty{}, fmt.Errorf("failed to read response body got error %s", err) + return &protobufs.Empty{}, fmt.Errorf("failed to read response body got error %w", err) } + logger.Debug(fmt.Sprintf("got response %s", string(respData))) + return &protobufs.Empty{}, nil } @@ -90,11 +97,12 @@ func (s *Splunk) Configure(ctx context.Context, config *protobufs.Config) (*prot err := yaml.Unmarshal(config.Config, &d) s.PluginConfigByName[d.Name] = d logger.Debug(fmt.Sprintf("Splunk plugin '%s' use URL '%s'", d.Name, d.URL)) + return &protobufs.Empty{}, err } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: 
os.Getenv("CROWDSEC_PLUGIN_KEY"), From c4473839c43dad25727f8cd341d94b9baa5b1702 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 25 Apr 2024 17:53:10 +0200 Subject: [PATCH 106/581] Refact pkg/parser/node (#2953) * extract method processFilter() * extract method processWhitelist() * lint (whitespace, errors) --- .golangci.yml | 4 +- pkg/parser/node.go | 230 +++++++++++++++++++++++++++++---------------- 2 files changed, 153 insertions(+), 81 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index cc6551310c6..9f059aa8aa9 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,7 +3,7 @@ linters-settings: cyclop: # lower this after refactoring - max-complexity: 53 + max-complexity: 48 gci: sections: @@ -22,7 +22,7 @@ linters-settings: gocyclo: # lower this after refactoring - min-complexity: 49 + min-complexity: 48 funlen: # Checks the number of lines in a function. diff --git a/pkg/parser/node.go b/pkg/parser/node.go index fe5432ce938..0906f0b5143 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -22,69 +22,70 @@ import ( type Node struct { FormatVersion string `yaml:"format"` - //Enable config + runtime debug of node via config o/ + // Enable config + runtime debug of node via config o/ Debug bool `yaml:"debug,omitempty"` - //If enabled, the node (and its child) will report their own statistics + // If enabled, the node (and its child) will report their own statistics Profiling bool `yaml:"profiling,omitempty"` - //Name, author, description and reference(s) for parser pattern + // Name, author, description and reference(s) for parser pattern Name string `yaml:"name,omitempty"` Author string `yaml:"author,omitempty"` Description string `yaml:"description,omitempty"` References []string `yaml:"references,omitempty"` - //if debug is present in the node, keep its specific Logger in runtime structure + // if debug is present in the node, keep its specific Logger in runtime structure Logger *log.Entry `yaml:"-"` - 
//This is mostly a hack to make writing less repetitive. - //relying on stage, we know which field to parse, and we - //can also promote log to next stage on success + // This is mostly a hack to make writing less repetitive. + // relying on stage, we know which field to parse, and we + // can also promote log to next stage on success Stage string `yaml:"stage,omitempty"` - //OnSuccess allows to tag a node to be able to move log to next stage on success + // OnSuccess allows to tag a node to be able to move log to next stage on success OnSuccess string `yaml:"onsuccess,omitempty"` - rn string //this is only for us in debug, a random generated name for each node - //Filter is executed at runtime (with current log line as context) - //and must succeed or node is exited + rn string // this is only for us in debug, a random generated name for each node + // Filter is executed at runtime (with current log line as context) + // and must succeed or node is exited Filter string `yaml:"filter,omitempty"` - RunTimeFilter *vm.Program `yaml:"-" json:"-"` //the actual compiled filter - //If node has leafs, execute all of them until one asks for a 'break' + RunTimeFilter *vm.Program `yaml:"-" json:"-"` // the actual compiled filter + // If node has leafs, execute all of them until one asks for a 'break' LeavesNodes []Node `yaml:"nodes,omitempty"` - //Flag used to describe when to 'break' or return an 'error' + // Flag used to describe when to 'break' or return an 'error' EnrichFunctions EnricherCtx /* If the node is actually a leaf, it can have : grok, enrich, statics */ - //pattern_syntax are named grok patterns that are re-utilized over several grok patterns + // pattern_syntax are named grok patterns that are re-utilized over several grok patterns SubGroks yaml.MapSlice `yaml:"pattern_syntax,omitempty"` - //Holds a grok pattern + // Holds a grok pattern Grok GrokPattern `yaml:"grok,omitempty"` - //Statics can be present in any type of node and is executed last + // Statics 
can be present in any type of node and is executed last Statics []ExtraField `yaml:"statics,omitempty"` - //Stash allows to capture data from the log line and store it in an accessible cache + // Stash allows to capture data from the log line and store it in an accessible cache Stash []DataCapture `yaml:"stash,omitempty"` - //Whitelists + // Whitelists Whitelist Whitelist `yaml:"whitelist,omitempty"` Data []*types.DataSource `yaml:"data,omitempty"` } func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { - - //stage is being set automagically + // stage is being set automagically if n.Stage == "" { - return fmt.Errorf("stage needs to be an existing stage") + return errors.New("stage needs to be an existing stage") } /* "" behaves like continue */ if n.OnSuccess != "continue" && n.OnSuccess != "next_stage" && n.OnSuccess != "" { return fmt.Errorf("onsuccess '%s' not continue,next_stage", n.OnSuccess) } + if n.Filter != "" && n.RunTimeFilter == nil { return fmt.Errorf("non-empty filter '%s' was not compiled", n.Filter) } if n.Grok.RunTimeRegexp != nil || n.Grok.TargetField != "" { if n.Grok.TargetField == "" && n.Grok.ExpValue == "" { - return fmt.Errorf("grok requires 'expression' or 'apply_on'") + return errors.New("grok requires 'expression' or 'apply_on'") } + if n.Grok.RegexpName == "" && n.Grok.RegexpValue == "" { - return fmt.Errorf("grok needs 'pattern' or 'name'") + return errors.New("grok needs 'pattern' or 'name'") } } @@ -93,6 +94,7 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { if static.ExpValue == "" { return fmt.Errorf("static %d : when method is set, expression must be present", idx) } + if _, ok := ectx.Registered[static.Method]; !ok { log.Warningf("the method '%s' doesn't exist or the plugin has not been initialized", static.Method) } @@ -100,6 +102,7 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { if static.Meta == "" && static.Parsed == "" && static.TargetByName == "" { return 
fmt.Errorf("static %d : at least one of meta/event/target must be set", idx) } + if static.Value == "" && static.RunTimeValue == nil { return fmt.Errorf("static %d value or expression must be set", idx) } @@ -110,72 +113,76 @@ func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { if stash.Name == "" { return fmt.Errorf("stash %d : name must be set", idx) } + if stash.Value == "" { return fmt.Errorf("stash %s : value expression must be set", stash.Name) } + if stash.Key == "" { return fmt.Errorf("stash %s : key expression must be set", stash.Name) } + if stash.TTL == "" { return fmt.Errorf("stash %s : ttl must be set", stash.Name) } + if stash.Strategy == "" { stash.Strategy = "LRU" } - //should be configurable + // should be configurable if stash.MaxMapSize == 0 { stash.MaxMapSize = 100 } } + return nil } -func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { - var NodeState bool - var NodeHasOKGrok bool +func (n *Node) processFilter(cachedExprEnv map[string]interface{}) (bool, error) { clog := n.Logger + if n.RunTimeFilter == nil { + clog.Tracef("Node has not filter, enter") + return true, nil + } - cachedExprEnv := expressionEnv + // Evaluate node's filter + output, err := exprhelpers.Run(n.RunTimeFilter, cachedExprEnv, clog, n.Debug) + if err != nil { + clog.Warningf("failed to run filter : %v", err) + clog.Debugf("Event leaving node : ko") - clog.Tracef("Event entering node") - if n.RunTimeFilter != nil { - //Evaluate node's filter - output, err := exprhelpers.Run(n.RunTimeFilter, cachedExprEnv, clog, n.Debug) - if err != nil { - clog.Warningf("failed to run filter : %v", err) - clog.Debugf("Event leaving node : ko") - return false, nil - } + return false, nil + } - switch out := output.(type) { - case bool: - if !out { - clog.Debugf("Event leaving node : ko (failed filter)") - return false, nil - } - default: - clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output) - 
clog.Debugf("Event leaving node : ko") + switch out := output.(type) { + case bool: + if !out { + clog.Debugf("Event leaving node : ko (failed filter)") return false, nil } - NodeState = true - } else { - clog.Tracef("Node has not filter, enter") - NodeState = true - } + default: + clog.Warningf("Expr '%s' returned non-bool, abort : %T", n.Filter, output) + clog.Debugf("Event leaving node : ko") - if n.Name != "" { - NodesHits.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() + return false, nil } - exprErr := error(nil) + + return true, nil +} + +func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.Event) (bool, error) { + var exprErr error + isWhitelisted := n.CheckIPsWL(p) if !isWhitelisted { isWhitelisted, exprErr = n.CheckExprWL(cachedExprEnv, p) } + if exprErr != nil { // Previous code returned nil if there was an error, so we keep this behavior return false, nil //nolint:nilerr } + if isWhitelisted && !p.Whitelisted { p.Whitelisted = true p.WhitelistReason = n.Whitelist.Reason @@ -185,18 +192,51 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri for k := range p.Overflow.Sources { ips = append(ips, k) } - clog.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason) + + n.Logger.Infof("Ban for %s whitelisted, reason [%s]", strings.Join(ips, ","), n.Whitelist.Reason) + p.Overflow.Whitelisted = true } } - //Process grok if present, should be exclusive with nodes :) + return isWhitelisted, nil +} + +func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { + var NodeHasOKGrok bool + + clog := n.Logger + + cachedExprEnv := expressionEnv + + clog.Tracef("Event entering node") + + NodeState, err := n.processFilter(cachedExprEnv) + if err != nil { + return false, err + } + + if !NodeState { + return false, nil + } + + if n.Name != "" { + NodesHits.With(prometheus.Labels{"source": 
p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() + } + + isWhitelisted, err := n.processWhitelist(cachedExprEnv, p) + if err != nil { + return false, err + } + + // Process grok if present, should be exclusive with nodes :) gstr := "" + if n.Grok.RunTimeRegexp != nil { clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp) - //for unparsed, parsed etc. set sensible defaults to reduce user hassle + // for unparsed, parsed etc. set sensible defaults to reduce user hassle if n.Grok.TargetField != "" { - //it's a hack to avoid using real reflect + // it's a hack to avoid using real reflect if n.Grok.TargetField == "Line.Raw" { gstr = p.Line.Raw } else if val, ok := p.Parsed[n.Grok.TargetField]; ok { @@ -211,6 +251,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Warningf("failed to run RunTimeValue : %v", err) NodeState = false } + switch out := output.(type) { case string: gstr = out @@ -229,12 +270,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri } else { groklabel = n.Grok.RegexpName } + grok := n.Grok.RunTimeRegexp.Parse(gstr) if len(grok) > 0 { /*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/ NodeHasOKGrok = true + clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok)) - //We managed to grok stuff, merged into parse + // We managed to grok stuff, merged into parse for k, v := range grok { clog.Debugf("\t.Parsed['%s'] = '%s'", k, v) p.Parsed[k] = v @@ -246,34 +289,37 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri return false, err } } else { - //grok failed, node failed + // grok failed, node failed clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr) NodeState = false } - } else { clog.Tracef("! 
No grok pattern : %p", n.Grok.RunTimeRegexp) } - //Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok + // Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok if NodeHasOKGrok || n.Grok.RunTimeRegexp == nil { for idx, stash := range n.Stash { - var value string - var key string + var ( + key string + value string + ) + if stash.ValueExpression == nil { clog.Warningf("Stash %d has no value expression, skipping", idx) continue } + if stash.KeyExpression == nil { clog.Warningf("Stash %d has no key expression, skipping", idx) continue } - //collect the data + // collect the data output, err := exprhelpers.Run(stash.ValueExpression, cachedExprEnv, clog, n.Debug) if err != nil { clog.Warningf("Error while running stash val expression : %v", err) } - //can we expect anything else than a string ? + // can we expect anything else than a string ? switch output := output.(type) { case string: value = output @@ -282,12 +328,12 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri continue } - //collect the key + // collect the key output, err = exprhelpers.Run(stash.KeyExpression, cachedExprEnv, clog, n.Debug) if err != nil { clog.Warningf("Error while running stash key expression : %v", err) } - //can we expect anything else than a string ? + // can we expect anything else than a string ? 
switch output := output.(type) { case string: key = output @@ -299,7 +345,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri } } - //Iterate on leafs + // Iterate on leafs for _, leaf := range n.LeavesNodes { ret, err := leaf.process(p, ctx, cachedExprEnv) if err != nil { @@ -307,7 +353,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Debugf("Event leaving node : ko") return false, err } + clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) + if ret { NodeState = true /* if child is successful, stop processing */ @@ -328,12 +376,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Tracef("State after nodes : %v", NodeState) - //grok or leafs failed, don't process statics + // grok or leafs failed, don't process statics if !NodeState { if n.Name != "" { NodesHitsKo.With(prometheus.Labels{"source": p.Line.Src, "type": p.Line.Module, "name": n.Name}).Inc() } + clog.Debugf("Event leaving node : ko") + return NodeState, nil } @@ -360,9 +410,10 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri if NodeState { clog.Debugf("Event leaving node : ok") log.Tracef("node is successful, check strategy") + if n.OnSuccess == "next_stage" { idx := stageidx(p.Stage, ctx.Stages) - //we're at the last stage + // we're at the last stage if idx+1 == len(ctx.Stages) { clog.Debugf("node reached the last stage : %s", p.Stage) } else { @@ -375,15 +426,16 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri } else { clog.Debugf("Event leaving node : ko") } + clog.Tracef("Node successful, continue") + return NodeState, nil } func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { var err error - var valid bool - valid = false + valid := false dumpr := spew.ConfigState{MaxDepth: 1, DisablePointerAddresses: true} n.rn = seed.Generate() @@ -393,10 +445,11 @@ func (n *Node) 
compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* if the node has debugging enabled, create a specific logger with debug that will be used only for processing this node ;) */ if n.Debug { - var clog = log.New() + clog := log.New() if err = types.ConfigureLogger(clog); err != nil { log.Fatalf("While creating bucket-specific logger : %s", err) } + clog.SetLevel(log.DebugLevel) n.Logger = clog.WithFields(log.Fields{ "id": n.rn, @@ -414,7 +467,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { n.Logger.Tracef("Compiling : %s", dumpr.Sdump(n)) - //compile filter if present + // compile filter if present if n.Filter != "" { n.RunTimeFilter, err = expr.Compile(n.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { @@ -425,12 +478,15 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* handle pattern_syntax and groks */ for _, pattern := range n.SubGroks { n.Logger.Tracef("Adding subpattern '%s' : '%s'", pattern.Key, pattern.Value) + if err = pctx.Grok.Add(pattern.Key.(string), pattern.Value.(string)); err != nil { if errors.Is(err, grokky.ErrAlreadyExist) { n.Logger.Warningf("grok '%s' already registred", pattern.Key) continue } + n.Logger.Errorf("Unable to compile subpattern %s : %v", pattern.Key, err) + return err } } @@ -438,28 +494,36 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* load grok by name or compile in-place */ if n.Grok.RegexpName != "" { n.Logger.Tracef("+ Regexp Compilation '%s'", n.Grok.RegexpName) + n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName) if err != nil { return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err) } + if n.Grok.RunTimeRegexp == nil { return fmt.Errorf("empty grok '%s'", n.Grok.RegexpName) } + n.Logger.Tracef("%s regexp: %s", n.Grok.RegexpName, n.Grok.RunTimeRegexp.String()) + valid = true } else if n.Grok.RegexpValue != "" { if strings.HasSuffix(n.Grok.RegexpValue, "\n") 
{ n.Logger.Debugf("Beware, pattern ends with \\n : '%s'", n.Grok.RegexpValue) } + n.Grok.RunTimeRegexp, err = pctx.Grok.Compile(n.Grok.RegexpValue) if err != nil { return fmt.Errorf("failed to compile grok '%s': %v", n.Grok.RegexpValue, err) } + if n.Grok.RunTimeRegexp == nil { // We shouldn't be here because compilation succeeded, so regexp shouldn't be nil return fmt.Errorf("grok compilation failure: %s", n.Grok.RegexpValue) } + n.Logger.Tracef("%s regexp : %s", n.Grok.RegexpValue, n.Grok.RunTimeRegexp.String()) + valid = true } @@ -473,7 +537,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { } /* load grok statics */ - //compile expr statics if present + // compile expr statics if present for idx := range n.Grok.Statics { if n.Grok.Statics[idx].ExpValue != "" { n.Grok.Statics[idx].RunTimeValue, err = expr.Compile(n.Grok.Statics[idx].ExpValue, @@ -482,6 +546,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { return err } } + valid = true } @@ -505,7 +570,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { } logLvl := n.Logger.Logger.GetLevel() - //init the cache, does it make sense to create it here just to be sure everything is fine ? + // init the cache, does it make sense to create it here just to be sure everything is fine ? 
if err = cache.CacheInit(cache.CacheCfg{ Size: n.Stash[i].MaxMapSize, TTL: n.Stash[i].TTLVal, @@ -526,14 +591,18 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { if !n.LeavesNodes[idx].Debug && n.Debug { n.LeavesNodes[idx].Debug = true } + if !n.LeavesNodes[idx].Profiling && n.Profiling { n.LeavesNodes[idx].Profiling = true } + n.LeavesNodes[idx].Stage = n.Stage + err = n.LeavesNodes[idx].compile(pctx, ectx) if err != nil { return err } + valid = true } @@ -546,6 +615,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { return err } } + valid = true } @@ -554,13 +624,15 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { if err != nil { return err } + valid = valid || whitelistValid if !valid { /* node is empty, error force return */ n.Logger.Error("Node is empty or invalid, abort") n.Stage = "" - return fmt.Errorf("Node is empty") + + return errors.New("Node is empty") } if err := n.validate(pctx, ectx); err != nil { From 05b54687b6e638fe2092d42d84d26d527e2b183d Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 26 Apr 2024 15:56:15 +0100 Subject: [PATCH 107/581] feat: support stdout in cscli support dump (#2939) * feat: support stdout in cscli support dump * fix: skip log.info if stdout * fix: handle errors by returning to runE instead --- cmd/crowdsec-cli/support.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index a48edeeeb9f..737411e28b8 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -319,7 +319,7 @@ cscli support dump -f /tmp/crowdsec-support.zip `, Args: cobra.NoArgs, DisableAutoGenTag: true, - Run: func(_ *cobra.Command, _ []string) { + RunE: func(_ *cobra.Command, _ []string) error { var err error var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool infos := map[string][]byte{ @@ -473,15 +473,19 @@ cscli support dump -f /tmp/crowdsec-support.zip err = 
zipWriter.Close() if err != nil { - log.Fatalf("could not finalize zip file: %s", err) + return fmt.Errorf("could not finalize zip file: %s", err) } + if outFile == "-" { + _, err = os.Stdout.Write(w.Bytes()) + return err + } err = os.WriteFile(outFile, w.Bytes(), 0o600) if err != nil { - log.Fatalf("could not write zip file to %s: %s", outFile, err) + return fmt.Errorf("could not write zip file to %s: %s", outFile, err) } - log.Infof("Written zip file to %s", outFile) + return nil }, } From 3788610aff31782f949cc9963ad9e3b32b26f961 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 10:25:04 +0200 Subject: [PATCH 108/581] cscli: avoid global vars (#2977) * cscli: avoid global usage This is required to make it possible to split the package * lint (fmt.Errorf) --- cmd/crowdsec-cli/hub.go | 6 ++++-- cmd/crowdsec-cli/hubappsec.go | 2 +- cmd/crowdsec-cli/item_metrics.go | 10 ++++----- cmd/crowdsec-cli/item_suggest.go | 8 +++---- cmd/crowdsec-cli/itemcli.go | 14 +++++++------ cmd/crowdsec-cli/items.go | 12 +++++------ cmd/crowdsec-cli/support.go | 2 +- pkg/acquisition/acquisition_test.go | 21 ++++++++++--------- .../modules/cloudwatch/cloudwatch_test.go | 3 ++- pkg/csconfig/config_paths.go | 5 +++-- pkg/longpollclient/client.go | 4 ++-- pkg/parser/parsing_test.go | 4 ++-- 12 files changed, 49 insertions(+), 42 deletions(-) diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 71347a5a57b..62eb894d8f2 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -47,7 +47,9 @@ cscli hub upgrade`, } func (cli *cliHub) list(all bool) error { - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + cfg := cli.cfg() + + hub, err := require.Hub(cfg, nil, log.StandardLogger()) if err != nil { return err } @@ -69,7 +71,7 @@ func (cli *cliHub) list(all bool) error { } } - err = listItems(color.Output, cwhub.ItemTypes, items, true) + err = listItems(color.Output, cwhub.ItemTypes, items, true, 
cfg.Cscli.Output) if err != nil { return err } diff --git a/cmd/crowdsec-cli/hubappsec.go b/cmd/crowdsec-cli/hubappsec.go index 7ee578edc2f..1df3212f941 100644 --- a/cmd/crowdsec-cli/hubappsec.go +++ b/cmd/crowdsec-cli/hubappsec.go @@ -50,7 +50,7 @@ cscli appsec-configs list crowdsecurity/vpatch`, func NewCLIAppsecRule(cfg configGetter) *cliItem { inspectDetail := func(item *cwhub.Item) error { // Only show the converted rules in human mode - if csConfig.Cscli.Output != "human" { + if cfg().Cscli.Output != "human" { return nil } diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/item_metrics.go index b571fb1c5ed..9459968790b 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/item_metrics.go @@ -18,22 +18,22 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func ShowMetrics(hubItem *cwhub.Item) error { +func ShowMetrics(prometheusURL string, hubItem *cwhub.Item) error { switch hubItem.Type { case cwhub.PARSERS: - metrics := GetParserMetric(csConfig.Cscli.PrometheusUrl, hubItem.Name) + metrics := GetParserMetric(prometheusURL, hubItem.Name) parserMetricsTable(color.Output, hubItem.Name, metrics) case cwhub.SCENARIOS: - metrics := GetScenarioMetric(csConfig.Cscli.PrometheusUrl, hubItem.Name) + metrics := GetScenarioMetric(prometheusURL, hubItem.Name) scenarioMetricsTable(color.Output, hubItem.Name, metrics) case cwhub.COLLECTIONS: for _, sub := range hubItem.SubItems() { - if err := ShowMetrics(sub); err != nil { + if err := ShowMetrics(prometheusURL, sub); err != nil { return err } } case cwhub.APPSEC_RULES: - metrics := GetAppsecRuleMetric(csConfig.Cscli.PrometheusUrl, hubItem.Name) + metrics := GetAppsecRuleMetric(prometheusURL, hubItem.Name) appsecMetricsTable(color.Output, hubItem.Name, metrics) default: // no metrics for this item type } diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/item_suggest.go index 9272abedd50..0ea656549ba 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ 
b/cmd/crowdsec-cli/item_suggest.go @@ -36,8 +36,8 @@ func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) str return msg } -func compAllItems(itemType string, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(csConfig, nil, nil) +func compAllItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil, nil) if err != nil { return nil, cobra.ShellCompDirectiveDefault } @@ -55,8 +55,8 @@ func compAllItems(itemType string, args []string, toComplete string) ([]string, return comp, cobra.ShellCompDirectiveNoFileComp } -func compInstalledItems(itemType string, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(csConfig, nil, nil) +func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil, nil) if err != nil { return nil, cobra.ShellCompDirectiveDefault } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index cdaf7877054..44a734d5e6a 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -112,7 +112,7 @@ func (cli cliItem) newInstallCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compAllItems(cli.name, args, toComplete) + return compAllItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(_ *cobra.Command, args []string) error { return cli.install(args, downloadOnly, force, ignoreError) @@ -238,7 +238,7 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { Aliases: []string{"delete"}, DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, 
args, toComplete) + return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(_ *cobra.Command, args []string) error { return cli.remove(args, purge, force, all) @@ -333,7 +333,7 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { Example: cli.upgradeHelp.example, DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete) + return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(_ *cobra.Command, args []string) error { return cli.upgrade(args, force, all) @@ -381,7 +381,7 @@ func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMet continue } - if err = inspectItem(item, !noMetrics); err != nil { + if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl); err != nil { return err } @@ -411,7 +411,7 @@ func (cli cliItem) newInspectCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete) + return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(_ *cobra.Command, args []string) error { return cli.inspect(args, url, diff, rev, noMetrics) @@ -428,6 +428,8 @@ func (cli cliItem) newInspectCmd() *cobra.Command { } func (cli cliItem) list(args []string, all bool) error { + cfg := cli.cfg() + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) if err != nil { return err @@ -440,7 +442,7 @@ func (cli cliItem) list(args []string, all bool) error { return err } - if err = listItems(color.Output, []string{cli.name}, items, false); err != nil { + if err = listItems(color.Output, []string{cli.name}, items, false, cfg.Cscli.Output); err != nil { return err } diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index 
4ae643151f2..9af432c32c1 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -54,8 +54,8 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b return items, nil } -func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool) error { - switch csConfig.Cscli.Output { +func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { + switch output { case "human": nothingToDisplay := true @@ -143,8 +143,8 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item return nil } -func inspectItem(item *cwhub.Item, showMetrics bool) error { - switch csConfig.Cscli.Output { +func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string) error { + switch output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) enc.SetIndent(2) @@ -161,7 +161,7 @@ func inspectItem(item *cwhub.Item, showMetrics bool) error { fmt.Print(string(b)) } - if csConfig.Cscli.Output != "human" { + if output != "human" { return nil } @@ -174,7 +174,7 @@ func inspectItem(item *cwhub.Item, showMetrics bool) error { if showMetrics { fmt.Printf("\nCurrent metrics: \n") - if err := ShowMetrics(item); err != nil { + if err := ShowMetrics(prometheusURL, item); err != nil { return err } } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 737411e28b8..418a981adee 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -154,7 +154,7 @@ func collectHubItems(hub *cwhub.Hub, itemType string) []byte { log.Warnf("could not collect %s list: %s", itemType, err) } - if err := listItems(out, []string{itemType}, items, false); err != nil { + if err := listItems(out, []string{itemType}, items, false, "human"); err != nil { log.Warnf("could not collect %s list: %s", itemType, err) } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go 
index 33e4948552a..1fbac2cdc00 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -1,6 +1,7 @@ package acquisition import ( + "errors" "fmt" "strings" "testing" @@ -50,7 +51,7 @@ func (f *MockSource) Configure(cfg []byte, logger *log.Entry, metricsLevel int) } if f.Toto == "" { - return fmt.Errorf("expect non-empty toto") + return errors.New("expect non-empty toto") } return nil @@ -64,7 +65,7 @@ func (f *MockSource) GetAggregMetrics() []prometheus.Collector { func (f *MockSource) Dump() interface{} { return f } func (f *MockSource) GetName() string { return "mock" } func (f *MockSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { - return fmt.Errorf("not supported") + return errors.New("not supported") } func (f *MockSource) GetUuid() string { return "" } @@ -73,7 +74,7 @@ type MockSourceCantRun struct { MockSource } -func (f *MockSourceCantRun) CanRun() error { return fmt.Errorf("can't run bro") } +func (f *MockSourceCantRun) CanRun() error { return errors.New("can't run bro") } func (f *MockSourceCantRun) GetName() string { return "mock_cant_run" } // appendMockSource is only used to add mock source for tests @@ -331,14 +332,14 @@ func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) erro return nil } func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { - return fmt.Errorf("can't run in tail") + return errors.New("can't run in tail") } func (f *MockCat) CanRun() error { return nil } func (f *MockCat) GetMetrics() []prometheus.Collector { return nil } func (f *MockCat) GetAggregMetrics() []prometheus.Collector { return nil } func (f *MockCat) Dump() interface{} { return f } func (f *MockCat) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { - return fmt.Errorf("not supported") + return errors.New("not supported") } func (f *MockCat) GetUuid() string { return "" } @@ -366,7 +367,7 @@ func (f *MockTail) UnmarshalConfig(cfg []byte) 
error { return nil } func (f *MockTail) GetName() string { return "mock_tail" } func (f *MockTail) GetMode() string { return "tail" } func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { - return fmt.Errorf("can't run in cat mode") + return errors.New("can't run in cat mode") } func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { for i := 0; i < 10; i++ { @@ -383,7 +384,7 @@ func (f *MockTail) GetMetrics() []prometheus.Collector { return nil } func (f *MockTail) GetAggregMetrics() []prometheus.Collector { return nil } func (f *MockTail) Dump() interface{} { return f } func (f *MockTail) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { - return fmt.Errorf("not supported") + return errors.New("not supported") } func (f *MockTail) GetUuid() string { return "" } @@ -457,9 +458,9 @@ func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) evt.Line.Src = "test" out <- evt } - t.Kill(fmt.Errorf("got error (tomb)")) + t.Kill(errors.New("got error (tomb)")) - return fmt.Errorf("got error") + return errors.New("got error") } func TestStartAcquisitionTailError(t *testing.T) { @@ -512,7 +513,7 @@ func (f *MockSourceByDSN) GetName() string func (f *MockSourceByDSN) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { dsn = strings.TrimPrefix(dsn, "mockdsn://") if dsn != "test_expect" { - return fmt.Errorf("unexpected value") + return errors.New("unexpected value") } return nil diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 11842e61ff2..12e01ddf609 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -1,6 +1,7 @@ package cloudwatchacquisition import ( + "errors" "fmt" "net" "os" @@ -43,7 +44,7 @@ func deleteAllLogGroups(t *testing.T, cw *CloudwatchSource) { func 
checkForLocalStackAvailability() error { v := os.Getenv("AWS_ENDPOINT_FORCE") if v == "" { - return fmt.Errorf("missing aws endpoint for tests : AWS_ENDPOINT_FORCE") + return errors.New("missing aws endpoint for tests : AWS_ENDPOINT_FORCE") } v = strings.TrimPrefix(v, "http://") diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go index 3de05ee0292..7675b90d7dd 100644 --- a/pkg/csconfig/config_paths.go +++ b/pkg/csconfig/config_paths.go @@ -1,6 +1,7 @@ package csconfig import ( + "errors" "fmt" "path/filepath" ) @@ -19,11 +20,11 @@ type ConfigurationPaths struct { func (c *Config) loadConfigurationPaths() error { var err error if c.ConfigPaths == nil { - return fmt.Errorf("no configuration paths provided") + return errors.New("no configuration paths provided") } if c.ConfigPaths.DataDir == "" { - return fmt.Errorf("please provide a data directory with the 'data_dir' directive in the 'config_paths' section") + return errors.New("please provide a data directory with the 'data_dir' directive in the 'config_paths' section") } if c.ConfigPaths.HubDir == "" { diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index e93870a2869..9fa3b4b3f9a 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -46,7 +46,7 @@ type pollResponse struct { ErrorMessage string `json:"error"` } -var errUnauthorized = fmt.Errorf("user is not authorized to use PAPI") +var errUnauthorized = errors.New("user is not authorized to use PAPI") const timeoutMessage = "no events before timeout" @@ -225,7 +225,7 @@ func (c *LongPollClient) PullOnce(since time.Time) ([]Event, error) { func NewLongPollClient(config LongPollClientConfig) (*LongPollClient, error) { var logger *log.Entry if config.Url == (url.URL{}) { - return nil, fmt.Errorf("url is required") + return nil, errors.New("url is required") } if config.Logger == nil { logger = log.WithField("component", "longpollclient") diff --git a/pkg/parser/parsing_test.go 
b/pkg/parser/parsing_test.go index 04d08cc2785..d009bd0c515 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -131,7 +131,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } for n := 0; n < count; n++ { if testFile(tests, *pctx, pnodes) != true { - return fmt.Errorf("test failed !") + return errors.New("test failed !") } } return nil @@ -296,7 +296,7 @@ func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error */ if len(testSet.Results) == 0 && len(results) == 0 { log.Fatal("No results, no tests, abort.") - return false, fmt.Errorf("no tests, no results") + return false, errors.New("no tests, no results") } reCheck: From 87564ea462254d00859ae6b3e5575490b663b0de Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 10:59:24 +0200 Subject: [PATCH 109/581] pkg/dumps,hubtest: use yaml.v3 (#2972) * pkg/dumps,hubtest: use yaml.v3 * lint (whitespace/fmt.Errorf) --- .golangci.yml | 7 ---- pkg/csplugin/broker_test.go | 5 ++- pkg/dumps/bucket_dump.go | 3 +- pkg/dumps/parser_dump.go | 2 +- pkg/hubtest/coverage.go | 32 +++++++++------ pkg/hubtest/hubtest_item.go | 72 +++++++++++++++++----------------- pkg/hubtest/parser_assert.go | 29 +++++++------- pkg/hubtest/scenario_assert.go | 18 ++++----- 8 files changed, 87 insertions(+), 81 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 9f059aa8aa9..8e60a3ca8f8 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -100,13 +100,6 @@ linters-settings: - "!**/pkg/appsec/appsec.go" - "!**/pkg/appsec/loader.go" - "!**/pkg/csplugin/broker.go" - - "!**/pkg/csplugin/broker_test.go" - - "!**/pkg/dumps/bucket_dump.go" - - "!**/pkg/dumps/parser_dump.go" - - "!**/pkg/hubtest/coverage.go" - - "!**/pkg/hubtest/hubtest_item.go" - - "!**/pkg/hubtest/parser_assert.go" - - "!**/pkg/hubtest/scenario_assert.go" - "!**/pkg/leakybucket/buckets_test.go" - "!**/pkg/leakybucket/manager_load.go" - 
"!**/pkg/metabase/metabase.go" diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index 9adb35ad7cc..34c9ce7d684 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -48,7 +48,7 @@ func (s *PluginSuite) writeconfig(config PluginConfig) { data, err := yaml.Marshal(&config) require.NoError(t, err, "unable to marshal config file") - err = os.WriteFile(s.pluginConfig, data, 0644) + err = os.WriteFile(s.pluginConfig, data, 0o644) require.NoError(t, err, "unable to write config file %s", s.pluginConfig) } @@ -135,6 +135,7 @@ func (s *PluginSuite) TestBrokerInit() { if tc.action != nil { tc.action(t) } + _, err := s.InitBroker(&tc.procCfg) cstest.RequireErrorContains(t, err, tc.expectedErr) }) diff --git a/pkg/dumps/bucket_dump.go b/pkg/dumps/bucket_dump.go index 5f5ce1c4028..328c581928b 100644 --- a/pkg/dumps/bucket_dump.go +++ b/pkg/dumps/bucket_dump.go @@ -4,8 +4,9 @@ import ( "io" "os" + "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/types" - "gopkg.in/yaml.v2" ) type BucketPourInfo map[string][]types.Event diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index 9b4cdb1c22b..227f96315d4 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -12,7 +12,7 @@ import ( "github.com/fatih/color" diff "github.com/r3labs/diff/v2" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/maptools" diff --git a/pkg/hubtest/coverage.go b/pkg/hubtest/coverage.go index dc3d1d13ad2..4156def06d7 100644 --- a/pkg/hubtest/coverage.go +++ b/pkg/hubtest/coverage.go @@ -2,27 +2,30 @@ package hubtest import ( "bufio" + "errors" "fmt" "os" "path/filepath" "strings" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + 
"github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/go-cs-lib/maptools" - log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" ) type Coverage struct { Name string TestsCount int - PresentIn map[string]bool //poorman's set + PresentIn map[string]bool // poorman's set } func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.APPSEC_RULES)) == 0 { - return nil, fmt.Errorf("no appsec rules in hub index") + return nil, errors.New("no appsec rules in hub index") } // populate from hub, iterate in alphabetical order @@ -40,16 +43,18 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { // parser the expressions a-la-oneagain appsecTestConfigs, err := filepath.Glob(".appsec-tests/*/config.yaml") if err != nil { - return nil, fmt.Errorf("while find appsec-tests config: %s", err) + return nil, fmt.Errorf("while find appsec-tests config: %w", err) } for _, appsecTestConfigPath := range appsecTestConfigs { configFileData := &HubTestItemConfig{} + yamlFile, err := os.ReadFile(appsecTestConfigPath) if err != nil { log.Printf("unable to open appsec test config file '%s': %s", appsecTestConfigPath, err) continue } + err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { return nil, fmt.Errorf("unmarshal: %v", err) @@ -57,14 +62,17 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { for _, appsecRulesFile := range configFileData.AppsecRules { appsecRuleData := &appsec_rule.CustomRule{} + yamlFile, err := os.ReadFile(appsecRulesFile) if err != nil { log.Printf("unable to open appsec rule '%s': %s", appsecRulesFile, err) } + err = yaml.Unmarshal(yamlFile, appsecRuleData) if err != nil { return nil, fmt.Errorf("unmarshal: %v", err) } + appsecRuleName := appsecRuleData.Name for idx, cov := range coverage { @@ -81,7 +89,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { func (h 
*HubTest) GetParsersCoverage() ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.PARSERS)) == 0 { - return nil, fmt.Errorf("no parsers in hub index") + return nil, errors.New("no parsers in hub index") } // populate from hub, iterate in alphabetical order @@ -99,13 +107,13 @@ func (h *HubTest) GetParsersCoverage() ([]Coverage, error) { // parser the expressions a-la-oneagain passerts, err := filepath.Glob(".tests/*/parser.assert") if err != nil { - return nil, fmt.Errorf("while find parser asserts : %s", err) + return nil, fmt.Errorf("while find parser asserts: %w", err) } for _, assert := range passerts { file, err := os.Open(assert) if err != nil { - return nil, fmt.Errorf("while reading %s : %s", assert, err) + return nil, fmt.Errorf("while reading %s: %w", assert, err) } scanner := bufio.NewScanner(file) @@ -167,7 +175,7 @@ func (h *HubTest) GetParsersCoverage() ([]Coverage, error) { func (h *HubTest) GetScenariosCoverage() ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.SCENARIOS)) == 0 { - return nil, fmt.Errorf("no scenarios in hub index") + return nil, errors.New("no scenarios in hub index") } // populate from hub, iterate in alphabetical order @@ -185,13 +193,13 @@ func (h *HubTest) GetScenariosCoverage() ([]Coverage, error) { // parser the expressions a-la-oneagain passerts, err := filepath.Glob(".tests/*/scenario.assert") if err != nil { - return nil, fmt.Errorf("while find scenario asserts : %s", err) + return nil, fmt.Errorf("while find scenario asserts: %w", err) } for _, assert := range passerts { file, err := os.Open(assert) if err != nil { - return nil, fmt.Errorf("while reading %s : %s", assert, err) + return nil, fmt.Errorf("while reading %s: %w", assert, err) } scanner := bufio.NewScanner(file) diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index b8a042f071f..1a2b4863105 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -10,7 +10,7 @@ import ( "strings" log 
"github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -28,7 +28,7 @@ type HubTestItemConfig struct { LogType string `yaml:"log_type,omitempty"` Labels map[string]string `yaml:"labels,omitempty"` IgnoreParsers bool `yaml:"ignore_parsers,omitempty"` // if we test a scenario, we don't want to assert on Parser - OverrideStatics []parser.ExtraField `yaml:"override_statics,omitempty"` //Allow to override statics. Executed before s00 + OverrideStatics []parser.ExtraField `yaml:"override_statics,omitempty"` // Allow to override statics. Executed before s00 } type HubTestItem struct { @@ -200,12 +200,12 @@ func (t *HubTestItem) InstallHub() error { b, err := yaml.Marshal(n) if err != nil { - return fmt.Errorf("unable to marshal overrides: %s", err) + return fmt.Errorf("unable to marshal overrides: %w", err) } tgtFilename := fmt.Sprintf("%s/parsers/s00-raw/00_overrides.yaml", t.RuntimePath) if err := os.WriteFile(tgtFilename, b, os.ModePerm); err != nil { - return fmt.Errorf("unable to write overrides to '%s': %s", tgtFilename, err) + return fmt.Errorf("unable to write overrides to '%s': %w", tgtFilename, err) } } @@ -267,10 +267,10 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } if err := os.Chdir(testPath); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %s", testPath, err) + return fmt.Errorf("can't 'cd' to '%s': %w", testPath, err) } - //machine add + // machine add cmdArgs := []string{"-c", t.RuntimeConfigFilePath, "machines", "add", "testMachine", "--force", "--auto"} cscliRegisterCmd := exec.Command(t.CscliPath, cmdArgs...) @@ -282,7 +282,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } } - //hardcode bouncer key + // hardcode bouncer key cmdArgs = []string{"-c", t.RuntimeConfigFilePath, "bouncers", "add", "appsectests", "-k", TestBouncerApiKey} cscliBouncerCmd := exec.Command(t.CscliPath, cmdArgs...) 
@@ -294,13 +294,13 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } } - //start crowdsec service + // start crowdsec service cmdArgs = []string{"-c", t.RuntimeConfigFilePath} crowdsecDaemon := exec.Command(t.CrowdSecPath, cmdArgs...) crowdsecDaemon.Start() - //wait for the appsec port to be available + // wait for the appsec port to be available if _, err := IsAlive(t.AppSecHost); err != nil { crowdsecLog, err2 := os.ReadFile(crowdsecLogFile) if err2 != nil { @@ -310,27 +310,27 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { log.Errorf("%s\n", string(crowdsecLog)) } - return fmt.Errorf("appsec is down: %s", err) + return fmt.Errorf("appsec is down: %w", err) } // check if the target is available nucleiTargetParsedURL, err := url.Parse(t.NucleiTargetHost) if err != nil { - return fmt.Errorf("unable to parse target '%s': %s", t.NucleiTargetHost, err) + return fmt.Errorf("unable to parse target '%s': %w", t.NucleiTargetHost, err) } nucleiTargetHost := nucleiTargetParsedURL.Host if _, err := IsAlive(nucleiTargetHost); err != nil { - return fmt.Errorf("target is down: %s", err) + return fmt.Errorf("target is down: %w", err) } nucleiConfig := NucleiConfig{ Path: "nuclei", OutputDir: t.RuntimePath, - CmdLineOptions: []string{"-ev", //allow variables from environment - "-nc", //no colors in output - "-dresp", //dump response - "-j", //json output + CmdLineOptions: []string{"-ev", // allow variables from environment + "-nc", // no colors in output + "-dresp", // dump response + "-j", // json output }, } @@ -341,6 +341,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { t.Success = true } else { log.Errorf("Appsec test %s failed: %s", t.Name, err) + crowdsecLog, err := os.ReadFile(crowdsecLogFile) if err != nil { log.Errorf("unable to read crowdsec log file '%s': %s", crowdsecLogFile, err) @@ -355,6 +356,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { t.Success = true } else { log.Errorf("Appsec test %s failed: %s", t.Name, err) + 
crowdsecLog, err := os.ReadFile(crowdsecLogFile) if err != nil { log.Errorf("unable to read crowdsec log file '%s': %s", crowdsecLogFile, err) @@ -376,7 +378,7 @@ func (t *HubTestItem) RunWithLogFile() error { return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) } - currentDir, err := os.Getwd() //xx + currentDir, err := os.Getwd() // xx if err != nil { return fmt.Errorf("can't get current directory: %+v", err) } @@ -397,7 +399,7 @@ func (t *HubTestItem) RunWithLogFile() error { } if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { - return fmt.Errorf("unable to copy .index.json file in '%s': %s", filepath.Join(t.RuntimeHubPath, ".index.json"), err) + return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) } // create results folder @@ -424,12 +426,12 @@ func (t *HubTestItem) RunWithLogFile() error { // copy template patterns folder to runtime folder if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %s", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) } // install the hub in the runtime folder if err = t.InstallHub(); err != nil { - return fmt.Errorf("unable to install hub in '%s': %s", t.RuntimeHubPath, err) + return fmt.Errorf("unable to install hub in '%s': %w", t.RuntimeHubPath, err) } logFile := t.Config.LogFile @@ -437,12 +439,12 @@ func (t *HubTestItem) RunWithLogFile() error { dsn := fmt.Sprintf("file://%s", logFile) if err = os.Chdir(testPath); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %s", testPath, err) + return fmt.Errorf("can't 'cd' to '%s': %w", testPath, err) } logFileStat, err := os.Stat(logFile) if err != nil { - return fmt.Errorf("unable to stat log file '%s': %s", logFile, err) + return 
fmt.Errorf("unable to stat log file '%s': %w", logFile, err) } if logFileStat.Size() == 0 { @@ -481,7 +483,7 @@ func (t *HubTestItem) RunWithLogFile() error { } if err := os.Chdir(currentDir); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %s", currentDir, err) + return fmt.Errorf("can't 'cd' to '%s': %w", currentDir, err) } // assert parsers @@ -498,20 +500,20 @@ func (t *HubTestItem) RunWithLogFile() error { assertFileStat, err := os.Stat(t.ParserAssert.File) if err != nil { - return fmt.Errorf("error while stats '%s': %s", t.ParserAssert.File, err) + return fmt.Errorf("error while stats '%s': %w", t.ParserAssert.File, err) } if assertFileStat.Size() == 0 { assertData, err := t.ParserAssert.AutoGenFromFile(t.ParserResultFile) if err != nil { - return fmt.Errorf("couldn't generate assertion: %s", err) + return fmt.Errorf("couldn't generate assertion: %w", err) } t.ParserAssert.AutoGenAssertData = assertData t.ParserAssert.AutoGenAssert = true } else { if err := t.ParserAssert.AssertFile(t.ParserResultFile); err != nil { - return fmt.Errorf("unable to run assertion on file '%s': %s", t.ParserResultFile, err) + return fmt.Errorf("unable to run assertion on file '%s': %w", t.ParserResultFile, err) } } } @@ -540,20 +542,20 @@ func (t *HubTestItem) RunWithLogFile() error { assertFileStat, err := os.Stat(t.ScenarioAssert.File) if err != nil { - return fmt.Errorf("error while stats '%s': %s", t.ScenarioAssert.File, err) + return fmt.Errorf("error while stats '%s': %w", t.ScenarioAssert.File, err) } if assertFileStat.Size() == 0 { assertData, err := t.ScenarioAssert.AutoGenFromFile(t.ScenarioResultFile) if err != nil { - return fmt.Errorf("couldn't generate assertion: %s", err) + return fmt.Errorf("couldn't generate assertion: %w", err) } t.ScenarioAssert.AutoGenAssertData = assertData t.ScenarioAssert.AutoGenAssert = true } else { if err := t.ScenarioAssert.AssertFile(t.ScenarioResultFile); err != nil { - return fmt.Errorf("unable to run assertion on file '%s': 
%s", t.ScenarioResultFile, err) + return fmt.Errorf("unable to run assertion on file '%s': %w", t.ScenarioResultFile, err) } } } @@ -591,7 +593,7 @@ func (t *HubTestItem) Run() error { } if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { - return fmt.Errorf("unable to copy .index.json file in '%s': %s", filepath.Join(t.RuntimeHubPath, ".index.json"), err) + return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) } // create results folder @@ -618,7 +620,7 @@ func (t *HubTestItem) Run() error { // copy template patterns folder to runtime folder if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %s", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) } // create the appsec-configs dir @@ -626,7 +628,7 @@ func (t *HubTestItem) Run() error { return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimePath, err) } - //if it's an appsec rule test, we need acquis and appsec profile + // if it's an appsec rule test, we need acquis and appsec profile if len(t.Config.AppsecRules) > 0 { // copy template acquis file to runtime folder log.Debugf("copying %s to %s", t.TemplateAcquisPath, t.RuntimeAcquisFilePath) @@ -640,15 +642,15 @@ func (t *HubTestItem) Run() error { if err = Copy(t.TemplateAppsecProfilePath, filepath.Join(t.RuntimePath, "appsec-configs", "config.yaml")); err != nil { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateAppsecProfilePath, filepath.Join(t.RuntimePath, "appsec-configs", "config.yaml"), err) } - } else { //otherwise we drop a blank acquis file + } else { // otherwise we drop a blank acquis file if err = os.WriteFile(t.RuntimeAcquisFilePath, []byte(""), os.ModePerm); err != nil { - return fmt.Errorf("unable to write 
blank acquis file '%s': %s", t.RuntimeAcquisFilePath, err) + return fmt.Errorf("unable to write blank acquis file '%s': %w", t.RuntimeAcquisFilePath, err) } } // install the hub in the runtime folder if err = t.InstallHub(); err != nil { - return fmt.Errorf("unable to install hub in '%s': %s", t.RuntimeHubPath, err) + return fmt.Errorf("unable to install hub in '%s': %w", t.RuntimeHubPath, err) } if t.Config.LogFile != "" { diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index 7eec8e535e5..d79d26fb9d0 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -2,17 +2,19 @@ package hubtest import ( "bufio" + "errors" "fmt" "os" "strings" "github.com/antonmedv/expr" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/go-cs-lib/maptools" ) type AssertFail struct { @@ -69,13 +71,12 @@ func (p *ParserAssert) LoadTest(filename string) error { func (p *ParserAssert) AssertFile(testFile string) error { file, err := os.Open(p.File) - if err != nil { - return fmt.Errorf("failed to open") + return errors.New("failed to open") } if err := p.LoadTest(testFile); err != nil { - return fmt.Errorf("unable to load parser dump file '%s': %s", testFile, err) + return fmt.Errorf("unable to load parser dump file '%s': %w", testFile, err) } scanner := bufio.NewScanner(file) @@ -107,6 +108,7 @@ func (p *ParserAssert) AssertFile(testFile string) error { } match := variableRE.FindStringSubmatch(scanner.Text()) + var variable string if len(match) == 0 { @@ -127,7 +129,7 @@ func (p *ParserAssert) AssertFile(testFile string) error { continue } - //fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) + // fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) } file.Close() @@ -135,7 +137,7 @@ func (p *ParserAssert) AssertFile(testFile 
string) error { if p.NbAssert == 0 { assertData, err := p.AutoGenFromFile(testFile) if err != nil { - return fmt.Errorf("couldn't generate assertion: %s", err) + return fmt.Errorf("couldn't generate assertion: %w", err) } p.AutoGenAssertData = assertData @@ -150,8 +152,8 @@ func (p *ParserAssert) AssertFile(testFile string) error { } func (p *ParserAssert) RunExpression(expression string) (interface{}, error) { - //debug doesn't make much sense with the ability to evaluate "on the fly" - //var debugFilter *exprhelpers.ExprDebugger + // debug doesn't make much sense with the ability to evaluate "on the fly" + // var debugFilter *exprhelpers.ExprDebugger var output interface{} env := map[string]interface{}{"results": *p.TestData} @@ -162,7 +164,7 @@ func (p *ParserAssert) RunExpression(expression string) (interface{}, error) { return output, err } - //dump opcode in trace level + // dump opcode in trace level log.Tracef("%s", runtimeFilter.Disassemble()) output, err = expr.Run(runtimeFilter, env) @@ -183,7 +185,6 @@ func (p *ParserAssert) EvalExpression(expression string) (string, error) { } ret, err := yaml.Marshal(output) - if err != nil { return "", err } @@ -213,16 +214,16 @@ func Escape(val string) string { } func (p *ParserAssert) AutoGenParserAssert() string { - //attempt to autogen parser asserts + // attempt to autogen parser asserts ret := fmt.Sprintf("len(results) == %d\n", len(*p.TestData)) - //sort map keys for consistent order + // sort map keys for consistent order stages := maptools.SortedKeys(*p.TestData) for _, stage := range stages { parsers := (*p.TestData)[stage] - //sort map keys for consistent order + // sort map keys for consistent order pnames := maptools.SortedKeys(parsers) for _, parser := range pnames { diff --git a/pkg/hubtest/scenario_assert.go b/pkg/hubtest/scenario_assert.go index 5195b814ef3..bb004daad49 100644 --- a/pkg/hubtest/scenario_assert.go +++ b/pkg/hubtest/scenario_assert.go @@ -2,6 +2,7 @@ package hubtest import ( "bufio" + 
"errors" "fmt" "io" "os" @@ -10,7 +11,7 @@ import ( "github.com/antonmedv/expr" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -77,13 +78,12 @@ func (s *ScenarioAssert) LoadTest(filename string, bucketpour string) error { func (s *ScenarioAssert) AssertFile(testFile string) error { file, err := os.Open(s.File) - if err != nil { - return fmt.Errorf("failed to open") + return errors.New("failed to open") } if err := s.LoadTest(testFile, ""); err != nil { - return fmt.Errorf("unable to load parser dump file '%s': %s", testFile, err) + return fmt.Errorf("unable to load parser dump file '%s': %w", testFile, err) } scanner := bufio.NewScanner(file) @@ -134,7 +134,7 @@ func (s *ScenarioAssert) AssertFile(testFile string) error { continue } - //fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) + // fmt.Printf(" %s '%s'\n", emoji.GreenSquare, scanner.Text()) } file.Close() @@ -142,7 +142,7 @@ func (s *ScenarioAssert) AssertFile(testFile string) error { if s.NbAssert == 0 { assertData, err := s.AutoGenFromFile(testFile) if err != nil { - return fmt.Errorf("couldn't generate assertion: %s", err) + return fmt.Errorf("couldn't generate assertion: %w", err) } s.AutoGenAssertData = assertData @@ -157,8 +157,8 @@ func (s *ScenarioAssert) AssertFile(testFile string) error { } func (s *ScenarioAssert) RunExpression(expression string) (interface{}, error) { - //debug doesn't make much sense with the ability to evaluate "on the fly" - //var debugFilter *exprhelpers.ExprDebugger + // debug doesn't make much sense with the ability to evaluate "on the fly" + // var debugFilter *exprhelpers.ExprDebugger var output interface{} env := map[string]interface{}{"results": *s.TestData} @@ -171,7 +171,7 @@ func (s *ScenarioAssert) RunExpression(expression string) (interface{}, error) { // log.Warningf("Failed building debugher for %s : %s", assert, err) // } - 
//dump opcode in trace level + // dump opcode in trace level log.Tracef("%s", runtimeFilter.Disassemble()) output, err = expr.Run(runtimeFilter, map[string]interface{}{"results": *s.TestData}) From c70a2fe9bf8122a154a89d7a54875b55fb71abf1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 11:01:31 +0200 Subject: [PATCH 110/581] update go dependencies (#2968) --- go.mod | 48 ++++++++++++----------- go.sum | 120 ++++++++++++++++++++++++++++----------------------------- 2 files changed, 85 insertions(+), 83 deletions(-) diff --git a/go.mod b/go.mod index 8afc2b8a095..d78908db0c6 100644 --- a/go.mod +++ b/go.mod @@ -11,18 +11,18 @@ require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 - github.com/agext/levenshtein v1.2.1 + github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 github.com/antonmedv/expr v1.15.3 - github.com/appleboy/gin-jwt/v2 v2.8.0 + github.com/appleboy/gin-jwt/v2 v2.9.2 github.com/aquasecurity/table v1.8.0 - github.com/aws/aws-lambda-go v1.41.0 - github.com/aws/aws-sdk-go v1.48.15 - github.com/beevik/etree v1.1.0 - github.com/blackfireio/osinfo v1.0.3 + github.com/aws/aws-lambda-go v1.47.0 + github.com/aws/aws-sdk-go v1.52.0 + github.com/beevik/etree v1.3.0 + github.com/blackfireio/osinfo v1.0.5 github.com/bluele/gcache v0.0.2 github.com/buger/jsonparser v1.1.1 - github.com/c-robinson/iplib v1.0.3 + github.com/c-robinson/iplib v1.0.8 github.com/cespare/xxhash/v2 v2.2.0 github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 @@ -31,13 +31,13 @@ require ( github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 - github.com/dghubble/sling v1.3.0 + github.com/dghubble/sling v1.4.2 github.com/docker/docker v24.0.9+incompatible github.com/docker/go-connections v0.4.0 - github.com/fatih/color 
v1.15.0 - github.com/fsnotify/fsnotify v1.6.0 + github.com/fatih/color v1.16.0 + github.com/fsnotify/fsnotify v1.7.0 github.com/gin-gonic/gin v1.9.1 - github.com/go-co-op/gocron v1.17.0 + github.com/go-co-op/gocron v1.37.0 github.com/go-openapi/errors v0.20.1 github.com/go-openapi/strfmt v0.19.11 github.com/go-openapi/swag v0.22.3 @@ -46,8 +46,8 @@ require ( github.com/goccy/go-yaml v1.11.0 github.com/gofrs/uuid v4.0.0+incompatible github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/google/go-querystring v1.0.0 - github.com/google/uuid v1.3.0 + github.com/google/go-querystring v1.1.0 + github.com/google/uuid v1.6.0 github.com/google/winops v0.0.0-20230712152054-af9b550d0601 github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e github.com/gorilla/websocket v1.5.0 @@ -60,7 +60,7 @@ require ( github.com/jarcoal/httpmock v1.1.0 github.com/jszwec/csvutil v1.5.1 github.com/lithammer/dedent v1.1.0 - github.com/mattn/go-isatty v0.0.19 + github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/nxadm/tail v1.4.8 @@ -103,14 +103,15 @@ require ( github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bytedance/sonic v1.9.1 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/bytedance/sonic v1.10.2 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect + github.com/chenzhuoyu/iasm v0.9.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // 
indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -123,7 +124,7 @@ require ( github.com/go-openapi/spec v0.20.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.14.0 // indirect + github.com/go-playground/validator/v10 v10.17.0 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -148,8 +149,8 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/compress v1.17.3 // indirect - github.com/klauspost/cpuid/v2 v2.2.4 // indirect - github.com/leodido/go-urn v1.2.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magefile/mage v1.15.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -169,7 +170,7 @@ require ( github.com/oklog/run v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pelletier/go-toml/v2 v2.1.1 // indirect github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -192,12 +193,13 @@ require ( github.com/tklauser/numcpus v0.6.0 // indirect github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/ugorji/go/codec v1.2.11 // indirect + github.com/ugorji/go/codec v1.2.12 // indirect 
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect - golang.org/x/arch v0.3.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + golang.org/x/arch v0.7.0 // indirect golang.org/x/net v0.24.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/term v0.19.0 // indirect diff --git a/go.sum b/go.sum index 750439e4f0e..29af3c795fc 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDe github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= -github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:pzStYMLAXM7CNQjS/Wn+zK9MUxDhSUNfVvnHsyQyjs0= github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:ilK+u7u1HoqaDk0mjhh27QJB7PyWMreGffEvOCoEKiY= @@ -43,8 +43,8 @@ github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod 
h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/appleboy/gin-jwt/v2 v2.8.0 h1:Glo7cb9eBR+hj8Y7WzgfkOlqCaNLjP+RV4dNO3fpdps= -github.com/appleboy/gin-jwt/v2 v2.8.0/go.mod h1:KsK7E8HTvRg3vOiumTsr/ntNTHbZ3IbHLe4Eto31p7k= +github.com/appleboy/gin-jwt/v2 v2.9.2 h1:GeS3lm9mb9HMmj7+GNjYUtpp3V1DAQ1TkUFa5poiZ7Y= +github.com/appleboy/gin-jwt/v2 v2.9.2/go.mod h1:mxGjKt9Lrx9Xusy1SrnmsCJMZG6UJwmdHN9bN27/QDw= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= github.com/aquasecurity/table v1.8.0 h1:9ntpSwrUfjrM6/YviArlx/ZBGd6ix8W+MtojQcM7tv0= @@ -55,33 +55,38 @@ github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:o github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y= -github.com/aws/aws-lambda-go v1.41.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM= +github.com/aws/aws-lambda-go v1.47.0 h1:0H8s0vumYx/YKs4sE7YM0ktwL2eWse+kfopsRI1sXVI= +github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7RfgJv23DymV8A= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.48.15 h1:Gad2C4pLzuZDd5CA0Rvkfko6qUDDTOYru145gkO7w/Y= -github.com/aws/aws-sdk-go v1.48.15/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= -github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= +github.com/aws/aws-sdk-go v1.52.0 
h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI= +github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU= +github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blackfireio/osinfo v1.0.3 h1:Yk2t2GTPjBcESv6nDSWZKO87bGMQgO+Hi9OoXPpxX8c= -github.com/blackfireio/osinfo v1.0.3/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA= +github.com/blackfireio/osinfo v1.0.5 h1:6hlaWzfcpb87gRmznVf7wSdhysGqLRz9V/xuSdCEXrA= +github.com/blackfireio/osinfo v1.0.5/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA= github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= -github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= -github.com/c-robinson/iplib v1.0.3 h1:NG0UF0GoEsrC1/vyfX1Lx2Ss7CySWl3KqqXh3q4DdPU= -github.com/c-robinson/iplib v1.0.3/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= +github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= +github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZFE= 
+github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= +github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= +github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= +github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= +github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/corazawaf/libinjection-go v0.1.2 h1:oeiV9pc5rvJ+2oqOqXEAMJousPpGiup6f7Y3nZj5GoM= @@ -112,8 +117,8 @@ github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dghubble/sling v1.3.0 h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU= 
-github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= +github.com/dghubble/sling v1.4.2 h1:vs1HIGBbSl2SEALyU+irpYFLZMfc49Fp+jYryFebQjM= +github.com/dghubble/sling v1.4.2/go.mod h1:o0arCOz0HwfqYQJLrRtqunaWOn4X6jxE/6ORKRpVTD4= github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= @@ -125,24 +130,23 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify 
v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-co-op/gocron v1.17.0 h1:IixLXsti+Qo0wMvmn6Kmjp2csk2ykpkcL+EmHmST18w= -github.com/go-co-op/gocron v1.17.0/go.mod h1:IpDBSaJOVfFw7hXZuTag3SCSkqazXBBUkbQ1m1aesBs= +github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= +github.com/go-co-op/gocron v1.37.0/go.mod h1:3L/n6BkO7ABj+TrfSVXLRzsP26zmikL4ISkLQ0O8iNY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -241,18 +245,14 @@ github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0 github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= github.com/go-openapi/validate v0.20.0 h1:pzutNCCBZGZlE+u8HD3JZyWdc/TVbtVwlWUp8/vgUKk= github.com/go-openapi/validate v0.20.0/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= 
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= -github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ4E5T9gDA0AIH74= +github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -294,7 +294,6 @@ github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod 
h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= @@ -302,7 +301,6 @@ github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= @@ -317,16 +315,17 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/winops v0.0.0-20230712152054-af9b550d0601 h1:XvlrmqZIuwxuRE88S9mkxX+FkV+YakqbiAC5Z4OzDnM= github.com/google/winops v0.0.0-20230712152054-af9b550d0601/go.mod h1:rT1mcjzuvcDDbRmUTsoH6kV0DG91AkFe9UCjASraK5I= github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e h1:XmA6L9IPRdUr28a+SK/oMchGgQy159wvzXA5tJ7l+40= @@ -417,7 +416,6 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jszwec/csvutil v1.5.1 h1:c3GFBhj6DFMUl4dMK3+B6rz2+LWWS/e9VJiVJ9t9kfQ= @@ -436,13 +434,15 @@ github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHU github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= github.com/klauspost/compress v1.17.3/go.mod 
h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= -github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -453,9 +453,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= 
-github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/leodido/go-urn v1.3.0 h1:jX8FDLfW4ThVXctBNZ+3cIWnCSnrACDV73r76dy0aQQ= +github.com/leodido/go-urn v1.3.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -490,8 +489,8 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= @@ -525,7 +524,6 @@ github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 
v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= @@ -553,13 +551,14 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= +github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e h1:POJco99aNgosh92lGqmx7L1ei+kCymivB/419SD15PQ= github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -597,6 +596,8 @@ 
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzG github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -661,7 +662,6 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tetratelabs/wazero v1.2.1 h1:J4X2hrGzJvt+wqltuvcSjHQ7ujQxA9gb6PeMs4qlUWs= github.com/tetratelabs/wazero v1.2.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= -github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -678,10 +678,8 @@ github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 h1:PM5hJF7HVfNWmCjM github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208/go.mod h1:BzWtXXrXzZUvMacR0oF/fbDDgUPO8L36tDMmRAf14ns= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go v1.1.7/go.mod 
h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= +github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 h1:UFHFmFfixpmfRBcxuu+LA9l8MdURWVdVNUHxO5n1d2w= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26/go.mod h1:IGhd0qMDsUa9acVjsbsT7bu3ktadtGOHI79+idTew/M= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= @@ -728,6 +726,9 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -736,8 +737,8 @@ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod 
h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= -golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -831,11 +832,9 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -952,6 +951,7 @@ k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 
v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= From 91fbc6353368349ca1fbdf0eb0b01835827409af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 12:56:41 +0200 Subject: [PATCH 111/581] db: review update timestamps, immutable columns (#2981) * orm: correct behavior of created_at, updated_at, define immutable fields * remove updatedefault for last_push, last_heartbeat * re-generate db schema * update last_push in CreateAlert() * lint --- pkg/database/alerts.go | 124 +++++---- pkg/database/ent/alert.go | 22 +- pkg/database/ent/alert/alert.go | 2 - pkg/database/ent/alert/where.go | 20 -- pkg/database/ent/alert_create.go | 10 +- pkg/database/ent/alert_update.go | 66 +---- pkg/database/ent/bouncer.go | 22 +- pkg/database/ent/bouncer/bouncer.go | 2 - pkg/database/ent/bouncer/where.go | 20 -- pkg/database/ent/bouncer_create.go | 10 +- pkg/database/ent/bouncer_update.go | 52 +--- pkg/database/ent/configitem.go | 22 +- pkg/database/ent/configitem/configitem.go | 2 - pkg/database/ent/configitem/where.go | 20 -- pkg/database/ent/configitem_create.go | 10 +- pkg/database/ent/configitem_update.go | 66 +---- pkg/database/ent/decision.go | 22 +- pkg/database/ent/decision/decision.go | 2 - pkg/database/ent/decision/where.go | 20 -- pkg/database/ent/decision_create.go | 10 +- pkg/database/ent/decision_update.go | 66 +---- pkg/database/ent/event.go | 22 +- pkg/database/ent/event/event.go | 2 - 
pkg/database/ent/event/where.go | 20 -- pkg/database/ent/event_create.go | 10 +- pkg/database/ent/event_update.go | 66 +---- pkg/database/ent/lock_update.go | 34 --- pkg/database/ent/machine.go | 22 +- pkg/database/ent/machine/machine.go | 6 - pkg/database/ent/machine/where.go | 20 -- pkg/database/ent/machine_create.go | 10 +- pkg/database/ent/machine_update.go | 148 +++-------- pkg/database/ent/meta.go | 22 +- pkg/database/ent/meta/meta.go | 2 - pkg/database/ent/meta/where.go | 20 -- pkg/database/ent/meta_create.go | 10 +- pkg/database/ent/meta_update.go | 52 +--- pkg/database/ent/migrate/schema.go | 28 +- pkg/database/ent/mutation.go | 299 ++-------------------- pkg/database/ent/runtime.go | 18 -- pkg/database/ent/schema/alert.go | 4 +- pkg/database/ent/schema/bouncer.go | 4 +- pkg/database/ent/schema/config.go | 7 +- pkg/database/ent/schema/decision.go | 4 +- pkg/database/ent/schema/event.go | 4 +- pkg/database/ent/schema/lock.go | 2 +- pkg/database/ent/schema/machine.go | 12 +- pkg/database/ent/schema/meta.go | 5 +- pkg/database/machines.go | 8 - 49 files changed, 300 insertions(+), 1151 deletions(-) diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 5559cbb3972..602846eb31a 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -9,9 +9,8 @@ import ( "strings" "time" - "github.com/mattn/go-sqlite3" - "github.com/davecgh/go-spew/spew" + "github.com/mattn/go-sqlite3" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -134,7 +133,7 @@ func formatAlertAsString(machineID string, alert *models.Alert) []string { // if some associated decisions are missing (ie. 
previous insert ended up in error) it inserts them func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) (string, error) { if alertItem.UUID == "" { - return "", fmt.Errorf("alert UUID is empty") + return "", errors.New("alert UUID is empty") } alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(c.CTX) @@ -143,7 +142,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) return "", fmt.Errorf("unable to query alerts for uuid %s: %w", alertItem.UUID, err) } - //alert wasn't found, insert it (expected hotpath) + // alert wasn't found, insert it (expected hotpath) if ent.IsNotFound(err) || len(alerts) == 0 { alertIDs, err := c.CreateAlert(machineID, []*models.Alert{alertItem}) if err != nil { @@ -153,14 +152,14 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) return alertIDs[0], nil } - //this should never happen + // this should never happen if len(alerts) > 1 { return "", fmt.Errorf("multiple alerts found for uuid %s", alertItem.UUID) } log.Infof("Alert %s already exists, checking associated decisions", alertItem.UUID) - //alert is found, check for any missing decisions + // alert is found, check for any missing decisions newUuids := make([]string, len(alertItem.Decisions)) for i, decItem := range alertItem.Decisions { @@ -203,14 +202,16 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) } } - //add missing decisions + // add missing decisions log.Debugf("Adding %d missing decisions to alert %s", len(missingDecisions), foundAlert.UUID) decisionBuilders := []*ent.DecisionCreate{} for _, decisionItem := range missingDecisions { - var start_ip, start_sfx, end_ip, end_sfx int64 - var sz int + var ( + start_ip, start_sfx, end_ip, end_sfx int64 + sz int + ) /*if the scope is IP or Range, convert the value to integers */ if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == 
"range" { @@ -227,7 +228,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) continue } - //use the created_at from the alert instead + // use the created_at from the alert instead alertTime, err := time.Parse(time.RFC3339, alertItem.CreatedAt) if err != nil { log.Errorf("unable to parse alert time %s : %s", alertItem.CreatedAt, err) @@ -268,7 +269,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) decisions = append(decisions, decisionsCreateRet...) } - //now that we bulk created missing decisions, let's update the alert + // now that we bulk created missing decisions, let's update the alert decisionChunks := slicetools.Chunks(decisions, c.decisionBulkSize) @@ -288,11 +289,11 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) // 2nd pull, you get decisions [1,2,3,4]. it inserts [1,2,3,4] and will try to delete [1,2,3,4] with a different alert ID and same origin func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, int, error) { if alertItem == nil { - return 0, 0, 0, fmt.Errorf("nil alert") + return 0, 0, 0, errors.New("nil alert") } if alertItem.StartAt == nil { - return 0, 0, 0, fmt.Errorf("nil start_at") + return 0, 0, 0, errors.New("nil start_at") } startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) @@ -301,7 +302,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in } if alertItem.StopAt == nil { - return 0, 0, 0, fmt.Errorf("nil stop_at") + return 0, 0, 0, errors.New("nil stop_at") } stopAtTime, err := time.Parse(time.RFC3339, *alertItem.StopAt) @@ -367,8 +368,10 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in valueList := make([]string, 0, len(alertItem.Decisions)) for _, decisionItem := range alertItem.Decisions { - var start_ip, start_sfx, end_ip, end_sfx int64 - var sz int + var ( + start_ip, start_sfx, end_ip, end_sfx int64 + sz int + ) if 
decisionItem.Duration == nil { log.Warning("nil duration in community decision") @@ -487,8 +490,10 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis decisionCreate := []*ent.DecisionCreate{} for _, decisionItem := range decisions { - var start_ip, start_sfx, end_ip, end_sfx int64 - var sz int + var ( + start_ip, start_sfx, end_ip, end_sfx int64 + sz int + ) duration, err := time.ParseDuration(*decisionItem.Duration) if err != nil { @@ -539,8 +544,10 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ alertDecisions := [][]*ent.Decision{} for _, alertItem := range alerts { - var metas []*ent.Meta - var events []*ent.Event + var ( + metas []*ent.Meta + events []*ent.Event + ) startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) if err != nil { @@ -560,7 +567,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ c.Log.Info(disp) } - //let's track when we strip or drop data, notify outside of loop to avoid spam + // let's track when we strip or drop data, notify outside of loop to avoid spam stripped := false dropped := false @@ -580,7 +587,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ return nil, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err) } - //the serialized field is too big, let's try to progressively strip it + // the serialized field is too big, let's try to progressively strip it if event.SerializedValidator(string(marshallMetas)) != nil { stripped = true @@ -606,7 +613,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ stripSize /= 2 } - //nothing worked, drop it + // nothing worked, drop it if !valid { dropped = true stripped = false @@ -635,17 +642,21 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ if len(alertItem.Meta) > 0 { metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta)) + for i, metaItem := range 
alertItem.Meta { key := metaItem.Key value := metaItem.Value + if len(metaItem.Value) > 4095 { c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) value = value[:4095] } + if len(metaItem.Key) > 255 { c.Log.Warningf("truncated meta %s : key too long", metaItem.Key) key = key[:255] } + metaBulk[i] = c.Ent.Meta.Create(). SetKey(key). SetValue(value) @@ -765,8 +776,10 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ } func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) { - var owner *ent.Machine - var err error + var ( + owner *ent.Machine + err error + ) if machineID != "" { owner, err = c.QueryMachineByID(machineID) @@ -795,17 +808,27 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str alertIDs = append(alertIDs, ids...) } + if owner != nil { + err = owner.Update().SetLastPush(time.Now().UTC()).Exec(c.CTX) + if err != nil { + return nil, fmt.Errorf("machine '%s': %w", machineID, err) + } + } + return alertIDs, nil } func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { predicates := make([]predicate.Alert, 0) - var err error - var start_ip, start_sfx, end_ip, end_sfx int64 - var hasActiveDecision bool - var ip_sz int - var contains = true + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + hasActiveDecision bool + ip_sz int + ) + + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ @@ -829,7 +852,7 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } case "scope": - var scope = value[0] + scope := value[0] if strings.ToLower(scope) == "ip" { scope = types.Ip } else if strings.ToLower(scope) == "range" { @@ -886,17 +909,17 @@ func 
AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) case "origin": predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) - case "include_capi": //allows to exclude one or more specific origins + case "include_capi": // allows to exclude one or more specific origins if value[0] == "false" { predicates = append(predicates, alert.And( - //do not show alerts with active decisions having origin CAPI or lists + // do not show alerts with active decisions having origin CAPI or lists alert.And( alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), ), alert.Not( alert.And( - //do not show neither alerts with no decisions if the Source Scope is lists: or CAPI + // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI alert.Not(alert.HasDecisions()), alert.Or( alert.SourceScopeHasPrefix(types.ListOrigin+":"), @@ -906,7 +929,6 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e ), ), ) - } else if value[0] != "true" { log.Errorf("Invalid bool '%s' for include_capi", value[0]) } @@ -950,48 +972,48 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e } else if ip_sz == 16 { if contains { /*decision contains {start_ip,end_ip}*/ predicates = append(predicates, alert.And( - //matching addr size + // matching addr size alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), alert.Or( - //decision.start_ip < query.start_ip + // decision.start_ip < query.start_ip alert.HasDecisionsWith(decision.StartIPLT(start_ip)), alert.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - //decision.start_suffix <= query.start_suffix + // decision.start_suffix <= query.start_suffix 
alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), )), alert.Or( - //decision.end_ip > query.end_ip + // decision.end_ip > query.end_ip alert.HasDecisionsWith(decision.EndIPGT(end_ip)), alert.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - //decision.end_suffix >= query.end_suffix + // decision.end_suffix >= query.end_suffix alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), ), ), )) } else { /*decision is contained within {start_ip,end_ip}*/ predicates = append(predicates, alert.And( - //matching addr size + // matching addr size alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), alert.Or( - //decision.start_ip > query.start_ip + // decision.start_ip > query.start_ip alert.HasDecisionsWith(decision.StartIPGT(start_ip)), alert.And( - //decision.start_ip == query.start_ip + // decision.start_ip == query.start_ip alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - //decision.start_suffix >= query.start_suffix + // decision.start_suffix >= query.start_suffix alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), )), alert.Or( - //decision.end_ip < query.end_ip + // decision.end_ip < query.end_ip alert.HasDecisionsWith(decision.EndIPLT(end_ip)), alert.And( - //decision.end_ip == query.end_ip + // decision.end_ip == query.end_ip alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - //decision.end_suffix <= query.end_suffix + // decision.end_suffix <= query.end_suffix alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), ), ), @@ -1024,13 +1046,11 @@ func (c *Client) AlertsCountPerScenario(filters map[string][]string) (map[string query := c.Ent.Alert.Query() query, err := BuildAlertRequestFromFilter(query, filters) - if err != nil { return nil, fmt.Errorf("failed to build alert request: %w", err) } err = query.GroupBy(alert.FieldScenario).Aggregate(ent.Count()).Scan(ctx, &res) - if err != nil { return nil, fmt.Errorf("failed to count alerts per scenario: %w", 
err) } @@ -1081,7 +1101,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, return nil, err } - //only if with_decisions is present and set to false, we exclude this + // only if with_decisions is present and set to false, we exclude this if val, ok := filter["with_decisions"]; ok && val[0] == "false" { c.Log.Debugf("skipping decisions") } else { diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go index 5cb4d1a352c..6da9f0efe76 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -19,9 +19,9 @@ type Alert struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Scenario holds the value of the "scenario" field. Scenario string `json:"scenario,omitempty"` // BucketId holds the value of the "bucketId" field. 
@@ -168,15 +168,13 @@ func (a *Alert) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - a.CreatedAt = new(time.Time) - *a.CreatedAt = value.Time + a.CreatedAt = value.Time } case alert.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - a.UpdatedAt = new(time.Time) - *a.UpdatedAt = value.Time + a.UpdatedAt = value.Time } case alert.FieldScenario: if value, ok := values[i].(*sql.NullString); !ok { @@ -367,15 +365,11 @@ func (a *Alert) String() string { var builder strings.Builder builder.WriteString("Alert(") builder.WriteString(fmt.Sprintf("id=%v, ", a.ID)) - if v := a.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(a.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := a.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(a.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("scenario=") builder.WriteString(a.Scenario) diff --git a/pkg/database/ent/alert/alert.go b/pkg/database/ent/alert/alert.go index eb9f1d10788..16e0b019e14 100644 --- a/pkg/database/ent/alert/alert.go +++ b/pkg/database/ent/alert/alert.go @@ -152,8 +152,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go index 516ead50636..c109b78704b 100644 --- a/pkg/database/ent/alert/where.go +++ b/pkg/database/ent/alert/where.go @@ -210,16 +210,6 @@ func CreatedAtLTE(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Alert { - return predicate.Alert(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Alert { - return predicate.Alert(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldUpdatedAt, v)) @@ -260,16 +250,6 @@ func UpdatedAtLTE(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Alert { - return predicate.Alert(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Alert { - return predicate.Alert(sql.FieldNotNull(FieldUpdatedAt)) -} - // ScenarioEQ applies the EQ predicate on the "scenario" field. func ScenarioEQ(v string) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldScenario, v)) diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go index c7498442c06..45a6e40b64f 100644 --- a/pkg/database/ent/alert_create.go +++ b/pkg/database/ent/alert_create.go @@ -473,6 +473,12 @@ func (ac *AlertCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (ac *AlertCreate) check() error { + if _, ok := ac.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Alert.created_at"`)} + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Alert.updated_at"`)} + } if _, ok := ac.mutation.Scenario(); !ok { return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "Alert.scenario"`)} } @@ -507,11 +513,11 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { ) if value, ok := ac.mutation.CreatedAt(); ok { _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := ac.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := ac.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index f8a4d108527..8b88c35c7d7 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -32,30 +32,12 @@ func (au *AlertUpdate) Where(ps ...predicate.Alert) *AlertUpdate { return au } -// SetCreatedAt sets the "created_at" field. -func (au *AlertUpdate) SetCreatedAt(t time.Time) *AlertUpdate { - au.mutation.SetCreatedAt(t) - return au -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (au *AlertUpdate) ClearCreatedAt() *AlertUpdate { - au.mutation.ClearCreatedAt() - return au -} - // SetUpdatedAt sets the "updated_at" field. func (au *AlertUpdate) SetUpdatedAt(t time.Time) *AlertUpdate { au.mutation.SetUpdatedAt(t) return au } -// ClearUpdatedAt clears the value of the "updated_at" field. 
-func (au *AlertUpdate) ClearUpdatedAt() *AlertUpdate { - au.mutation.ClearUpdatedAt() - return au -} - // SetScenario sets the "scenario" field. func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { au.mutation.SetScenario(s) @@ -660,11 +642,7 @@ func (au *AlertUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (au *AlertUpdate) defaults() { - if _, ok := au.mutation.CreatedAt(); !ok && !au.mutation.CreatedAtCleared() { - v := alert.UpdateDefaultCreatedAt() - au.mutation.SetCreatedAt(v) - } - if _, ok := au.mutation.UpdatedAt(); !ok && !au.mutation.UpdatedAtCleared() { + if _, ok := au.mutation.UpdatedAt(); !ok { v := alert.UpdateDefaultUpdatedAt() au.mutation.SetUpdatedAt(v) } @@ -679,18 +657,9 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := au.mutation.CreatedAt(); ok { - _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - } - if au.mutation.CreatedAtCleared() { - _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) - } if value, ok := au.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if au.mutation.UpdatedAtCleared() { - _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) - } if value, ok := au.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) } @@ -1007,30 +976,12 @@ type AlertUpdateOne struct { mutation *AlertMutation } -// SetCreatedAt sets the "created_at" field. -func (auo *AlertUpdateOne) SetCreatedAt(t time.Time) *AlertUpdateOne { - auo.mutation.SetCreatedAt(t) - return auo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (auo *AlertUpdateOne) ClearCreatedAt() *AlertUpdateOne { - auo.mutation.ClearCreatedAt() - return auo -} - // SetUpdatedAt sets the "updated_at" field. 
func (auo *AlertUpdateOne) SetUpdatedAt(t time.Time) *AlertUpdateOne { auo.mutation.SetUpdatedAt(t) return auo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (auo *AlertUpdateOne) ClearUpdatedAt() *AlertUpdateOne { - auo.mutation.ClearUpdatedAt() - return auo -} - // SetScenario sets the "scenario" field. func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { auo.mutation.SetScenario(s) @@ -1648,11 +1599,7 @@ func (auo *AlertUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (auo *AlertUpdateOne) defaults() { - if _, ok := auo.mutation.CreatedAt(); !ok && !auo.mutation.CreatedAtCleared() { - v := alert.UpdateDefaultCreatedAt() - auo.mutation.SetCreatedAt(v) - } - if _, ok := auo.mutation.UpdatedAt(); !ok && !auo.mutation.UpdatedAtCleared() { + if _, ok := auo.mutation.UpdatedAt(); !ok { v := alert.UpdateDefaultUpdatedAt() auo.mutation.SetUpdatedAt(v) } @@ -1684,18 +1631,9 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error } } } - if value, ok := auo.mutation.CreatedAt(); ok { - _spec.SetField(alert.FieldCreatedAt, field.TypeTime, value) - } - if auo.mutation.CreatedAtCleared() { - _spec.ClearField(alert.FieldCreatedAt, field.TypeTime) - } if value, ok := auo.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if auo.mutation.UpdatedAtCleared() { - _spec.ClearField(alert.FieldUpdatedAt, field.TypeTime) - } if value, ok := auo.mutation.Scenario(); ok { _spec.SetField(alert.FieldScenario, field.TypeString, value) } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 203f49a432d..7a3b280f53d 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -18,9 +18,9 @@ type Bouncer struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. 
- CreatedAt *time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"updated_at"` // Name holds the value of the "name" field. Name string `json:"name"` // APIKey holds the value of the "api_key" field. @@ -80,15 +80,13 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - b.CreatedAt = new(time.Time) - *b.CreatedAt = value.Time + b.CreatedAt = value.Time } case bouncer.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - b.UpdatedAt = new(time.Time) - *b.UpdatedAt = value.Time + b.UpdatedAt = value.Time } case bouncer.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -180,15 +178,11 @@ func (b *Bouncer) String() string { var builder strings.Builder builder.WriteString("Bouncer(") builder.WriteString(fmt.Sprintf("id=%v, ", b.ID)) - if v := b.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(b.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := b.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(b.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("name=") builder.WriteString(b.Name) diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 24d230d3b54..7683c07752b 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -68,8 +68,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt 
holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index 5bf721dbf51..ee28d7bb6ff 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -149,16 +149,6 @@ func CreatedAtLTE(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldUpdatedAt, v)) @@ -199,16 +189,6 @@ func UpdatedAtLTE(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotNull(FieldUpdatedAt)) -} - // NameEQ applies the EQ predicate on the "name" field. 
func NameEQ(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldName, v)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 3d08277dcfb..ceff4db583e 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -213,6 +213,12 @@ func (bc *BouncerCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (bc *BouncerCreate) check() error { + if _, ok := bc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Bouncer.created_at"`)} + } + if _, ok := bc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Bouncer.updated_at"`)} + } if _, ok := bc.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Bouncer.name"`)} } @@ -256,11 +262,11 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { ) if value, ok := bc.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := bc.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := bc.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index b3f5e1a5540..76968cf5202 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -34,9 +34,11 @@ func (bu *BouncerUpdate) SetCreatedAt(t time.Time) *BouncerUpdate { return bu } -// ClearCreatedAt clears the value of the "created_at" field. 
-func (bu *BouncerUpdate) ClearCreatedAt() *BouncerUpdate { - bu.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableCreatedAt(t *time.Time) *BouncerUpdate { + if t != nil { + bu.SetCreatedAt(*t) + } return bu } @@ -46,12 +48,6 @@ func (bu *BouncerUpdate) SetUpdatedAt(t time.Time) *BouncerUpdate { return bu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (bu *BouncerUpdate) ClearUpdatedAt() *BouncerUpdate { - bu.mutation.ClearUpdatedAt() - return bu -} - // SetName sets the "name" field. func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { bu.mutation.SetName(s) @@ -237,11 +233,7 @@ func (bu *BouncerUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (bu *BouncerUpdate) defaults() { - if _, ok := bu.mutation.CreatedAt(); !ok && !bu.mutation.CreatedAtCleared() { - v := bouncer.UpdateDefaultCreatedAt() - bu.mutation.SetCreatedAt(v) - } - if _, ok := bu.mutation.UpdatedAt(); !ok && !bu.mutation.UpdatedAtCleared() { + if _, ok := bu.mutation.UpdatedAt(); !ok { v := bouncer.UpdateDefaultUpdatedAt() bu.mutation.SetUpdatedAt(v) } @@ -259,15 +251,9 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := bu.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } - if bu.mutation.CreatedAtCleared() { - _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) - } if value, ok := bu.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if bu.mutation.UpdatedAtCleared() { - _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) - } if value, ok := bu.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) } @@ -333,9 +319,11 @@ func (buo *BouncerUpdateOne) SetCreatedAt(t time.Time) *BouncerUpdateOne { return buo } -// ClearCreatedAt clears the value of the 
"created_at" field. -func (buo *BouncerUpdateOne) ClearCreatedAt() *BouncerUpdateOne { - buo.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableCreatedAt(t *time.Time) *BouncerUpdateOne { + if t != nil { + buo.SetCreatedAt(*t) + } return buo } @@ -345,12 +333,6 @@ func (buo *BouncerUpdateOne) SetUpdatedAt(t time.Time) *BouncerUpdateOne { return buo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (buo *BouncerUpdateOne) ClearUpdatedAt() *BouncerUpdateOne { - buo.mutation.ClearUpdatedAt() - return buo -} - // SetName sets the "name" field. func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { buo.mutation.SetName(s) @@ -549,11 +531,7 @@ func (buo *BouncerUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (buo *BouncerUpdateOne) defaults() { - if _, ok := buo.mutation.CreatedAt(); !ok && !buo.mutation.CreatedAtCleared() { - v := bouncer.UpdateDefaultCreatedAt() - buo.mutation.SetCreatedAt(v) - } - if _, ok := buo.mutation.UpdatedAt(); !ok && !buo.mutation.UpdatedAtCleared() { + if _, ok := buo.mutation.UpdatedAt(); !ok { v := bouncer.UpdateDefaultUpdatedAt() buo.mutation.SetUpdatedAt(v) } @@ -588,15 +566,9 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if value, ok := buo.mutation.CreatedAt(); ok { _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) } - if buo.mutation.CreatedAtCleared() { - _spec.ClearField(bouncer.FieldCreatedAt, field.TypeTime) - } if value, ok := buo.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if buo.mutation.UpdatedAtCleared() { - _spec.ClearField(bouncer.FieldUpdatedAt, field.TypeTime) - } if value, ok := buo.mutation.Name(); ok { _spec.SetField(bouncer.FieldName, field.TypeString, value) } diff --git a/pkg/database/ent/configitem.go 
b/pkg/database/ent/configitem.go index 467e54386f6..bdf23ef4948 100644 --- a/pkg/database/ent/configitem.go +++ b/pkg/database/ent/configitem.go @@ -18,9 +18,9 @@ type ConfigItem struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"updated_at"` // Name holds the value of the "name" field. Name string `json:"name"` // Value holds the value of the "value" field. @@ -64,15 +64,13 @@ func (ci *ConfigItem) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - ci.CreatedAt = new(time.Time) - *ci.CreatedAt = value.Time + ci.CreatedAt = value.Time } case configitem.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - ci.UpdatedAt = new(time.Time) - *ci.UpdatedAt = value.Time + ci.UpdatedAt = value.Time } case configitem.FieldName: if value, ok := values[i].(*sql.NullString); !ok { @@ -122,15 +120,11 @@ func (ci *ConfigItem) String() string { var builder strings.Builder builder.WriteString("ConfigItem(") builder.WriteString(fmt.Sprintf("id=%v, ", ci.ID)) - if v := ci.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(ci.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := ci.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(ci.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("name=") 
builder.WriteString(ci.Name) diff --git a/pkg/database/ent/configitem/configitem.go b/pkg/database/ent/configitem/configitem.go index a6ff6c32d57..611d81a3960 100644 --- a/pkg/database/ent/configitem/configitem.go +++ b/pkg/database/ent/configitem/configitem.go @@ -47,8 +47,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/configitem/where.go b/pkg/database/ent/configitem/where.go index 767f0b420f1..48ae792fd72 100644 --- a/pkg/database/ent/configitem/where.go +++ b/pkg/database/ent/configitem/where.go @@ -114,16 +114,6 @@ func CreatedAtLTE(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldEQ(FieldUpdatedAt, v)) @@ -164,16 +154,6 @@ func UpdatedAtLTE(v time.Time) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. 
-func UpdatedAtIsNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.ConfigItem { - return predicate.ConfigItem(sql.FieldNotNull(FieldUpdatedAt)) -} - // NameEQ applies the EQ predicate on the "name" field. func NameEQ(v string) predicate.ConfigItem { return predicate.ConfigItem(sql.FieldEQ(FieldName, v)) diff --git a/pkg/database/ent/configitem_create.go b/pkg/database/ent/configitem_create.go index 19e73dea41c..a2679927aee 100644 --- a/pkg/database/ent/configitem_create.go +++ b/pkg/database/ent/configitem_create.go @@ -107,6 +107,12 @@ func (cic *ConfigItemCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (cic *ConfigItemCreate) check() error { + if _, ok := cic.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ConfigItem.created_at"`)} + } + if _, ok := cic.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ConfigItem.updated_at"`)} + } if _, ok := cic.mutation.Name(); !ok { return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ConfigItem.name"`)} } @@ -141,11 +147,11 @@ func (cic *ConfigItemCreate) createSpec() (*ConfigItem, *sqlgraph.CreateSpec) { ) if value, ok := cic.mutation.CreatedAt(); ok { _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := cic.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := cic.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index 
11fb0755191..d4f1f15d23a 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -28,30 +28,12 @@ func (ciu *ConfigItemUpdate) Where(ps ...predicate.ConfigItem) *ConfigItemUpdate return ciu } -// SetCreatedAt sets the "created_at" field. -func (ciu *ConfigItemUpdate) SetCreatedAt(t time.Time) *ConfigItemUpdate { - ciu.mutation.SetCreatedAt(t) - return ciu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (ciu *ConfigItemUpdate) ClearCreatedAt() *ConfigItemUpdate { - ciu.mutation.ClearCreatedAt() - return ciu -} - // SetUpdatedAt sets the "updated_at" field. func (ciu *ConfigItemUpdate) SetUpdatedAt(t time.Time) *ConfigItemUpdate { ciu.mutation.SetUpdatedAt(t) return ciu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (ciu *ConfigItemUpdate) ClearUpdatedAt() *ConfigItemUpdate { - ciu.mutation.ClearUpdatedAt() - return ciu -} - // SetName sets the "name" field. func (ciu *ConfigItemUpdate) SetName(s string) *ConfigItemUpdate { ciu.mutation.SetName(s) @@ -115,11 +97,7 @@ func (ciu *ConfigItemUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. 
func (ciu *ConfigItemUpdate) defaults() { - if _, ok := ciu.mutation.CreatedAt(); !ok && !ciu.mutation.CreatedAtCleared() { - v := configitem.UpdateDefaultCreatedAt() - ciu.mutation.SetCreatedAt(v) - } - if _, ok := ciu.mutation.UpdatedAt(); !ok && !ciu.mutation.UpdatedAtCleared() { + if _, ok := ciu.mutation.UpdatedAt(); !ok { v := configitem.UpdateDefaultUpdatedAt() ciu.mutation.SetUpdatedAt(v) } @@ -134,18 +112,9 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := ciu.mutation.CreatedAt(); ok { - _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - } - if ciu.mutation.CreatedAtCleared() { - _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) - } if value, ok := ciu.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if ciu.mutation.UpdatedAtCleared() { - _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) - } if value, ok := ciu.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) } @@ -172,30 +141,12 @@ type ConfigItemUpdateOne struct { mutation *ConfigItemMutation } -// SetCreatedAt sets the "created_at" field. -func (ciuo *ConfigItemUpdateOne) SetCreatedAt(t time.Time) *ConfigItemUpdateOne { - ciuo.mutation.SetCreatedAt(t) - return ciuo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (ciuo *ConfigItemUpdateOne) ClearCreatedAt() *ConfigItemUpdateOne { - ciuo.mutation.ClearCreatedAt() - return ciuo -} - // SetUpdatedAt sets the "updated_at" field. func (ciuo *ConfigItemUpdateOne) SetUpdatedAt(t time.Time) *ConfigItemUpdateOne { ciuo.mutation.SetUpdatedAt(t) return ciuo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (ciuo *ConfigItemUpdateOne) ClearUpdatedAt() *ConfigItemUpdateOne { - ciuo.mutation.ClearUpdatedAt() - return ciuo -} - // SetName sets the "name" field. 
func (ciuo *ConfigItemUpdateOne) SetName(s string) *ConfigItemUpdateOne { ciuo.mutation.SetName(s) @@ -272,11 +223,7 @@ func (ciuo *ConfigItemUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (ciuo *ConfigItemUpdateOne) defaults() { - if _, ok := ciuo.mutation.CreatedAt(); !ok && !ciuo.mutation.CreatedAtCleared() { - v := configitem.UpdateDefaultCreatedAt() - ciuo.mutation.SetCreatedAt(v) - } - if _, ok := ciuo.mutation.UpdatedAt(); !ok && !ciuo.mutation.UpdatedAtCleared() { + if _, ok := ciuo.mutation.UpdatedAt(); !ok { v := configitem.UpdateDefaultUpdatedAt() ciuo.mutation.SetUpdatedAt(v) } @@ -308,18 +255,9 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem } } } - if value, ok := ciuo.mutation.CreatedAt(); ok { - _spec.SetField(configitem.FieldCreatedAt, field.TypeTime, value) - } - if ciuo.mutation.CreatedAtCleared() { - _spec.ClearField(configitem.FieldCreatedAt, field.TypeTime) - } if value, ok := ciuo.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if ciuo.mutation.UpdatedAtCleared() { - _spec.ClearField(configitem.FieldUpdatedAt, field.TypeTime) - } if value, ok := ciuo.mutation.Name(); ok { _spec.SetField(configitem.FieldName, field.TypeString, value) } diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go index 8a08bc1dfd4..1cc0df4c784 100644 --- a/pkg/database/ent/decision.go +++ b/pkg/database/ent/decision.go @@ -19,9 +19,9 @@ type Decision struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Until holds the value of the "until" field. 
Until *time.Time `json:"until,omitempty"` // Scenario holds the value of the "scenario" field. @@ -116,15 +116,13 @@ func (d *Decision) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - d.CreatedAt = new(time.Time) - *d.CreatedAt = value.Time + d.CreatedAt = value.Time } case decision.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - d.UpdatedAt = new(time.Time) - *d.UpdatedAt = value.Time + d.UpdatedAt = value.Time } case decision.FieldUntil: if value, ok := values[i].(*sql.NullTime); !ok { @@ -252,15 +250,11 @@ func (d *Decision) String() string { var builder strings.Builder builder.WriteString("Decision(") builder.WriteString(fmt.Sprintf("id=%v, ", d.ID)) - if v := d.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(d.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := d.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(d.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := d.Until; v != nil { builder.WriteString("until=") diff --git a/pkg/database/ent/decision/decision.go b/pkg/database/ent/decision/decision.go index d9f67623bd8..38c9721db48 100644 --- a/pkg/database/ent/decision/decision.go +++ b/pkg/database/ent/decision/decision.go @@ -93,8 +93,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. 
- UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/decision/where.go b/pkg/database/ent/decision/where.go index 36374f5714d..99a1889e63e 100644 --- a/pkg/database/ent/decision/where.go +++ b/pkg/database/ent/decision/where.go @@ -175,16 +175,6 @@ func CreatedAtLTE(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Decision { - return predicate.Decision(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Decision { - return predicate.Decision(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldEQ(FieldUpdatedAt, v)) @@ -225,16 +215,6 @@ func UpdatedAtLTE(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Decision { - return predicate.Decision(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Decision { - return predicate.Decision(sql.FieldNotNull(FieldUpdatedAt)) -} - // UntilEQ applies the EQ predicate on the "until" field. 
func UntilEQ(v time.Time) predicate.Decision { return predicate.Decision(sql.FieldEQ(FieldUntil, v)) diff --git a/pkg/database/ent/decision_create.go b/pkg/database/ent/decision_create.go index 43a28c53114..f30d5452120 100644 --- a/pkg/database/ent/decision_create.go +++ b/pkg/database/ent/decision_create.go @@ -275,6 +275,12 @@ func (dc *DecisionCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (dc *DecisionCreate) check() error { + if _, ok := dc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Decision.created_at"`)} + } + if _, ok := dc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Decision.updated_at"`)} + } if _, ok := dc.mutation.Scenario(); !ok { return &ValidationError{Name: "scenario", err: errors.New(`ent: missing required field "Decision.scenario"`)} } @@ -321,11 +327,11 @@ func (dc *DecisionCreate) createSpec() (*Decision, *sqlgraph.CreateSpec) { ) if value, ok := dc.mutation.CreatedAt(); ok { _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := dc.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := dc.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, field.TypeTime, value) diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 182457e9f63..1bcb42f8c1f 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -29,30 +29,12 @@ func (du *DecisionUpdate) Where(ps ...predicate.Decision) *DecisionUpdate { return du } -// SetCreatedAt sets the "created_at" field. 
-func (du *DecisionUpdate) SetCreatedAt(t time.Time) *DecisionUpdate { - du.mutation.SetCreatedAt(t) - return du -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (du *DecisionUpdate) ClearCreatedAt() *DecisionUpdate { - du.mutation.ClearCreatedAt() - return du -} - // SetUpdatedAt sets the "updated_at" field. func (du *DecisionUpdate) SetUpdatedAt(t time.Time) *DecisionUpdate { du.mutation.SetUpdatedAt(t) return du } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (du *DecisionUpdate) ClearUpdatedAt() *DecisionUpdate { - du.mutation.ClearUpdatedAt() - return du -} - // SetUntil sets the "until" field. func (du *DecisionUpdate) SetUntil(t time.Time) *DecisionUpdate { du.mutation.SetUntil(t) @@ -392,11 +374,7 @@ func (du *DecisionUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (du *DecisionUpdate) defaults() { - if _, ok := du.mutation.CreatedAt(); !ok && !du.mutation.CreatedAtCleared() { - v := decision.UpdateDefaultCreatedAt() - du.mutation.SetCreatedAt(v) - } - if _, ok := du.mutation.UpdatedAt(); !ok && !du.mutation.UpdatedAtCleared() { + if _, ok := du.mutation.UpdatedAt(); !ok { v := decision.UpdateDefaultUpdatedAt() du.mutation.SetUpdatedAt(v) } @@ -411,18 +389,9 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := du.mutation.CreatedAt(); ok { - _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - } - if du.mutation.CreatedAtCleared() { - _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) - } if value, ok := du.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } - if du.mutation.UpdatedAtCleared() { - _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) - } if value, ok := du.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, field.TypeTime, value) } @@ -547,30 +516,12 @@ type DecisionUpdateOne struct { mutation *DecisionMutation } -// 
SetCreatedAt sets the "created_at" field. -func (duo *DecisionUpdateOne) SetCreatedAt(t time.Time) *DecisionUpdateOne { - duo.mutation.SetCreatedAt(t) - return duo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (duo *DecisionUpdateOne) ClearCreatedAt() *DecisionUpdateOne { - duo.mutation.ClearCreatedAt() - return duo -} - // SetUpdatedAt sets the "updated_at" field. func (duo *DecisionUpdateOne) SetUpdatedAt(t time.Time) *DecisionUpdateOne { duo.mutation.SetUpdatedAt(t) return duo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (duo *DecisionUpdateOne) ClearUpdatedAt() *DecisionUpdateOne { - duo.mutation.ClearUpdatedAt() - return duo -} - // SetUntil sets the "until" field. func (duo *DecisionUpdateOne) SetUntil(t time.Time) *DecisionUpdateOne { duo.mutation.SetUntil(t) @@ -923,11 +874,7 @@ func (duo *DecisionUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (duo *DecisionUpdateOne) defaults() { - if _, ok := duo.mutation.CreatedAt(); !ok && !duo.mutation.CreatedAtCleared() { - v := decision.UpdateDefaultCreatedAt() - duo.mutation.SetCreatedAt(v) - } - if _, ok := duo.mutation.UpdatedAt(); !ok && !duo.mutation.UpdatedAtCleared() { + if _, ok := duo.mutation.UpdatedAt(); !ok { v := decision.UpdateDefaultUpdatedAt() duo.mutation.SetUpdatedAt(v) } @@ -959,18 +906,9 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err } } } - if value, ok := duo.mutation.CreatedAt(); ok { - _spec.SetField(decision.FieldCreatedAt, field.TypeTime, value) - } - if duo.mutation.CreatedAtCleared() { - _spec.ClearField(decision.FieldCreatedAt, field.TypeTime) - } if value, ok := duo.mutation.UpdatedAt(); ok { _spec.SetField(decision.FieldUpdatedAt, field.TypeTime, value) } - if duo.mutation.UpdatedAtCleared() { - _spec.ClearField(decision.FieldUpdatedAt, field.TypeTime) - } if value, ok := duo.mutation.Until(); ok { _spec.SetField(decision.FieldUntil, 
field.TypeTime, value) } diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go index df4a2d10c8b..10e6d01c9d5 100644 --- a/pkg/database/ent/event.go +++ b/pkg/database/ent/event.go @@ -19,9 +19,9 @@ type Event struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Time holds the value of the "time" field. Time time.Time `json:"time,omitempty"` // Serialized holds the value of the "serialized" field. @@ -92,15 +92,13 @@ func (e *Event) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - e.CreatedAt = new(time.Time) - *e.CreatedAt = value.Time + e.CreatedAt = value.Time } case event.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - e.UpdatedAt = new(time.Time) - *e.UpdatedAt = value.Time + e.UpdatedAt = value.Time } case event.FieldTime: if value, ok := values[i].(*sql.NullTime); !ok { @@ -161,15 +159,11 @@ func (e *Event) String() string { var builder strings.Builder builder.WriteString("Event(") builder.WriteString(fmt.Sprintf("id=%v, ", e.ID)) - if v := e.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(e.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := e.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + 
builder.WriteString(e.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("time=") builder.WriteString(e.Time.Format(time.ANSIC)) diff --git a/pkg/database/ent/event/event.go b/pkg/database/ent/event/event.go index 48f5a355824..c975a612669 100644 --- a/pkg/database/ent/event/event.go +++ b/pkg/database/ent/event/event.go @@ -60,8 +60,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/event/where.go b/pkg/database/ent/event/where.go index 238bea988bd..d420b125026 100644 --- a/pkg/database/ent/event/where.go +++ b/pkg/database/ent/event/where.go @@ -120,16 +120,6 @@ func CreatedAtLTE(v time.Time) predicate.Event { return predicate.Event(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Event { - return predicate.Event(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Event { - return predicate.Event(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Event { return predicate.Event(sql.FieldEQ(FieldUpdatedAt, v)) @@ -170,16 +160,6 @@ func UpdatedAtLTE(v time.Time) predicate.Event { return predicate.Event(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. 
-func UpdatedAtIsNil() predicate.Event { - return predicate.Event(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Event { - return predicate.Event(sql.FieldNotNull(FieldUpdatedAt)) -} - // TimeEQ applies the EQ predicate on the "time" field. func TimeEQ(v time.Time) predicate.Event { return predicate.Event(sql.FieldEQ(FieldTime, v)) diff --git a/pkg/database/ent/event_create.go b/pkg/database/ent/event_create.go index 98194f2fd33..36747babe47 100644 --- a/pkg/database/ent/event_create.go +++ b/pkg/database/ent/event_create.go @@ -141,6 +141,12 @@ func (ec *EventCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (ec *EventCreate) check() error { + if _, ok := ec.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Event.created_at"`)} + } + if _, ok := ec.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Event.updated_at"`)} + } if _, ok := ec.mutation.Time(); !ok { return &ValidationError{Name: "time", err: errors.New(`ent: missing required field "Event.time"`)} } @@ -180,11 +186,11 @@ func (ec *EventCreate) createSpec() (*Event, *sqlgraph.CreateSpec) { ) if value, ok := ec.mutation.CreatedAt(); ok { _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := ec.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := ec.mutation.Time(); ok { _spec.SetField(event.FieldTime, field.TypeTime, value) diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index a06178f79af..0bc8a7f9243 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -29,30 +29,12 @@ func (eu 
*EventUpdate) Where(ps ...predicate.Event) *EventUpdate { return eu } -// SetCreatedAt sets the "created_at" field. -func (eu *EventUpdate) SetCreatedAt(t time.Time) *EventUpdate { - eu.mutation.SetCreatedAt(t) - return eu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (eu *EventUpdate) ClearCreatedAt() *EventUpdate { - eu.mutation.ClearCreatedAt() - return eu -} - // SetUpdatedAt sets the "updated_at" field. func (eu *EventUpdate) SetUpdatedAt(t time.Time) *EventUpdate { eu.mutation.SetUpdatedAt(t) return eu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (eu *EventUpdate) ClearUpdatedAt() *EventUpdate { - eu.mutation.ClearUpdatedAt() - return eu -} - // SetTime sets the "time" field. func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate { eu.mutation.SetTime(t) @@ -161,11 +143,7 @@ func (eu *EventUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (eu *EventUpdate) defaults() { - if _, ok := eu.mutation.CreatedAt(); !ok && !eu.mutation.CreatedAtCleared() { - v := event.UpdateDefaultCreatedAt() - eu.mutation.SetCreatedAt(v) - } - if _, ok := eu.mutation.UpdatedAt(); !ok && !eu.mutation.UpdatedAtCleared() { + if _, ok := eu.mutation.UpdatedAt(); !ok { v := event.UpdateDefaultUpdatedAt() eu.mutation.SetUpdatedAt(v) } @@ -193,18 +171,9 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := eu.mutation.CreatedAt(); ok { - _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - } - if eu.mutation.CreatedAtCleared() { - _spec.ClearField(event.FieldCreatedAt, field.TypeTime) - } if value, ok := eu.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if eu.mutation.UpdatedAtCleared() { - _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) - } if value, ok := eu.mutation.Time(); ok { _spec.SetField(event.FieldTime, field.TypeTime, value) } @@ -260,30 +229,12 @@ type 
EventUpdateOne struct { mutation *EventMutation } -// SetCreatedAt sets the "created_at" field. -func (euo *EventUpdateOne) SetCreatedAt(t time.Time) *EventUpdateOne { - euo.mutation.SetCreatedAt(t) - return euo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (euo *EventUpdateOne) ClearCreatedAt() *EventUpdateOne { - euo.mutation.ClearCreatedAt() - return euo -} - // SetUpdatedAt sets the "updated_at" field. func (euo *EventUpdateOne) SetUpdatedAt(t time.Time) *EventUpdateOne { euo.mutation.SetUpdatedAt(t) return euo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (euo *EventUpdateOne) ClearUpdatedAt() *EventUpdateOne { - euo.mutation.ClearUpdatedAt() - return euo -} - // SetTime sets the "time" field. func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { euo.mutation.SetTime(t) @@ -405,11 +356,7 @@ func (euo *EventUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (euo *EventUpdateOne) defaults() { - if _, ok := euo.mutation.CreatedAt(); !ok && !euo.mutation.CreatedAtCleared() { - v := event.UpdateDefaultCreatedAt() - euo.mutation.SetCreatedAt(v) - } - if _, ok := euo.mutation.UpdatedAt(); !ok && !euo.mutation.UpdatedAtCleared() { + if _, ok := euo.mutation.UpdatedAt(); !ok { v := event.UpdateDefaultUpdatedAt() euo.mutation.SetUpdatedAt(v) } @@ -454,18 +401,9 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error } } } - if value, ok := euo.mutation.CreatedAt(); ok { - _spec.SetField(event.FieldCreatedAt, field.TypeTime, value) - } - if euo.mutation.CreatedAtCleared() { - _spec.ClearField(event.FieldCreatedAt, field.TypeTime) - } if value, ok := euo.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if euo.mutation.UpdatedAtCleared() { - _spec.ClearField(event.FieldUpdatedAt, field.TypeTime) - } if value, ok := euo.mutation.Time(); ok { _spec.SetField(event.FieldTime, 
field.TypeTime, value) } diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go index dc61dfdfde1..988363abd17 100644 --- a/pkg/database/ent/lock_update.go +++ b/pkg/database/ent/lock_update.go @@ -28,20 +28,6 @@ func (lu *LockUpdate) Where(ps ...predicate.Lock) *LockUpdate { return lu } -// SetName sets the "name" field. -func (lu *LockUpdate) SetName(s string) *LockUpdate { - lu.mutation.SetName(s) - return lu -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (lu *LockUpdate) SetNillableName(s *string) *LockUpdate { - if s != nil { - lu.SetName(*s) - } - return lu -} - // SetCreatedAt sets the "created_at" field. func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { lu.mutation.SetCreatedAt(t) @@ -97,9 +83,6 @@ func (lu *LockUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := lu.mutation.Name(); ok { - _spec.SetField(lock.FieldName, field.TypeString, value) - } if value, ok := lu.mutation.CreatedAt(); ok { _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) } @@ -123,20 +106,6 @@ type LockUpdateOne struct { mutation *LockMutation } -// SetName sets the "name" field. -func (luo *LockUpdateOne) SetName(s string) *LockUpdateOne { - luo.mutation.SetName(s) - return luo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (luo *LockUpdateOne) SetNillableName(s *string) *LockUpdateOne { - if s != nil { - luo.SetName(*s) - } - return luo -} - // SetCreatedAt sets the "created_at" field. 
func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { luo.mutation.SetCreatedAt(t) @@ -222,9 +191,6 @@ func (luo *LockUpdateOne) sqlSave(ctx context.Context) (_node *Lock, err error) } } } - if value, ok := luo.mutation.Name(); ok { - _spec.SetField(lock.FieldName, field.TypeString, value) - } if value, ok := luo.mutation.CreatedAt(); ok { _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) } diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 346a8d084ba..475eab72ecd 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -18,9 +18,9 @@ type Machine struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // LastPush holds the value of the "last_push" field. LastPush *time.Time `json:"last_push,omitempty"` // LastHeartbeat holds the value of the "last_heartbeat" field. 
@@ -103,15 +103,13 @@ func (m *Machine) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - m.CreatedAt = new(time.Time) - *m.CreatedAt = value.Time + m.CreatedAt = value.Time } case machine.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - m.UpdatedAt = new(time.Time) - *m.UpdatedAt = value.Time + m.UpdatedAt = value.Time } case machine.FieldLastPush: if value, ok := values[i].(*sql.NullTime); !ok { @@ -216,15 +214,11 @@ func (m *Machine) String() string { var builder strings.Builder builder.WriteString("Machine(") builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) - if v := m.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := m.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := m.LastPush; v != nil { builder.WriteString("last_push=") diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 5456935e04c..46ea6deb03d 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -81,20 +81,14 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. UpdateDefaultUpdatedAt func() time.Time // DefaultLastPush holds the default value on creation for the "last_push" field. DefaultLastPush func() time.Time - // UpdateDefaultLastPush holds the default value on update for the "last_push" field. - UpdateDefaultLastPush func() time.Time // DefaultLastHeartbeat holds the default value on creation for the "last_heartbeat" field. DefaultLastHeartbeat func() time.Time - // UpdateDefaultLastHeartbeat holds the default value on update for the "last_heartbeat" field. - UpdateDefaultLastHeartbeat func() time.Time // ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. ScenariosValidator func(string) error // DefaultIsValidated holds the default value on creation for the "isValidated" field. diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index e9d00e7e01e..24c9ab154c8 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -155,16 +155,6 @@ func CreatedAtLTE(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldUpdatedAt, v)) @@ -205,16 +195,6 @@ func UpdatedAtLTE(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. 
-func UpdatedAtIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. -func UpdatedAtNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldUpdatedAt)) -} - // LastPushEQ applies the EQ predicate on the "last_push" field. func LastPushEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldLastPush, v)) diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index ff704e6ab74..8d4bfb74b2a 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -243,6 +243,12 @@ func (mc *MachineCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (mc *MachineCreate) check() error { + if _, ok := mc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Machine.created_at"`)} + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Machine.updated_at"`)} + } if _, ok := mc.mutation.MachineId(); !ok { return &ValidationError{Name: "machineId", err: errors.New(`ent: missing required field "Machine.machineId"`)} } @@ -291,11 +297,11 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { ) if value, ok := mc.mutation.CreatedAt(); ok { _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := mc.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := mc.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index 1f87ac04d6f..5fbd15220f9 100644 --- 
a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -29,36 +29,26 @@ func (mu *MachineUpdate) Where(ps ...predicate.Machine) *MachineUpdate { return mu } -// SetCreatedAt sets the "created_at" field. -func (mu *MachineUpdate) SetCreatedAt(t time.Time) *MachineUpdate { - mu.mutation.SetCreatedAt(t) - return mu -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (mu *MachineUpdate) ClearCreatedAt() *MachineUpdate { - mu.mutation.ClearCreatedAt() - return mu -} - // SetUpdatedAt sets the "updated_at" field. func (mu *MachineUpdate) SetUpdatedAt(t time.Time) *MachineUpdate { mu.mutation.SetUpdatedAt(t) return mu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (mu *MachineUpdate) ClearUpdatedAt() *MachineUpdate { - mu.mutation.ClearUpdatedAt() - return mu -} - // SetLastPush sets the "last_push" field. func (mu *MachineUpdate) SetLastPush(t time.Time) *MachineUpdate { mu.mutation.SetLastPush(t) return mu } +// SetNillableLastPush sets the "last_push" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableLastPush(t *time.Time) *MachineUpdate { + if t != nil { + mu.SetLastPush(*t) + } + return mu +} + // ClearLastPush clears the value of the "last_push" field. func (mu *MachineUpdate) ClearLastPush() *MachineUpdate { mu.mutation.ClearLastPush() @@ -71,23 +61,17 @@ func (mu *MachineUpdate) SetLastHeartbeat(t time.Time) *MachineUpdate { return mu } -// ClearLastHeartbeat clears the value of the "last_heartbeat" field. -func (mu *MachineUpdate) ClearLastHeartbeat() *MachineUpdate { - mu.mutation.ClearLastHeartbeat() - return mu -} - -// SetMachineId sets the "machineId" field. -func (mu *MachineUpdate) SetMachineId(s string) *MachineUpdate { - mu.mutation.SetMachineId(s) +// SetNillableLastHeartbeat sets the "last_heartbeat" field if the given value is not nil. 
+func (mu *MachineUpdate) SetNillableLastHeartbeat(t *time.Time) *MachineUpdate { + if t != nil { + mu.SetLastHeartbeat(*t) + } return mu } -// SetNillableMachineId sets the "machineId" field if the given value is not nil. -func (mu *MachineUpdate) SetNillableMachineId(s *string) *MachineUpdate { - if s != nil { - mu.SetMachineId(*s) - } +// ClearLastHeartbeat clears the value of the "last_heartbeat" field. +func (mu *MachineUpdate) ClearLastHeartbeat() *MachineUpdate { + mu.mutation.ClearLastHeartbeat() return mu } @@ -278,22 +262,10 @@ func (mu *MachineUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (mu *MachineUpdate) defaults() { - if _, ok := mu.mutation.CreatedAt(); !ok && !mu.mutation.CreatedAtCleared() { - v := machine.UpdateDefaultCreatedAt() - mu.mutation.SetCreatedAt(v) - } - if _, ok := mu.mutation.UpdatedAt(); !ok && !mu.mutation.UpdatedAtCleared() { + if _, ok := mu.mutation.UpdatedAt(); !ok { v := machine.UpdateDefaultUpdatedAt() mu.mutation.SetUpdatedAt(v) } - if _, ok := mu.mutation.LastPush(); !ok && !mu.mutation.LastPushCleared() { - v := machine.UpdateDefaultLastPush() - mu.mutation.SetLastPush(v) - } - if _, ok := mu.mutation.LastHeartbeat(); !ok && !mu.mutation.LastHeartbeatCleared() { - v := machine.UpdateDefaultLastHeartbeat() - mu.mutation.SetLastHeartbeat(v) - } } // check runs all checks and user-defined validators on the builder. 
@@ -318,18 +290,9 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := mu.mutation.CreatedAt(); ok { - _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - } - if mu.mutation.CreatedAtCleared() { - _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) - } if value, ok := mu.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } - if mu.mutation.UpdatedAtCleared() { - _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) - } if value, ok := mu.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) } @@ -342,9 +305,6 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if mu.mutation.LastHeartbeatCleared() { _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } - if value, ok := mu.mutation.MachineId(); ok { - _spec.SetField(machine.FieldMachineId, field.TypeString, value) - } if value, ok := mu.mutation.Password(); ok { _spec.SetField(machine.FieldPassword, field.TypeString, value) } @@ -440,36 +400,26 @@ type MachineUpdateOne struct { mutation *MachineMutation } -// SetCreatedAt sets the "created_at" field. -func (muo *MachineUpdateOne) SetCreatedAt(t time.Time) *MachineUpdateOne { - muo.mutation.SetCreatedAt(t) - return muo -} - -// ClearCreatedAt clears the value of the "created_at" field. -func (muo *MachineUpdateOne) ClearCreatedAt() *MachineUpdateOne { - muo.mutation.ClearCreatedAt() - return muo -} - // SetUpdatedAt sets the "updated_at" field. func (muo *MachineUpdateOne) SetUpdatedAt(t time.Time) *MachineUpdateOne { muo.mutation.SetUpdatedAt(t) return muo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (muo *MachineUpdateOne) ClearUpdatedAt() *MachineUpdateOne { - muo.mutation.ClearUpdatedAt() - return muo -} - // SetLastPush sets the "last_push" field. 
func (muo *MachineUpdateOne) SetLastPush(t time.Time) *MachineUpdateOne { muo.mutation.SetLastPush(t) return muo } +// SetNillableLastPush sets the "last_push" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableLastPush(t *time.Time) *MachineUpdateOne { + if t != nil { + muo.SetLastPush(*t) + } + return muo +} + // ClearLastPush clears the value of the "last_push" field. func (muo *MachineUpdateOne) ClearLastPush() *MachineUpdateOne { muo.mutation.ClearLastPush() @@ -482,23 +432,17 @@ func (muo *MachineUpdateOne) SetLastHeartbeat(t time.Time) *MachineUpdateOne { return muo } -// ClearLastHeartbeat clears the value of the "last_heartbeat" field. -func (muo *MachineUpdateOne) ClearLastHeartbeat() *MachineUpdateOne { - muo.mutation.ClearLastHeartbeat() - return muo -} - -// SetMachineId sets the "machineId" field. -func (muo *MachineUpdateOne) SetMachineId(s string) *MachineUpdateOne { - muo.mutation.SetMachineId(s) +// SetNillableLastHeartbeat sets the "last_heartbeat" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableLastHeartbeat(t *time.Time) *MachineUpdateOne { + if t != nil { + muo.SetLastHeartbeat(*t) + } return muo } -// SetNillableMachineId sets the "machineId" field if the given value is not nil. -func (muo *MachineUpdateOne) SetNillableMachineId(s *string) *MachineUpdateOne { - if s != nil { - muo.SetMachineId(*s) - } +// ClearLastHeartbeat clears the value of the "last_heartbeat" field. +func (muo *MachineUpdateOne) ClearLastHeartbeat() *MachineUpdateOne { + muo.mutation.ClearLastHeartbeat() return muo } @@ -702,22 +646,10 @@ func (muo *MachineUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. 
func (muo *MachineUpdateOne) defaults() { - if _, ok := muo.mutation.CreatedAt(); !ok && !muo.mutation.CreatedAtCleared() { - v := machine.UpdateDefaultCreatedAt() - muo.mutation.SetCreatedAt(v) - } - if _, ok := muo.mutation.UpdatedAt(); !ok && !muo.mutation.UpdatedAtCleared() { + if _, ok := muo.mutation.UpdatedAt(); !ok { v := machine.UpdateDefaultUpdatedAt() muo.mutation.SetUpdatedAt(v) } - if _, ok := muo.mutation.LastPush(); !ok && !muo.mutation.LastPushCleared() { - v := machine.UpdateDefaultLastPush() - muo.mutation.SetLastPush(v) - } - if _, ok := muo.mutation.LastHeartbeat(); !ok && !muo.mutation.LastHeartbeatCleared() { - v := machine.UpdateDefaultLastHeartbeat() - muo.mutation.SetLastHeartbeat(v) - } } // check runs all checks and user-defined validators on the builder. @@ -759,18 +691,9 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e } } } - if value, ok := muo.mutation.CreatedAt(); ok { - _spec.SetField(machine.FieldCreatedAt, field.TypeTime, value) - } - if muo.mutation.CreatedAtCleared() { - _spec.ClearField(machine.FieldCreatedAt, field.TypeTime) - } if value, ok := muo.mutation.UpdatedAt(); ok { _spec.SetField(machine.FieldUpdatedAt, field.TypeTime, value) } - if muo.mutation.UpdatedAtCleared() { - _spec.ClearField(machine.FieldUpdatedAt, field.TypeTime) - } if value, ok := muo.mutation.LastPush(); ok { _spec.SetField(machine.FieldLastPush, field.TypeTime, value) } @@ -783,9 +706,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if muo.mutation.LastHeartbeatCleared() { _spec.ClearField(machine.FieldLastHeartbeat, field.TypeTime) } - if value, ok := muo.mutation.MachineId(); ok { - _spec.SetField(machine.FieldMachineId, field.TypeString, value) - } if value, ok := muo.mutation.Password(); ok { _spec.SetField(machine.FieldPassword, field.TypeString, value) } diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go index cadc210937e..768358ca2bf 100644 --- 
a/pkg/database/ent/meta.go +++ b/pkg/database/ent/meta.go @@ -19,9 +19,9 @@ type Meta struct { // ID of the ent. ID int `json:"id,omitempty"` // CreatedAt holds the value of the "created_at" field. - CreatedAt *time.Time `json:"created_at,omitempty"` + CreatedAt time.Time `json:"created_at,omitempty"` // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt *time.Time `json:"updated_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` // Key holds the value of the "key" field. Key string `json:"key,omitempty"` // Value holds the value of the "value" field. @@ -92,15 +92,13 @@ func (m *Meta) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field created_at", values[i]) } else if value.Valid { - m.CreatedAt = new(time.Time) - *m.CreatedAt = value.Time + m.CreatedAt = value.Time } case meta.FieldUpdatedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field updated_at", values[i]) } else if value.Valid { - m.UpdatedAt = new(time.Time) - *m.UpdatedAt = value.Time + m.UpdatedAt = value.Time } case meta.FieldKey: if value, ok := values[i].(*sql.NullString); !ok { @@ -161,15 +159,11 @@ func (m *Meta) String() string { var builder strings.Builder builder.WriteString("Meta(") builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) - if v := m.CreatedAt; v != nil { - builder.WriteString("created_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("created_at=") + builder.WriteString(m.CreatedAt.Format(time.ANSIC)) builder.WriteString(", ") - if v := m.UpdatedAt; v != nil { - builder.WriteString("updated_at=") - builder.WriteString(v.Format(time.ANSIC)) - } + builder.WriteString("updated_at=") + builder.WriteString(m.UpdatedAt.Format(time.ANSIC)) builder.WriteString(", ") builder.WriteString("key=") builder.WriteString(m.Key) diff --git a/pkg/database/ent/meta/meta.go 
b/pkg/database/ent/meta/meta.go index 583496fb710..ff41361616a 100644 --- a/pkg/database/ent/meta/meta.go +++ b/pkg/database/ent/meta/meta.go @@ -60,8 +60,6 @@ func ValidColumn(column string) bool { var ( // DefaultCreatedAt holds the default value on creation for the "created_at" field. DefaultCreatedAt func() time.Time - // UpdateDefaultCreatedAt holds the default value on update for the "created_at" field. - UpdateDefaultCreatedAt func() time.Time // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. DefaultUpdatedAt func() time.Time // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. diff --git a/pkg/database/ent/meta/where.go b/pkg/database/ent/meta/where.go index 7fc99136972..6d5d54c0482 100644 --- a/pkg/database/ent/meta/where.go +++ b/pkg/database/ent/meta/where.go @@ -120,16 +120,6 @@ func CreatedAtLTE(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldLTE(FieldCreatedAt, v)) } -// CreatedAtIsNil applies the IsNil predicate on the "created_at" field. -func CreatedAtIsNil() predicate.Meta { - return predicate.Meta(sql.FieldIsNull(FieldCreatedAt)) -} - -// CreatedAtNotNil applies the NotNil predicate on the "created_at" field. -func CreatedAtNotNil() predicate.Meta { - return predicate.Meta(sql.FieldNotNull(FieldCreatedAt)) -} - // UpdatedAtEQ applies the EQ predicate on the "updated_at" field. func UpdatedAtEQ(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldEQ(FieldUpdatedAt, v)) @@ -170,16 +160,6 @@ func UpdatedAtLTE(v time.Time) predicate.Meta { return predicate.Meta(sql.FieldLTE(FieldUpdatedAt, v)) } -// UpdatedAtIsNil applies the IsNil predicate on the "updated_at" field. -func UpdatedAtIsNil() predicate.Meta { - return predicate.Meta(sql.FieldIsNull(FieldUpdatedAt)) -} - -// UpdatedAtNotNil applies the NotNil predicate on the "updated_at" field. 
-func UpdatedAtNotNil() predicate.Meta { - return predicate.Meta(sql.FieldNotNull(FieldUpdatedAt)) -} - // KeyEQ applies the EQ predicate on the "key" field. func KeyEQ(v string) predicate.Meta { return predicate.Meta(sql.FieldEQ(FieldKey, v)) diff --git a/pkg/database/ent/meta_create.go b/pkg/database/ent/meta_create.go index 3bf30f0def9..321c4bd7ab4 100644 --- a/pkg/database/ent/meta_create.go +++ b/pkg/database/ent/meta_create.go @@ -141,6 +141,12 @@ func (mc *MetaCreate) defaults() { // check runs all checks and user-defined validators on the builder. func (mc *MetaCreate) check() error { + if _, ok := mc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Meta.created_at"`)} + } + if _, ok := mc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Meta.updated_at"`)} + } if _, ok := mc.mutation.Key(); !ok { return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "Meta.key"`)} } @@ -180,11 +186,11 @@ func (mc *MetaCreate) createSpec() (*Meta, *sqlgraph.CreateSpec) { ) if value, ok := mc.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = &value + _node.CreatedAt = value } if value, ok := mc.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = &value + _node.UpdatedAt = value } if value, ok := mc.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index a1379faa130..76567c5eff7 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -35,9 +35,11 @@ func (mu *MetaUpdate) SetCreatedAt(t time.Time) *MetaUpdate { return mu } -// ClearCreatedAt clears the value of the "created_at" field. 
-func (mu *MetaUpdate) ClearCreatedAt() *MetaUpdate { - mu.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (mu *MetaUpdate) SetNillableCreatedAt(t *time.Time) *MetaUpdate { + if t != nil { + mu.SetCreatedAt(*t) + } return mu } @@ -47,12 +49,6 @@ func (mu *MetaUpdate) SetUpdatedAt(t time.Time) *MetaUpdate { return mu } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (mu *MetaUpdate) ClearUpdatedAt() *MetaUpdate { - mu.mutation.ClearUpdatedAt() - return mu -} - // SetKey sets the "key" field. func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { mu.mutation.SetKey(s) @@ -161,11 +157,7 @@ func (mu *MetaUpdate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (mu *MetaUpdate) defaults() { - if _, ok := mu.mutation.CreatedAt(); !ok && !mu.mutation.CreatedAtCleared() { - v := meta.UpdateDefaultCreatedAt() - mu.mutation.SetCreatedAt(v) - } - if _, ok := mu.mutation.UpdatedAt(); !ok && !mu.mutation.UpdatedAtCleared() { + if _, ok := mu.mutation.UpdatedAt(); !ok { v := meta.UpdateDefaultUpdatedAt() mu.mutation.SetUpdatedAt(v) } @@ -196,15 +188,9 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } - if mu.mutation.CreatedAtCleared() { - _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) - } if value, ok := mu.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if mu.mutation.UpdatedAtCleared() { - _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) - } if value, ok := mu.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) } @@ -266,9 +252,11 @@ func (muo *MetaUpdateOne) SetCreatedAt(t time.Time) *MetaUpdateOne { return muo } -// ClearCreatedAt clears the value of the "created_at" field. 
-func (muo *MetaUpdateOne) ClearCreatedAt() *MetaUpdateOne { - muo.mutation.ClearCreatedAt() +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (muo *MetaUpdateOne) SetNillableCreatedAt(t *time.Time) *MetaUpdateOne { + if t != nil { + muo.SetCreatedAt(*t) + } return muo } @@ -278,12 +266,6 @@ func (muo *MetaUpdateOne) SetUpdatedAt(t time.Time) *MetaUpdateOne { return muo } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (muo *MetaUpdateOne) ClearUpdatedAt() *MetaUpdateOne { - muo.mutation.ClearUpdatedAt() - return muo -} - // SetKey sets the "key" field. func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { muo.mutation.SetKey(s) @@ -405,11 +387,7 @@ func (muo *MetaUpdateOne) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (muo *MetaUpdateOne) defaults() { - if _, ok := muo.mutation.CreatedAt(); !ok && !muo.mutation.CreatedAtCleared() { - v := meta.UpdateDefaultCreatedAt() - muo.mutation.SetCreatedAt(v) - } - if _, ok := muo.mutation.UpdatedAt(); !ok && !muo.mutation.UpdatedAtCleared() { + if _, ok := muo.mutation.UpdatedAt(); !ok { v := meta.UpdateDefaultUpdatedAt() muo.mutation.SetUpdatedAt(v) } @@ -457,15 +435,9 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) if value, ok := muo.mutation.CreatedAt(); ok { _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) } - if muo.mutation.CreatedAtCleared() { - _spec.ClearField(meta.FieldCreatedAt, field.TypeTime) - } if value, ok := muo.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if muo.mutation.UpdatedAtCleared() { - _spec.ClearField(meta.FieldUpdatedAt, field.TypeTime) - } if value, ok := muo.mutation.Key(); ok { _spec.SetField(meta.FieldKey, field.TypeString, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index c3ffed42239..e5d43d42314 100644 --- 
a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -11,8 +11,8 @@ var ( // AlertsColumns holds the columns for the "alerts" table. AlertsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "scenario", Type: field.TypeString}, {Name: "bucket_id", Type: field.TypeString, Nullable: true, Default: ""}, {Name: "message", Type: field.TypeString, Nullable: true, Default: ""}, @@ -60,8 +60,8 @@ var ( // BouncersColumns holds the columns for the "bouncers" table. BouncersColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "name", Type: field.TypeString, Unique: true}, {Name: "api_key", Type: field.TypeString}, {Name: "revoked", Type: field.TypeBool}, @@ -81,8 +81,8 @@ var ( // ConfigItemsColumns holds the columns for the "config_items" table. ConfigItemsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "name", Type: field.TypeString, Unique: true}, {Name: "value", Type: field.TypeString}, } @@ -95,8 +95,8 @@ var ( // DecisionsColumns holds the columns for the "decisions" table. 
DecisionsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"mysql": "datetime"}}, {Name: "scenario", Type: field.TypeString}, {Name: "type", Type: field.TypeString}, @@ -151,8 +151,8 @@ var ( // EventsColumns holds the columns for the "events" table. EventsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "time", Type: field.TypeTime}, {Name: "serialized", Type: field.TypeString, Size: 8191}, {Name: "alert_events", Type: field.TypeInt, Nullable: true}, @@ -193,8 +193,8 @@ var ( // MachinesColumns holds the columns for the "machines" table. MachinesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "last_push", Type: field.TypeTime, Nullable: true}, {Name: "last_heartbeat", Type: field.TypeTime, Nullable: true}, {Name: "machine_id", Type: field.TypeString, Unique: true}, @@ -215,8 +215,8 @@ var ( // MetaColumns holds the columns for the "meta" table. 
MetaColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "created_at", Type: field.TypeTime, Nullable: true}, - {Name: "updated_at", Type: field.TypeTime, Nullable: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, {Name: "key", Type: field.TypeString}, {Name: "value", Type: field.TypeString, Size: 4095}, {Name: "alert_metas", Type: field.TypeInt, Nullable: true}, diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 365824de739..aed004fb7a6 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -206,7 +206,7 @@ func (m *AlertMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Alert entity. // If the Alert object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -220,22 +220,9 @@ func (m *AlertMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *AlertMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[alert.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *AlertMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[alert.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *AlertMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, alert.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -255,7 +242,7 @@ func (m *AlertMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Alert entity. // If the Alert object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -269,22 +256,9 @@ func (m *AlertMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *AlertMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[alert.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *AlertMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[alert.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *AlertMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, alert.FieldUpdatedAt) } // SetScenario sets the "scenario" field. @@ -2039,12 +2013,6 @@ func (m *AlertMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *AlertMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(alert.FieldCreatedAt) { - fields = append(fields, alert.FieldCreatedAt) - } - if m.FieldCleared(alert.FieldUpdatedAt) { - fields = append(fields, alert.FieldUpdatedAt) - } if m.FieldCleared(alert.FieldBucketId) { fields = append(fields, alert.FieldBucketId) } @@ -2116,12 +2084,6 @@ func (m *AlertMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *AlertMutation) ClearField(name string) error { switch name { - case alert.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case alert.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case alert.FieldBucketId: m.ClearBucketId() return nil @@ -2552,7 +2514,7 @@ func (m *BouncerMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Bouncer entity. // If the Bouncer object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -2566,22 +2528,9 @@ func (m *BouncerMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err e return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *BouncerMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[bouncer.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *BouncerMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[bouncer.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *BouncerMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, bouncer.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -2601,7 +2550,7 @@ func (m *BouncerMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Bouncer entity. // If the Bouncer object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -2615,22 +2564,9 @@ func (m *BouncerMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err e return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *BouncerMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[bouncer.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *BouncerMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[bouncer.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *BouncerMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, bouncer.FieldUpdatedAt) } // SetName sets the "name" field. @@ -3254,12 +3190,6 @@ func (m *BouncerMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *BouncerMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(bouncer.FieldCreatedAt) { - fields = append(fields, bouncer.FieldCreatedAt) - } - if m.FieldCleared(bouncer.FieldUpdatedAt) { - fields = append(fields, bouncer.FieldUpdatedAt) - } if m.FieldCleared(bouncer.FieldIPAddress) { fields = append(fields, bouncer.FieldIPAddress) } @@ -3286,12 +3216,6 @@ func (m *BouncerMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *BouncerMutation) ClearField(name string) error { switch name { - case bouncer.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case bouncer.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case bouncer.FieldIPAddress: m.ClearIPAddress() return nil @@ -3528,7 +3452,7 @@ func (m *ConfigItemMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the ConfigItem entity. // If the ConfigItem object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -3542,22 +3466,9 @@ func (m *ConfigItemMutation) OldCreatedAt(ctx context.Context) (v *time.Time, er return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *ConfigItemMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[configitem.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. 
-func (m *ConfigItemMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[configitem.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *ConfigItemMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, configitem.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -3577,7 +3488,7 @@ func (m *ConfigItemMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the ConfigItem entity. // If the ConfigItem object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -3591,22 +3502,9 @@ func (m *ConfigItemMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, er return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *ConfigItemMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[configitem.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *ConfigItemMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[configitem.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *ConfigItemMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, configitem.FieldUpdatedAt) } // SetName sets the "name" field. @@ -3827,14 +3725,7 @@ func (m *ConfigItemMutation) AddField(name string, value ent.Value) error { // ClearedFields returns all nullable fields that were cleared during this // mutation. 
func (m *ConfigItemMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(configitem.FieldCreatedAt) { - fields = append(fields, configitem.FieldCreatedAt) - } - if m.FieldCleared(configitem.FieldUpdatedAt) { - fields = append(fields, configitem.FieldUpdatedAt) - } - return fields + return nil } // FieldCleared returns a boolean indicating if a field with the given name was @@ -3847,14 +3738,6 @@ func (m *ConfigItemMutation) FieldCleared(name string) bool { // ClearField clears the value of the field with the given name. It returns an // error if the field is not defined in the schema. func (m *ConfigItemMutation) ClearField(name string) error { - switch name { - case configitem.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case configitem.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil - } return fmt.Errorf("unknown ConfigItem nullable field %s", name) } @@ -4075,7 +3958,7 @@ func (m *DecisionMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Decision entity. // If the Decision object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -4089,22 +3972,9 @@ func (m *DecisionMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *DecisionMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[decision.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. 
-func (m *DecisionMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[decision.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. func (m *DecisionMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, decision.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -4124,7 +3994,7 @@ func (m *DecisionMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Decision entity. // If the Decision object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -4138,22 +4008,9 @@ func (m *DecisionMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *DecisionMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[decision.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *DecisionMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[decision.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *DecisionMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, decision.FieldUpdatedAt) } // SetUntil sets the "until" field. @@ -5287,12 +5144,6 @@ func (m *DecisionMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *DecisionMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(decision.FieldCreatedAt) { - fields = append(fields, decision.FieldCreatedAt) - } - if m.FieldCleared(decision.FieldUpdatedAt) { - fields = append(fields, decision.FieldUpdatedAt) - } if m.FieldCleared(decision.FieldUntil) { fields = append(fields, decision.FieldUntil) } @@ -5331,12 +5182,6 @@ func (m *DecisionMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *DecisionMutation) ClearField(name string) error { switch name { - case decision.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case decision.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case decision.FieldUntil: m.ClearUntil() return nil @@ -5628,7 +5473,7 @@ func (m *EventMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Event entity. // If the Event object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *EventMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *EventMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -5642,22 +5487,9 @@ func (m *EventMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err err return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *EventMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[event.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *EventMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[event.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *EventMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, event.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -5677,7 +5509,7 @@ func (m *EventMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Event entity. // If the Event object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -5691,22 +5523,9 @@ func (m *EventMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err err return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *EventMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[event.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *EventMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[event.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *EventMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, event.FieldUpdatedAt) } // SetTime sets the "time" field. @@ -6034,12 +5853,6 @@ func (m *EventMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *EventMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(event.FieldCreatedAt) { - fields = append(fields, event.FieldCreatedAt) - } - if m.FieldCleared(event.FieldUpdatedAt) { - fields = append(fields, event.FieldUpdatedAt) - } if m.FieldCleared(event.FieldAlertEvents) { fields = append(fields, event.FieldAlertEvents) } @@ -6057,12 +5870,6 @@ func (m *EventMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *EventMutation) ClearField(name string) error { switch name { - case event.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case event.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case event.FieldAlertEvents: m.ClearAlertEvents() return nil @@ -6689,7 +6496,7 @@ func (m *MachineMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Machine entity. // If the Machine object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -6703,22 +6510,9 @@ func (m *MachineMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err e return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *MachineMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[machine.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *MachineMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[machine.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *MachineMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, machine.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -6738,7 +6532,7 @@ func (m *MachineMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Machine entity. // If the Machine object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -6752,22 +6546,9 @@ func (m *MachineMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err e return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *MachineMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[machine.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *MachineMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[machine.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *MachineMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, machine.FieldUpdatedAt) } // SetLastPush sets the "last_push" field. @@ -7508,12 +7289,6 @@ func (m *MachineMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *MachineMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(machine.FieldCreatedAt) { - fields = append(fields, machine.FieldCreatedAt) - } - if m.FieldCleared(machine.FieldUpdatedAt) { - fields = append(fields, machine.FieldUpdatedAt) - } if m.FieldCleared(machine.FieldLastPush) { fields = append(fields, machine.FieldLastPush) } @@ -7543,12 +7318,6 @@ func (m *MachineMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *MachineMutation) ClearField(name string) error { switch name { - case machine.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case machine.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case machine.FieldLastPush: m.ClearLastPush() return nil @@ -7829,7 +7598,7 @@ func (m *MetaMutation) CreatedAt() (r time.Time, exists bool) { // OldCreatedAt returns the old "created_at" field's value of the Meta entity. // If the Meta object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") } @@ -7843,22 +7612,9 @@ func (m *MetaMutation) OldCreatedAt(ctx context.Context) (v *time.Time, err erro return oldValue.CreatedAt, nil } -// ClearCreatedAt clears the value of the "created_at" field. -func (m *MetaMutation) ClearCreatedAt() { - m.created_at = nil - m.clearedFields[meta.FieldCreatedAt] = struct{}{} -} - -// CreatedAtCleared returns if the "created_at" field was cleared in this mutation. -func (m *MetaMutation) CreatedAtCleared() bool { - _, ok := m.clearedFields[meta.FieldCreatedAt] - return ok -} - // ResetCreatedAt resets all changes to the "created_at" field. 
func (m *MetaMutation) ResetCreatedAt() { m.created_at = nil - delete(m.clearedFields, meta.FieldCreatedAt) } // SetUpdatedAt sets the "updated_at" field. @@ -7878,7 +7634,7 @@ func (m *MetaMutation) UpdatedAt() (r time.Time, exists bool) { // OldUpdatedAt returns the old "updated_at" field's value of the Meta entity. // If the Meta object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err error) { +func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") } @@ -7892,22 +7648,9 @@ func (m *MetaMutation) OldUpdatedAt(ctx context.Context) (v *time.Time, err erro return oldValue.UpdatedAt, nil } -// ClearUpdatedAt clears the value of the "updated_at" field. -func (m *MetaMutation) ClearUpdatedAt() { - m.updated_at = nil - m.clearedFields[meta.FieldUpdatedAt] = struct{}{} -} - -// UpdatedAtCleared returns if the "updated_at" field was cleared in this mutation. -func (m *MetaMutation) UpdatedAtCleared() bool { - _, ok := m.clearedFields[meta.FieldUpdatedAt] - return ok -} - // ResetUpdatedAt resets all changes to the "updated_at" field. func (m *MetaMutation) ResetUpdatedAt() { m.updated_at = nil - delete(m.clearedFields, meta.FieldUpdatedAt) } // SetKey sets the "key" field. @@ -8235,12 +7978,6 @@ func (m *MetaMutation) AddField(name string, value ent.Value) error { // mutation. 
func (m *MetaMutation) ClearedFields() []string { var fields []string - if m.FieldCleared(meta.FieldCreatedAt) { - fields = append(fields, meta.FieldCreatedAt) - } - if m.FieldCleared(meta.FieldUpdatedAt) { - fields = append(fields, meta.FieldUpdatedAt) - } if m.FieldCleared(meta.FieldAlertMetas) { fields = append(fields, meta.FieldAlertMetas) } @@ -8258,12 +7995,6 @@ func (m *MetaMutation) FieldCleared(name string) bool { // error if the field is not defined in the schema. func (m *MetaMutation) ClearField(name string) error { switch name { - case meta.FieldCreatedAt: - m.ClearCreatedAt() - return nil - case meta.FieldUpdatedAt: - m.ClearUpdatedAt() - return nil case meta.FieldAlertMetas: m.ClearAlertMetas() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 87073074563..1c5b3460cce 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -26,8 +26,6 @@ func init() { alertDescCreatedAt := alertFields[0].Descriptor() // alert.DefaultCreatedAt holds the default value on creation for the created_at field. alert.DefaultCreatedAt = alertDescCreatedAt.Default.(func() time.Time) - // alert.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - alert.UpdateDefaultCreatedAt = alertDescCreatedAt.UpdateDefault.(func() time.Time) // alertDescUpdatedAt is the schema descriptor for updated_at field. alertDescUpdatedAt := alertFields[1].Descriptor() // alert.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -64,8 +62,6 @@ func init() { bouncerDescCreatedAt := bouncerFields[0].Descriptor() // bouncer.DefaultCreatedAt holds the default value on creation for the created_at field. bouncer.DefaultCreatedAt = bouncerDescCreatedAt.Default.(func() time.Time) - // bouncer.UpdateDefaultCreatedAt holds the default value on update for the created_at field. 
- bouncer.UpdateDefaultCreatedAt = bouncerDescCreatedAt.UpdateDefault.(func() time.Time) // bouncerDescUpdatedAt is the schema descriptor for updated_at field. bouncerDescUpdatedAt := bouncerFields[1].Descriptor() // bouncer.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -94,8 +90,6 @@ func init() { configitemDescCreatedAt := configitemFields[0].Descriptor() // configitem.DefaultCreatedAt holds the default value on creation for the created_at field. configitem.DefaultCreatedAt = configitemDescCreatedAt.Default.(func() time.Time) - // configitem.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - configitem.UpdateDefaultCreatedAt = configitemDescCreatedAt.UpdateDefault.(func() time.Time) // configitemDescUpdatedAt is the schema descriptor for updated_at field. configitemDescUpdatedAt := configitemFields[1].Descriptor() // configitem.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -108,8 +102,6 @@ func init() { decisionDescCreatedAt := decisionFields[0].Descriptor() // decision.DefaultCreatedAt holds the default value on creation for the created_at field. decision.DefaultCreatedAt = decisionDescCreatedAt.Default.(func() time.Time) - // decision.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - decision.UpdateDefaultCreatedAt = decisionDescCreatedAt.UpdateDefault.(func() time.Time) // decisionDescUpdatedAt is the schema descriptor for updated_at field. decisionDescUpdatedAt := decisionFields[1].Descriptor() // decision.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -126,8 +118,6 @@ func init() { eventDescCreatedAt := eventFields[0].Descriptor() // event.DefaultCreatedAt holds the default value on creation for the created_at field. event.DefaultCreatedAt = eventDescCreatedAt.Default.(func() time.Time) - // event.UpdateDefaultCreatedAt holds the default value on update for the created_at field. 
- event.UpdateDefaultCreatedAt = eventDescCreatedAt.UpdateDefault.(func() time.Time) // eventDescUpdatedAt is the schema descriptor for updated_at field. eventDescUpdatedAt := eventFields[1].Descriptor() // event.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -150,8 +140,6 @@ func init() { machineDescCreatedAt := machineFields[0].Descriptor() // machine.DefaultCreatedAt holds the default value on creation for the created_at field. machine.DefaultCreatedAt = machineDescCreatedAt.Default.(func() time.Time) - // machine.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - machine.UpdateDefaultCreatedAt = machineDescCreatedAt.UpdateDefault.(func() time.Time) // machineDescUpdatedAt is the schema descriptor for updated_at field. machineDescUpdatedAt := machineFields[1].Descriptor() // machine.DefaultUpdatedAt holds the default value on creation for the updated_at field. @@ -162,14 +150,10 @@ func init() { machineDescLastPush := machineFields[2].Descriptor() // machine.DefaultLastPush holds the default value on creation for the last_push field. machine.DefaultLastPush = machineDescLastPush.Default.(func() time.Time) - // machine.UpdateDefaultLastPush holds the default value on update for the last_push field. - machine.UpdateDefaultLastPush = machineDescLastPush.UpdateDefault.(func() time.Time) // machineDescLastHeartbeat is the schema descriptor for last_heartbeat field. machineDescLastHeartbeat := machineFields[3].Descriptor() // machine.DefaultLastHeartbeat holds the default value on creation for the last_heartbeat field. machine.DefaultLastHeartbeat = machineDescLastHeartbeat.Default.(func() time.Time) - // machine.UpdateDefaultLastHeartbeat holds the default value on update for the last_heartbeat field. - machine.UpdateDefaultLastHeartbeat = machineDescLastHeartbeat.UpdateDefault.(func() time.Time) // machineDescScenarios is the schema descriptor for scenarios field. 
machineDescScenarios := machineFields[7].Descriptor() // machine.ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. @@ -188,8 +172,6 @@ func init() { metaDescCreatedAt := metaFields[0].Descriptor() // meta.DefaultCreatedAt holds the default value on creation for the created_at field. meta.DefaultCreatedAt = metaDescCreatedAt.Default.(func() time.Time) - // meta.UpdateDefaultCreatedAt holds the default value on update for the created_at field. - meta.UpdateDefaultCreatedAt = metaDescCreatedAt.UpdateDefault.(func() time.Time) // metaDescUpdatedAt is the schema descriptor for updated_at field. metaDescUpdatedAt := metaFields[1].Descriptor() // meta.DefaultUpdatedAt holds the default value on creation for the updated_at field. diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go index f2df9d7f09c..bda7cc7d0b9 100644 --- a/pkg/database/ent/schema/alert.go +++ b/pkg/database/ent/schema/alert.go @@ -19,10 +19,10 @@ func (Alert) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.String("scenario"), field.String("bucketId").Default("").Optional(), field.String("message").Default("").Optional(), diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 986a1bf3ba8..18efecb3f03 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -16,10 +16,10 @@ func (Bouncer) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"created_at"`), + StructTag(`json:"created_at"`), field.Time("updated_at"). Default(types.UtcNow). 
- UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`), + UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`), field.String("api_key").Sensitive(), // hash of api_key field.Bool("revoked").StructTag(`json:"revoked"`), diff --git a/pkg/database/ent/schema/config.go b/pkg/database/ent/schema/config.go index f3320a9cce6..036c55908ba 100644 --- a/pkg/database/ent/schema/config.go +++ b/pkg/database/ent/schema/config.go @@ -11,21 +11,20 @@ type ConfigItem struct { ent.Schema } -// Fields of the Bouncer. func (ConfigItem) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"created_at"`), + Immutable(). + StructTag(`json:"created_at"`), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional().StructTag(`json:"updated_at"`), + UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`), field.String("value").StructTag(`json:"value"`), // a json object } } -// Edges of the Bouncer. func (ConfigItem) Edges() []ent.Edge { return nil } diff --git a/pkg/database/ent/schema/decision.go b/pkg/database/ent/schema/decision.go index b7a99fb7a70..d5193910146 100644 --- a/pkg/database/ent/schema/decision.go +++ b/pkg/database/ent/schema/decision.go @@ -19,10 +19,10 @@ func (Decision) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). 
- UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("until").Nillable().Optional().SchemaType(map[string]string{ dialect.MySQL: "datetime", }), diff --git a/pkg/database/ent/schema/event.go b/pkg/database/ent/schema/event.go index 6b6d2733ff7..f982ebe9653 100644 --- a/pkg/database/ent/schema/event.go +++ b/pkg/database/ent/schema/event.go @@ -18,10 +18,10 @@ func (Event) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("time"), field.String("serialized").MaxLen(8191), field.Int("alert_events").Optional(), diff --git a/pkg/database/ent/schema/lock.go b/pkg/database/ent/schema/lock.go index de87efff3f7..0d49bac1bf6 100644 --- a/pkg/database/ent/schema/lock.go +++ b/pkg/database/ent/schema/lock.go @@ -12,7 +12,7 @@ type Lock struct { func (Lock) Fields() []ent.Field { return []ent.Field{ - field.String("name").Unique().StructTag(`json:"name"`), + field.String("name").Unique().Immutable().StructTag(`json:"name"`), field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`), } } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index e155c936071..997a2041453 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -17,17 +17,19 @@ func (Machine) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Immutable(), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.Time("last_push"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Nillable().Optional(), field.Time("last_heartbeat"). 
Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), - field.String("machineId").Unique(), + Nillable().Optional(), + field.String("machineId"). + Unique(). + Immutable(), field.String("password").Sensitive(), field.String("ipAddress"), field.String("scenarios").MaxLen(100000).Optional(), diff --git a/pkg/database/ent/schema/meta.go b/pkg/database/ent/schema/meta.go index 1a84bb1b667..877fffa8a2e 100644 --- a/pkg/database/ent/schema/meta.go +++ b/pkg/database/ent/schema/meta.go @@ -17,11 +17,10 @@ type Meta struct { func (Meta) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). - Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + Default(types.UtcNow), field.Time("updated_at"). Default(types.UtcNow). - UpdateDefault(types.UtcNow).Nillable().Optional(), + UpdateDefault(types.UtcNow), field.String("key"), field.String("value").MaxLen(4095), field.Int("alert_metas").Optional(), diff --git a/pkg/database/machines.go b/pkg/database/machines.go index b9834e57e09..7a64c1d4d6e 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -134,14 +134,6 @@ func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { return nbDeleted, nil } -func (c *Client) UpdateMachineLastPush(machineID string) error { - _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastPush(time.Now().UTC()).Save(c.CTX) - if err != nil { - return errors.Wrapf(UpdateFail, "updating machine last_push: %s", err) - } - return nil -} - func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(c.CTX) if err != nil { From 529d3b20796c5df53a231bca20c35f80b44f9f08 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 13:42:47 +0200 Subject: [PATCH 112/581] minor lint/readability (#2976) * simplify a couple loops * if/else -> 
switch * drop redundant else * comment + drop var declaration + explicit zero return * lint (whitespace/fmt.Errorf) --- pkg/alertcontext/alertcontext.go | 36 ++++-- pkg/apiserver/apic.go | 58 +++++---- pkg/database/utils.go | 29 +++-- pkg/leakybucket/manager_load.go | 196 +++++++++++++++++++------------ pkg/types/ip.go | 21 +++- 5 files changed, 212 insertions(+), 128 deletions(-) diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 7586e7cb4af..8b0648ca0eb 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -19,9 +19,7 @@ const ( maxContextValueLen = 4000 ) -var ( - alertContext = Context{} -) +var alertContext = Context{} type Context struct { ContextToSend map[string][]string @@ -37,19 +35,21 @@ func ValidateContextExpr(key string, expressions []string) error { return fmt.Errorf("compilation of '%s' failed: %v", expression, err) } } + return nil } func NewAlertContext(contextToSend map[string][]string, valueLength int) error { - var clog = log.New() + clog := log.New() if err := types.ConfigureLogger(clog); err != nil { - return fmt.Errorf("couldn't create logger for alert context: %s", err) + return fmt.Errorf("couldn't create logger for alert context: %w", err) } if valueLength == 0 { clog.Debugf("No console context value length provided, using default: %d", maxContextValueLen) valueLength = maxContextValueLen } + if valueLength > maxContextValueLen { clog.Debugf("Provided console context value length (%d) is higher than the maximum, using default: %d", valueLength, maxContextValueLen) valueLength = maxContextValueLen @@ -76,6 +76,7 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { if err != nil { return fmt.Errorf("compilation of '%s' context value failed: %v", value, err) } + alertContext.ContextToSendCompiled[key] = append(alertContext.ContextToSendCompiled[key], valueCompiled) alertContext.ContextToSend[key] = append(alertContext.ContextToSend[key], 
value) } @@ -85,16 +86,13 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { } func truncate(values []string, contextValueLen int) (string, error) { - var ret string valueByte, err := json.Marshal(values) if err != nil { - return "", fmt.Errorf("unable to dump metas: %s", err) + return "", fmt.Errorf("unable to dump metas: %w", err) } - ret = string(valueByte) - for { - if len(ret) <= contextValueLen { - break - } + + ret := string(valueByte) + for len(ret) > contextValueLen { // if there is only 1 value left and that the size is too big, truncate it if len(values) == 1 { valueToTruncate := values[0] @@ -106,12 +104,15 @@ func truncate(values []string, contextValueLen int) (string, error) { // if there is multiple value inside, just remove the last one values = values[:len(values)-1] } + valueByte, err = json.Marshal(values) if err != nil { - return "", fmt.Errorf("unable to dump metas: %s", err) + return "", fmt.Errorf("unable to dump metas: %w", err) } + ret = string(valueByte) } + return ret, nil } @@ -120,18 +121,22 @@ func EventToContext(events []types.Event) (models.Meta, []error) { metas := make([]*models.MetaItems0, 0) tmpContext := make(map[string][]string) + for _, evt := range events { for key, values := range alertContext.ContextToSendCompiled { if _, ok := tmpContext[key]; !ok { tmpContext[key] = make([]string, 0) } + for _, value := range values { var val string + output, err := expr.Run(value, map[string]interface{}{"evt": evt}) if err != nil { errors = append(errors, fmt.Errorf("failed to get value for %s : %v", key, err)) continue } + switch out := output.(type) { case string: val = out @@ -141,20 +146,24 @@ func EventToContext(events []types.Event) (models.Meta, []error) { errors = append(errors, fmt.Errorf("unexpected return type for %s : %T", key, output)) continue } + if val != "" && !slices.Contains(tmpContext[key], val) { tmpContext[key] = append(tmpContext[key], val) } } } } + for key, values := range 
tmpContext { if len(values) == 0 { continue } + valueStr, err := truncate(values, alertContext.ContextValueLen) if err != nil { log.Warningf(err.Error()) } + meta := models.MetaItems0{ Key: key, Value: valueStr, @@ -163,5 +172,6 @@ func EventToContext(events []types.Event) (models.Meta, []error) { } ret := models.Meta(metas) + return ret, errors } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 2136edc8b8e..3f646071b0e 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -81,12 +81,12 @@ func randomDuration(d time.Duration, delta time.Duration) time.Duration { func (a *apic) FetchScenariosListFromDB() ([]string, error) { scenarios := make([]string, 0) - machines, err := a.dbClient.ListMachines() + machines, err := a.dbClient.ListMachines() if err != nil { return nil, fmt.Errorf("while listing machines: %w", err) } - //merge all scenarios together + // merge all scenarios together for _, v := range machines { machineScenarios := strings.Split(v.Scenarios, ",") log.Debugf("%d scenarios for machine %d", len(machineScenarios), v.ID) @@ -113,7 +113,7 @@ func decisionsToApiDecisions(decisions []*models.Decision) models.AddSignalsRequ Origin: ptr.Of(*decision.Origin), Scenario: ptr.Of(*decision.Scenario), Scope: ptr.Of(*decision.Scope), - //Simulated: *decision.Simulated, + // Simulated: *decision.Simulated, Type: ptr.Of(*decision.Type), Until: decision.Until, Value: ptr.Of(*decision.Value), @@ -196,8 +196,8 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con } password := strfmt.Password(config.Credentials.Password) - apiURL, err := url.Parse(config.Credentials.URL) + apiURL, err := url.Parse(config.Credentials.URL) if err != nil { return nil, fmt.Errorf("while parsing '%s': %w", config.Credentials.URL, err) } @@ -376,7 +376,6 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { defer cancel() _, _, err := a.apiClient.Signal.Add(ctx, &send) - if err != nil { log.Errorf("sending signal to central API: 
%s", err) return @@ -391,9 +390,8 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { defer cancel() _, _, err := a.apiClient.Signal.Add(ctx, &send) - if err != nil { - //we log it here as well, because the return value of func might be discarded + // we log it here as well, because the return value of func might be discarded log.Errorf("sending signal to central API: %s", err) } @@ -407,8 +405,8 @@ func (a *apic) CAPIPullIsOld() (bool, error) { alerts := a.dbClient.Ent.Alert.Query() alerts = alerts.Where(alert.HasDecisionsWith(decision.OriginEQ(database.CapiMachineID))) alerts = alerts.Where(alert.CreatedAtGTE(time.Now().UTC().Add(-time.Duration(1*time.Hour + 30*time.Minute)))) //nolint:unconvert - count, err := alerts.Count(a.dbClient.CTX) + count, err := alerts.Count(a.dbClient.CTX) if err != nil { return false, fmt.Errorf("while looking for CAPI alert: %w", err) } @@ -506,6 +504,7 @@ func createAlertsForDecisions(decisions []*models.Decision) []*models.Alert { if sub.Scenario == nil { log.Warningf("nil scenario in %+v", sub) } + if *sub.Scenario == *decision.Scenario { found = true break @@ -567,7 +566,7 @@ func createAlertForDecision(decision *models.Decision) *models.Alert { // This function takes in list of parent alerts and decisions and then pairs them up. 
func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decision, addCounters map[string]map[string]int) []*models.Alert { for _, decision := range decisions { - //count and create separate alerts for each list + // count and create separate alerts for each list updateCounterForDecision(addCounters, decision.Origin, decision.Scenario, 1) /*CAPI might send lower case scopes, unify it.*/ @@ -579,7 +578,7 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio } found := false - //add the individual decisions to the right list + // add the individual decisions to the right list for idx, alert := range alerts { if *decision.Origin == types.CAPIOrigin { if *alert.Source.Scope == types.CAPIOrigin { @@ -592,6 +591,7 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio if *alert.Source.Scope == types.ListOrigin && *alert.Scenario == *decision.Scenario { alerts[idx].Decisions = append(alerts[idx].Decisions, decision) found = true + break } } else { @@ -613,8 +613,8 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio func (a *apic) PullTop(forcePull bool) error { var err error - //A mutex with TryLock would be a bit simpler - //But go does not guarantee that TryLock will be able to acquire the lock even if it is available + // A mutex with TryLock would be a bit simpler + // But go does not guarantee that TryLock will be able to acquire the lock even if it is available select { case a.isPulling <- true: defer func() { @@ -633,6 +633,7 @@ func (a *apic) PullTop(forcePull bool) error { } log.Debug("Acquiring lock for pullCAPI") + err = a.dbClient.AcquirePullCAPILock() if a.dbClient.IsLocked(err) { log.Info("PullCAPI is already running, skipping") @@ -642,6 +643,7 @@ func (a *apic) PullTop(forcePull bool) error { /*defer lock release*/ defer func() { log.Debug("Releasing lock for pullCAPI") + if err := a.dbClient.ReleasePullCAPILock(); err != nil { log.Errorf("while 
releasing lock: %v", err) } @@ -681,7 +683,7 @@ func (a *apic) PullTop(forcePull bool) error { // create one alert for community blocklist using the first decision decisions := a.apiClient.Decisions.GetDecisionsFromGroups(data.New) - //apply APIC specific whitelists + // apply APIC specific whitelists decisions = a.ApplyApicWhitelists(decisions) alert := createAlertForDecision(decisions[0]) @@ -740,7 +742,7 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis if a.whitelists == nil || len(a.whitelists.Cidrs) == 0 && len(a.whitelists.Ips) == 0 { return decisions } - //deal with CAPI whitelists for fire. We want to avoid having a second list, so we shrink in place + // deal with CAPI whitelists for fire. We want to avoid having a second list, so we shrink in place outIdx := 0 for _, decision := range decisions { @@ -753,7 +755,7 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis decisions[outIdx] = decision outIdx++ } - //shrink the list, those are deleted items + // shrink the list, those are deleted items return decisions[:outIdx] } @@ -782,8 +784,8 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo alertQuery := a.dbClient.Ent.Alert.Query() alertQuery.Where(alert.SourceScopeEQ(fmt.Sprintf("%s:%s", types.ListOrigin, *blocklist.Name))) alertQuery.Order(ent.Desc(alert.FieldCreatedAt)) - alertInstance, err := alertQuery.First(context.Background()) + alertInstance, err := alertQuery.First(context.Background()) if err != nil { if ent.IsNotFound(err) { log.Debugf("no alert found for %s, force refresh", *blocklist.Name) @@ -795,8 +797,8 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo decisionQuery := a.dbClient.Ent.Decision.Query() decisionQuery.Where(decision.HasOwnerWith(alert.IDEQ(alertInstance.ID))) - firstDecision, err := decisionQuery.First(context.Background()) + firstDecision, err := decisionQuery.First(context.Background()) if err 
!= nil { if ent.IsNotFound(err) { log.Debugf("no decision found for %s, force refresh", *blocklist.Name) @@ -872,7 +874,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap log.Infof("blocklist %s has no decisions", *blocklist.Name) return nil } - //apply APIC specific whitelists + // apply APIC specific whitelists decisions = a.ApplyApicWhitelists(decisions) alert := createAlertForDecision(decisions[0]) alertsFromCapi := []*models.Alert{alert} @@ -911,12 +913,17 @@ func (a *apic) UpdateBlocklists(links *modelscapi.GetDecisionsStreamResponseLink } func setAlertScenario(alert *models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) { - if *alert.Source.Scope == types.CAPIOrigin { + switch *alert.Source.Scope { + case types.CAPIOrigin: *alert.Source.Scope = types.CommunityBlocklistPullSourceScope - alert.Scenario = ptr.Of(fmt.Sprintf("update : +%d/-%d IPs", addCounters[types.CAPIOrigin]["all"], deleteCounters[types.CAPIOrigin]["all"])) - } else if *alert.Source.Scope == types.ListOrigin { + alert.Scenario = ptr.Of(fmt.Sprintf("update : +%d/-%d IPs", + addCounters[types.CAPIOrigin]["all"], + deleteCounters[types.CAPIOrigin]["all"])) + case types.ListOrigin: *alert.Source.Scope = fmt.Sprintf("%s:%s", types.ListOrigin, *alert.Scenario) - alert.Scenario = ptr.Of(fmt.Sprintf("update : +%d/-%d IPs", addCounters[types.ListOrigin][*alert.Scenario], deleteCounters[types.ListOrigin][*alert.Scenario])) + alert.Scenario = ptr.Of(fmt.Sprintf("update : +%d/-%d IPs", + addCounters[types.ListOrigin][*alert.Scenario], + deleteCounters[types.ListOrigin][*alert.Scenario])) } } @@ -988,11 +995,12 @@ func makeAddAndDeleteCounters() (map[string]map[string]int, map[string]map[strin } func updateCounterForDecision(counter map[string]map[string]int, origin *string, scenario *string, totalDecisions int) { - if *origin == types.CAPIOrigin { + switch *origin { + case types.CAPIOrigin: counter[*origin]["all"] += 
totalDecisions - } else if *origin == types.ListOrigin { + case types.ListOrigin: counter[*origin][*scenario] += totalDecisions - } else { + default: log.Warningf("Unknown origin %s", *origin) } } diff --git a/pkg/database/utils.go b/pkg/database/utils.go index 2414e702786..f1c06565635 100644 --- a/pkg/database/utils.go +++ b/pkg/database/utils.go @@ -13,12 +13,14 @@ func IP2Int(ip net.IP) uint32 { if len(ip) == 16 { return binary.BigEndian.Uint32(ip[12:16]) } + return binary.BigEndian.Uint32(ip) } func Int2ip(nn uint32) net.IP { ip := make(net.IP, 4) binary.BigEndian.PutUint32(ip, nn) + return ip } @@ -26,13 +28,14 @@ func IsIpv4(host string) bool { return net.ParseIP(host) != nil } -//Stolen from : https://github.com/llimllib/ipaddress/ +// Stolen from : https://github.com/llimllib/ipaddress/ // Return the final address of a net range. Convert to IPv4 if possible, // otherwise return an ipv6 func LastAddress(n *net.IPNet) net.IP { ip := n.IP.To4() if ip == nil { ip = n.IP + return net.IP{ ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2], ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5], @@ -49,40 +52,44 @@ func LastAddress(n *net.IPNet) net.IP { ip[3]|^n.Mask[3]) } +// GetIpsFromIpRange takes a CIDR range and returns the start and end IP func GetIpsFromIpRange(host string) (int64, int64, error) { - var ipStart int64 - var ipEnd int64 - var err error - var parsedRange *net.IPNet - - if _, parsedRange, err = net.ParseCIDR(host); err != nil { - return ipStart, ipEnd, fmt.Errorf("'%s' is not a valid CIDR", host) + _, parsedRange, err := net.ParseCIDR(host) + if err != nil { + return 0, 0, fmt.Errorf("'%s' is not a valid CIDR", host) } + if parsedRange == nil { - return ipStart, ipEnd, fmt.Errorf("unable to parse network : %s", err) + return 0, 0, fmt.Errorf("unable to parse network: %w", err) } - ipStart = int64(IP2Int(parsedRange.IP)) - ipEnd = int64(IP2Int(LastAddress(parsedRange))) + + ipStart := int64(IP2Int(parsedRange.IP)) + ipEnd := 
int64(IP2Int(LastAddress(parsedRange))) return ipStart, ipEnd, nil } func ParseDuration(d string) (time.Duration, error) { durationStr := d + if strings.HasSuffix(d, "d") { days := strings.Split(d, "d")[0] if len(days) == 0 { return 0, fmt.Errorf("'%s' can't be parsed as duration", d) } + daysInt, err := strconv.Atoi(days) if err != nil { return 0, err } + durationStr = strconv.Itoa(daysInt*24) + "h" } + duration, err := time.ParseDuration(durationStr) if err != nil { return 0, err } + return duration, nil } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 85eee89d933..bc259c18319 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -34,42 +34,42 @@ type BucketFactory struct { Author string `yaml:"author"` Description string `yaml:"description"` References []string `yaml:"references"` - Type string `yaml:"type"` //Type can be : leaky, counter, trigger. It determines the main bucket characteristics - Name string `yaml:"name"` //Name of the bucket, used later in log and user-messages. Should be unique - Capacity int `yaml:"capacity"` //Capacity is applicable to leaky buckets and determines the "burst" capacity - LeakSpeed string `yaml:"leakspeed"` //Leakspeed is a float representing how many events per second leak out of the bucket - Duration string `yaml:"duration"` //Duration allows 'counter' buckets to have a fixed life-time - Filter string `yaml:"filter"` //Filter is an expr that determines if an event is elligible for said bucket. Filter is evaluated against the Event struct - GroupBy string `yaml:"groupby,omitempty"` //groupy is an expr that allows to determine the partitions of the bucket. 
A common example is the source_ip - Distinct string `yaml:"distinct"` //Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result) - Debug bool `yaml:"debug"` //Debug, when set to true, will enable debugging for _this_ scenario specifically - Labels map[string]interface{} `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow - Blackhole string `yaml:"blackhole,omitempty"` //Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration - logger *log.Entry `yaml:"-"` //logger is bucket-specific logger (used by Debug as well) - Reprocess bool `yaml:"reprocess"` //Reprocess, if true, will for the bucket to be re-injected into processing chain - CacheSize int `yaml:"cache_size"` //CacheSize, if > 0, limits the size of in-memory cache of the bucket - Profiling bool `yaml:"profiling"` //Profiling, if true, will make the bucket record pours/overflows/etc. - OverflowFilter string `yaml:"overflow_filter"` //OverflowFilter if present, is a filter that must return true for the overflow to go through - ConditionalOverflow string `yaml:"condition"` //condition if present, is an expression that must return true for the bucket to overflow + Type string `yaml:"type"` // Type can be : leaky, counter, trigger. It determines the main bucket characteristics + Name string `yaml:"name"` // Name of the bucket, used later in log and user-messages. Should be unique + Capacity int `yaml:"capacity"` // Capacity is applicable to leaky buckets and determines the "burst" capacity + LeakSpeed string `yaml:"leakspeed"` // Leakspeed is a float representing how many events per second leak out of the bucket + Duration string `yaml:"duration"` // Duration allows 'counter' buckets to have a fixed life-time + Filter string `yaml:"filter"` // Filter is an expr that determines if an event is elligible for said bucket. 
Filter is evaluated against the Event struct + GroupBy string `yaml:"groupby,omitempty"` // groupy is an expr that allows to determine the partitions of the bucket. A common example is the source_ip + Distinct string `yaml:"distinct"` // Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result) + Debug bool `yaml:"debug"` // Debug, when set to true, will enable debugging for _this_ scenario specifically + Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow + Blackhole string `yaml:"blackhole,omitempty"` // Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration + logger *log.Entry `yaml:"-"` // logger is bucket-specific logger (used by Debug as well) + Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain + CacheSize int `yaml:"cache_size"` // CacheSize, if > 0, limits the size of in-memory cache of the bucket + Profiling bool `yaml:"profiling"` // Profiling, if true, will make the bucket record pours/overflows/etc. + OverflowFilter string `yaml:"overflow_filter"` // OverflowFilter if present, is a filter that must return true for the overflow to go through + ConditionalOverflow string `yaml:"condition"` // condition if present, is an expression that must return true for the bucket to overflow BayesianPrior float32 `yaml:"bayesian_prior"` BayesianThreshold float32 `yaml:"bayesian_threshold"` - BayesianConditions []RawBayesianCondition `yaml:"bayesian_conditions"` //conditions for the bayesian bucket - ScopeType types.ScopeType `yaml:"scope,omitempty"` //to enforce a different remediation than blocking an IP. 
Will default this to IP + BayesianConditions []RawBayesianCondition `yaml:"bayesian_conditions"` // conditions for the bayesian bucket + ScopeType types.ScopeType `yaml:"scope,omitempty"` // to enforce a different remediation than blocking an IP. Will default this to IP BucketName string `yaml:"-"` Filename string `yaml:"-"` RunTimeFilter *vm.Program `json:"-"` RunTimeGroupBy *vm.Program `json:"-"` Data []*types.DataSource `yaml:"data,omitempty"` DataDir string `yaml:"-"` - CancelOnFilter string `yaml:"cancel_on,omitempty"` //a filter that, if matched, kills the bucket - leakspeed time.Duration //internal representation of `Leakspeed` - duration time.Duration //internal representation of `Duration` - ret chan types.Event //the bucket-specific output chan for overflows - processors []Processor //processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) - output bool //?? + CancelOnFilter string `yaml:"cancel_on,omitempty"` // a filter that, if matched, kills the bucket + leakspeed time.Duration // internal representation of `Leakspeed` + duration time.Duration // internal representation of `Duration` + ret chan types.Event // the bucket-specific output chan for overflows + processors []Processor // processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) + output bool // ?? 
ScenarioVersion string `yaml:"version,omitempty"` hash string `yaml:"-"` - Simulated bool `yaml:"simulated"` //Set to true if the scenario instantiating the bucket was in the exclusion list + Simulated bool `yaml:"simulated"` // Set to true if the scenario instantiating the bucket was in the exclusion list tomb *tomb.Tomb `yaml:"-"` wgPour *sync.WaitGroup `yaml:"-"` wgDumpState *sync.WaitGroup `yaml:"-"` @@ -81,66 +81,80 @@ var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC func ValidateFactory(bucketFactory *BucketFactory) error { if bucketFactory.Name == "" { - return fmt.Errorf("bucket must have name") + return errors.New("bucket must have name") } + if bucketFactory.Description == "" { - return fmt.Errorf("description is mandatory") + return errors.New("description is mandatory") } + if bucketFactory.Type == "leaky" { - if bucketFactory.Capacity <= 0 { //capacity must be a positive int + if bucketFactory.Capacity <= 0 { // capacity must be a positive int return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) } + if bucketFactory.LeakSpeed == "" { - return fmt.Errorf("leakspeed can't be empty for leaky") + return errors.New("leakspeed can't be empty for leaky") } + if bucketFactory.leakspeed == 0 { return fmt.Errorf("bad leakspeed for leaky '%s'", bucketFactory.LeakSpeed) } } else if bucketFactory.Type == "counter" { if bucketFactory.Duration == "" { - return fmt.Errorf("duration can't be empty for counter") + return errors.New("duration can't be empty for counter") } + if bucketFactory.duration == 0 { return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) } + if bucketFactory.Capacity != -1 { - return fmt.Errorf("counter bucket must have -1 capacity") + return errors.New("counter bucket must have -1 capacity") } } else if bucketFactory.Type == "trigger" { if bucketFactory.Capacity != 0 { - return fmt.Errorf("trigger bucket must have 0 capacity") + return errors.New("trigger bucket must 
have 0 capacity") } } else if bucketFactory.Type == "conditional" { if bucketFactory.ConditionalOverflow == "" { - return fmt.Errorf("conditional bucket must have a condition") + return errors.New("conditional bucket must have a condition") } + if bucketFactory.Capacity != -1 { bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows") } + if bucketFactory.LeakSpeed == "" { - return fmt.Errorf("leakspeed can't be empty for conditional bucket") + return errors.New("leakspeed can't be empty for conditional bucket") } + if bucketFactory.leakspeed == 0 { return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed) } } else if bucketFactory.Type == "bayesian" { if bucketFactory.BayesianConditions == nil { - return fmt.Errorf("bayesian bucket must have bayesian conditions") + return errors.New("bayesian bucket must have bayesian conditions") } + if bucketFactory.BayesianPrior == 0 { - return fmt.Errorf("bayesian bucket must have a valid, non-zero prior") + return errors.New("bayesian bucket must have a valid, non-zero prior") } + if bucketFactory.BayesianThreshold == 0 { - return fmt.Errorf("bayesian bucket must have a valid, non-zero threshold") + return errors.New("bayesian bucket must have a valid, non-zero threshold") } + if bucketFactory.BayesianPrior > 1 { - return fmt.Errorf("bayesian bucket must have a valid, non-zero prior") + return errors.New("bayesian bucket must have a valid, non-zero prior") } + if bucketFactory.BayesianThreshold > 1 { - return fmt.Errorf("bayesian bucket must have a valid, non-zero threshold") + return errors.New("bayesian bucket must have a valid, non-zero threshold") } + if bucketFactory.Capacity != -1 { - return fmt.Errorf("bayesian bucket must have capacity -1") + return errors.New("bayesian bucket must have capacity -1") } } else { return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) @@ -155,26 +169,31 @@ func 
ValidateFactory(bucketFactory *BucketFactory) error { runTimeFilter *vm.Program err error ) + if bucketFactory.ScopeType.Filter != "" { if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("Error compiling the scope filter: %s", err) + return fmt.Errorf("error compiling the scope filter: %w", err) } + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter } default: - //Compile the scope filter + // Compile the scope filter var ( runTimeFilter *vm.Program err error ) + if bucketFactory.ScopeType.Filter != "" { if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("Error compiling the scope filter: %s", err) + return fmt.Errorf("error compiling the scope filter: %w", err) } + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter } } + return nil } @@ -185,48 +204,58 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str ) response = make(chan types.Event, 1) + for _, f := range files { log.Debugf("Loading '%s'", f) + if !strings.HasSuffix(f, ".yaml") && !strings.HasSuffix(f, ".yml") { log.Debugf("Skipping %s : not a yaml file", f) continue } - //process the yaml + // process the yaml bucketConfigurationFile, err := os.Open(f) if err != nil { log.Errorf("Can't access leaky configuration file %s", f) return nil, nil, err } + defer bucketConfigurationFile.Close() dec := yaml.NewDecoder(bucketConfigurationFile) dec.SetStrict(true) + for { bucketFactory := BucketFactory{} + err = dec.Decode(&bucketFactory) if err != nil { if !errors.Is(err, io.EOF) { log.Errorf("Bad yaml in %s : %v", f, err) return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err) } + log.Tracef("End of yaml file") + break } + bucketFactory.DataDir = hub.GetDataDir() - //check empty + // check empty if bucketFactory.Name == 
"" { log.Errorf("Won't load nameless bucket") - return nil, nil, fmt.Errorf("nameless bucket") + return nil, nil, errors.New("nameless bucket") } - //check compat + // check compat if bucketFactory.FormatVersion == "" { log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, f) bucketFactory.FormatVersion = "1.0" } + ok, err := cwversion.Satisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario) if err != nil { return nil, nil, fmt.Errorf("failed to check version : %s", err) } + if !ok { log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, cwversion.Constraint_scenario) continue @@ -235,6 +264,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.Filename = filepath.Clean(f) bucketFactory.BucketName = seed.Generate() bucketFactory.ret = response + hubItem, err := hub.GetItemByPath(cwhub.SCENARIOS, bucketFactory.Filename) if err != nil { log.Errorf("scenario %s (%s) couldn't be find in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) @@ -242,6 +272,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str if cscfg.SimulationConfig != nil { bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name) } + if hubItem != nil { bucketFactory.ScenarioVersion = hubItem.State.LocalVersion bucketFactory.hash = hubItem.State.LocalHash @@ -252,6 +283,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.wgDumpState = buckets.wgDumpState bucketFactory.wgPour = buckets.wgPour + err = LoadBucket(&bucketFactory, tomb) if err != nil { log.Errorf("Failed to load bucket %s : %v", bucketFactory.Name, err) @@ -265,21 +297,24 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str } if err := alertcontext.NewAlertContext(cscfg.ContextToSend, cscfg.ConsoleContextValueLength); err != nil { - return nil, nil, 
fmt.Errorf("unable to load alert context: %s", err) + return nil, nil, fmt.Errorf("unable to load alert context: %w", err) } log.Infof("Loaded %d scenarios", len(ret)) + return ret, response, nil } /* Init recursively process yaml files from a directory and loads them as BucketFactory */ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { var err error + if bucketFactory.Debug { - var clog = log.New() + clog := log.New() if err := types.ConfigureLogger(clog); err != nil { log.Fatalf("While creating bucket-specific logger : %s", err) } + clog.SetLevel(log.DebugLevel) bucketFactory.logger = clog.WithFields(log.Fields{ "cfg": bucketFactory.BucketName, @@ -300,6 +335,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { } else { bucketFactory.leakspeed = time.Duration(0) } + if bucketFactory.Duration != "" { if bucketFactory.duration, err = time.ParseDuration(bucketFactory.Duration); err != nil { return fmt.Errorf("invalid Duration '%s' in %s : %v", bucketFactory.Duration, bucketFactory.Filename, err) @@ -308,8 +344,9 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Filter == "" { bucketFactory.logger.Warning("Bucket without filter, abort.") - return fmt.Errorf("bucket without filter directive") + return errors.New("bucket without filter directive") } + bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { return fmt.Errorf("invalid filter '%s' in %s : %v", bucketFactory.Filter, bucketFactory.Filename, err) @@ -323,7 +360,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { } bucketFactory.logger.Infof("Adding %s bucket", bucketFactory.Type) - //return the Holder corresponding to the type of bucket + // return the Holder corresponding to the type of bucket bucketFactory.processors = []Processor{} switch bucketFactory.Type { case "leaky": @@ -352,21 +389,25 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.OverflowFilter != "" { bucketFactory.logger.Tracef("Adding an overflow filter") + filovflw, err := NewOverflowFilter(bucketFactory) if err != nil { bucketFactory.logger.Errorf("Error creating overflow_filter : %s", err) - return fmt.Errorf("error creating overflow_filter : %s", err) + return fmt.Errorf("error creating overflow_filter: %w", err) } + bucketFactory.processors = append(bucketFactory.processors, filovflw) } if bucketFactory.Blackhole != "" { bucketFactory.logger.Tracef("Adding blackhole.") + blackhole, err := NewBlackhole(bucketFactory) if err != nil { bucketFactory.logger.Errorf("Error creating blackhole : %s", err) - return fmt.Errorf("error creating blackhole : %s", err) + return fmt.Errorf("error creating blackhole : %w", err) } + bucketFactory.processors = append(bucketFactory.processors, blackhole) } @@ -380,19 +421,19 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.processors = append(bucketFactory.processors, &BayesianBucket{}) } - if len(bucketFactory.Data) > 0 { - for _, data := range bucketFactory.Data { - if data.DestPath == "" { - bucketFactory.logger.Errorf("no dest_file provided for '%s'", bucketFactory.Name) - continue - } - err = exprhelpers.FileInit(bucketFactory.DataDir, data.DestPath, data.Type) - if err != nil { - bucketFactory.logger.Errorf("unable to init data for file '%s': %s", data.DestPath, err) - } - if 
data.Type == "regexp" { //cache only makes sense for regexp - exprhelpers.RegexpCacheInit(data.DestPath, *data) - } + for _, data := range bucketFactory.Data { + if data.DestPath == "" { + bucketFactory.logger.Errorf("no dest_file provided for '%s'", bucketFactory.Name) + continue + } + + err = exprhelpers.FileInit(bucketFactory.DataDir, data.DestPath, data.Type) + if err != nil { + bucketFactory.logger.Errorf("unable to init data for file '%s': %s", data.DestPath, err) + } + + if data.Type == "regexp" { // cache only makes sense for regexp + exprhelpers.RegexpCacheInit(data.DestPath, *data) } } @@ -400,34 +441,40 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if err := ValidateFactory(bucketFactory); err != nil { return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err) } + bucketFactory.tomb = tomb return nil - } func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFactory) error { var state map[string]Leaky + body, err := os.ReadFile(file) if err != nil { - return fmt.Errorf("can't state file %s : %s", file, err) + return fmt.Errorf("can't read state file %s: %w", file, err) } + if err := json.Unmarshal(body, &state); err != nil { - return fmt.Errorf("can't unmarshal state file %s : %s", file, err) + return fmt.Errorf("can't unmarshal state file %s: %w", file, err) } + for k, v := range state { var tbucket *Leaky + log.Debugf("Reloading bucket %s", k) + val, ok := buckets.Bucket_map.Load(k) if ok { log.Fatalf("key %s already exists : %+v", k, val) } - //find back our holder + // find back our holder found := false + for _, h := range bucketFactories { if h.Name == v.Name { log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) - //check in which mode the bucket was + // check in which mode the bucket was if v.Mode == types.TIMEMACHINE { tbucket = NewTimeMachine(h) } else if v.Mode == types.LIVE { @@ -451,16 +498,19 @@ func LoadBucketsState(file string, buckets *Buckets, 
bucketFactories []BucketFac return LeakRoutine(tbucket) }) <-tbucket.Signal + found = true + break } } + if !found { log.Fatalf("Unable to find holder for bucket %s : %s", k, spew.Sdump(v)) } } log.Infof("Restored %d buckets from dump", len(state)) - return nil + return nil } diff --git a/pkg/types/ip.go b/pkg/types/ip.go index 5e4d7734f2d..9d08afd8809 100644 --- a/pkg/types/ip.go +++ b/pkg/types/ip.go @@ -2,6 +2,7 @@ package types import ( "encoding/binary" + "errors" "fmt" "math" "net" @@ -15,6 +16,7 @@ func LastAddress(n net.IPNet) net.IP { if ip == nil { // IPv6 ip = n.IP + return net.IP{ ip[0] | ^n.Mask[0], ip[1] | ^n.Mask[1], ip[2] | ^n.Mask[2], ip[3] | ^n.Mask[3], ip[4] | ^n.Mask[4], ip[5] | ^n.Mask[5], @@ -38,12 +40,13 @@ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { if err != nil { return -1, 0, 0, 0, 0, fmt.Errorf("while parsing range %s: %w", anyIP, err) } + return Range2Ints(*net) } ip := net.ParseIP(anyIP) if ip == nil { - return -1, 0, 0, 0, 0, fmt.Errorf("invalid address") + return -1, 0, 0, 0, 0, errors.New("invalid address") } sz, start, end, err := IP2Ints(ip) @@ -56,19 +59,22 @@ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { /*size (16|4), nw_start, suffix_start, nw_end, suffix_end, error*/ func Range2Ints(network net.IPNet) (int, int64, int64, int64, int64, error) { - szStart, nwStart, sfxStart, err := IP2Ints(network.IP) if err != nil { return -1, 0, 0, 0, 0, fmt.Errorf("converting first ip in range: %w", err) } + lastAddr := LastAddress(network) + szEnd, nwEnd, sfxEnd, err := IP2Ints(lastAddr) if err != nil { return -1, 0, 0, 0, 0, fmt.Errorf("transforming last address of range: %w", err) } + if szEnd != szStart { return -1, 0, 0, 0, 0, fmt.Errorf("inconsistent size for range first(%d) and last(%d) ip", szStart, szEnd) } + return szStart, nwStart, sfxStart, nwEnd, sfxEnd, nil } @@ -85,6 +91,7 @@ func uint2int(u uint64) int64 { ret = int64(u) ret -= math.MaxInt64 } + return ret } @@ -97,13 
+104,15 @@ func IP2Ints(pip net.IP) (int, int64, int64, error) { if pip4 != nil { ip_nw32 := binary.BigEndian.Uint32(pip4) - return 4, uint2int(uint64(ip_nw32)), uint2int(ip_sfx), nil - } else if pip16 != nil { + } + + if pip16 != nil { ip_nw = binary.BigEndian.Uint64(pip16[0:8]) ip_sfx = binary.BigEndian.Uint64(pip16[8:16]) + return 16, uint2int(ip_nw), uint2int(ip_sfx), nil - } else { - return -1, 0, 0, fmt.Errorf("unexpected len %d for %s", len(pip), pip) } + + return -1, 0, 0, fmt.Errorf("unexpected len %d for %s", len(pip), pip) } From 3392e5e00f93fdd0e88d2e22a7c9acded777a811 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 May 2024 14:22:02 +0200 Subject: [PATCH 113/581] enable linter "revive" (#2978) * enable linter "revive" * enable some revive checks * pointer receiver for crzLogger * fix "range-loop variables always have the same address" * lint (whitespace) --- .golangci.yml | 135 +++++++++++++++++++++++++++++++++++- cmd/crowdsec/output.go | 5 +- pkg/appsec/coraza_logger.go | 56 ++++++++++----- 3 files changed, 177 insertions(+), 19 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 8e60a3ca8f8..c044903d0f2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -111,6 +111,118 @@ linters-settings: - pkg: "gopkg.in/yaml.v2" desc: "yaml.v2 is deprecated for new code in favor of yaml.v3" + revive: + ignore-generated-header: true + severity: error + enable-all-rules: true + rules: + - name: add-constant + disabled: true + - name: argument-limit + disabled: true + - name: bare-return + disabled: true + - name: blank-imports + disabled: true + - name: bool-literal-in-expr + disabled: true + - name: cognitive-complexity + disabled: true + - name: comment-spacings + disabled: true + - name: confusing-results + disabled: true + - name: context-as-argument + disabled: true + - name: cyclomatic + disabled: true + - name: deep-exit + disabled: true + - name: defer + disabled: true + - name: duplicated-imports + 
disabled: true + - name: early-return + disabled: true + - name: empty-block + disabled: true + - name: empty-lines + disabled: true + - name: error-naming + disabled: true + - name: error-strings + disabled: true + - name: flag-parameter + disabled: true + - name: function-result-limit + disabled: true + - name: function-length + disabled: true + - name: get-return + disabled: true + - name: if-return + disabled: true + - name: increment-decrement + disabled: true + - name: indent-error-flow + disabled: true + - name: import-alias-naming + disabled: true + - name: import-shadowing + disabled: true + - name: line-length-limit + disabled: true + - name: max-control-nesting + disabled: true + - name: max-public-structs + disabled: true + - name: modifies-parameter + disabled: true + - name: optimize-operands-order + disabled: true + - name: nested-structs + disabled: true + - name: package-comments + disabled: true + - name: redundant-import-alias + disabled: true + - name: struct-tag + disabled: true + - name: superfluous-else + disabled: true + - name: time-equal + disabled: true + - name: var-naming + disabled: true + - name: var-declaration + disabled: true + - name: unchecked-type-assertion + disabled: true + - name: exported + disabled: true + - name: unexported-naming + disabled: true + - name: unexported-return + disabled: true + - name: unhandled-error + disabled: true + arguments: + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + - name: unnecessary-stmt + disabled: true + - name: unreachable-code + disabled: true + - name: unused-parameter + disabled: true + - name: unused-receiver + disabled: true + - name: use-any + disabled: true + - name: useless-break + disabled: true + wsl: # Allow blocks to end with comments allow-trailing-comment: true @@ -191,6 +303,7 @@ linters: # - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative. 
# - predeclared # find code that shadows one of Go's predeclared identifiers # - reassign # Checks that package variables are not reassigned + # - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. # - rowserrcheck # checks whether Rows.Err of rows is checked successfully # - sloglint # ensure consistent code style when using log/slog # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. @@ -222,7 +335,6 @@ linters: - musttag # enforce field tags in (un)marshaled structs - promlinter # Check Prometheus metrics naming via promlint - protogetter # Reports direct reads from proto message fields when getters should be used - - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - tagalign # check that struct tags are well aligned - thelper # thelper detects tests helpers which is not start with t.Helper() method. - wrapcheck # Checks that errors returned from external packages are wrapped @@ -373,3 +485,24 @@ issues: - linters: - nonamedreturns text: "named return .* with type .* found" + + - linters: + - revive + path: pkg/leakybucket/manager_load.go + text: "confusing-naming: Field '.*' differs only by capitalization to other field in the struct type BucketFactory" + + - linters: + - revive + path: pkg/exprhelpers/helpers.go + text: "confusing-naming: Method 'flatten' differs only by capitalization to function 'Flatten' in the same source file" + + - linters: + - revive + path: pkg/appsec/query_utils.go + text: "confusing-naming: Method 'parseQuery' differs only by capitalization to function 'ParseQuery' in the same source file" + + - linters: + - revive + path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go + text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + diff --git a/cmd/crowdsec/output.go b/cmd/crowdsec/output.go index 
ac05b502e52..6f507fdcd6f 100644 --- a/cmd/crowdsec/output.go +++ b/cmd/crowdsec/output.go @@ -26,11 +26,12 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { continue } - for k, src := range alert.Sources { + for k := range alert.Sources { refsrc := *alert.Alert // copy log.Tracef("source[%s]", k) + src := alert.Sources[k] refsrc.Source = &src dedupCache = append(dedupCache, &refsrc) } @@ -45,8 +46,8 @@ func dedupAlerts(alerts []types.RuntimeAlert) ([]*models.Alert, error) { func PushAlerts(alerts []types.RuntimeAlert, client *apiclient.ApiClient) error { ctx := context.Background() - alertsToPush, err := dedupAlerts(alerts) + alertsToPush, err := dedupAlerts(alerts) if err != nil { return fmt.Errorf("failed to transform alerts for api: %w", err) } diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index 372a0098ecc..7229f038b92 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -4,8 +4,9 @@ import ( "fmt" "io" - dbg "github.com/crowdsecurity/coraza/v3/debuglog" log "github.com/sirupsen/logrus" + + dbg "github.com/crowdsecurity/coraza/v3/debuglog" ) var DebugRules map[int]bool = map[int]bool{} @@ -18,6 +19,7 @@ func GetRuleDebug(id int) bool { if val, ok := DebugRules[id]; ok { return val } + return false } @@ -60,7 +62,9 @@ func (e *crzLogEvent) Str(key, val string) dbg.Event { if e.muted { return e } + e.fields[key] = val + return e } @@ -68,7 +72,9 @@ func (e *crzLogEvent) Err(err error) dbg.Event { if e.muted { return e } + e.fields["error"] = err + return e } @@ -76,13 +82,15 @@ func (e *crzLogEvent) Bool(key string, b bool) dbg.Event { if e.muted { return e } + e.fields[key] = b + return e } func (e *crzLogEvent) Int(key string, i int) dbg.Event { if e.muted { - //this allows us to have per-rule debug logging + // this allows us to have per-rule debug logging if key == "rule_id" && GetRuleDebug(i) { e.muted = false e.fields = map[string]interface{}{} @@ -91,7 +99,9 @@ func (e 
*crzLogEvent) Int(key string, i int) dbg.Event { return e } } + e.fields[key] = i + return e } @@ -99,7 +109,9 @@ func (e *crzLogEvent) Uint(key string, i uint) dbg.Event { if e.muted { return e } + e.fields[key] = i + return e } @@ -107,7 +119,9 @@ func (e *crzLogEvent) Stringer(key string, val fmt.Stringer) dbg.Event { if e.muted { return e } + e.fields[key] = val + return e } @@ -121,74 +135,84 @@ type crzLogger struct { logLevel log.Level } -func NewCrzLogger(logger *log.Entry) crzLogger { - return crzLogger{logger: logger, logLevel: logger.Logger.GetLevel()} +func NewCrzLogger(logger *log.Entry) *crzLogger { + return &crzLogger{logger: logger, logLevel: logger.Logger.GetLevel()} } -func (c crzLogger) NewMutedEvt(lvl log.Level) dbg.Event { +func (c *crzLogger) NewMutedEvt(lvl log.Level) dbg.Event { return &crzLogEvent{muted: true, logger: c.logger, level: lvl} } -func (c crzLogger) NewEvt(lvl log.Level) dbg.Event { + +func (c *crzLogger) NewEvt(lvl log.Level) dbg.Event { evt := &crzLogEvent{fields: map[string]interface{}{}, logger: c.logger, level: lvl} + if c.defaultFields != nil { for k, v := range c.defaultFields { evt.fields[k] = v } } + return evt } -func (c crzLogger) WithOutput(w io.Writer) dbg.Logger { +func (c *crzLogger) WithOutput(w io.Writer) dbg.Logger { return c } -func (c crzLogger) WithLevel(lvl dbg.Level) dbg.Logger { +func (c *crzLogger) WithLevel(lvl dbg.Level) dbg.Logger { c.logLevel = log.Level(lvl) c.logger.Logger.SetLevel(c.logLevel) + return c } -func (c crzLogger) With(fs ...dbg.ContextField) dbg.Logger { - var e dbg.Event = c.NewEvt(c.logLevel) +func (c *crzLogger) With(fs ...dbg.ContextField) dbg.Logger { + e := c.NewEvt(c.logLevel) for _, f := range fs { e = f(e) } + c.defaultFields = e.(*crzLogEvent).fields + return c } -func (c crzLogger) Trace() dbg.Event { +func (c *crzLogger) Trace() dbg.Event { if c.logLevel < log.TraceLevel { return c.NewMutedEvt(log.TraceLevel) } + return c.NewEvt(log.TraceLevel) } -func (c crzLogger) Debug() 
dbg.Event { +func (c *crzLogger) Debug() dbg.Event { if c.logLevel < log.DebugLevel { return c.NewMutedEvt(log.DebugLevel) - } + return c.NewEvt(log.DebugLevel) } -func (c crzLogger) Info() dbg.Event { +func (c *crzLogger) Info() dbg.Event { if c.logLevel < log.InfoLevel { return c.NewMutedEvt(log.InfoLevel) } + return c.NewEvt(log.InfoLevel) } -func (c crzLogger) Warn() dbg.Event { +func (c *crzLogger) Warn() dbg.Event { if c.logLevel < log.WarnLevel { return c.NewMutedEvt(log.WarnLevel) } + return c.NewEvt(log.WarnLevel) } -func (c crzLogger) Error() dbg.Event { +func (c *crzLogger) Error() dbg.Event { if c.logLevel < log.ErrorLevel { return c.NewMutedEvt(log.ErrorLevel) } + return c.NewEvt(log.ErrorLevel) } From 659feec496387cf9dcfe647d4d579b3557fb6751 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 6 May 2024 10:43:54 +0200 Subject: [PATCH 114/581] cscli: don't print timestamps (#2984) * cscli: don't print timestamps * lint (whitespace, errors) --- cmd/crowdsec-cli/main.go | 2 ++ pkg/apiserver/papi.go | 60 +++++++++++++++++++++++----------------- pkg/database/database.go | 3 +- pkg/database/flush.go | 3 +- pkg/hubtest/utils.go | 5 ++-- pkg/setup/detect.go | 6 ++-- pkg/setup/detect_test.go | 6 +++- pkg/setup/install.go | 3 +- pkg/setup/units.go | 3 +- 9 files changed, 56 insertions(+), 35 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 0705faa4065..e3c45390a18 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -132,6 +132,8 @@ func (cli *cliRoot) initialize() { log.Fatalf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) } + log.SetFormatter(&log.TextFormatter{DisableTimestamp: true}) + if csConfig.Cscli.Output == "json" { log.SetFormatter(&log.JSONFormatter{}) log.SetLevel(log.ErrorLevel) diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index a3996850a2b..8dbd1bb9641 100644 --- a/pkg/apiserver/papi.go +++ 
b/pkg/apiserver/papi.go @@ -3,6 +3,7 @@ package apiserver import ( "context" "encoding/json" + "errors" "fmt" "net/http" "sync" @@ -21,21 +22,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - SyncInterval = time.Second * 10 -) +var SyncInterval = time.Second * 10 -const ( - PapiPullKey = "papi:last_pull" -) +const PapiPullKey = "papi:last_pull" -var ( - operationMap = map[string]func(*Message, *Papi, bool) error{ - "decision": DecisionCmd, - "alert": AlertCmd, - "management": ManagementCmd, - } -) +var operationMap = map[string]func(*Message, *Papi, bool) error{ + "decision": DecisionCmd, + "alert": AlertCmd, + "management": ManagementCmd, +} type Header struct { OperationType string `json:"operation_type"` @@ -87,21 +82,21 @@ type PapiPermCheckSuccess struct { } func NewPAPI(apic *apic, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, logLevel log.Level) (*Papi, error) { - logger := log.New() if err := types.ConfigureLogger(logger); err != nil { - return &Papi{}, fmt.Errorf("creating papi logger: %s", err) + return &Papi{}, fmt.Errorf("creating papi logger: %w", err) } + logger.SetLevel(logLevel) papiUrl := *apic.apiClient.PapiURL papiUrl.Path = fmt.Sprintf("%s%s", types.PAPIVersion, types.PAPIPollUrl) + longPollClient, err := longpollclient.NewLongPollClient(longpollclient.LongPollClientConfig{ Url: papiUrl, Logger: logger, HttpClient: apic.apiClient.GetClient(), }) - if err != nil { return &Papi{}, fmt.Errorf("failed to create PAPI client: %w", err) } @@ -132,55 +127,68 @@ func NewPAPI(apic *apic, dbClient *database.Client, consoleConfig *csconfig.Cons func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { logger := p.Logger.WithField("request-id", event.RequestId) logger.Debugf("message received: %+v", event.Data) + message := &Message{} if err := json.Unmarshal([]byte(event.Data), message); err != nil { return fmt.Errorf("polling papi message format is not compatible: %+v: %s", event.Data, err) } + if 
message.Header == nil { - return fmt.Errorf("no header in message, skipping") + return errors.New("no header in message, skipping") } + if message.Header.Source == nil { - return fmt.Errorf("no source user in header message, skipping") + return errors.New("no source user in header message, skipping") } if operationFunc, ok := operationMap[message.Header.OperationType]; ok { logger.Debugf("Calling operation '%s'", message.Header.OperationType) + err := operationFunc(message, p, sync) if err != nil { - return fmt.Errorf("'%s %s failed: %s", message.Header.OperationType, message.Header.OperationCmd, err) + return fmt.Errorf("'%s %s failed: %w", message.Header.OperationType, message.Header.OperationCmd, err) } } else { return fmt.Errorf("operation '%s' unknown, continue", message.Header.OperationType) } + return nil } func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { httpClient := p.apiClient.GetClient() papiCheckUrl := fmt.Sprintf("%s%s%s", p.URL, types.PAPIVersion, types.PAPIPermissionsUrl) + req, err := http.NewRequest(http.MethodGet, papiCheckUrl, nil) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request : %s", err) } + resp, err := httpClient.Do(req) if err != nil { log.Fatalf("failed to get response : %s", err) } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { errResp := PapiPermCheckError{} + err = json.NewDecoder(resp.Body).Decode(&errResp) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response : %s", err) } + return PapiPermCheckSuccess{}, fmt.Errorf("unable to query PAPI : %s (%d)", errResp.Error, resp.StatusCode) } + respBody := PapiPermCheckSuccess{} + err = json.NewDecoder(resp.Body).Decode(&respBody) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response : %s", err) } + return respBody, nil } @@ -202,7 +210,7 @@ func (p *Papi) PullOnce(since time.Time, sync bool) error { return err } - reversedEvents := reverse(events) //PAPI sends 
events in the reverse order, which is not an issue when pulling them in real time, but here we need the correct order + reversedEvents := reverse(events) // PAPI sends events in the reverse order, which is not an issue when pulling them in real time, but here we need the correct order eventsCount := len(events) p.Logger.Infof("received %d events", eventsCount) @@ -215,8 +223,8 @@ func (p *Papi) PullOnce(since time.Time, sync bool) error { } p.Logger.Debugf("finished handling events") - //Don't update the timestamp in DB, as a "real" LAPI might be running - //Worst case, crowdsec will receive a few duplicated events and will discard them + // Don't update the timestamp in DB, as a "real" LAPI might be running + // Worst case, crowdsec will receive a few duplicated events and will discard them return nil } @@ -232,7 +240,7 @@ func (p *Papi) Pull() error { p.Logger.Warningf("failed to get last timestamp for papi pull: %s", err) } - //value doesn't exist, it's first time we're pulling + // value doesn't exist, it's first time we're pulling if lastTimestampStr == nil { binTime, err := lastTimestamp.MarshalText() if err != nil { @@ -254,7 +262,7 @@ func (p *Papi) Pull() error { for event := range p.Client.Start(lastTimestamp) { logger := p.Logger.WithField("request-id", event.RequestId) - //update last timestamp in database + // update last timestamp in database newTime := time.Now().UTC() binTime, err := newTime.MarshalText() @@ -329,7 +337,7 @@ func (p *Papi) SyncDecisions() error { func (p *Papi) SendDeletedDecisions(cacheOrig *models.DecisionsDeleteRequest) { var ( cache []models.DecisionsDeleteRequestItem = *cacheOrig - send models.DecisionsDeleteRequest + send models.DecisionsDeleteRequest ) bulkSize := 50 @@ -359,7 +367,7 @@ func (p *Papi) SendDeletedDecisions(cacheOrig *models.DecisionsDeleteRequest) { _, _, err := p.apiClient.DecisionDelete.Add(ctx, &send) if err != nil { - //we log it here as well, because the return value of func might be discarded + // we log 
it here as well, because the return value of func might be discarded p.Logger.Errorf("sending deleted decisions to central API: %s", err) } diff --git a/pkg/database/database.go b/pkg/database/database.go index aa191d7dc43..d984aefb170 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -3,6 +3,7 @@ package database import ( "context" "database/sql" + "errors" "fmt" "os" @@ -47,7 +48,7 @@ func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { var client *ent.Client var err error if config == nil { - return &Client{}, fmt.Errorf("DB config is empty") + return &Client{}, errors.New("DB config is empty") } /*The logger that will be used by db operations*/ clog := log.New() diff --git a/pkg/database/flush.go b/pkg/database/flush.go index a7b364fa970..ad4a912de84 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -1,6 +1,7 @@ package database import ( + "errors" "fmt" "time" @@ -21,7 +22,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched maxItems := 0 maxAge := "" if config.MaxItems != nil && *config.MaxItems <= 0 { - return nil, fmt.Errorf("max_items can't be zero or negative number") + return nil, errors.New("max_items can't be zero or negative number") } if config.MaxItems != nil { maxItems = *config.MaxItems diff --git a/pkg/hubtest/utils.go b/pkg/hubtest/utils.go index 9009d0dddec..a7373fcc0bf 100644 --- a/pkg/hubtest/utils.go +++ b/pkg/hubtest/utils.go @@ -1,6 +1,7 @@ package hubtest import ( + "errors" "fmt" "net" "os" @@ -56,7 +57,7 @@ func checkPathNotContained(path string, subpath string) error { for { if current == absPath { - return fmt.Errorf("cannot copy a folder onto itself") + return errors.New("cannot copy a folder onto itself") } up := filepath.Dir(current) @@ -87,7 +88,7 @@ func CopyDir(src string, dest string) error { } if !file.IsDir() { - return fmt.Errorf("Source " + file.Name() + " is not a directory!") + return errors.New("Source " + file.Name() + " is not a 
directory!") } err = os.MkdirAll(dest, 0755) diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 7d73092f74e..5deff904e19 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -2,6 +2,7 @@ package setup import ( "bytes" + "errors" "fmt" "io" "os" @@ -53,6 +54,7 @@ func validateDataSource(opaqueDS DataSourceItem) error { // formally validate YAML commonDS := configuration.DataSourceCommonCfg{} + body, err := yaml.Marshal(opaqueDS) if err != nil { return err @@ -66,7 +68,7 @@ func validateDataSource(opaqueDS DataSourceItem) error { // source is mandatory // XXX unless it's not? if commonDS.Source == "" { - return fmt.Errorf("source is empty") + return errors.New("source is empty") } // source must be known @@ -104,7 +106,7 @@ func readDetectConfig(fin io.Reader) (DetectConfig, error) { switch dc.Version { case "": - return DetectConfig{}, fmt.Errorf("missing version tag (must be 1.0)") + return DetectConfig{}, errors.New("missing version tag (must be 1.0)") case "1.0": // all is well default: diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 242ade0494b..0ff3438d974 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -99,6 +99,7 @@ func TestPathExists(t *testing.T) { t.Run(tc.path, func(t *testing.T) { t.Parallel() + actual := env.PathExists(tc.path) require.Equal(t, tc.expected, actual) }) @@ -152,6 +153,7 @@ func TestVersionCheck(t *testing.T) { t.Run(fmt.Sprintf("Check(%s,%s)", tc.version, tc.constraint), func(t *testing.T) { t.Parallel() + actual, err := e.VersionCheck(tc.constraint) cstest.RequireErrorContains(t, err, tc.expectedErr) require.Equal(t, tc.expected, actual) @@ -249,8 +251,10 @@ func TestListSupported(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() + f := tempYAML(t, tc.yml) defer os.Remove(f.Name()) + supported, err := setup.ListSupported(&f) cstest.RequireErrorContains(t, err, tc.expectedErr) require.ElementsMatch(t, tc.expected, supported) @@ -332,6 +336,7 @@ func 
TestApplyRules(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() + svc := setup.Service{When: tc.rules} _, actualOk, err := setup.ApplyRules(svc, env) //nolint:typecheck,nolintlint // exported only for tests cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -840,7 +845,6 @@ func TestDetectForcedOS(t *testing.T) { func TestDetectDatasourceValidation(t *testing.T) { // It could be a good idea to test UnmarshalConfig() separately in addition // to Configure(), in each datasource. For now, we test these here. - require := require.New(t) setup.ExecCommand = fakeExecCommand diff --git a/pkg/setup/install.go b/pkg/setup/install.go index fc922c5d19b..dc85706a15c 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -2,6 +2,7 @@ package setup import ( "bytes" + "errors" "fmt" "os" "path/filepath" @@ -173,7 +174,7 @@ func marshalAcquisDocuments(ads []AcquisDocument, toDir string) (string, error) if toDir != "" { if ad.AcquisFilename == "" { - return "", fmt.Errorf("empty acquis filename") + return "", errors.New("empty acquis filename") } fname := filepath.Join(toDir, ad.AcquisFilename) diff --git a/pkg/setup/units.go b/pkg/setup/units.go index a0bccba4aac..ab1eec6f33e 100644 --- a/pkg/setup/units.go +++ b/pkg/setup/units.go @@ -2,6 +2,7 @@ package setup import ( "bufio" + "errors" "fmt" "strings" @@ -41,7 +42,7 @@ func systemdUnitList() ([]string, error) { if !header { spaceIdx := strings.IndexRune(line, ' ') if spaceIdx == -1 { - return ret, fmt.Errorf("can't parse systemctl output") + return ret, errors.New("can't parse systemctl output") } line = line[:spaceIdx] From a2dcc0ef9a534fd9998651fa0b31338acd24e109 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 6 May 2024 12:33:54 +0200 Subject: [PATCH 115/581] cscli: remove global dbClient (#2985) * cscli: remove global dbClient * lint (whitespace, errors) --- cmd/crowdsec-cli/main.go | 2 - cmd/crowdsec-cli/papi.go | 14 +++---- 
cmd/crowdsec-cli/support.go | 2 +- pkg/database/database.go | 46 ++++++++++++++--------- pkg/leakybucket/manager_run.go | 6 +-- pkg/leakybucket/overflows.go | 68 +++++++++++++++++++++++++++------- 6 files changed, 94 insertions(+), 44 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index e3c45390a18..95c528f20b5 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -15,14 +15,12 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) var ( ConfigFilePath string csConfig *csconfig.Config - dbClient *database.Client ) type configGetter func() *csconfig.Config diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 5808fcce5f6..558409b2d4d 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -62,17 +62,17 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { RunE: func(_ *cobra.Command, _ []string) error { var err error cfg := cli.cfg() - dbClient, err = database.NewClient(cfg.DbConfig) + db, err := database.NewClient(cfg.DbConfig) if err != nil { return fmt.Errorf("unable to initialize database client: %w", err) } - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { return fmt.Errorf("unable to initialize API client: %w", err) } - papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { return fmt.Errorf("unable to initialize PAPI client: %w", err) } @@ -82,7 +82,7 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { return fmt.Errorf("unable to get PAPI permissions: %w", err) } var 
lastTimestampStr *string - lastTimestampStr, err = dbClient.GetConfigItem(apiserver.PapiPullKey) + lastTimestampStr, err = db.GetConfigItem(apiserver.PapiPullKey) if err != nil { lastTimestampStr = ptr.Of("never") } @@ -113,19 +113,19 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command { cfg := cli.cfg() t := tomb.Tomb{} - dbClient, err = database.NewClient(cfg.DbConfig) + db, err := database.NewClient(cfg.DbConfig) if err != nil { return fmt.Errorf("unable to initialize database client: %w", err) } - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, dbClient, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { return fmt.Errorf("unable to initialize API client: %w", err) } t.Go(apic.Push) - papi, err := apiserver.NewPAPI(apic, dbClient, cfg.API.Server.ConsoleConfig, log.GetLevel()) + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { return fmt.Errorf("unable to initialize PAPI client: %w", err) } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 418a981adee..5890061f502 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -331,7 +331,7 @@ cscli support dump -f /tmp/crowdsec-support.zip outFile = "/tmp/crowdsec-support.zip" } - dbClient, err = database.NewClient(csConfig.DbConfig) + dbClient, err := database.NewClient(csConfig.DbConfig) if err != nil { log.Warnf("Could not connect to database: %s", err) skipDB = true diff --git a/pkg/database/database.go b/pkg/database/database.go index d984aefb170..96a495f6731 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -35,72 +35,84 @@ func getEntDriver(dbtype string, dbdialect string, dsn string, config *csconfig. 
if err != nil { return nil, err } + if config.MaxOpenConns == nil { log.Warningf("MaxOpenConns is 0, defaulting to %d", csconfig.DEFAULT_MAX_OPEN_CONNS) config.MaxOpenConns = ptr.Of(csconfig.DEFAULT_MAX_OPEN_CONNS) } + db.SetMaxOpenConns(*config.MaxOpenConns) drv := entsql.OpenDB(dbdialect, db) + return drv, nil } func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { var client *ent.Client - var err error + if config == nil { - return &Client{}, errors.New("DB config is empty") + return nil, errors.New("DB config is empty") } /*The logger that will be used by db operations*/ clog := log.New() if err := types.ConfigureLogger(clog); err != nil { return nil, fmt.Errorf("while configuring db logger: %w", err) } + if config.LogLevel != nil { clog.SetLevel(*config.LogLevel) } - entLogger := clog.WithField("context", "ent") + entLogger := clog.WithField("context", "ent") entOpt := ent.Log(entLogger.Debug) + typ, dia, err := config.ConnectionDialect() if err != nil { - return &Client{}, err //unsupported database caught here + return nil, err // unsupported database caught here } + if config.Type == "sqlite" { /*if it's the first startup, we want to touch and chmod file*/ if _, err := os.Stat(config.DbPath); os.IsNotExist(err) { - f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0600) + f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0o600) if err != nil { - return &Client{}, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) + return nil, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) } + if err := f.Close(); err != nil { - return &Client{}, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) + return nil, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) } } - //Always try to set permissions to simplify a bit the code for windows (as the permissions set by OpenFile will be garbage) - if err := setFilePerm(config.DbPath, 
0640); err != nil { - return &Client{}, fmt.Errorf("unable to set perms on %s: %v", config.DbPath, err) + // Always try to set permissions to simplify a bit the code for windows (as the permissions set by OpenFile will be garbage) + if err := setFilePerm(config.DbPath, 0o640); err != nil { + return nil, fmt.Errorf("unable to set perms on %s: %v", config.DbPath, err) } } + drv, err := getEntDriver(typ, dia, config.ConnectionString(), config) if err != nil { - return &Client{}, fmt.Errorf("failed opening connection to %s: %v", config.Type, err) + return nil, fmt.Errorf("failed opening connection to %s: %v", config.Type, err) } + client = ent.NewClient(ent.Driver(drv), entOpt) + if config.LogLevel != nil && *config.LogLevel >= log.DebugLevel { clog.Debugf("Enabling request debug") + client = client.Debug() } + if err = client.Schema.Create(context.Background()); err != nil { return nil, fmt.Errorf("failed creating schema resources: %v", err) } return &Client{ - Ent: client, - CTX: context.Background(), - Log: clog, - CanFlush: true, - Type: config.Type, - WalMode: config.UseWal, + Ent: client, + CTX: context.Background(), + Log: clog, + CanFlush: true, + Type: config.Type, + WalMode: config.UseWal, decisionBulkSize: config.DecisionBulkSize, }, nil } diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index ae7a86a4e4e..1d34c238ea5 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -85,7 +85,7 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) defer buckets.wgDumpState.Done() if outputdir == "" { - return "", fmt.Errorf("empty output dir for dump bucket state") + return "", errors.New("empty output dir for dump bucket state") } tmpFd, err := os.CreateTemp(os.TempDir(), "crowdsec-buckets-dump-") if err != nil { @@ -132,11 +132,11 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) }) bbuckets, err := json.MarshalIndent(serialized, "", " ") if err != 
nil { - return "", fmt.Errorf("Failed to unmarshal buckets : %s", err) + return "", fmt.Errorf("failed to unmarshal buckets: %s", err) } size, err := tmpFd.Write(bbuckets) if err != nil { - return "", fmt.Errorf("failed to write temp file : %s", err) + return "", fmt.Errorf("failed to write temp file: %s", err) } log.Infof("Serialized %d live buckets (+%d expired) in %d bytes to %s", len(serialized), discard, size, tmpFd.Name()) serialized = nil diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 80226aafb2a..8092ef35e77 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -1,6 +1,7 @@ package leakybucket import ( + "errors" "fmt" "net" "sort" @@ -22,9 +23,7 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e /*if it's already an overflow, we have properly formatted sources. we can just twitch them to reflect the requested scope*/ if evt.Type == types.OVFLW { - for k, v := range evt.Overflow.Sources { - /*the scopes are already similar, nothing to do*/ if leaky.scopeType.Scope == *v.Scope { srcs[k] = v @@ -46,20 +45,25 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e src.Scope = new(string) *src.Scope = leaky.scopeType.Scope *src.Value = "" + if v.Range != "" { *src.Value = v.Range } + if leaky.scopeType.RunTimeFilter != nil { retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { return srcs, fmt.Errorf("while running scope filter: %w", err) } + value, ok := retValue.(string) if !ok { value = "" } + src.Value = &value } + if *src.Value != "" { srcs[*src.Value] = src } else { @@ -71,50 +75,64 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e } } } + return srcs, nil } + src := models.Source{} + switch leaky.scopeType.Scope { case types.Range, types.Ip: v, ok := evt.Meta["source_ip"] if !ok { return srcs, 
fmt.Errorf("scope is %s but Meta[source_ip] doesn't exist", leaky.scopeType.Scope) } + if net.ParseIP(v) == nil { return srcs, fmt.Errorf("scope is %s but '%s' isn't a valid ip", leaky.scopeType.Scope, v) } + src.IP = v src.Scope = &leaky.scopeType.Scope + if v, ok := evt.Enriched["ASNumber"]; ok { src.AsNumber = v } else if v, ok := evt.Enriched["ASNNumber"]; ok { src.AsNumber = v } + if v, ok := evt.Enriched["IsoCode"]; ok { src.Cn = v } + if v, ok := evt.Enriched["ASNOrg"]; ok { src.AsName = v } + if v, ok := evt.Enriched["Latitude"]; ok { l, err := strconv.ParseFloat(v, 32) if err != nil { log.Warningf("bad latitude %s : %s", v, err) } + src.Latitude = float32(l) } + if v, ok := evt.Enriched["Longitude"]; ok { l, err := strconv.ParseFloat(v, 32) if err != nil { log.Warningf("bad longitude %s : %s", v, err) } + src.Longitude = float32(l) } + if v, ok := evt.Meta["SourceRange"]; ok && v != "" { _, ipNet, err := net.ParseCIDR(v) if err != nil { - return srcs, fmt.Errorf("Declared range %s of %s can't be parsed", v, src.IP) + return srcs, fmt.Errorf("declared range %s of %s can't be parsed", v, src.IP) } + if ipNet != nil { src.Range = ipNet.String() leaky.logger.Tracef("Valid range from %s : %s", src.IP, src.Range) @@ -124,6 +142,7 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e src.Value = &src.IP } else if leaky.scopeType.Scope == types.Range { src.Value = &src.Range + if leaky.scopeType.RunTimeFilter != nil { retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { @@ -134,14 +153,17 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e if !ok { value = "" } + src.Value = &value } } + srcs[*src.Value] = src default: if leaky.scopeType.RunTimeFilter == nil { - return srcs, fmt.Errorf("empty scope information") + return srcs, errors.New("empty scope information") } + retValue, err := 
exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { return srcs, fmt.Errorf("while running scope filter: %w", err) @@ -151,30 +173,34 @@ func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, e if !ok { value = "" } + src.Value = &value src.Scope = new(string) *src.Scope = leaky.scopeType.Scope srcs[*src.Value] = src } + return srcs, nil } // EventsFromQueue iterates the queue to collect & prepare meta-datas from alert func EventsFromQueue(queue *types.Queue) []*models.Event { - events := []*models.Event{} for _, evt := range queue.Queue { if evt.Meta == nil { continue } + meta := models.Meta{} - //we want consistence + // we want consistence skeys := make([]string, 0, len(evt.Meta)) for k := range evt.Meta { skeys = append(skeys, k) } + sort.Strings(skeys) + for _, k := range skeys { v := evt.Meta[k] subMeta := models.MetaItems0{Key: k, Value: v} @@ -185,12 +211,13 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { ovflwEvent := models.Event{ Meta: meta, } - //either MarshaledTime is present and is extracted from log + // either MarshaledTime is present and is extracted from log if evt.MarshaledTime != "" { tmpTimeStamp := evt.MarshaledTime ovflwEvent.Timestamp = &tmpTimeStamp - } else if !evt.Time.IsZero() { //or .Time has been set during parse as time.Now().UTC() + } else if !evt.Time.IsZero() { // or .Time has been set during parse as time.Now().UTC() ovflwEvent.Timestamp = new(string) + raw, err := evt.Time.MarshalText() if err != nil { log.Warningf("while marshaling time '%s' : %s", evt.Time.String(), err) @@ -203,6 +230,7 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { events = append(events, &ovflwEvent) } + return events } @@ -218,17 +246,21 @@ func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Sour if err != nil { return nil, "", fmt.Errorf("while extracting scope from bucket %s: %w", leaky.Name, 
err) } + for key, src := range srcs { if source_type == types.Undefined { source_type = *src.Scope } + if *src.Scope != source_type { return nil, "", fmt.Errorf("event has multiple source types : %s != %s", *src.Scope, source_type) } + sources[key] = src } } + return sources, source_type, nil } @@ -244,10 +276,12 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { if err != nil { log.Warningf("failed to marshal start ts %s : %s", leaky.First_ts.String(), err) } + stop_at, err := leaky.Ovflw_ts.MarshalText() if err != nil { log.Warningf("failed to marshal ovflw ts %s : %s", leaky.First_ts.String(), err) } + capacity := int32(leaky.Capacity) EventsCount := int32(leaky.Total_count) leakSpeed := leaky.Leakspeed.String() @@ -266,19 +300,20 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { Simulated: &leaky.Simulated, } if leaky.BucketConfig == nil { - return runtimeAlert, fmt.Errorf("leaky.BucketConfig is nil") + return runtimeAlert, errors.New("leaky.BucketConfig is nil") } - //give information about the bucket + // give information about the bucket runtimeAlert.Mapkey = leaky.Mapkey - //Get the sources from Leaky/Queue + // Get the sources from Leaky/Queue sources, source_scope, err := alertFormatSource(leaky, queue) if err != nil { return runtimeAlert, fmt.Errorf("unable to collect sources from bucket: %w", err) } + runtimeAlert.Sources = sources - //Include source info in format string + // Include source info in format string sourceStr := "UNKNOWN" if len(sources) > 1 { sourceStr = fmt.Sprintf("%d sources", len(sources)) @@ -290,19 +325,22 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { } *apiAlert.Message = fmt.Sprintf("%s %s performed '%s' (%d events over %s) at %s", source_scope, sourceStr, leaky.Name, leaky.Total_count, leaky.Ovflw_ts.Sub(leaky.First_ts), leaky.Last_ts) - //Get the events from Leaky/Queue + // Get the events from Leaky/Queue apiAlert.Events = 
EventsFromQueue(queue) + var warnings []error + apiAlert.Meta, warnings = alertcontext.EventToContext(leaky.Queue.GetQueue()) for _, w := range warnings { log.Warningf("while extracting context from bucket %s : %s", leaky.Name, w) } - //Loop over the Sources and generate appropriate number of ApiAlerts + // Loop over the Sources and generate appropriate number of ApiAlerts for _, srcValue := range sources { newApiAlert := apiAlert srcCopy := srcValue newApiAlert.Source = &srcCopy + if v, ok := leaky.BucketConfig.Labels["remediation"]; ok && v == true { newApiAlert.Remediation = true } @@ -312,6 +350,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { log.Errorf("->%s", spew.Sdump(newApiAlert)) log.Fatalf("error : %s", err) } + runtimeAlert.APIAlerts = append(runtimeAlert.APIAlerts, newApiAlert) } @@ -322,5 +361,6 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { if leaky.Reprocess { runtimeAlert.Reprocess = true } + return runtimeAlert, nil } From 11da728b514be8df3c0a3acffb8a6d86230b06c2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 May 2024 12:59:38 +0200 Subject: [PATCH 116/581] cscli support: collect profiling data and logs (#2987) * extract methods, avoid globals * collect logs to file dump.log * include pprof data * include latest logs --- cmd/crowdsec-cli/capi.go | 58 +-- cmd/crowdsec-cli/lapi.go | 44 ++- cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/support.go | 717 +++++++++++++++++++++--------------- pkg/database/database.go | 2 +- test/bats/01_cscli.bats | 2 +- 6 files changed, 489 insertions(+), 336 deletions(-) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index b5180d0505a..b89d9c7edb0 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -155,23 +155,11 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { return cmd } -func (cli *cliCapi) status() error { - cfg := cli.cfg() - - if err := 
require.CAPIRegistered(cfg); err != nil { - return err - } - - password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) - - apiurl, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) +// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct +func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { + apiURL, err := url.Parse(credURL) if err != nil { - return fmt.Errorf("parsing api url ('%s'): %w", cfg.API.Server.OnlineClient.Credentials.URL, err) - } - - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err + return fmt.Errorf("parsing api url: %w", err) } scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) @@ -183,22 +171,48 @@ func (cli *cliCapi) status() error { return errors.New("no scenarios installed, abort") } - Client, err = apiclient.NewDefaultClient(apiurl, CAPIURLPrefix, fmt.Sprintf("crowdsec/%s", version.String()), nil) + Client, err = apiclient.NewDefaultClient(apiURL, + CAPIURLPrefix, + fmt.Sprintf("crowdsec/%s", version.String()), + nil) if err != nil { return fmt.Errorf("init default client: %w", err) } + pw := strfmt.Password(password) + t := models.WatcherAuthRequest{ - MachineID: &cfg.API.Server.OnlineClient.Credentials.Login, - Password: &password, + MachineID: &login, + Password: &pw, Scenarios: scenarios, } - log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", cfg.API.Server.OnlineClient.Credentials.Login, apiurl) - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { + return err + } + + return nil +} + +func (cli *cliCapi) status() error { + cfg := cli.cfg() + + if err := require.CAPIRegistered(cfg); err != nil { + return err + } + + cred := cfg.API.Server.OnlineClient.Credentials + + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + + log.Infof("Loaded 
credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + + if err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) } diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 369de5b426b..7cffd7ffc7f 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -39,23 +39,13 @@ func NewCLILapi(cfg configGetter) *cliLapi { } } -func (cli *cliLapi) status() error { - cfg := cli.cfg() - password := strfmt.Password(cfg.API.Client.Credentials.Password) - login := cfg.API.Client.Credentials.Login - - origURL := cfg.API.Client.Credentials.URL - - apiURL, err := url.Parse(origURL) +// QueryLAPIStatus checks if the Local API is reachable, and if the credentials are correct +func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { + apiURL, err := url.Parse(credURL) if err != nil { return fmt.Errorf("parsing api url: %w", err) } - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { return fmt.Errorf("failed to get scenarios: %w", err) @@ -69,18 +59,36 @@ func (cli *cliLapi) status() error { return fmt.Errorf("init default client: %w", err) } + pw := strfmt.Password(password) + t := models.WatcherAuthRequest{ MachineID: &login, - Password: &password, + Password: &pw, Scenarios: scenarios, } - log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) - // use the original string because apiURL would print 'http://unix/' - log.Infof("Trying to authenticate with username %s on %s", login, origURL) - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { + return err + } + + return nil +} + +func (cli *cliLapi) status() error { + cfg := cli.cfg() + + cred := 
cfg.API.Client.Credentials + + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + + log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) + log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + + if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 95c528f20b5..3881818123f 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -258,7 +258,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLISupport().NewCommand()) + cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 5890061f502..54b2e7ad9ad 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -7,52 +7,67 @@ import ( "errors" "fmt" "io" + "net" "net/http" - "net/url" "os" "path/filepath" "regexp" + "strconv" "strings" "time" "github.com/blackfireio/osinfo" - "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" - "github.com/crowdsecurity/crowdsec/pkg/models" ) 
const ( - SUPPORT_METRICS_HUMAN_PATH = "metrics/metrics.human" - SUPPORT_METRICS_PROMETHEUS_PATH = "metrics/metrics.prometheus" - SUPPORT_VERSION_PATH = "version.txt" - SUPPORT_FEATURES_PATH = "features.txt" - SUPPORT_OS_INFO_PATH = "osinfo.txt" - SUPPORT_PARSERS_PATH = "hub/parsers.txt" - SUPPORT_SCENARIOS_PATH = "hub/scenarios.txt" - SUPPORT_CONTEXTS_PATH = "hub/scenarios.txt" - SUPPORT_COLLECTIONS_PATH = "hub/collections.txt" - SUPPORT_POSTOVERFLOWS_PATH = "hub/postoverflows.txt" - SUPPORT_BOUNCERS_PATH = "lapi/bouncers.txt" - SUPPORT_AGENTS_PATH = "lapi/agents.txt" - SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" - SUPPORT_LAPI_STATUS_PATH = "lapi_status.txt" - SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" - SUPPORT_ACQUISITION_CONFIG_BASE_PATH = "config/acquis/" - SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" - SUPPORT_CRASH_PATH = "crash/" + SUPPORT_METRICS_DIR = "metrics/" + SUPPORT_VERSION_PATH = "version.txt" + SUPPORT_FEATURES_PATH = "features.txt" + SUPPORT_OS_INFO_PATH = "osinfo.txt" + SUPPORT_HUB_DIR = "hub/" + SUPPORT_BOUNCERS_PATH = "lapi/bouncers.txt" + SUPPORT_AGENTS_PATH = "lapi/agents.txt" + SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" + SUPPORT_LAPI_STATUS_PATH = "lapi_status.txt" + SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" + SUPPORT_ACQUISITION_DIR = "config/acquis/" + SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" + SUPPORT_CRASH_DIR = "crash/" + SUPPORT_LOG_DIR = "log/" + SUPPORT_PPROF_DIR = "pprof/" ) +// StringHook collects log entries in a string +type StringHook struct { + LogBuilder strings.Builder + LogLevels []log.Level +} + +func (hook *StringHook) Levels() []log.Level { + return hook.LogLevels +} + +func (hook *StringHook) Fire(entry *log.Entry) error { + logEntry, err := entry.String() + if err != nil { + return err + } + + hook.LogBuilder.WriteString(logEntry) + + return nil +} + // from https://github.com/acarl005/stripansi var reStripAnsi = 
regexp.MustCompile("[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))") @@ -61,75 +76,76 @@ func stripAnsiString(str string) string { return reStripAnsi.ReplaceAllString(str, "") } -func collectMetrics() ([]byte, []byte, error) { +func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { log.Info("Collecting prometheus metrics") - if csConfig.Cscli.PrometheusUrl == "" { - log.Warn("No Prometheus URL configured, metrics will not be collected") - return nil, nil, errors.New("prometheus_uri is not set") + cfg := cli.cfg() + + if cfg.Cscli.PrometheusUrl == "" { + log.Warn("can't collect metrics: prometheus_uri is not set") } - humanMetrics := bytes.NewBuffer(nil) + humanMetrics := new(bytes.Buffer) ms := NewMetricStore() - if err := ms.Fetch(csConfig.Cscli.PrometheusUrl); err != nil { - return nil, nil, fmt.Errorf("could not fetch prometheus metrics: %w", err) + if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + return err } if err := ms.Format(humanMetrics, nil, "human", false); err != nil { - return nil, nil, err + return fmt.Errorf("could not format prometheus metrics: %w", err) } - req, err := http.NewRequest(http.MethodGet, csConfig.Cscli.PrometheusUrl, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, cfg.Cscli.PrometheusUrl, nil) if err != nil { - return nil, nil, fmt.Errorf("could not create requests to prometheus endpoint: %w", err) + return fmt.Errorf("could not create request to prometheus endpoint: %w", err) } client := &http.Client{} resp, err := client.Do(req) if err != nil { - return nil, nil, fmt.Errorf("could not get metrics from prometheus endpoint: %w", err) + return fmt.Errorf("could not get metrics from prometheus endpoint: %w", err) } defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, nil, fmt.Errorf("could not read metrics from prometheus endpoint: %w", err) - } + 
cli.writeToZip(zw, SUPPORT_METRICS_DIR+"metrics.prometheus", time.Now(), resp.Body) - return humanMetrics.Bytes(), body, nil + stripped := stripAnsiString(humanMetrics.String()) + + cli.writeToZip(zw, SUPPORT_METRICS_DIR+"metrics.human", time.Now(), strings.NewReader(stripped)) + + return nil } -func collectVersion() []byte { +func (cli *cliSupport) dumpVersion(zw *zip.Writer) { log.Info("Collecting version") - return []byte(cwversion.ShowStr()) + + cli.writeToZip(zw, SUPPORT_VERSION_PATH, time.Now(), strings.NewReader(cwversion.ShowStr())) } -func collectFeatures() []byte { +func (cli *cliSupport) dumpFeatures(zw *zip.Writer) { log.Info("Collecting feature flags") - enabledFeatures := fflag.Crowdsec.GetEnabledFeatures() - - w := bytes.NewBuffer(nil) - for _, k := range enabledFeatures { - fmt.Fprintf(w, "%s\n", k) + w := new(bytes.Buffer) + for _, k := range fflag.Crowdsec.GetEnabledFeatures() { + fmt.Fprintln(w, k) } - return w.Bytes() + cli.writeToZip(zw, SUPPORT_FEATURES_PATH, time.Now(), w) } -func collectOSInfo() ([]byte, error) { +func (cli *cliSupport) dumpOSInfo(zw *zip.Writer) error { log.Info("Collecting OS info") info, err := osinfo.GetOSInfo() if err != nil { - return nil, err + return err } - w := bytes.NewBuffer(nil) + w := new(bytes.Buffer) fmt.Fprintf(w, "Architecture: %s\n", info.Architecture) fmt.Fprintf(w, "Family: %s\n", info.Family) fmt.Fprintf(w, "ID: %s\n", info.ID) @@ -138,155 +154,251 @@ func collectOSInfo() ([]byte, error) { fmt.Fprintf(w, "Version: %s\n", info.Version) fmt.Fprintf(w, "Build: %s\n", info.Build) - return w.Bytes(), nil + cli.writeToZip(zw, SUPPORT_OS_INFO_PATH, time.Now(), w) + + return nil } -func collectHubItems(hub *cwhub.Hub, itemType string) []byte { +func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub, itemType string) error { var err error - out := bytes.NewBuffer(nil) + out := new(bytes.Buffer) - log.Infof("Collecting %s list", itemType) + log.Infof("Collecting hub: %s", itemType) items := 
make(map[string][]*cwhub.Item) if items[itemType], err = selectItems(hub, itemType, nil, true); err != nil { - log.Warnf("could not collect %s list: %s", itemType, err) + return fmt.Errorf("could not collect %s list: %w", itemType, err) } if err := listItems(out, []string{itemType}, items, false, "human"); err != nil { - log.Warnf("could not collect %s list: %s", itemType, err) + return fmt.Errorf("could not list %s: %w", itemType, err) } - return out.Bytes() + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, SUPPORT_HUB_DIR+itemType+".txt", time.Now(), strings.NewReader(stripped)) + + return nil } -func collectBouncers(dbClient *database.Client) ([]byte, error) { - out := bytes.NewBuffer(nil) +func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { + log.Info("Collecting bouncers") + + if db == nil { + return errors.New("no database connection") + } + + out := new(bytes.Buffer) - bouncers, err := dbClient.ListBouncers() + bouncers, err := db.ListBouncers() if err != nil { - return nil, fmt.Errorf("unable to list bouncers: %w", err) + return fmt.Errorf("unable to list bouncers: %w", err) } getBouncersTable(out, bouncers) - return out.Bytes(), nil + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, SUPPORT_BOUNCERS_PATH, time.Now(), strings.NewReader(stripped)) + + return nil } -func collectAgents(dbClient *database.Client) ([]byte, error) { - out := bytes.NewBuffer(nil) +func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { + log.Info("Collecting agents") + + if db == nil { + return errors.New("no database connection") + } - machines, err := dbClient.ListMachines() + out := new(bytes.Buffer) + + machines, err := db.ListMachines() if err != nil { - return nil, fmt.Errorf("unable to list machines: %w", err) + return fmt.Errorf("unable to list machines: %w", err) } getAgentsTable(out, machines) - return out.Bytes(), nil + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, 
SUPPORT_AGENTS_PATH, time.Now(), strings.NewReader(stripped)) + + return nil } -func collectAPIStatus(login string, password string, endpoint string, prefix string, hub *cwhub.Hub) []byte { - if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil { - return []byte("No agent credentials found, are we LAPI ?") - } +func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { + log.Info("Collecting LAPI status") - pwd := strfmt.Password(password) + cfg := cli.cfg() + cred := cfg.API.Client.Credentials - apiurl, err := url.Parse(endpoint) - if err != nil { - return []byte(fmt.Sprintf("cannot parse API URL: %s", err)) - } + out := new(bytes.Buffer) - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return []byte(fmt.Sprintf("could not collect scenarios: %s", err)) - } + fmt.Fprintf(out, "LAPI credentials file: %s\n", cfg.API.Client.CredentialsFilePath) + fmt.Fprintf(out, "LAPI URL: %s\n", cred.URL) + fmt.Fprintf(out, "LAPI username: %s\n", cred.Login) - Client, err = apiclient.NewDefaultClient(apiurl, - prefix, - fmt.Sprintf("crowdsec/%s", version.String()), - nil) - if err != nil { - return []byte(fmt.Sprintf("could not init client: %s", err)) + if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + return fmt.Errorf("could not authenticate to Local API (LAPI): %w", err) } - t := models.WatcherAuthRequest{ - MachineID: &login, - Password: &pwd, - Scenarios: scenarios, - } + fmt.Fprintln(out, "You can successfully interact with Local API (LAPI)") - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) - if err != nil { - return []byte(fmt.Sprintf("Could not authenticate to API: %s", err)) - } else { - return []byte("Successfully authenticated to LAPI") + cli.writeToZip(zw, SUPPORT_LAPI_STATUS_PATH, time.Now(), out) + + return nil +} + +func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { + log.Info("Collecting CAPI status") + + cfg := 
cli.cfg() + cred := cfg.API.Server.OnlineClient.Credentials + + out := new(bytes.Buffer) + + fmt.Fprintf(out, "CAPI credentials file: %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) + fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) + fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) + + if err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) } + + fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + + cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), out) + + return nil } -func collectCrowdsecConfig() []byte { +func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { log.Info("Collecting crowdsec config") - config, err := os.ReadFile(*csConfig.FilePath) + cfg := cli.cfg() + + config, err := os.ReadFile(*cfg.FilePath) if err != nil { - return []byte(fmt.Sprintf("could not read config file: %s", err)) + return fmt.Errorf("could not read config file: %w", err) } r := regexp.MustCompile(`(\s+password:|\s+user:|\s+host:)\s+.*`) - return r.ReplaceAll(config, []byte("$1 ****REDACTED****")) + redacted := r.ReplaceAll(config, []byte("$1 ****REDACTED****")) + + cli.writeToZip(zw, SUPPORT_CROWDSEC_CONFIG_PATH, time.Now(), bytes.NewReader(redacted)) + + return nil } -func collectCrowdsecProfile() []byte { - log.Info("Collecting crowdsec profile") +func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, endpoint string) error { + log.Infof("Collecting pprof/%s data", endpoint) + + ctx, cancel := context.WithTimeout(ctx, 120*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext( + ctx, + http.MethodGet, + fmt.Sprintf( + "http://%s/debug/pprof/%s?debug=1", + net.JoinHostPort( + csConfig.Prometheus.ListenAddr, + strconv.Itoa(csConfig.Prometheus.ListenPort), + ), + endpoint, + ), + nil, + ) + if err != nil { + return fmt.Errorf("could not create request to pprof endpoint: %w", err) + } - config, err := 
os.ReadFile(csConfig.API.Server.ProfilesPath) + client := &http.Client{} + + resp, err := client.Do(req) if err != nil { - return []byte(fmt.Sprintf("could not read profile file: %s", err)) + return fmt.Errorf("could not get pprof data from endpoint: %w", err) } - return config + defer resp.Body.Close() + + cli.writeToZip(zw, SUPPORT_PPROF_DIR+endpoint+".pprof", time.Now(), resp.Body) + + return nil } -func collectAcquisitionConfig() map[string][]byte { +func (cli *cliSupport) dumpProfiles(zw *zip.Writer) { + log.Info("Collecting crowdsec profile") + + cfg := cli.cfg() + cli.writeFileToZip(zw, SUPPORT_CROWDSEC_PROFILE_PATH, cfg.API.Server.ProfilesPath) +} + +func (cli *cliSupport) dumpAcquisitionConfig(zw *zip.Writer) { log.Info("Collecting acquisition config") - ret := make(map[string][]byte) + cfg := cli.cfg() - for _, filename := range csConfig.Crowdsec.AcquisitionFiles { - fileContent, err := os.ReadFile(filename) - if err != nil { - ret[filename] = []byte(fmt.Sprintf("could not read file: %s", err)) - } else { - ret[filename] = fileContent - } + for _, filename := range cfg.Crowdsec.AcquisitionFiles { + fname := strings.ReplaceAll(filename, string(filepath.Separator), "___") + cli.writeFileToZip(zw, SUPPORT_ACQUISITION_DIR+fname, filename) + } +} + +func (cli *cliSupport) dumpLogs(zw *zip.Writer) error { + log.Info("Collecting CrowdSec logs") + + cfg := cli.cfg() + + logDir := cfg.Common.LogDir + + logFiles, err := filepath.Glob(filepath.Join(logDir, "crowdsec*.log")) + if err != nil { + return fmt.Errorf("could not list log files: %w", err) } - return ret + for _, filename := range logFiles { + cli.writeFileToZip(zw, SUPPORT_LOG_DIR+filepath.Base(filename), filename) + } + + return nil } -func collectCrash() ([]string, error) { +func (cli *cliSupport) dumpCrash(zw *zip.Writer) error { log.Info("Collecting crash dumps") - return trace.List() + + traceFiles, err := trace.List() + if err != nil { + return fmt.Errorf("could not list crash dumps: %w", err) + } + + 
for _, filename := range traceFiles { + cli.writeFileToZip(zw, SUPPORT_CRASH_DIR+filepath.Base(filename), filename) + } + + return nil } -type cliSupport struct{} +type cliSupport struct { + cfg configGetter +} -func NewCLISupport() *cliSupport { - return &cliSupport{} +func NewCLISupport(cfg configGetter) *cliSupport { + return &cliSupport{ + cfg: cfg, + } } -func (cli cliSupport) NewCommand() *cobra.Command { +func (cli *cliSupport) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "support [action]", Short: "Provide commands to help during support", Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - return nil - }, } cmd.AddCommand(cli.NewDumpCmd()) @@ -294,198 +406,217 @@ func (cli cliSupport) NewCommand() *cobra.Command { return cmd } -func (cli cliSupport) NewDumpCmd() *cobra.Command { - var outFile string +// writeToZip adds a file to the zip archive, from a reader +func (cli *cliSupport) writeToZip(zipWriter *zip.Writer, filename string, mtime time.Time, reader io.Reader) { + header := &zip.FileHeader{ + Name: filename, + Method: zip.Deflate, + Modified: mtime, + } - cmd := &cobra.Command{ - Use: "dump", - Short: "Dump all your configuration to a zip file for easier support", - Long: `Dump the following informations: -- Crowdsec version -- OS version -- Installed collections list -- Installed parsers list -- Installed scenarios list -- Installed postoverflows list -- Installed context list -- Bouncers list -- Machines list -- CAPI status -- LAPI status -- Crowdsec config (sensitive information like username and password are redacted) -- Crowdsec metrics`, - Example: `cscli support dump -cscli support dump -f /tmp/crowdsec-support.zip -`, - Args: cobra.NoArgs, - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - var err error - var skipHub, skipDB, skipCAPI, skipLAPI, skipAgent bool - infos := map[string][]byte{ - SUPPORT_VERSION_PATH: 
collectVersion(), - SUPPORT_FEATURES_PATH: collectFeatures(), - } + fw, err := zipWriter.CreateHeader(header) + if err != nil { + log.Errorf("could not add zip entry for %s: %s", filename, err) + return + } - if outFile == "" { - outFile = "/tmp/crowdsec-support.zip" - } + _, err = io.Copy(fw, reader) + if err != nil { + log.Errorf("could not write zip entry for %s: %s", filename, err) + } +} - dbClient, err := database.NewClient(csConfig.DbConfig) - if err != nil { - log.Warnf("Could not connect to database: %s", err) - skipDB = true - infos[SUPPORT_BOUNCERS_PATH] = []byte(err.Error()) - infos[SUPPORT_AGENTS_PATH] = []byte(err.Error()) - } +// writeToZip adds a file to the zip archive, from a file, and retains the mtime +func (cli *cliSupport) writeFileToZip(zw *zip.Writer, filename string, fromFile string) { + mtime := time.Now() - if err = csConfig.LoadAPIServer(true); err != nil { - log.Warnf("could not load LAPI, skipping CAPI check") - skipLAPI = true - infos[SUPPORT_CAPI_STATUS_PATH] = []byte(err.Error()) - } + fi, err := os.Stat(fromFile) + if err == nil { + mtime = fi.ModTime() + } - if err = csConfig.LoadCrowdsec(); err != nil { - log.Warnf("could not load agent config, skipping crowdsec config check") - skipAgent = true - } + fin, err := os.Open(fromFile) + if err != nil { + log.Errorf("could not open file %s: %s", fromFile, err) + return + } + defer fin.Close() - hub, err := require.Hub(csConfig, nil, nil) - if err != nil { - log.Warn("Could not init hub, running on LAPI ? 
Hub related information will not be collected") - skipHub = true - infos[SUPPORT_PARSERS_PATH] = []byte(err.Error()) - infos[SUPPORT_SCENARIOS_PATH] = []byte(err.Error()) - infos[SUPPORT_POSTOVERFLOWS_PATH] = []byte(err.Error()) - infos[SUPPORT_CONTEXTS_PATH] = []byte(err.Error()) - infos[SUPPORT_COLLECTIONS_PATH] = []byte(err.Error()) - } + cli.writeToZip(zw, filename, mtime, fin) +} - if csConfig.API.Client == nil || csConfig.API.Client.Credentials == nil { - log.Warn("no agent credentials found, skipping LAPI connectivity check") - if _, ok := infos[SUPPORT_LAPI_STATUS_PATH]; ok { - infos[SUPPORT_LAPI_STATUS_PATH] = append(infos[SUPPORT_LAPI_STATUS_PATH], []byte("\nNo LAPI credentials found")...) - } - skipLAPI = true - } +func (cli *cliSupport) dump(ctx context.Context, outFile string) error { + var skipCAPI, skipLAPI, skipAgent bool - if csConfig.API.Server == nil || csConfig.API.Server.OnlineClient == nil || csConfig.API.Server.OnlineClient.Credentials == nil { - log.Warn("no CAPI credentials found, skipping CAPI connectivity check") - skipCAPI = true - } + collector := &StringHook{ + LogLevels: log.AllLevels, + } + log.AddHook(collector) - infos[SUPPORT_METRICS_HUMAN_PATH], infos[SUPPORT_METRICS_PROMETHEUS_PATH], err = collectMetrics() - if err != nil { - log.Warnf("could not collect prometheus metrics information: %s", err) - infos[SUPPORT_METRICS_HUMAN_PATH] = []byte(err.Error()) - infos[SUPPORT_METRICS_PROMETHEUS_PATH] = []byte(err.Error()) - } + cfg := cli.cfg() - infos[SUPPORT_OS_INFO_PATH], err = collectOSInfo() - if err != nil { - log.Warnf("could not collect OS information: %s", err) - infos[SUPPORT_OS_INFO_PATH] = []byte(err.Error()) - } + if outFile == "" { + outFile = filepath.Join(os.TempDir(), "crowdsec-support.zip") + } - infos[SUPPORT_CROWDSEC_CONFIG_PATH] = collectCrowdsecConfig() + w := bytes.NewBuffer(nil) + zipWriter := zip.NewWriter(w) - if !skipHub { - infos[SUPPORT_PARSERS_PATH] = collectHubItems(hub, cwhub.PARSERS) - 
infos[SUPPORT_SCENARIOS_PATH] = collectHubItems(hub, cwhub.SCENARIOS) - infos[SUPPORT_POSTOVERFLOWS_PATH] = collectHubItems(hub, cwhub.POSTOVERFLOWS) - infos[SUPPORT_CONTEXTS_PATH] = collectHubItems(hub, cwhub.POSTOVERFLOWS) - infos[SUPPORT_COLLECTIONS_PATH] = collectHubItems(hub, cwhub.COLLECTIONS) - } + db, err := database.NewClient(cfg.DbConfig) + if err != nil { + log.Warnf("Could not connect to database: %s", err) + } - if !skipDB { - infos[SUPPORT_BOUNCERS_PATH], err = collectBouncers(dbClient) - if err != nil { - log.Warnf("could not collect bouncers information: %s", err) - infos[SUPPORT_BOUNCERS_PATH] = []byte(err.Error()) - } - - infos[SUPPORT_AGENTS_PATH], err = collectAgents(dbClient) - if err != nil { - log.Warnf("could not collect agents information: %s", err) - infos[SUPPORT_AGENTS_PATH] = []byte(err.Error()) - } - } + if err = cfg.LoadAPIServer(true); err != nil { + log.Warnf("could not load LAPI, skipping CAPI check") - if !skipCAPI { - log.Info("Collecting CAPI status") - infos[SUPPORT_CAPI_STATUS_PATH] = collectAPIStatus(csConfig.API.Server.OnlineClient.Credentials.Login, - csConfig.API.Server.OnlineClient.Credentials.Password, - csConfig.API.Server.OnlineClient.Credentials.URL, - CAPIURLPrefix, - hub) - } + skipCAPI = true + } - if !skipLAPI { - log.Info("Collection LAPI status") - infos[SUPPORT_LAPI_STATUS_PATH] = collectAPIStatus(csConfig.API.Client.Credentials.Login, - csConfig.API.Client.Credentials.Password, - csConfig.API.Client.Credentials.URL, - LAPIURLPrefix, - hub) - infos[SUPPORT_CROWDSEC_PROFILE_PATH] = collectCrowdsecProfile() - } + if err = cfg.LoadCrowdsec(); err != nil { + log.Warnf("could not load agent config, skipping crowdsec config check") - if !skipAgent { - acquis := collectAcquisitionConfig() + skipAgent = true + } - for filename, content := range acquis { - fname := strings.ReplaceAll(filename, string(filepath.Separator), "___") - infos[SUPPORT_ACQUISITION_CONFIG_BASE_PATH+fname] = content - } - } + hub, err := 
require.Hub(cfg, nil, nil) + if err != nil { + log.Warn("Could not init hub, running on LAPI ? Hub related information will not be collected") + // XXX: lapi status check requires scenarios, will return an error + } - crash, err := collectCrash() - if err != nil { - log.Errorf("could not collect crash dumps: %s", err) - } + if cfg.API.Client == nil || cfg.API.Client.Credentials == nil { + log.Warn("no agent credentials found, skipping LAPI connectivity check") - for _, filename := range crash { - content, err := os.ReadFile(filename) - if err != nil { - log.Errorf("could not read crash dump %s: %s", filename, err) - } + skipLAPI = true + } - infos[SUPPORT_CRASH_PATH+filepath.Base(filename)] = content - } + if cfg.API.Server == nil || cfg.API.Server.OnlineClient == nil || cfg.API.Server.OnlineClient.Credentials == nil { + log.Warn("no CAPI credentials found, skipping CAPI connectivity check") - w := bytes.NewBuffer(nil) - zipWriter := zip.NewWriter(w) - - for filename, data := range infos { - header := &zip.FileHeader{ - Name: filename, - Method: zip.Deflate, - // TODO: retain mtime where possible (esp. 
trace) - Modified: time.Now(), - } - fw, err := zipWriter.CreateHeader(header) - if err != nil { - log.Errorf("Could not add zip entry for %s: %s", filename, err) - continue - } - fw.Write([]byte(stripAnsiString(string(data)))) - } + skipCAPI = true + } - err = zipWriter.Close() - if err != nil { - return fmt.Errorf("could not finalize zip file: %s", err) - } + if err = cli.dumpMetrics(ctx, zipWriter); err != nil { + log.Warn(err) + } - if outFile == "-" { - _, err = os.Stdout.Write(w.Bytes()) - return err - } - err = os.WriteFile(outFile, w.Bytes(), 0o600) - if err != nil { - return fmt.Errorf("could not write zip file to %s: %s", outFile, err) + if err = cli.dumpOSInfo(zipWriter); err != nil { + log.Warnf("could not collect OS information: %s", err) + } + + if err = cli.dumpConfigYAML(zipWriter); err != nil { + log.Warnf("could not collect main config file: %s", err) + } + + if hub != nil { + for _, itemType := range cwhub.ItemTypes { + if err = cli.dumpHubItems(zipWriter, hub, itemType); err != nil { + log.Warnf("could not collect %s information: %s", itemType, err) } - log.Infof("Written zip file to %s", outFile) - return nil + } + } + + if err = cli.dumpBouncers(zipWriter, db); err != nil { + log.Warnf("could not collect bouncers information: %s", err) + } + + if err = cli.dumpAgents(zipWriter, db); err != nil { + log.Warnf("could not collect agents information: %s", err) + } + + if !skipCAPI { + if err = cli.dumpCAPIStatus(zipWriter, hub); err != nil { + log.Warnf("could not collect CAPI status: %s", err) + } + } + + if !skipLAPI { + if err = cli.dumpLAPIStatus(zipWriter, hub); err != nil { + log.Warnf("could not collect LAPI status: %s", err) + } + + // call pprof separately, one might fail for timeout + + if err = cli.dumpPprof(ctx, zipWriter, "goroutine"); err != nil { + log.Warnf("could not collect pprof goroutine data: %s", err) + } + + if err = cli.dumpPprof(ctx, zipWriter, "heap"); err != nil { + log.Warnf("could not collect pprof heap data: %s", err) 
+ } + + if err = cli.dumpPprof(ctx, zipWriter, "profile"); err != nil { + log.Warnf("could not collect pprof cpu data: %s", err) + } + + cli.dumpProfiles(zipWriter) + } + + if !skipAgent { + cli.dumpAcquisitionConfig(zipWriter) + } + + if err = cli.dumpCrash(zipWriter); err != nil { + log.Warnf("could not collect crash dumps: %s", err) + } + + if err = cli.dumpLogs(zipWriter); err != nil { + log.Warnf("could not collect log files: %s", err) + } + + cli.dumpVersion(zipWriter) + cli.dumpFeatures(zipWriter) + + // log of the dump process, without color codes + collectedOutput := stripAnsiString(collector.LogBuilder.String()) + + cli.writeToZip(zipWriter, "dump.log", time.Now(), strings.NewReader(collectedOutput)) + + err = zipWriter.Close() + if err != nil { + return fmt.Errorf("could not finalize zip file: %w", err) + } + + if outFile == "-" { + _, err = os.Stdout.Write(w.Bytes()) + return err + } + + err = os.WriteFile(outFile, w.Bytes(), 0o600) + if err != nil { + return fmt.Errorf("could not write zip file to %s: %w", outFile, err) + } + + log.Infof("Written zip file to %s", outFile) + + return nil +} + +func (cli *cliSupport) NewDumpCmd() *cobra.Command { + var outFile string + + cmd := &cobra.Command{ + Use: "dump", + Short: "Dump all your configuration to a zip file for easier support", + Long: `Dump the following information: +- Crowdsec version +- OS version +- Enabled feature flags +- Latest Crowdsec logs (log processor, LAPI, remediation components) +- Installed collections, parsers, scenarios... 
+- Bouncers and machines list +- CAPI/LAPI status +- Crowdsec config (sensitive information like username and password are redacted) +- Crowdsec metrics +- Stack trace in case of process crash`, + Example: `cscli support dump +cscli support dump -f /tmp/crowdsec-support.zip +`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.dump(cmd.Context(), outFile) }, } diff --git a/pkg/database/database.go b/pkg/database/database.go index 96a495f6731..357077e7d6f 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -68,7 +68,7 @@ func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { typ, dia, err := config.ConnectionDialect() if err != nil { - return nil, err // unsupported database caught here + return nil, err //unsupported database caught here } if config.Type == "sqlite" { diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 4c7ce7fbc2c..7e74f6f9714 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -263,7 +263,7 @@ teardown() { rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) - assert_output 'parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' + assert_output 'failed to authenticate to Local API (LAPI): parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' } @test "cscli - bad LAPI password" { From 0d9e00054320df3ebb560a600c33db2939656954 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 May 2024 13:44:17 +0200 Subject: [PATCH 117/581] use go-cs-lib/downloader for data files, hub index, item YAML (#2926) --- cmd/crowdsec-cli/itemcli.go | 13 ++- go.mod | 2 +- go.sum | 4 +- pkg/cwhub/cwhub.go | 2 +- pkg/cwhub/dataset.go | 159 +++++----------------------- pkg/cwhub/dataset_test.go | 56 ---------- pkg/cwhub/hub.go | 21 +--- pkg/cwhub/hub_test.go | 6 +- pkg/cwhub/item.go | 6 +- pkg/cwhub/iteminstall.go | 11 +- 
pkg/cwhub/iteminstall_test.go | 3 +- pkg/cwhub/itemupgrade.go | 146 ++++++++++--------------- pkg/cwhub/remote.go | 39 ++++--- pkg/cwhub/sync.go | 24 +---- test/bats/20_hub.bats | 8 +- test/bats/20_hub_collections.bats | 1 - test/bats/20_hub_parsers.bats | 1 - test/bats/20_hub_postoverflows.bats | 1 - test/bats/20_hub_scenarios.bats | 1 - 19 files changed, 151 insertions(+), 353 deletions(-) delete mode 100644 pkg/cwhub/dataset_test.go diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 44a734d5e6a..a72cfa735c5 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -475,11 +475,22 @@ func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) { return "", fmt.Errorf("'%s' is not installed", item.FQName()) } - latestContent, remoteURL, err := item.FetchLatest() + dest, err := os.CreateTemp("", "cscli-diff-*") + if err != nil { + return "", fmt.Errorf("while creating temporary file: %w", err) + } + defer os.Remove(dest.Name()) + + _, remoteURL, err := item.FetchContentTo(dest.Name()) if err != nil { return "", err } + latestContent, err := os.ReadFile(dest.Name()) + if err != nil { + return "", fmt.Errorf("while reading %s: %w", dest.Name(), err) + } + localContent, err := os.ReadFile(item.State.LocalPath) if err != nil { return "", fmt.Errorf("while reading %s: %w", item.State.LocalPath, err) diff --git a/go.mod b/go.mod index d78908db0c6..2aae7f2a857 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.10 + github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 29af3c795fc..ebf10e598fe 100644 --- a/go.sum +++ 
b/go.sum @@ -107,8 +107,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.10 h1:Twt/y/rYCUspGY1zxDnGurL2svRSREAz+2+puLepd9c= -github.com/crowdsecurity/go-cs-lib v0.0.10/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51 h1:Dd/T2IMB3KG1+dvm0LUuT9AKKnT2CO0sCjFfsKDIxXE= +github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index a7864d4c076..0496834e472 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -23,7 +23,7 @@ func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { // hubClient is the HTTP client used to communicate with the CrowdSec Hub. 
var hubClient = &http.Client{ - Timeout: 120 * time.Second, + Timeout: 120 * time.Second, Transport: &hubTransport{http.DefaultTransport}, } diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index 921361e3fcf..eb56d8e32a8 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -1,19 +1,17 @@ package cwhub import ( + "context" "errors" "fmt" "io" - "io/fs" - "net/http" - "os" - "path/filepath" - "runtime" "time" "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/downloader" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -22,128 +20,6 @@ type DataSet struct { Data []types.DataSource `yaml:"data,omitempty"` } -// downloadFile downloads a file and writes it to disk, with no hash verification. -func downloadFile(url string, destPath string) error { - resp, err := hubClient.Get(url) - if err != nil { - return fmt.Errorf("while downloading %s: %w", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("bad http code %d for %s", resp.StatusCode, url) - } - - // Download to a temporary location to avoid corrupting files - // that are currently in use or memory mapped. 
- - tmpFile, err := os.CreateTemp(filepath.Dir(destPath), filepath.Base(destPath)+".*.tmp") - if err != nil { - return err - } - - tmpFileName := tmpFile.Name() - defer func() { - tmpFile.Close() - os.Remove(tmpFileName) - }() - - // avoid reading the whole file in memory - _, err = io.Copy(tmpFile, resp.Body) - if err != nil { - return err - } - - if err = tmpFile.Sync(); err != nil { - return err - } - - if err = tmpFile.Close(); err != nil { - return err - } - - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", filepath.Base(destPath)) - - if runtime.GOOS == "windows" { - // On Windows, rename will fail if the destination file already exists - // so we remove it first. - err = os.Remove(destPath) - switch { - case errors.Is(err, fs.ErrNotExist): - break - case err != nil: - return err - } - } - - if err = os.Rename(tmpFileName, destPath); err != nil { - return err - } - - return nil -} - -// needsUpdate checks if a data file has to be downloaded (or updated). -// if the local file doesn't exist, update. -// if the remote is newer than the local file, update. -// if the remote has no modification date, but local file has been modified > a week ago, update. 
-func needsUpdate(destPath string, url string, logger *logrus.Logger) bool { - fileInfo, err := os.Stat(destPath) - - switch { - case os.IsNotExist(err): - return true - case err != nil: - logger.Errorf("while getting %s: %s", destPath, err) - return true - } - - resp, err := hubClient.Head(url) - if err != nil { - logger.Errorf("while getting %s: %s", url, err) - // Head failed, Get would likely fail too -> no update - return false - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - logger.Errorf("bad http code %d for %s", resp.StatusCode, url) - return false - } - - // update if local file is older than this - shelfLife := 7 * 24 * time.Hour - - lastModify := fileInfo.ModTime() - - localIsOld := lastModify.Add(shelfLife).Before(time.Now()) - - remoteLastModified := resp.Header.Get("Last-Modified") - if remoteLastModified == "" { - if localIsOld { - logger.Infof("no last modified date for %s, but local file is older than %s", url, shelfLife) - } - - return localIsOld - } - - lastAvailable, err := time.Parse(time.RFC1123, remoteLastModified) - if err != nil { - logger.Warningf("while parsing last modified date for %s: %s", url, err) - return localIsOld - } - - if lastModify.Before(lastAvailable) { - logger.Infof("new version available, updating %s", destPath) - return true - } - - return false -} - // downloadDataSet downloads all the data files for an item. func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { dec := yaml.NewDecoder(reader) @@ -165,12 +41,31 @@ func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *lo return err } - if force || needsUpdate(destPath, dataS.SourceURL, logger) { - logger.Debugf("downloading %s in %s", dataS.SourceURL, destPath) + d := downloader. + New(). + WithHTTPClient(hubClient). + ToFile(destPath). + CompareContent(). + WithLogger(logrus.WithFields(logrus.Fields{"url": dataS.SourceURL})) + + if !force { + d = d.WithLastModified(). 
+ WithShelfLife(7 * 24 * time.Hour) + } + + ctx := context.TODO() + + downloaded, err := d.Download(ctx, dataS.SourceURL) + if err != nil { + return fmt.Errorf("while getting data: %w", err) + } - if err := downloadFile(dataS.SourceURL, destPath); err != nil { - return fmt.Errorf("while getting data: %w", err) - } + if downloaded { + logger.Infof("Downloaded %s", destPath) + // a check on stdout is used while scripting to know if the hub has been upgraded + // and a configuration reload is required + // TODO: use a better way to communicate this + fmt.Printf("updated %s\n", destPath) } } } diff --git a/pkg/cwhub/dataset_test.go b/pkg/cwhub/dataset_test.go deleted file mode 100644 index e48202e4821..00000000000 --- a/pkg/cwhub/dataset_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package cwhub - -import ( - "io" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/crowdsecurity/go-cs-lib/cstest" -) - -func TestDownloadFile(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/xx": - w.WriteHeader(http.StatusOK) - _, _ = io.WriteString(w, "example content oneoneone") - default: - w.WriteHeader(http.StatusNotFound) - _, _ = io.WriteString(w, "not found") - } - })) - defer ts.Close() - - dest := filepath.Join(t.TempDir(), "example.txt") - defer os.Remove(dest) - - err := downloadFile(ts.URL+"/xx", dest) - require.NoError(t, err) - - content, err := os.ReadFile(dest) - assert.Equal(t, "example content oneoneone", string(content)) - require.NoError(t, err) - - // bad uri - err = downloadFile("https://zz.com", dest) - cstest.RequireErrorContains(t, err, "lookup zz.com") - cstest.RequireErrorContains(t, err, "no such host") - - // 404 - err = downloadFile(ts.URL+"/x", dest) - cstest.RequireErrorContains(t, err, "bad http code 404") - - // bad target - err = 
downloadFile(ts.URL+"/xx", "") - cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage) - - // destination directory does not exist - err = downloadFile(ts.URL+"/xx", filepath.Join(t.TempDir(), "missing/example.txt")) - cstest.RequireErrorContains(t, err, cstest.PathNotFoundMessage) -} diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 6b9f56b2e17..87a6644bc72 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -1,7 +1,6 @@ package cwhub import ( - "bytes" "encoding/json" "errors" "fmt" @@ -21,8 +20,8 @@ type Hub struct { items HubItems // Items read from HubDir and InstallDir local *csconfig.LocalHubCfg remote *RemoteHubCfg - Warnings []string // Warnings encountered during sync logger *logrus.Logger + Warnings []string // Warnings encountered during sync } // GetDataDir returns the data directory, where data sets are installed. @@ -150,27 +149,17 @@ func (h *Hub) ItemStats() []string { // updateIndex downloads the latest version of the index and writes it to disk if it changed. 
func (h *Hub) updateIndex() error { - body, err := h.remote.fetchIndex() + downloaded, err := h.remote.fetchIndex(h.local.HubIndexFile) if err != nil { return err } - oldContent, err := os.ReadFile(h.local.HubIndexFile) - if err != nil { - if !os.IsNotExist(err) { - h.logger.Warningf("failed to read hub index: %s", err) - } - } else if bytes.Equal(body, oldContent) { + if downloaded { + h.logger.Infof("Wrote index to %s", h.local.HubIndexFile) + } else { h.logger.Info("hub index is up to date") - return nil - } - - if err = os.WriteFile(h.local.HubIndexFile, body, 0o644); err != nil { - return fmt.Errorf("failed to write hub index: %w", err) } - h.logger.Infof("Wrote index to %s, %d bytes", h.local.HubIndexFile, len(body)) - return nil } diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 86569cde324..d5592a16c39 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -29,6 +29,10 @@ func TestUpdateIndex(t *testing.T) { tmpIndex, err := os.CreateTemp("", "index.json") require.NoError(t, err) + // close the file to avoid preventing the rename on windows + err = tmpIndex.Close() + require.NoError(t, err) + t.Cleanup(func() { os.Remove(tmpIndex.Name()) }) @@ -72,5 +76,5 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = "/does/not/exist/index.json" err = hub.updateIndex() - cstest.RequireErrorContains(t, err, "failed to write hub index: open /does/not/exist/index.json:") + cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 6cdb5cadcb9..4249a20e134 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -29,10 +29,8 @@ const ( versionFuture // local version is higher latest, but is included in the index: should not happen ) -var ( - // The order is important, as it is used to range over sub-items in collections. 
- ItemTypes = []string{PARSERS, POSTOVERFLOWS, SCENARIOS, CONTEXTS, APPSEC_CONFIGS, APPSEC_RULES, COLLECTIONS} -) +// The order is important, as it is used to range over sub-items in collections. +var ItemTypes = []string{PARSERS, POSTOVERFLOWS, SCENARIOS, CONTEXTS, APPSEC_CONFIGS, APPSEC_RULES, COLLECTIONS} type HubItems map[string]map[string]*Item diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go index ceae3649118..274e7128a04 100644 --- a/pkg/cwhub/iteminstall.go +++ b/pkg/cwhub/iteminstall.go @@ -48,13 +48,13 @@ func (i *Item) Install(force bool, downloadOnly bool) error { } } - filePath, err := i.downloadLatest(force, true) + downloaded, err := i.downloadLatest(force, true) if err != nil { return err } - if downloadOnly { - i.hub.logger.Infof("Downloaded %s to %s", i.Name, filePath) + if downloadOnly && downloaded { + i.hub.logger.Infof("Downloaded %s", i.Name) return nil } @@ -62,6 +62,11 @@ func (i *Item) Install(force bool, downloadOnly bool) error { return fmt.Errorf("while enabling %s: %w", i.Name, err) } + // a check on stdout is used while scripting to know if the hub has been upgraded + // and a configuration reload is required + // TODO: use a better way to communicate this + fmt.Printf("installed %s\n", i.Name) + i.hub.logger.Infof("Enabled %s", i.Name) return nil diff --git a/pkg/cwhub/iteminstall_test.go b/pkg/cwhub/iteminstall_test.go index 80a419ec5da..337f66f95fa 100644 --- a/pkg/cwhub/iteminstall_test.go +++ b/pkg/cwhub/iteminstall_test.go @@ -35,7 +35,8 @@ func testTaint(hub *Hub, t *testing.T, item *Item) { // truncate the file f, err := os.Create(item.State.LocalPath) require.NoError(t, err) - f.Close() + err = f.Close() + require.NoError(t, err) // Local sync and check status err = hub.localSync() diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 6a8dc2f44b6..8b3ec7481ef 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -3,23 +3,20 @@ package cwhub // Install, upgrade and remove 
items from the hub to the local configuration import ( - "bytes" - "crypto/sha256" - "encoding/hex" + "context" "errors" "fmt" - "io" - "net/http" "os" - "path/filepath" + + "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/downloader" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) // Upgrade downloads and applies the last version of the item from the hub. func (i *Item) Upgrade(force bool) (bool, error) { - updated := false - if i.State.IsLocal() { i.hub.logger.Infof("not upgrading %s: local item", i.Name) return false, nil @@ -54,21 +51,21 @@ func (i *Item) Upgrade(force bool) (bool, error) { if i.State.Tainted { i.hub.logger.Warningf("%v %s is tainted, --force to overwrite", emoji.Warning, i.Name) } - } else { - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", i.Name) - i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) - updated = true + return false, nil } - return updated, nil + // a check on stdout is used while scripting to know if the hub has been upgraded + // and a configuration reload is required + // TODO: use a better way to communicate this + fmt.Printf("updated %s\n", i.Name) + i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) + + return true, nil } // downloadLatest downloads the latest version of the item to the hub directory. 
-func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (string, error) { +func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) { i.hub.logger.Debugf("Downloading %s %s", i.Type, i.Name) for _, sub := range i.SubItems() { @@ -84,98 +81,84 @@ func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (string, error) { i.hub.logger.Tracef("collection, recurse") if _, err := sub.downloadLatest(overwrite, updateOnly); err != nil { - return "", err + return false, err } } downloaded := sub.State.Downloaded if _, err := sub.download(overwrite); err != nil { - return "", err + return false, err } // We need to enable an item when it has been added to a collection since latest release of the collection. // We check if sub.Downloaded is false because maybe the item has been disabled by the user. if !sub.State.Installed && !downloaded { if err := sub.enable(); err != nil { - return "", fmt.Errorf("enabling '%s': %w", sub.Name, err) + return false, fmt.Errorf("enabling '%s': %w", sub.Name, err) } } } if !i.State.Installed && updateOnly && i.State.Downloaded && !overwrite { i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name) - return "", nil - } - - ret, err := i.download(overwrite) - if err != nil { - return "", err + return false, nil } - return ret, nil + return i.download(overwrite) } -// FetchLatest downloads the latest item from the hub, verifies the hash and returns the content and the used url. -func (i *Item) FetchLatest() ([]byte, string, error) { - if i.latestHash() == "" { - return nil, "", errors.New("latest hash missing from index") - } - +// FetchContentTo downloads the last version of the item's YAML file to the specified path. 
+func (i *Item) FetchContentTo(destPath string) (bool, string, error) { url, err := i.hub.remote.urlTo(i.RemotePath) if err != nil { - return nil, "", fmt.Errorf("failed to build request: %w", err) - } - - resp, err := hubClient.Get(url) - if err != nil { - return nil, "", err + return false, "", fmt.Errorf("failed to build request: %w", err) } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, "", fmt.Errorf("bad http code %d", resp.StatusCode) + wantHash := i.latestHash() + if wantHash == "" { + return false, "", errors.New("latest hash missing from index") } - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, "", err - } + d := downloader. + New(). + WithHTTPClient(hubClient). + ToFile(destPath). + WithMakeDirs(true). + WithLogger(logrus.WithFields(logrus.Fields{"url": url})). + CompareContent(). + VerifyHash("sha256", wantHash) - hash := sha256.New() - if _, err = hash.Write(body); err != nil { - return nil, "", fmt.Errorf("while hashing %s: %w", i.Name, err) - } + // TODO: recommend hub update if hash does not match - meow := hex.EncodeToString(hash.Sum(nil)) - if meow != i.Versions[i.Version].Digest { - i.hub.logger.Errorf("Downloaded version doesn't match index, please 'hub update'") - i.hub.logger.Debugf("got %s, expected %s", meow, i.Versions[i.Version].Digest) + ctx := context.TODO() - return nil, "", errors.New("invalid download hash") + downloaded, err := d.Download(ctx, url) + if err != nil { + return false, "", fmt.Errorf("while downloading %s to %s: %w", i.Name, url, err) } - return body, url, nil + return downloaded, url, nil } // download downloads the item from the hub and writes it to the hub directory. 
-func (i *Item) download(overwrite bool) (string, error) { +func (i *Item) download(overwrite bool) (bool, error) { // ensure that target file is within target dir finalPath, err := i.downloadPath() if err != nil { - return "", err + return false, err } if i.State.IsLocal() { i.hub.logger.Warningf("%s is local, can't download", i.Name) - return finalPath, nil + return false, nil } // if user didn't --force, don't overwrite local, tainted, up-to-date files if !overwrite { if i.State.Tainted { i.hub.logger.Debugf("%s: tainted, not updated", i.Name) - return "", nil + return false, nil } if i.State.UpToDate { @@ -184,45 +167,32 @@ func (i *Item) download(overwrite bool) (string, error) { } } - body, url, err := i.FetchLatest() + downloaded, _, err := i.FetchContentTo(finalPath) if err != nil { - what := i.Name - if url != "" { - what += " from " + url - } - - return "", fmt.Errorf("while downloading %s: %w", what, err) - } - - // all good, install - - parentDir := filepath.Dir(finalPath) - - if err = os.MkdirAll(parentDir, os.ModePerm); err != nil { - return "", fmt.Errorf("while creating %s: %w", parentDir, err) - } - - // check actual file - if _, err = os.Stat(finalPath); !os.IsNotExist(err) { - i.hub.logger.Warningf("%s: overwrite", i.Name) - i.hub.logger.Debugf("target: %s", finalPath) - } else { - i.hub.logger.Infof("%s: OK", i.Name) + return false, fmt.Errorf("while downloading %s: %w", i.Name, err) } - if err = os.WriteFile(finalPath, body, 0o644); err != nil { - return "", fmt.Errorf("while writing %s: %w", finalPath, err) + if downloaded { + i.hub.logger.Infof("Downloaded %s", i.Name) } i.State.Downloaded = true i.State.Tainted = false i.State.UpToDate = true - if err = downloadDataSet(i.hub.local.InstallDataDir, overwrite, bytes.NewReader(body), i.hub.logger); err != nil { - return "", fmt.Errorf("while downloading data for %s: %w", i.FileName, err) + // read content to get the list of data files + reader, err := os.Open(finalPath) + if err != nil { + 
return false, fmt.Errorf("while opening %s: %w", finalPath, err) + } + + defer reader.Close() + + if err = downloadDataSet(i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { + return false, fmt.Errorf("while downloading data for %s: %w", i.FileName, err) } - return finalPath, nil + return true, nil } // DownloadDataIfNeeded downloads the data set for the item. diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index c1eb5a7080f..5e42555fa61 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -1,9 +1,12 @@ package cwhub import ( + "context" "fmt" - "io" - "net/http" + + "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/downloader" ) // RemoteHubCfg is used to retrieve index and items from the remote hub. @@ -28,34 +31,28 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { } // fetchIndex downloads the index from the hub and returns the content. -func (r *RemoteHubCfg) fetchIndex() ([]byte, error) { +func (r *RemoteHubCfg) fetchIndex(destPath string) (bool, error) { if r == nil { - return nil, ErrNilRemoteHub + return false, ErrNilRemoteHub } url, err := r.urlTo(r.IndexPath) if err != nil { - return nil, fmt.Errorf("failed to build hub index request: %w", err) + return false, fmt.Errorf("failed to build hub index request: %w", err) } - resp, err := hubClient.Get(url) - if err != nil { - return nil, fmt.Errorf("failed http request for hub index: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - if resp.StatusCode == http.StatusNotFound { - return nil, IndexNotFoundError{url, r.Branch} - } - - return nil, fmt.Errorf("bad http code %d for %s", resp.StatusCode, url) - } + ctx := context.TODO() - body, err := io.ReadAll(resp.Body) + downloaded, err := downloader. + New(). + WithHTTPClient(hubClient). + ToFile(destPath). + CompareContent(). + WithLogger(logrus.WithFields(logrus.Fields{"url": url})). 
+ Download(ctx, url) if err != nil { - return nil, fmt.Errorf("failed to read request answer for hub index: %w", err) + return false, err } - return body, nil + return downloaded, nil } diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index cb7bf37867c..42db255c883 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -1,10 +1,7 @@ package cwhub import ( - "crypto/sha256" - "encoding/hex" "fmt" - "io" "os" "path/filepath" "slices" @@ -12,6 +9,7 @@ import ( "strings" "github.com/Masterminds/semver/v3" + "github.com/crowdsecurity/go-cs-lib/downloader" "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" ) @@ -38,29 +36,13 @@ func linkTarget(path string, logger *logrus.Logger) (string, error) { return hubpath, nil } -func getSHA256(filepath string) (string, error) { - f, err := os.Open(filepath) - if err != nil { - return "", fmt.Errorf("unable to open '%s': %w", filepath, err) - } - - defer f.Close() - - h := sha256.New() - if _, err := io.Copy(h, f); err != nil { - return "", fmt.Errorf("unable to calculate sha256 of '%s': %w", filepath, err) - } - - return hex.EncodeToString(h.Sum(nil)), nil -} - // information used to create a new Item, from a file path. 
type itemFileInfo struct { - inhub bool fname string stage string ftype string fauthor string + inhub bool } func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo, error) { @@ -466,7 +448,7 @@ func (h *Hub) localSync() error { func (i *Item) setVersionState(path string, inhub bool) error { var err error - i.State.LocalHash, err = getSHA256(path) + i.State.LocalHash, err = downloader.SHA256(path) if err != nil { return fmt.Errorf("failed to get sha256 of %s: %w", path, err) } diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index 18e3770bcd0..0d9f29b2418 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -125,13 +125,19 @@ teardown() { assert_stderr --partial "Upgraded 0 contexts" assert_stderr --partial "Upgrading collections" assert_stderr --partial "Upgraded 0 collections" + assert_stderr --partial "Upgrading appsec-configs" + assert_stderr --partial "Upgraded 0 appsec-configs" + assert_stderr --partial "Upgrading appsec-rules" + assert_stderr --partial "Upgraded 0 appsec-rules" + assert_stderr --partial "Upgrading collections" + assert_stderr --partial "Upgraded 0 collections" rune -0 cscli parsers install crowdsecurity/syslog-logs rune -0 cscli hub upgrade assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" rune -0 cscli hub upgrade --force - assert_stderr --partial "crowdsecurity/syslog-logs: overwrite" + assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" assert_stderr --partial "crowdsecurity/syslog-logs: updated" assert_stderr --partial "Upgraded 1 parsers" # this is used by the cron script to know if the hub was updated diff --git a/test/bats/20_hub_collections.bats b/test/bats/20_hub_collections.bats index 5e5b43a9e4f..1381fe8e55a 100644 --- a/test/bats/20_hub_collections.bats +++ b/test/bats/20_hub_collections.bats @@ -180,7 +180,6 @@ teardown() { assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is 
tainted, won't enable unless --force" rune -0 cscli collections install crowdsecurity/sshd --force - assert_stderr --partial "crowdsecurity/sshd: overwrite" assert_stderr --partial "Enabled crowdsecurity/sshd" } diff --git a/test/bats/20_hub_parsers.bats b/test/bats/20_hub_parsers.bats index 71a1f933a92..214463f9cfc 100644 --- a/test/bats/20_hub_parsers.bats +++ b/test/bats/20_hub_parsers.bats @@ -180,7 +180,6 @@ teardown() { assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't enable unless --force" rune -0 cscli parsers install crowdsecurity/whitelists --force - assert_stderr --partial "crowdsecurity/whitelists: overwrite" assert_stderr --partial "Enabled crowdsecurity/whitelists" } diff --git a/test/bats/20_hub_postoverflows.bats b/test/bats/20_hub_postoverflows.bats index de4b1e8a59e..5123966a44e 100644 --- a/test/bats/20_hub_postoverflows.bats +++ b/test/bats/20_hub_postoverflows.bats @@ -180,7 +180,6 @@ teardown() { assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't enable unless --force" rune -0 cscli postoverflows install crowdsecurity/rdns --force - assert_stderr --partial "crowdsecurity/rdns: overwrite" assert_stderr --partial "Enabled crowdsecurity/rdns" } diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats index 9c441057aa2..3089e244cf1 100644 --- a/test/bats/20_hub_scenarios.bats +++ b/test/bats/20_hub_scenarios.bats @@ -181,7 +181,6 @@ teardown() { assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't enable unless --force" rune -0 cscli scenarios install crowdsecurity/ssh-bf --force - assert_stderr --partial "crowdsecurity/ssh-bf: overwrite" assert_stderr --partial "Enabled crowdsecurity/ssh-bf" } From 
e5cd480425d6367f84cac845dbba9bc6daa241f3 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 May 2024 13:49:19 +0200 Subject: [PATCH 118/581] update go-cs-lib to 0.0.11 (#2990) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2aae7f2a857..49cfe05ad39 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51 + github.com/crowdsecurity/go-cs-lib v0.0.11 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index ebf10e598fe..05b38f68384 100644 --- a/go.sum +++ b/go.sum @@ -107,8 +107,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51 h1:Dd/T2IMB3KG1+dvm0LUuT9AKKnT2CO0sCjFfsKDIxXE= -github.com/crowdsecurity/go-cs-lib v0.0.11-0.20240502193824-180b39e88a51/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.11 h1:ygUOKrkMLaJ2wjC020LgtY6XDkToNFK4NmYlhpkk5ko= +github.com/crowdsecurity/go-cs-lib v0.0.11/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod 
h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= From defa0767940fc6e157fd620a9d9be24b691671ce Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 May 2024 15:09:22 +0200 Subject: [PATCH 119/581] CI: disable log formatting for consistency (#2989) * CI: disable log formatting for consistency * lint (shellharden) --- test/bats/01_crowdsec.bats | 40 +++++++++--------- test/bats/01_crowdsec_lapi.bats | 8 ++-- test/bats/01_cscli.bats | 40 +++++++++--------- test/bats/02_nolapi.bats | 12 +++--- test/bats/03_noagent.bats | 12 +++--- test/bats/04_nocapi.bats | 10 ++--- test/bats/05_config_yaml_local.bats | 22 +++++----- test/bats/11_bouncers_tls.bats | 2 +- test/bats/13_capi_whitelists.bats | 14 +++---- test/bats/20_hub_collections_dep.bats | 2 +- test/bats/30_machines_tls.bats | 8 ++-- test/bats/40_cold-logs.bats | 6 +-- test/bats/40_live-ban.bats | 12 +++--- test/bats/50_simulation.bats | 10 ++--- test/bats/70_plugin_http.bats | 14 +++---- test/bats/71_plugin_dummy.bats | 10 ++--- test/bats/72_plugin_badconfig.bats | 60 +++++++++++++-------------- test/bats/73_plugin_formatting.bats | 6 +-- test/bats/80_alerts.bats | 16 +++---- test/bats/81_alert_context.bats | 8 ++-- test/bats/90_decisions.bats | 4 +- test/lib/config/config-local | 1 - 22 files changed, 158 insertions(+), 159 deletions(-) diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 7051b4d33a3..d8d369eedf4 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -24,8 +24,8 @@ teardown() { #---------- @test "crowdsec (usage)" { - rune -0 wait-for --out "Usage of " "${CROWDSEC}" -h - rune -0 wait-for --out "Usage of " "${CROWDSEC}" --help + rune -0 wait-for --out "Usage of " "$CROWDSEC" -h + rune -0 wait-for --out "Usage of " "$CROWDSEC" --help } @test "crowdsec (unknown flag)" { @@ -33,19 +33,19 @@ teardown() { } @test "crowdsec (unknown 
argument)" { - rune -0 wait-for --err "argument provided but not defined: trololo" "${CROWDSEC}" trololo + rune -0 wait-for --err "argument provided but not defined: trololo" "$CROWDSEC" trololo } @test "crowdsec (no api and no agent)" { rune -0 wait-for \ --err "you must run at least the API Server or crowdsec" \ - "${CROWDSEC}" -no-api -no-cs + "$CROWDSEC" -no-api -no-cs } @test "crowdsec - print error on exit" { # errors that cause program termination are printed to stderr, not only logs config_set '.db_config.type="meh"' - rune -1 "${CROWDSEC}" + rune -1 "$CROWDSEC" assert_stderr --partial "unable to create database client: unknown database type 'meh'" } @@ -53,23 +53,23 @@ teardown() { config_set '.common={}' rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" refute_output config_set 'del(.common)' rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" refute_output } @test "CS_LAPI_SECRET not strong enough" { - CS_LAPI_SECRET=foo rune -1 wait-for "${CROWDSEC}" + CS_LAPI_SECRET=foo rune -1 wait-for "$CROWDSEC" assert_stderr --partial "api server init: unable to run local API: controller init: CS_LAPI_SECRET not strong enough" } @test "crowdsec - reload (change of logfile, disabled agent)" { - logdir1=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + logdir1=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) log_old="${logdir1}/crowdsec.log" config_set ".common.log_dir=\"${logdir1}\"" @@ -81,7 +81,7 @@ teardown() { assert_file_exists "$log_old" assert_file_contains "$log_old" "Starting processing data" - logdir2=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + logdir2=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) log_new="${logdir2}/crowdsec.log" config_set ".common.log_dir=\"${logdir2}\"" @@ -137,7 +137,7 @@ teardown() { ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') rm -f "$ACQUIS_YAML" - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" assert_stderr --partial "acquis.yaml: no such file or 
directory" } @@ -150,7 +150,7 @@ teardown() { rm -f "$ACQUIS_DIR" config_set '.common.log_media="stdout"' - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" # check warning assert_stderr --partial "no acquisition file found" assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" @@ -166,7 +166,7 @@ teardown() { config_set '.crowdsec_service.acquisition_dir=""' config_set '.common.log_media="stdout"' - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" # check warning assert_stderr --partial "no acquisition_path or acquisition_dir specified" assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" @@ -184,13 +184,13 @@ teardown() { rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" # now, if foo.yaml is empty instead, there won't be valid datasources. cat /dev/null >"$ACQUIS_DIR"/foo.yaml - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" } @@ -217,8 +217,8 @@ teardown() { #shellcheck disable=SC2016 rune -0 wait-for \ - --err 'datasource '\''journalctl'\'' is not available: exec: "journalctl": executable file not found in ' \ - env PATH='' "${CROWDSEC}" + --err 'datasource '\''journalctl'\'' is not available: exec: \\"journalctl\\": executable file not found in ' \ + env PATH='' "$CROWDSEC" # if all datasources are disabled, crowdsec should exit @@ -226,7 +226,7 @@ teardown() { rm -f "$ACQUIS_YAML" config_set '.crowdsec_service.acquisition_path=""' - rune -1 wait-for env PATH='' "${CROWDSEC}" + rune -1 wait-for env PATH='' "$CROWDSEC" assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" } @@ -237,11 +237,11 @@ teardown() { # if filenames are missing, it won't be able to detect source type config_set "$ACQUIS_YAML" '.source="file"' - rune -1 wait-for "${CROWDSEC}" + rune -1 
wait-for "$CROWDSEC" assert_stderr --partial "failed to configure datasource file: no filename or filenames configuration provided" config_set "$ACQUIS_YAML" '.filenames=["file.log"]' config_set "$ACQUIS_YAML" '.meh=3' - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" assert_stderr --partial "field meh not found in type fileacquisition.FileConfiguration" } diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 1b7940615ed..0f6c41cc53d 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ b/test/bats/01_crowdsec_lapi.bats @@ -27,25 +27,25 @@ teardown() { @test "lapi (.api.server.enable=false)" { rune -0 config_set '.api.server.enable=false' - rune -1 "${CROWDSEC}" -no-cs + rune -1 "$CROWDSEC" -no-cs assert_stderr --partial "you must run at least the API Server or crowdsec" } @test "lapi (no .api.server.listen_uri)" { rune -0 config_set 'del(.api.server.listen_socket) | del(.api.server.listen_uri)' - rune -1 "${CROWDSEC}" -no-cs + rune -1 "$CROWDSEC" -no-cs assert_stderr --partial "no listen_uri or listen_socket specified" } @test "lapi (bad .api.server.listen_uri)" { rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:-80"' - rune -1 "${CROWDSEC}" -no-cs + rune -1 "$CROWDSEC" -no-cs assert_stderr --partial "local API server stopped with error: listening on 127.0.0.1:-80: listen tcp: address -80: invalid port" } @test "lapi (listen on random port)" { config_set '.common.log_media="stdout"' rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:0"' - rune -0 wait-for --err "CrowdSec Local API listening on 127.0.0.1:" "${CROWDSEC}" -no-cs + rune -0 wait-for --err "CrowdSec Local API listening on 127.0.0.1:" "$CROWDSEC" -no-cs } diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 7e74f6f9714..33dd2e12ec2 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -51,7 +51,7 @@ teardown() { assert_stderr --partial "Constraint_acquis:" # 
should work without configuration file - rm "${CONFIG_YAML}" + rm "$CONFIG_YAML" rune -0 cscli version assert_stderr --partial "version:" } @@ -62,7 +62,7 @@ teardown() { assert_line --regexp ".* help .* Help about any command" # should work without configuration file - rm "${CONFIG_YAML}" + rm "$CONFIG_YAML" rune -0 cscli help assert_line "Available Commands:" } @@ -132,7 +132,7 @@ teardown() { @test "cscli - required configuration paths" { - config=$(cat "${CONFIG_YAML}") + config=$(cat "$CONFIG_YAML") configdir=$(config_get '.config_paths.config_dir') # required configuration paths with no defaults @@ -140,12 +140,12 @@ teardown() { config_set 'del(.config_paths)' rune -1 cscli hub list assert_stderr --partial 'no configuration paths provided' - echo "$config" > "${CONFIG_YAML}" + echo "$config" > "$CONFIG_YAML" config_set 'del(.config_paths.data_dir)' rune -1 cscli hub list assert_stderr --partial "please provide a data directory with the 'data_dir' directive in the 'config_paths' section" - echo "$config" > "${CONFIG_YAML}" + echo "$config" > "$CONFIG_YAML" # defaults @@ -153,13 +153,13 @@ teardown() { rune -0 cscli hub list rune -0 cscli config show --key Config.ConfigPaths.HubDir assert_output "$configdir/hub" - echo "$config" > "${CONFIG_YAML}" + echo "$config" > "$CONFIG_YAML" config_set 'del(.config_paths.index_path)' rune -0 cscli hub list rune -0 cscli config show --key Config.ConfigPaths.HubIndexFile assert_output "$configdir/hub/.index.json" - echo "$config" > "${CONFIG_YAML}" + echo "$config" > "$CONFIG_YAML" } @test "cscli config show-yaml" { @@ -182,30 +182,30 @@ teardown() { assert_stderr --partial "failed to backup config: while creating /dev/null/blah: mkdir /dev/null/blah: not a directory" # pick a dirpath - backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) + backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) # succeed the first time - rune -0 cscli config backup "${backupdir}" + rune -0 cscli config backup "$backupdir" assert_stderr --partial 
"Starting configuration backup" # don't overwrite an existing backup - rune -1 cscli config backup "${backupdir}" + rune -1 cscli config backup "$backupdir" assert_stderr --partial "failed to backup config" assert_stderr --partial "file exists" SIMULATION_YAML="$(config_get '.config_paths.simulation_path')" # restore - rm "${SIMULATION_YAML}" - rune -0 cscli config restore "${backupdir}" - assert_file_exists "${SIMULATION_YAML}" + rm "$SIMULATION_YAML" + rune -0 cscli config restore "$backupdir" + assert_file_exists "$SIMULATION_YAML" # cleanup rm -rf -- "${backupdir:?}" # backup: detect missing files - rm "${SIMULATION_YAML}" - rune -1 cscli config backup "${backupdir}" + rm "$SIMULATION_YAML" + rune -1 cscli config backup "$backupdir" assert_stderr --regexp "failed to backup config: failed copy .* to .*: stat .*: no such file or directory" rm -rf -- "${backupdir:?}" } @@ -221,7 +221,7 @@ teardown() { @test "cscli - missing LAPI credentials file" { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - rm -f "${LOCAL_API_CREDENTIALS}" + rm -f "$LOCAL_API_CREDENTIALS" rune -1 cscli lapi status assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" @@ -234,7 +234,7 @@ teardown() { @test "cscli - empty LAPI credentials file" { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - : > "${LOCAL_API_CREDENTIALS}" + : > "$LOCAL_API_CREDENTIALS" rune -1 cscli lapi status assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" @@ -259,7 +259,7 @@ teardown() { @test "cscli - malformed LAPI url" { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "${LOCAL_API_CREDENTIALS}" '.url="http://127.0.0.1:-80"' + config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) @@ -269,7 +269,7 @@ teardown() { @test "cscli - bad LAPI 
password" { rune -0 ./instance-crowdsec start LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "${LOCAL_API_CREDENTIALS}" '.password="meh"' + config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) @@ -286,7 +286,7 @@ teardown() { rune -0 cscli completion fish assert_output --partial "# fish completion for cscli" - rm "${CONFIG_YAML}" + rm "$CONFIG_YAML" rune -0 cscli completion bash assert_output --partial "# bash completion for cscli" } diff --git a/test/bats/02_nolapi.bats b/test/bats/02_nolapi.bats index f1d810bc166..cefa6d798b4 100644 --- a/test/bats/02_nolapi.bats +++ b/test/bats/02_nolapi.bats @@ -27,12 +27,12 @@ teardown() { config_set '.common.log_media="stdout"' rune -0 wait-for \ --err "CrowdSec Local API listening" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "crowdsec should not run without LAPI (-no-api flag)" { config_set '.common.log_media="stdout"' - rune -1 wait-for "${CROWDSEC}" -no-api + rune -1 wait-for "$CROWDSEC" -no-api } @test "crowdsec should not run without LAPI (no api.server in configuration file)" { @@ -40,7 +40,7 @@ teardown() { config_log_stderr rune -0 wait-for \ --err "crowdsec local API is disabled" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi status shouldn't be ok without api.server" { @@ -68,10 +68,10 @@ teardown() { @test "cscli config backup" { config_disable_lapi - backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) - rune -0 cscli config backup "${backupdir}" + backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) + rune -0 cscli config backup "$backupdir" assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "${backupdir}" + rune -1 cscli config backup "$backupdir" rm -rf -- "${backupdir:?}" assert_stderr --partial "failed to backup config" diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index e75e375ad1c..60731b90713 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats 
@@ -26,14 +26,14 @@ teardown() { config_set '.common.log_media="stdout"' rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "no agent: crowdsec LAPI should run (-no-cs flag)" { config_set '.common.log_media="stdout"' rune -0 wait-for \ --err "CrowdSec Local API listening" \ - "${CROWDSEC}" -no-cs + "$CROWDSEC" -no-cs } @test "no agent: crowdsec LAPI should run (no crowdsec_service in configuration file)" { @@ -41,7 +41,7 @@ teardown() { config_log_stderr rune -0 wait-for \ --err "crowdsec agent is disabled" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "no agent: cscli config show" { @@ -62,10 +62,10 @@ teardown() { @test "no agent: cscli config backup" { config_disable_agent - backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) - rune -0 cscli config backup "${backupdir}" + backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) + rune -0 cscli config backup "$backupdir" assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "${backupdir}" + rune -1 cscli config backup "$backupdir" assert_stderr --partial "failed to backup config" assert_stderr --partial "file exists" diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index 234db182a53..c02a75810b9 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -27,7 +27,7 @@ teardown() { rune -0 wait-for \ --err "Communication with CrowdSec Central API disabled from args" \ - "${CROWDSEC}" -no-capi + "$CROWDSEC" -no-capi } @test "without capi: crowdsec LAPI should still work" { @@ -35,7 +35,7 @@ teardown() { config_set '.common.log_media="stdout"' rune -0 wait-for \ --err "push and pull to Central API disabled" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "without capi: cscli capi status -> fail" { @@ -53,10 +53,10 @@ teardown() { @test "no agent: cscli config backup" { config_disable_capi - backupdir=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp -u) - rune -0 cscli config backup "${backupdir}" + backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) 
+ rune -0 cscli config backup "$backupdir" assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "${backupdir}" + rune -1 cscli config backup "$backupdir" assert_stderr --partial "failed to backup config" assert_stderr --partial "file exists" rm -rf -- "${backupdir:?}" diff --git a/test/bats/05_config_yaml_local.bats b/test/bats/05_config_yaml_local.bats index b8b6da117ea..ec7a4201964 100644 --- a/test/bats/05_config_yaml_local.bats +++ b/test/bats/05_config_yaml_local.bats @@ -21,7 +21,7 @@ setup() { load "../lib/setup.sh" ./instance-data load rune -0 config_get '.api.client.credentials_path' - LOCAL_API_CREDENTIALS="${output}" + LOCAL_API_CREDENTIALS="$output" export LOCAL_API_CREDENTIALS } @@ -88,13 +88,13 @@ teardown() { @test "simulation.yaml.local" { rune -0 config_get '.config_paths.simulation_path' refute_output null - SIMULATION="${output}" + SIMULATION="$output" - echo "simulation: off" >"${SIMULATION}" + echo "simulation: off" >"$SIMULATION" rune -0 cscli simulation status -o human assert_stderr --partial "global simulation: disabled" - echo "simulation: on" >"${SIMULATION}" + echo "simulation: on" >"$SIMULATION" rune -0 cscli simulation status -o human assert_stderr --partial "global simulation: enabled" @@ -110,7 +110,7 @@ teardown() { @test "profiles.yaml.local" { rune -0 config_get '.api.server.profiles_path' refute_output null - PROFILES="${output}" + PROFILES="$output" cat <<-EOT >"${PROFILES}.local" name: default_ip_remediation @@ -122,17 +122,17 @@ teardown() { on_success: break EOT - tmpfile=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp) - touch "${tmpfile}" + tmpfile=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp) + touch "$tmpfile" ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') - echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"${ACQUIS_YAML}" + echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"$ACQUIS_YAML" rune -0 cscli collections install crowdsecurity/sshd rune -0 cscli parsers 
install crowdsecurity/syslog-logs ./instance-crowdsec start sleep .5 - fake_log >>"${tmpfile}" + fake_log >>"$tmpfile" # this could be simplified, but some systems are slow and we don't want to # wait more than required @@ -141,6 +141,6 @@ teardown() { rune -0 cscli decisions list -o json rune -0 jq --exit-status '.[].decisions[0] | [.value,.type] == ["1.1.1.172","captcha"]' <(output) && break done - rm -f -- "${tmpfile}" - [[ "${status}" -eq 0 ]] || fail "captcha not triggered" + rm -f -- "$tmpfile" + [[ "$status" -eq 0 ]] || fail "captcha not triggered" } diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 2c39aae3079..438bec40242 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -7,7 +7,7 @@ setup_file() { load "../lib/setup_file.sh" ./instance-data load - tmpdir="${BATS_FILE_TMPDIR}" + tmpdir="$BATS_FILE_TMPDIR" export tmpdir CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" diff --git a/test/bats/13_capi_whitelists.bats b/test/bats/13_capi_whitelists.bats index d05a9d93294..ed7ef2ac560 100644 --- a/test/bats/13_capi_whitelists.bats +++ b/test/bats/13_capi_whitelists.bats @@ -31,7 +31,7 @@ teardown() { @test "capi_whitelists: file missing" { rune -0 wait-for \ --err "while opening capi whitelist file: open $CAPI_WHITELISTS_YAML: no such file or directory" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: error on open" { @@ -40,11 +40,11 @@ teardown() { if is_package_testing; then rune -0 wait-for \ --err "while parsing capi whitelist file .*: empty file" \ - "${CROWDSEC}" + "$CROWDSEC" else rune -0 wait-for \ --err "while opening capi whitelist file: open $CAPI_WHITELISTS_YAML: permission denied" \ - "${CROWDSEC}" + "$CROWDSEC" fi } @@ -52,28 +52,28 @@ teardown() { echo > "$CAPI_WHITELISTS_YAML" rune -0 wait-for \ --err "while parsing capi whitelist file '$CAPI_WHITELISTS_YAML': empty file" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: empty lists" { echo '{"ips": [], "cidrs": []}' 
> "$CAPI_WHITELISTS_YAML" rune -0 wait-for \ --err "Starting processing data" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: bad ip" { echo '{"ips": ["blahblah"], "cidrs": []}' > "$CAPI_WHITELISTS_YAML" rune -0 wait-for \ --err "while parsing capi whitelist file '$CAPI_WHITELISTS_YAML': invalid IP address: blahblah" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: bad cidr" { echo '{"ips": [], "cidrs": ["blahblah"]}' > "$CAPI_WHITELISTS_YAML" rune -0 wait-for \ --err "while parsing capi whitelist file '$CAPI_WHITELISTS_YAML': invalid CIDR address: blahblah" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "capi_whitelists: file with ip and cidr values" { diff --git a/test/bats/20_hub_collections_dep.bats b/test/bats/20_hub_collections_dep.bats index c3df948a353..673b812dc0d 100644 --- a/test/bats/20_hub_collections_dep.bats +++ b/test/bats/20_hub_collections_dep.bats @@ -121,6 +121,6 @@ teardown() { rune -1 cscli hub list assert_stderr --partial "circular dependency detected" - rune -1 wait-for "${CROWDSEC}" + rune -1 wait-for "$CROWDSEC" assert_stderr --partial "circular dependency detected" } diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 6909c89cb1f..877f8672b24 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -7,10 +7,10 @@ setup_file() { load "../lib/setup_file.sh" ./instance-data load - CONFIG_DIR=$(dirname "${CONFIG_YAML}") + CONFIG_DIR=$(dirname "$CONFIG_YAML") export CONFIG_DIR - tmpdir="${BATS_FILE_TMPDIR}" + tmpdir="$BATS_FILE_TMPDIR" export tmpdir CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" @@ -80,7 +80,7 @@ teardown() { rune -0 wait-for \ --err "missing TLS key file" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "missing cert_file" { @@ -88,7 +88,7 @@ teardown() { rune -0 wait-for \ --err "missing TLS cert file" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "invalid OU for agent" { diff --git a/test/bats/40_cold-logs.bats b/test/bats/40_cold-logs.bats index 36220375b87..0e167d3d077 
100644 --- a/test/bats/40_cold-logs.bats +++ b/test/bats/40_cold-logs.bats @@ -32,14 +32,14 @@ setup() { #---------- @test "-type and -dsn are required together" { - rune -1 "${CROWDSEC}" -no-api -type syslog + rune -1 "$CROWDSEC" -no-api -type syslog assert_stderr --partial "-type requires a -dsn argument" - rune -1 "${CROWDSEC}" -no-api -dsn file:///dev/fd/0 + rune -1 "$CROWDSEC" -no-api -dsn file:///dev/fd/0 assert_stderr --partial "-dsn requires a -type argument" } @test "the one-shot mode works" { - rune -0 "${CROWDSEC}" -dsn file://<(fake_log) -type syslog -no-api + rune -0 "$CROWDSEC" -dsn file://<(fake_log) -type syslog -no-api refute_output assert_stderr --partial "single file mode : log_media=stdout daemonize=false" assert_stderr --regexp "Adding file .* to filelist" diff --git a/test/bats/40_live-ban.bats b/test/bats/40_live-ban.bats index a544f67be18..122ea05e41a 100644 --- a/test/bats/40_live-ban.bats +++ b/test/bats/40_live-ban.bats @@ -35,20 +35,20 @@ teardown() { #---------- @test "1.1.1.172 has been banned" { - tmpfile=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp) - touch "${tmpfile}" + tmpfile=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp) + touch "$tmpfile" ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') - echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"${ACQUIS_YAML}" + echo -e "---\nfilename: ${tmpfile}\nlabels:\n type: syslog\n" >>"$ACQUIS_YAML" ./instance-crowdsec start sleep 0.2 - fake_log >>"${tmpfile}" + fake_log >>"$tmpfile" sleep 0.2 - rm -f -- "${tmpfile}" + rm -f -- "$tmpfile" found=0 # this may take some time in CI @@ -59,5 +59,5 @@ teardown() { fi sleep 0.2 done - assert_equal 1 "${found}" + assert_equal 1 "$found" } diff --git a/test/bats/50_simulation.bats b/test/bats/50_simulation.bats index 0d29d6bfd52..ab4145551ff 100644 --- a/test/bats/50_simulation.bats +++ b/test/bats/50_simulation.bats @@ -33,7 +33,7 @@ setup() { @test "we have one decision" { rune -0 cscli simulation disable --global - fake_log | 
"${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list -o json rune -0 jq '. | length' <(output) assert_output 1 @@ -41,7 +41,7 @@ setup() { @test "1.1.1.174 has been banned (exact)" { rune -0 cscli simulation disable --global - fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list -o json rune -0 jq -r '.[].decisions[0].value' <(output) assert_output '1.1.1.174' @@ -49,7 +49,7 @@ setup() { @test "decision has simulated == false (exact)" { rune -0 cscli simulation disable --global - fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list -o json rune -0 jq '.[].decisions[0].simulated' <(output) assert_output 'false' @@ -57,7 +57,7 @@ setup() { @test "simulated scenario, listing non-simulated: expect no decision" { rune -0 cscli simulation enable crowdsecurity/ssh-bf - fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list --no-simu -o json assert_json '[]' } @@ -65,7 +65,7 @@ setup() { @test "global simulation, listing non-simulated: expect no decision" { rune -0 cscli simulation disable crowdsecurity/ssh-bf rune -0 cscli simulation enable --global - fake_log | "${CROWDSEC}" -dsn file:///dev/fd/0 -type syslog -no-api + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api rune -0 cscli decisions list --no-simu -o json assert_json '[]' } diff --git a/test/bats/70_plugin_http.bats b/test/bats/70_plugin_http.bats index a8b860aab83..462fc7c9406 100644 --- a/test/bats/70_plugin_http.bats +++ b/test/bats/70_plugin_http.bats @@ -15,7 +15,7 @@ setup_file() { export MOCK_URL PLUGIN_DIR=$(config_get '.config_paths.plugin_dir') # could have a 
trailing slash - PLUGIN_DIR=$(realpath "${PLUGIN_DIR}") + PLUGIN_DIR=$(realpath "$PLUGIN_DIR") export PLUGIN_DIR # https://mikefarah.gitbook.io/yq/operators/env-variable-operators @@ -35,10 +35,10 @@ setup_file() { .plugin_config.group="" ' - rm -f -- "${MOCK_OUT}" + rm -f -- "$MOCK_OUT" ./instance-crowdsec start - ./instance-mock-http start "${MOCK_PORT}" + ./instance-mock-http start "$MOCK_PORT" } teardown_file() { @@ -63,24 +63,24 @@ setup() { } @test "expected 1 log line from http server" { - rune -0 wc -l <"${MOCK_OUT}" + rune -0 wc -l <"$MOCK_OUT" # wc can pad with spaces on some platforms rune -0 tr -d ' ' < <(output) assert_output 1 } @test "expected to receive 2 alerts in the request body from plugin" { - rune -0 jq -r '.request_body' <"${MOCK_OUT}" + rune -0 jq -r '.request_body' <"$MOCK_OUT" rune -0 jq -r 'length' <(output) assert_output 2 } @test "expected to receive IP 1.2.3.4 as value of first decision" { - rune -0 jq -r '.request_body[0].decisions[0].value' <"${MOCK_OUT}" + rune -0 jq -r '.request_body[0].decisions[0].value' <"$MOCK_OUT" assert_output 1.2.3.4 } @test "expected to receive IP 1.2.3.5 as value of second decision" { - rune -0 jq -r '.request_body[1].decisions[0].value' <"${MOCK_OUT}" + rune -0 jq -r '.request_body[1].decisions[0].value' <"$MOCK_OUT" assert_output 1.2.3.5 } diff --git a/test/bats/71_plugin_dummy.bats b/test/bats/71_plugin_dummy.bats index 95b64fea070..c242d7ec4bc 100644 --- a/test/bats/71_plugin_dummy.bats +++ b/test/bats/71_plugin_dummy.bats @@ -9,15 +9,15 @@ setup_file() { ./instance-data load - tempfile=$(TMPDIR="${BATS_FILE_TMPDIR}" mktemp) + tempfile=$(TMPDIR="$BATS_FILE_TMPDIR" mktemp) export tempfile - tempfile2=$(TMPDIR="${BATS_FILE_TMPDIR}" mktemp) + tempfile2=$(TMPDIR="$BATS_FILE_TMPDIR" mktemp) export tempfile2 DUMMY_YAML="$(config_get '.config_paths.notification_dir')/dummy.yaml" - config_set "${DUMMY_YAML}" ' + config_set "$DUMMY_YAML" ' .group_wait="5s" | .group_threshold=2 | .output_file=strenv(tempfile) | 
@@ -67,12 +67,12 @@ setup() { } @test "expected 1 notification" { - rune -0 cat "${tempfile}" + rune -0 cat "$tempfile" assert_output --partial 1.2.3.4 assert_output --partial 1.2.3.5 } @test "second notification works too" { - rune -0 cat "${tempfile2}" + rune -0 cat "$tempfile2" assert_output --partial secondfile } diff --git a/test/bats/72_plugin_badconfig.bats b/test/bats/72_plugin_badconfig.bats index c9a69b9fcb0..7be16c6cf8e 100644 --- a/test/bats/72_plugin_badconfig.bats +++ b/test/bats/72_plugin_badconfig.bats @@ -8,7 +8,7 @@ setup_file() { PLUGIN_DIR=$(config_get '.config_paths.plugin_dir') # could have a trailing slash - PLUGIN_DIR=$(realpath "${PLUGIN_DIR}") + PLUGIN_DIR=$(realpath "$PLUGIN_DIR") export PLUGIN_DIR PROFILES_PATH=$(config_get '.api.server.profiles_path') @@ -26,50 +26,50 @@ setup() { teardown() { ./instance-crowdsec stop - rm -f "${PLUGIN_DIR}"/badname - chmod go-w "${PLUGIN_DIR}"/notification-http || true + rm -f "$PLUGIN_DIR"/badname + chmod go-w "$PLUGIN_DIR"/notification-http || true } #---------- @test "misconfigured plugin, only user is empty" { config_set '.plugin_config.user="" | .plugin_config.group="nogroup"' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: both plugin user and group must be set" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "misconfigured plugin, only group is empty" { config_set '(.plugin_config.user="nobody") | (.plugin_config.group="")' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: both plugin user and group must be set" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "misconfigured plugin, user does not 
exist" { config_set '(.plugin_config.user="userdoesnotexist") | (.plugin_config.group="groupdoesnotexist")' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: user: unknown user userdoesnotexist" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "misconfigured plugin, group does not exist" { config_set '(.plugin_config.user=strenv(USER)) | (.plugin_config.group="groupdoesnotexist")' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: group: unknown group groupdoesnotexist" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "bad plugin name" { - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' - cp "${PLUGIN_DIR}"/notification-http "${PLUGIN_DIR}"/badname + config_set "$PROFILES_PATH" '.notifications=["http_default"]' + cp "$PLUGIN_DIR"/notification-http "$PLUGIN_DIR"/badname rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: plugin name ${PLUGIN_DIR}/badname is invalid. 
Name should be like {type-name}" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "duplicate notification config" { @@ -77,58 +77,58 @@ teardown() { # email_default has two configurations rune -0 yq -i '.name="email_default"' "$CONFIG_DIR/notifications/http.yaml" # enable a notification, otherwise plugins are ignored - config_set "${PROFILES_PATH}" '.notifications=["slack_default"]' + config_set "$PROFILES_PATH" '.notifications=["slack_default"]' # the slack plugin may fail or not, but we just need the logs config_set '.common.log_media="stdout"' rune wait-for \ --err "notification 'email_default' is defined multiple times" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "bad plugin permission (group writable)" { - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' - chmod g+w "${PLUGIN_DIR}"/notification-http + config_set "$PROFILES_PATH" '.notifications=["http_default"]' + chmod g+w "$PLUGIN_DIR"/notification-http rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: plugin at ${PLUGIN_DIR}/notification-http is group writable, group writable plugins are invalid" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "bad plugin permission (world writable)" { - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' - chmod o+w "${PLUGIN_DIR}"/notification-http + config_set "$PROFILES_PATH" '.notifications=["http_default"]' + chmod o+w "$PLUGIN_DIR"/notification-http rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin: plugin at ${PLUGIN_DIR}/notification-http is world writable, world writable plugins are invalid" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "config.yaml: missing .plugin_config section" { config_set 'del(.plugin_config)' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: plugins are enabled, but the plugin_config section is missing in the configuration" \ - 
"${CROWDSEC}" + "$CROWDSEC" } @test "config.yaml: missing config_paths.notification_dir" { config_set 'del(.config_paths.notification_dir)' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: plugins are enabled, but config_paths.notification_dir is not defined" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "config.yaml: missing config_paths.plugin_dir" { config_set 'del(.config_paths.plugin_dir)' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: plugins are enabled, but config_paths.plugin_dir is not defined" \ - "${CROWDSEC}" + "$CROWDSEC" } @test "unable to run plugin broker: while reading plugin config" { config_set '.config_paths.notification_dir="/this/path/does/not/exist"' - config_set "${PROFILES_PATH}" '.notifications=["http_default"]' + config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ --err "api server init: unable to run plugin broker: while loading plugin config: open /this/path/does/not/exist: no such file or directory" \ - "${CROWDSEC}" + "$CROWDSEC" } diff --git a/test/bats/73_plugin_formatting.bats b/test/bats/73_plugin_formatting.bats index 153193fb18f..9ed64837403 100644 --- a/test/bats/73_plugin_formatting.bats +++ b/test/bats/73_plugin_formatting.bats @@ -9,7 +9,7 @@ setup_file() { ./instance-data load - tempfile=$(TMPDIR="${BATS_FILE_TMPDIR}" mktemp) + tempfile=$(TMPDIR="$BATS_FILE_TMPDIR" mktemp) export tempfile DUMMY_YAML="$(config_get '.config_paths.notification_dir')/dummy.yaml" @@ -17,7 +17,7 @@ setup_file() { # we test the template that is suggested in the email notification # the $alert is not a shell variable # shellcheck disable=SC2016 - config_set "${DUMMY_YAML}" ' + config_set "$DUMMY_YAML" ' .group_wait="5s" | .group_threshold=2 | .output_file=strenv(tempfile) | @@ 
-58,7 +58,7 @@ setup() { } @test "expected 1 notification" { - rune -0 cat "${tempfile}" + rune -0 cat "$tempfile" assert_output - <<-EOT

1.2.3.4 will get ban for next 30s for triggering manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX' on machine githubciXXXXXXXXXXXXXXXXXXXXXXXX.

CrowdSec CTI

1.2.3.5 will get ban for next 30s for triggering manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX' on machine githubciXXXXXXXXXXXXXXXXXXXXXXXX.

CrowdSec CTI

EOT diff --git a/test/bats/80_alerts.bats b/test/bats/80_alerts.bats index e0fdcb02271..6d84c1a1fce 100644 --- a/test/bats/80_alerts.bats +++ b/test/bats/80_alerts.bats @@ -73,9 +73,9 @@ teardown() { rune -0 cscli alerts list -o raw <(output) rune -0 grep 10.20.30.40 <(output) rune -0 cut -d, -f1 <(output) - ALERT_ID="${output}" + ALERT_ID="$output" - rune -0 cscli alerts inspect "${ALERT_ID}" -o human + rune -0 cscli alerts inspect "$ALERT_ID" -o human rune -0 plaintext < <(output) assert_line --regexp '^#+$' assert_line --regexp "^ - ID *: ${ALERT_ID}$" @@ -93,10 +93,10 @@ teardown() { assert_line --regexp "^.* ID .* scope:value .* action .* expiration .* created_at .*$" assert_line --regexp "^.* Ip:10.20.30.40 .* ban .*$" - rune -0 cscli alerts inspect "${ALERT_ID}" -o human --details + rune -0 cscli alerts inspect "$ALERT_ID" -o human --details # XXX can we have something here? - rune -0 cscli alerts inspect "${ALERT_ID}" -o raw + rune -0 cscli alerts inspect "$ALERT_ID" -o raw assert_line --regexp "^ *capacity: 0$" assert_line --regexp "^ *id: ${ALERT_ID}$" assert_line --regexp "^ *origin: cscli$" @@ -106,11 +106,11 @@ teardown() { assert_line --regexp "^ *type: ban$" assert_line --regexp "^ *value: 10.20.30.40$" - rune -0 cscli alerts inspect "${ALERT_ID}" -o json + rune -0 cscli alerts inspect "$ALERT_ID" -o json alert=${output} - rune jq -c '.decisions[] | [.origin,.scenario,.scope,.simulated,.type,.value]' <<<"${alert}" + rune jq -c '.decisions[] | [.origin,.scenario,.scope,.simulated,.type,.value]' <<<"$alert" assert_output --regexp "\[\"cscli\",\"manual 'ban' from 'githubciXXXXXXXXXXXXXXXXXXXXXXXX.*'\",\"Ip\",false,\"ban\",\"10.20.30.40\"\]" - rune jq -c '.source' <<<"${alert}" + rune jq -c '.source' <<<"$alert" assert_json '{ip:"10.20.30.40",scope:"Ip",value:"10.20.30.40"}' } @@ -188,7 +188,7 @@ teardown() { rune -0 cscli decisions add -i 10.20.30.40 -t ban rune -9 cscli decisions list --ip 10.20.30.40 -o json rune -9 jq -r '.[].decisions[].id' 
<(output) - DECISION_ID="${output}" + DECISION_ID="$output" ./instance-crowdsec stop rune -0 ./instance-db exec_sql "UPDATE decisions SET ... WHERE id=${DECISION_ID}" diff --git a/test/bats/81_alert_context.bats b/test/bats/81_alert_context.bats index df741f5f99c..69fb4158ffd 100644 --- a/test/bats/81_alert_context.bats +++ b/test/bats/81_alert_context.bats @@ -32,8 +32,8 @@ teardown() { #---------- @test "$FILE 1.1.1.172 has context" { - tmpfile=$(TMPDIR="${BATS_TEST_TMPDIR}" mktemp) - touch "${tmpfile}" + tmpfile=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp) + touch "$tmpfile" ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') @@ -61,9 +61,9 @@ teardown() { ./instance-crowdsec start sleep 2 - fake_log >>"${tmpfile}" + fake_log >>"$tmpfile" sleep 2 - rm -f -- "${tmpfile}" + rm -f -- "$tmpfile" rune -0 cscli alerts list -o json rune -0 jq '.[0].id' <(output) diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index 8a2b9d3ae6f..f0213a8a236 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -166,7 +166,7 @@ teardown() { # silently discarding (but logging) invalid decisions rune -0 cscli alerts delete --all - truncate -s 0 "${LOGFILE}" + truncate -s 0 "$LOGFILE" rune -0 cscli decisions import -i - --format values <<-EOT whatever @@ -182,7 +182,7 @@ teardown() { rune -0 cscli alerts delete --all - truncate -s 0 "${LOGFILE}" + truncate -s 0 "$LOGFILE" rune -0 cscli decisions import -i - --format values <<-EOT 1.2.3.4 diff --git a/test/lib/config/config-local b/test/lib/config/config-local index e5cfaf997be..693b157f531 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -80,7 +80,6 @@ config_generate() { .common.daemonize=true | del(.common.pid_dir) | .common.log_level="info" | - .common.force_color_logs=true | .common.log_dir=strenv(LOG_DIR) | .config_paths.config_dir=strenv(CONFIG_DIR) | .config_paths.data_dir=strenv(DATA_DIR) | From 14dc26b128042cc802ddaed0e277e27359022670 Mon Sep 17 
00:00:00 2001 From: Blesswin Samuel Date: Fri, 10 May 2024 18:30:34 +0530 Subject: [PATCH 120/581] Add ability to configure channel, username, icon emoji/url in slack notification (#2975) * Add ability to configure channel, username, icon emoji/url in slack cfg * Update slack.yaml --------- Co-authored-by: Thibault "bui" Koechlin Co-authored-by: Thibault "bui" Koechlin --- cmd/notification-slack/main.go | 18 +++++++++++++----- cmd/notification-slack/slack.yaml | 6 ++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/cmd/notification-slack/main.go b/cmd/notification-slack/main.go index 1e73d005fd8..fba1b33e334 100644 --- a/cmd/notification-slack/main.go +++ b/cmd/notification-slack/main.go @@ -14,9 +14,13 @@ import ( ) type PluginConfig struct { - Name string `yaml:"name"` - Webhook string `yaml:"webhook"` - LogLevel *string `yaml:"log_level"` + Name string `yaml:"name"` + Webhook string `yaml:"webhook"` + Channel string `yaml:"channel"` + Username string `yaml:"username"` + IconEmoji string `yaml:"icon_emoji"` + IconURL string `yaml:"icon_url"` + LogLevel *string `yaml:"log_level"` } type Notify struct { ConfigByName map[string]PluginConfig @@ -43,8 +47,12 @@ func (n *Notify) Notify(ctx context.Context, notification *protobufs.Notificatio logger.Info(fmt.Sprintf("found notify signal for %s config", notification.Name)) logger.Debug(fmt.Sprintf("posting to %s webhook, message %s", cfg.Webhook, notification.Text)) - err := slack.PostWebhookContext(ctx, n.ConfigByName[notification.Name].Webhook, &slack.WebhookMessage{ - Text: notification.Text, + err := slack.PostWebhookContext(ctx, cfg.Webhook, &slack.WebhookMessage{ + Text: notification.Text, + Channel: cfg.Channel, + Username: cfg.Username, + IconEmoji: cfg.IconEmoji, + IconURL: cfg.IconURL, }) if err != nil { logger.Error(err.Error()) diff --git a/cmd/notification-slack/slack.yaml b/cmd/notification-slack/slack.yaml index 4768e869780..677d4b757c1 100644 --- a/cmd/notification-slack/slack.yaml +++ 
b/cmd/notification-slack/slack.yaml @@ -28,6 +28,12 @@ format: | webhook: +# API request data as defined by the Slack webhook API. +#channel: +#username: +#icon_emoji: +#icon_url: + --- # type: slack From 26b3410ef1c807a9cd4bde4c66a46d73c38f60ed Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 May 2024 09:48:08 +0200 Subject: [PATCH 121/581] CI: improve test "cscli machines prune" (#2992) --- test/bats/30_machines.bats | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 2a04cc9bc20..415e5f8693f 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -94,7 +94,10 @@ teardown() { @test "cscli machines prune" { rune -0 cscli metrics - rune -0 cscli machines prune + # if the fixture has been created some time ago, + # the machines may be old enough to trigger a user prompt. + # make sure the prune duration is high enough. + rune -0 cscli machines prune --duration 1000000h assert_output 'No machines to prune.' 
rune -0 cscli machines list -o json From b4e79aaf0ad86361cdcd03776061bf8d6ff7b3a9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 May 2024 10:01:48 +0200 Subject: [PATCH 122/581] use go 1.22.3 (#2994) * use go 1.22.3; alpine 3.19 * revert to alpine 3.18 --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 72694276ac1..941d4d15f13 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index e52c4759254..8cc544523ef 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index c603e468a7b..87101be0159 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml 
b/.github/workflows/bats-sqlite-coverage.yml index 345f8761868..4f724656ed6 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -28,7 +28,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 0ea60a31dd5..9bca2122513 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index e4c078f3762..cee77161a8e 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index d8ec22e6a02..e707062b5ad 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3674358ff28..3496674e7b9 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index d2a5df0535c..bf8d5fe5f1a 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.2" + go-version: "1.22.3" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index d67d534fe3d..aabb0a24fc5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.2-alpine3.18 AS build +FROM golang:1.22.3-alpine3.18 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 4fc3a923823..655eeb0225d 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.2-bookworm AS build +FROM golang:1.22.3-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 99909bbb467..0fc53242a9f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.2' + version: '1.22.3' - pwsh: | choco install -y make From e4a8d3b99e20b113b947156cadc3f41ecdfa7f59 Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 May 2024 12:01:17 +0200 Subject: [PATCH 123/581] deb,rpm: include empty directory etc/crowdsec/acquis.d (#2997) --- debian/rules | 1 + rpm/SPECS/crowdsec.spec | 1 + wizard.sh | 2 ++ 3 files changed, 4 insertions(+) diff --git a/debian/rules b/debian/rules index 655af3dfeea..50daae08754 100755 --- a/debian/rules +++ b/debian/rules @@ -17,6 +17,7 @@ override_dh_auto_install: mkdir -p debian/crowdsec/usr/bin mkdir -p debian/crowdsec/etc/crowdsec + mkdir -p debian/crowdsec/etc/crowdsec/acquis.d mkdir -p debian/crowdsec/usr/share/crowdsec mkdir -p debian/crowdsec/etc/crowdsec/hub/ mkdir -p debian/crowdsec/usr/share/crowdsec/config diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index 60884dfd4e6..0a20dc97deb 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -37,6 +37,7 @@ sed -i "s#/usr/local/lib/crowdsec/plugins/#%{_libdir}/%{name}/plugins/#g" config %install rm -rf %{buildroot} +mkdir -p %{buildroot}/etc/crowdsec/acquis.d mkdir -p %{buildroot}/etc/crowdsec/hub mkdir -p %{buildroot}/etc/crowdsec/patterns mkdir -p %{buildroot}/etc/crowdsec/console/ diff --git a/wizard.sh b/wizard.sh index 598f0c765f0..a3afc789bf5 100755 --- a/wizard.sh +++ b/wizard.sh @@ -409,12 +409,14 @@ check_cs_version () { install_crowdsec() { mkdir -p "${CROWDSEC_DATA_DIR}" (cd config && find patterns -type f -exec install -Dm 644 "{}" "${CROWDSEC_CONFIG_PATH}/{}" \; && cd ../) || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/acquis.d" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/scenarios" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/postoverflows" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/collections" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/patterns" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/appsec-configs" || exit mkdir -p "${CROWDSEC_CONFIG_PATH}/appsec-rules" || exit + mkdir -p "${CROWDSEC_CONFIG_PATH}/contexts" || exit mkdir -p "${CROWDSEC_CONSOLE_DIR}" || exit # tmp From 
e3c6a5ba70df511c212d8927ed24aaade56271f0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 May 2024 17:09:30 +0200 Subject: [PATCH 124/581] LAPI: support CRL files with multiple PEM blocks (#3002) --- pkg/apiserver/middlewares/v1/tls_auth.go | 63 +++++++++++----------- test/bats/11_bouncers_tls.bats | 50 ++++++++++++------ test/bats/30_machines_tls.bats | 67 +++++++++++++++--------- 3 files changed, 108 insertions(+), 72 deletions(-) diff --git a/pkg/apiserver/middlewares/v1/tls_auth.go b/pkg/apiserver/middlewares/v1/tls_auth.go index bd2c4bb30e7..c2fcc9c7264 100644 --- a/pkg/apiserver/middlewares/v1/tls_auth.go +++ b/pkg/apiserver/middlewares/v1/tls_auth.go @@ -5,6 +5,7 @@ import ( "crypto" "crypto/x509" "encoding/pem" + "errors" "fmt" "io" "net/http" @@ -135,31 +136,35 @@ func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, bool) { return false, false } - crlBinary, rest := pem.Decode(crlContent) - if len(rest) > 0 { - ta.logger.Warn("CRL file contains more than one PEM block, ignoring the rest") - } + var crlBlock *pem.Block - crl, err := x509.ParseRevocationList(crlBinary.Bytes) - if err != nil { - ta.logger.Errorf("could not parse CRL file, skipping check: %s", err) - return false, false - } + for { + crlBlock, crlContent = pem.Decode(crlContent) + if crlBlock == nil { + break // no more PEM blocks + } - now := time.Now().UTC() + crl, err := x509.ParseRevocationList(crlBlock.Bytes) + if err != nil { + ta.logger.Errorf("could not parse a PEM block in CRL file, skipping: %s", err) + continue + } - if now.After(crl.NextUpdate) { - ta.logger.Warn("CRL has expired, will still validate the cert against it.") - } + now := time.Now().UTC() - if now.Before(crl.ThisUpdate) { - ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.") - } + if now.After(crl.NextUpdate) { + ta.logger.Warn("CRL has expired, will still validate the cert against it.") + } - for _, revoked := range 
crl.RevokedCertificateEntries { - if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { - ta.logger.Warn("client certificate is revoked by CRL") - return true, true + if now.Before(crl.ThisUpdate) { + ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.") + } + + for _, revoked := range crl.RevokedCertificateEntries { + if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { + ta.logger.Warn("client certificate is revoked by CRL") + return true, true + } } } @@ -181,9 +186,7 @@ func (ta *TLSAuth) isRevoked(cert *x509.Certificate, issuer *x509.Certificate) ( } revokedByOCSP, cacheOCSP := ta.isOCSPRevoked(cert, issuer) - revokedByCRL, cacheCRL := ta.isCRLRevoked(cert) - revoked := revokedByOCSP || revokedByCRL if cacheOCSP && cacheCRL { @@ -203,8 +206,8 @@ func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) ( revoked, err := ta.isRevoked(cert, issuer) if err != nil { - //Fail securely, if we can't check the revocation status, let's consider the cert invalid - //We may change this in the future based on users feedback, but this seems the most sensible thing to do + // Fail securely, if we can't check the revocation status, let's consider the cert invalid + // We may change this in the future based on users feedback, but this seems the most sensible thing to do return true, fmt.Errorf("could not check for client certification revocation status: %w", err) } @@ -213,12 +216,12 @@ func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) ( func (ta *TLSAuth) SetAllowedOu(allowedOus []string) error { for _, ou := range allowedOus { - //disallow empty ou + // disallow empty ou if ou == "" { - return fmt.Errorf("empty ou isn't allowed") + return errors.New("empty ou isn't allowed") } - //drop & warn on duplicate ou + // drop & warn on duplicate ou ok := true for _, validOu := range ta.AllowedOUs { @@ -238,11 +241,11 @@ func (ta *TLSAuth) SetAllowedOu(allowedOus []string) error { } func (ta *TLSAuth) 
ValidateCert(c *gin.Context) (bool, string, error) { - //Checks cert validity, Returns true + CN if client cert matches requested OU + // Checks cert validity, Returns true + CN if client cert matches requested OU var clientCert *x509.Certificate if c.Request.TLS == nil || len(c.Request.TLS.PeerCertificates) == 0 { - //do not error if it's not TLS or there are no peer certs + // do not error if it's not TLS or there are no peer certs return false, "", nil } @@ -279,7 +282,7 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) { return true, clientCert.Subject.CommonName, nil } - return false, "", fmt.Errorf("no verified cert in request") + return false, "", errors.New("no verified cert in request") } func NewTLSAuth(allowedOus []string, crlPath string, cacheExpiration time.Duration, logger *log.Entry) (*TLSAuth, error) { diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 438bec40242..84de3d2f488 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -13,24 +13,37 @@ setup_file() { CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" export CFDIR - #gen the CA + # Generate the CA cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca" - #gen an intermediate + + # Generate an intermediate cfssl gencert --initca "${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" cfssl sign -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - #gen server cert for crowdsec with the intermediate + + # Generate server cert for crowdsec with the intermediate cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server" - #gen client cert for the bouncer + + # Generate client cert for the bouncer 
cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer" - #gen client cert for the bouncer with an invalid OU + + # Generate client cert for the bouncer with an invalid OU cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_bad_ou" - #gen client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate + + # Generate client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_invalid" - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_revoked" - serial="$(openssl x509 -noout -serial -in "${tmpdir}/bouncer_revoked.pem" | cut -d '=' -f2)" - echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials.txt" - cfssl gencrl "${tmpdir}/serials.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl.pem" + # Generate revoked client certs + for cert_name in "revoked_1" "revoked_2"; do + cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" + serial="$(openssl x509 -noout -serial -in "${tmpdir}/${cert_name}.pem" | cut -d '=' -f2)" + echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials_${cert_name}.txt" + done + + # Generate separate CRL blocks and concatenate them + for 
cert_name in "revoked_1" "revoked_2"; do + cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl_${cert_name}.pem" + done + cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" @@ -90,11 +103,14 @@ teardown() { } @test "simulate one bouncer request with a revoked certificate" { - truncate_log - rune -0 curl -i -s --cert "${tmpdir}/bouncer_revoked.pem" --key "${tmpdir}/bouncer_revoked-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 - assert_log --partial "client certificate is revoked by CRL" - assert_log --partial "client certificate for CN=localhost OU=[bouncer-ou] is revoked" - assert_output --partial "access forbidden" - rune -0 cscli bouncers list -o json - assert_output "[]" + # we have two certificates revoked by different CRL blocks + for cert_name in "revoked_1" "revoked_2"; do + truncate_log + rune -0 curl -i -s --cert "${tmpdir}/${cert_name}.pem" --key "${tmpdir}/${cert_name}-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_log --partial "client certificate is revoked by CRL" + assert_log --partial "client certificate for CN=localhost OU=[bouncer-ou] is revoked" + assert_output --partial "access forbidden" + rune -0 cscli bouncers list -o json + assert_output "[]" + done } diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 877f8672b24..b6d089d68e7 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -16,24 +16,37 @@ setup_file() { CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" export CFDIR - #gen the CA + # Generate the CA cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca" - #gen an intermediate + + # Generate an intermediate cfssl gencert --initca 
"${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" cfssl sign -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - #gen server cert for crowdsec with the intermediate + + # Generate server cert for crowdsec with the intermediate cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server" - #gen client cert for the agent + + # Generate client cert for the agent cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent" - #gen client cert for the agent with an invalid OU + + # Generate client cert for the agent with an invalid OU cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_bad_ou" - #gen client cert for the agent directly signed by the CA, it should be refused by crowdsec as uses the intermediate + + # Generate client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_invalid" - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_revoked" - serial="$(openssl x509 -noout -serial -in "${tmpdir}/agent_revoked.pem" | cut -d '=' -f2)" - echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials.txt" - cfssl gencrl 
"${tmpdir}/serials.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl.pem" + # Generate revoked client cert + for cert_name in "revoked_1" "revoked_2"; do + cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" + serial="$(openssl x509 -noout -serial -in "${tmpdir}/${cert_name}.pem" | cut -d '=' -f2)" + echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials_${cert_name}.txt" + done + + # Generate separate CRL blocks and concatenate them + for cert_name in "revoked_1" "revoked_2"; do + cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl_${cert_name}.pem" + done + cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" @@ -181,19 +194,23 @@ teardown() { } @test "revoked cert for agent" { - truncate_log - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' - .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | - .key_path=strenv(tmpdir) + "/agent_revoked-key.pem" | - .cert_path=strenv(tmpdir) + "/agent_revoked.pem" | - .url="https://127.0.0.1:8080" - ' - - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' - ./instance-crowdsec start - rune -1 cscli lapi status - assert_log --partial "client certificate is revoked by CRL" - assert_log --partial "client certificate for CN=localhost OU=[agent-ou] is revoked" - rune -0 cscli machines list -o json - assert_output '[]' + # we have two certificates revoked by different CRL blocks + for cert_name in "revoked_1" "revoked_2"; do + truncate_log + cert_name="$cert_name" config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | + .key_path=strenv(tmpdir) + "/" + 
strenv(cert_name) + "-key.pem" | + .cert_path=strenv(tmpdir) + "/" + strenv(cert_name) + ".pem" | + .url="https://127.0.0.1:8080" + ' + + config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + ./instance-crowdsec start + rune -1 cscli lapi status + assert_log --partial "client certificate is revoked by CRL" + assert_log --partial "client certificate for CN=localhost OU=[agent-ou] is revoked" + rune -0 cscli machines list -o json + assert_output '[]' + ./instance-crowdsec stop + done } From 11893b2915c9ae9d153cc6a20355e356b08599db Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 14 May 2024 14:55:08 +0100 Subject: [PATCH 125/581] [cscli] Add tab completion to notifications [test, inspect] (#2765) * Add cscli notifications test ValidArgsFunction for tab completion * Split function to reusable and pass to inspect since both rely on a valid plugin name, fix short/long of list command * Short should be short * Removed redundant prerune since Args with validate it for us * Make english more english * fix: bad merge now fixed --------- Co-authored-by: Thibault "bui" Koechlin --- cmd/crowdsec-cli/notifications.go | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index 0b5ee537806..cb102df6928 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -10,6 +10,7 @@ import ( "net/url" "os" "path/filepath" + "slices" "strconv" "strings" "time" @@ -156,8 +157,8 @@ func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, func (cli *cliNotifications) NewListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", - Short: "list active notifications plugins", - Long: `list active notifications plugins`, + Short: "list notifications plugins", + Long: `list notifications plugins and their status (active or not)`, Example: `cscli notifications list`, Args: cobra.ExactArgs(0), 
DisableAutoGenTag: true, @@ -205,10 +206,11 @@ func (cli *cliNotifications) NewListCmd() *cobra.Command { func (cli *cliNotifications) NewInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "inspect", - Short: "Inspect active notifications plugin configuration", - Long: `Inspect active notifications plugin and show configuration`, + Short: "Inspect notifications plugin", + Long: `Inspect notifications plugin and show configuration`, Example: `cscli notifications inspect `, Args: cobra.ExactArgs(1), + ValidArgsFunction: cli.notificationConfigFilter, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { cfg := cli.cfg() @@ -243,7 +245,21 @@ func (cli *cliNotifications) NewInspectCmd() *cobra.Command { return cmd } -func (cli *cliNotifications) NewTestCmd() *cobra.Command { +func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ncfgs, err := cli.getProfilesConfigs() + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + var ret []string + for k := range ncfgs { + if strings.Contains(k, toComplete) && !slices.Contains(args, k) { + ret = append(ret, k) + } + } + return ret, cobra.ShellCompDirectiveNoFileComp +} + +func (cli cliNotifications) NewTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb @@ -253,10 +269,11 @@ func (cli *cliNotifications) NewTestCmd() *cobra.Command { cmd := &cobra.Command{ Use: "test [plugin name]", Short: "send a generic test alert to notification plugin", - Long: `send a generic test alert to a notification plugin to test configuration even if is not active`, + Long: `send a generic test alert to a notification plugin even if it is not active in profiles`, Example: `cscli notifications test [plugin_name]`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, + ValidArgsFunction: cli.notificationConfigFilter, PreRunE: func(_ *cobra.Command, args []string) error { cfg := 
cli.cfg() pconfigs, err := cli.getPluginConfigs() From b5e5078fc72c32bd94e86b90ecf27698eceb6fb5 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 14 May 2024 17:31:15 +0100 Subject: [PATCH 126/581] fix: add sslmode check and apend flag to conn string (#3009) --- pkg/csconfig/database.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 9a9ed9a9f7f..2fe610eba68 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -48,7 +48,7 @@ type AuthGCCfg struct { } type FlushDBCfg struct { - MaxItems *int `yaml:"max_items,omitempty"` + MaxItems *int `yaml:"max_items,omitempty"` // We could unmarshal as time.Duration, but alert filters right now are a map of strings MaxAge *string `yaml:"max_age,omitempty"` BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` @@ -131,6 +131,9 @@ func (d *DatabaseCfg) ConnectionString() string { } else { connString = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=True", d.User, d.Password, d.Host, d.Port, d.DbName) } + if d.Sslmode != "" { + connString = fmt.Sprintf("%s&tls=%s", connString, d.Sslmode) + } case "postgres", "postgresql", "pgx": if d.isSocketConfig() { connString = fmt.Sprintf("host=%s user=%s dbname=%s password=%s", d.DbPath, d.User, d.DbName, d.Password) From 6b978b09b3a6fe1fc6001c2c40bb824a78fbbe06 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 15 May 2024 10:04:42 +0200 Subject: [PATCH 127/581] docker: conditionally update hub (#2948) --- .github/workflows/docker-tests.yml | 14 +++--- Dockerfile | 1 + Dockerfile.debian | 1 + docker/docker_start.sh | 69 ++++++++++++++++++++++++++---- docker/preload-hub-items | 22 ++++++++++ test/bin/preload-hub-items | 21 ++------- 6 files changed, 94 insertions(+), 34 deletions(-) create mode 100755 docker/preload-hub-items diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 3e87d3ba4f1..d3ae4f90d79 100644 --- a/.github/workflows/docker-tests.yml 
+++ b/.github/workflows/docker-tests.yml @@ -59,15 +59,15 @@ jobs: cd docker/test python -m pip install --upgrade pipenv wheel - #- name: "Cache virtualenvs" - # id: cache-pipenv - # uses: actions/cache@v4 - # with: - # path: ~/.local/share/virtualenvs - # key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} + - name: "Cache virtualenvs" + id: cache-pipenv + uses: actions/cache@v4 + with: + path: ~/.local/share/virtualenvs + key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} - name: "Install dependencies" - #if: steps.cache-pipenv.outputs.cache-hit != 'true' + if: steps.cache-pipenv.outputs.cache-hit != 'true' run: | cd docker/test pipenv install --deploy diff --git a/Dockerfile b/Dockerfile index aabb0a24fc5..faa50f3f79a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,6 +25,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ + ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/Dockerfile.debian b/Dockerfile.debian index 655eeb0225d..0ef1727f3e6 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -30,6 +30,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ cscli hub update && \ + ./docker/preload-hub-items && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/docker/docker_start.sh b/docker/docker_start.sh index dd96184ccbc..954dbd9fc79 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -6,6 +6,9 @@ set -e shopt -s inherit_errexit +# Note that "if function_name" in bash matches when the function returns 0, +# meaning successful execution. + # match true, TRUE, True, tRuE, etc. 
istrue() { case "$(echo "$1" | tr '[:upper:]' '[:lower:]')" in @@ -50,6 +53,52 @@ cscli() { command cscli -c "$CONFIG_FILE" "$@" } +run_hub_update() { + index_modification_time=$(stat -c %Y /etc/crowdsec/hub/.index.json 2>/dev/null) + # Run cscli hub update if no date or if the index file is older than 24h + if [ -z "$index_modification_time" ] || [ $(( $(date +%s) - index_modification_time )) -gt 86400 ]; then + cscli hub update + else + echo "Skipping hub update, index file is recent" + fi +} + +is_mounted() { + path=$(readlink -f "$1") + mounts=$(awk '{print $2}' /proc/mounts) + while true; do + if grep -qE ^"$path"$ <<< "$mounts"; then + echo "$path was found in a volume" + return 0 + fi + path=$(dirname "$path") + if [ "$path" = "/" ]; then + return 1 + fi + done + return 1 #unreachable +} + +run_hub_update_if_from_volume() { + if is_mounted "/etc/crowdsec/hub/.index.json"; then + echo "Running hub update" + run_hub_update + else + echo "Skipping hub update, index file is not in a volume" + fi +} + +run_hub_upgrade_if_from_volume() { + isfalse "$NO_HUB_UPGRADE" || return 0 + if is_mounted "/var/lib/crowdsec/data"; then + echo "Running hub upgrade" + cscli hub upgrade + else + echo "Skipping hub upgrade, data directory is not in a volume" + fi + +} + # conf_get [file_path] # retrieve a value from a file (by default $CONFIG_FILE) conf_get() { @@ -119,7 +168,12 @@ cscli_if_clean() { error_only="" echo "Running: cscli $error_only $itemtype $action \"$obj\" $*" # shellcheck disable=SC2086 - cscli $error_only "$itemtype" "$action" "$obj" "$@" + if ! 
cscli $error_only "$itemtype" "$action" "$obj" "$@"; then + echo "Failed to $action $itemtype/$obj, running hub update before retrying" + run_hub_update + # shellcheck disable=SC2086 + cscli $error_only "$itemtype" "$action" "$obj" "$@" + fi fi done } @@ -280,9 +334,9 @@ fi if [ "$GID" != "" ]; then if istrue "$(conf_get '.db_config.type == "sqlite"')"; then # don't fail if the db is not there yet - chown -f ":$GID" "$(conf_get '.db_config.db_path')" 2>/dev/null \ - && echo "sqlite database permissions updated" \ - || true + if chown -f ":$GID" "$(conf_get '.db_config.db_path')" 2>/dev/null; then + echo "sqlite database permissions updated" + fi fi fi @@ -304,11 +358,8 @@ conf_set_if "$PLUGIN_DIR" '.config_paths.plugin_dir = strenv(PLUGIN_DIR)' ## Install hub items -cscli hub update || true - -if isfalse "$NO_HUB_UPGRADE"; then - cscli hub upgrade || true -fi +run_hub_update_if_from_volume || true +run_hub_upgrade_if_from_volume || true cscli_if_clean parsers install crowdsecurity/docker-logs cscli_if_clean parsers install crowdsecurity/cri-logs diff --git a/docker/preload-hub-items b/docker/preload-hub-items new file mode 100755 index 00000000000..45155d17af9 --- /dev/null +++ b/docker/preload-hub-items @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -eu + +# pre-download everything but don't install anything + +echo "Pre-downloading Hub content..." + +types=$(cscli hub types -o raw) + +for itemtype in $types; do + ALL_ITEMS=$(cscli "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') + if [[ -n "${ALL_ITEMS}" ]]; then + #shellcheck disable=SC2086 + cscli "$itemtype" install \ + $ALL_ITEMS \ + --download-only \ + --error + fi +done + +echo " done." 
\ No newline at end of file diff --git a/test/bin/preload-hub-items b/test/bin/preload-hub-items index 14e9cff998c..319544d843d 100755 --- a/test/bin/preload-hub-items +++ b/test/bin/preload-hub-items @@ -9,20 +9,12 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # pre-download everything but don't install anything -echo -n "Purging existing hub..." +echo "Pre-downloading Hub content..." types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - "$CSCLI" "${itemtype}" delete --all --error --purge --force -done - -echo " done." - -echo -n "Pre-downloading Hub content..." - -for itemtype in $types; do - ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | jq --arg itemtype "$itemtype" -r '.[$itemtype][].name') + ALL_ITEMS=$("$CSCLI" "$itemtype" list -a -o json | itemtype="$itemtype" yq '.[env(itemtype)][] | .name') if [[ -n "${ALL_ITEMS}" ]]; then #shellcheck disable=SC2086 "$CSCLI" "$itemtype" install \ @@ -32,11 +24,4 @@ for itemtype in $types; do fi done -# XXX: download-only works only for collections, not for parsers, scenarios, postoverflows. -# so we have to delete the links manually, and leave the downloaded files in place - -for itemtype in $types; do - "$CSCLI" "$itemtype" delete --all --error -done - -echo " done." +echo " done." 
\ No newline at end of file From ecd82ecfbd7dc09514029ef1cd99267b4c84ad0b Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Wed, 15 May 2024 09:21:45 +0100 Subject: [PATCH 128/581] feat: File notification plugin (#2932) * wip: basic impl of file notification no log rotate but might now do it :shrug: * wip: ticker to 2 seconds and lower some log levels * wip: remove redundant logrus formatter * wip: the plugin should not handle it own data queue since the plugin process may timeout, so instead have a function that uses said context and loop whilst locking the filewriter this may not be the best way :shrug:, however, I dont want multiple notifications to attempt to reopen the file if it has been rotated outside of the plugin context * wip: impl log rotation which checks on check append, however, this may cause some issues in slow systems as the mutex lock doesnt give up until the file is rotated, however, the plugin looks for context and will give up if the plugin broker decides its timeout and will retry once the plugin has pushed again * wip: update yaml dep * wip: me no english great * wip: even if the file has been rotated outside our control we should still compute the file size * wip: improve context handling with creating a custom io writer struct which checks the context before attempting to write * wip: used return byte count instead of calling a conversion again * wip: actually check the enabled flag on log rotate * wip: changed my mind, we check when we check file size * wip: use io copy instead for memory alloc * fix: add notification file to deb/rpm build --- cmd/notification-file/Makefile | 17 +++ cmd/notification-file/file.yaml | 23 +++ cmd/notification-file/main.go | 250 ++++++++++++++++++++++++++++++++ debian/install | 1 + debian/rules | 1 + rpm/SPECS/crowdsec.spec | 5 +- wizard.sh | 4 + 7 files changed, 300 insertions(+), 1 deletion(-) create mode 100644 cmd/notification-file/Makefile create mode 100644 cmd/notification-file/file.yaml create mode 
100644 cmd/notification-file/main.go diff --git a/cmd/notification-file/Makefile b/cmd/notification-file/Makefile new file mode 100644 index 00000000000..4504328c49a --- /dev/null +++ b/cmd/notification-file/Makefile @@ -0,0 +1,17 @@ +ifeq ($(OS), Windows_NT) + SHELL := pwsh.exe + .SHELLFLAGS := -NoProfile -Command + EXT = .exe +endif + +GO = go +GOBUILD = $(GO) build + +BINARY_NAME = notification-file$(EXT) + +build: clean + $(GOBUILD) $(LD_OPTS) -o $(BINARY_NAME) + +.PHONY: clean +clean: + @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/cmd/notification-file/file.yaml b/cmd/notification-file/file.yaml new file mode 100644 index 00000000000..61c77b9eb49 --- /dev/null +++ b/cmd/notification-file/file.yaml @@ -0,0 +1,23 @@ +# Don't change this +type: file + +name: file_default # this must match with the registered plugin in the profile +log_level: info # Options include: trace, debug, info, warn, error, off + +# This template render all events as ndjson +format: | + {{range . -}} + { "time": "{{.StopAt}}", "program": "crowdsec", "alert": {{. | toJson }} } + {{ end -}} + +# group_wait: # duration to wait collecting alerts before sending to this plugin, eg "30s" +# group_threshold: # if alerts exceed this, then the plugin will be sent the message. 
eg "10" + +#Use full path EG /tmp/crowdsec_alerts.json or %TEMP%\crowdsec_alerts.json +log_path: "/tmp/crowdsec_alerts.json" +rotate: + enabled: true # Change to false if you want to handle log rotate on system basis + max_size: 500 # in MB + max_files: 5 + max_age: 5 + compress: true diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go new file mode 100644 index 00000000000..467bdd4a4ff --- /dev/null +++ b/cmd/notification-file/main.go @@ -0,0 +1,250 @@ +package main + +import ( + "compress/gzip" + "context" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" + "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "gopkg.in/yaml.v3" +) + +var ( + FileWriter *os.File + FileWriteMutex *sync.Mutex + FileSize int64 +) + +type FileWriteCtx struct { + Ctx context.Context + Writer io.Writer +} + +func (w *FileWriteCtx) Write(p []byte) (n int, err error) { + if err := w.Ctx.Err(); err != nil { + return 0, err + } + return w.Writer.Write(p) +} + +type PluginConfig struct { + Name string `yaml:"name"` + LogLevel string `yaml:"log_level"` + LogPath string `yaml:"log_path"` + LogRotate LogRotate `yaml:"rotate"` +} + +type LogRotate struct { + MaxSize int `yaml:"max_size"` + MaxAge int `yaml:"max_age"` + MaxFiles int `yaml:"max_files"` + Enabled bool `yaml:"enabled"` + Compress bool `yaml:"compress"` +} + +type FilePlugin struct { + PluginConfigByName map[string]PluginConfig +} + +var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ + Name: "file-plugin", + Level: hclog.LevelFromString("INFO"), + Output: os.Stderr, + JSONFormat: true, +}) + +func (r *LogRotate) rotateLogs(cfg PluginConfig) { + // Rotate the log file + err := r.rotateLogFile(cfg.LogPath, r.MaxFiles) + if err != nil { + logger.Error("Failed to rotate log file", "error", err) + } + // Reopen the FileWriter + FileWriter.Close() + FileWriter, err = os.OpenFile(cfg.LogPath, 
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + logger.Error("Failed to reopen log file", "error", err) + } + // Reset the file size + FileInfo, err := FileWriter.Stat() + if err != nil { + logger.Error("Failed to get file info", "error", err) + } + FileSize = FileInfo.Size() +} + +func (r *LogRotate) rotateLogFile(logPath string, maxBackups int) error { + // Rename the current log file + backupPath := logPath + "." + time.Now().Format("20060102-150405") + err := os.Rename(logPath, backupPath) + if err != nil { + return err + } + glob := logPath + ".*" + if r.Compress { + glob = logPath + ".*.gz" + err = compressFile(backupPath) + if err != nil { + return err + } + } + + // Remove old backups + files, err := filepath.Glob(glob) + if err != nil { + return err + } + + sort.Sort(sort.Reverse(sort.StringSlice(files))) + + for i, file := range files { + logger.Trace("Checking file", "file", file, "index", i, "maxBackups", maxBackups) + if i >= maxBackups { + logger.Trace("Removing file as over max backup count", "file", file) + os.Remove(file) + } else { + // Check the age of the file + fileInfo, err := os.Stat(file) + if err != nil { + return err + } + age := time.Since(fileInfo.ModTime()).Hours() + if age > float64(r.MaxAge*24) { + logger.Trace("Removing file as age was over configured amount", "file", file, "age", age) + os.Remove(file) + } + } + } + + return nil +} + +func compressFile(src string) error { + // Open the source file for reading + srcFile, err := os.Open(src) + if err != nil { + return err + } + defer srcFile.Close() + + // Create the destination file + dstFile, err := os.Create(src + ".gz") + if err != nil { + return err + } + defer dstFile.Close() + + // Create a gzip writer + gw := gzip.NewWriter(dstFile) + defer gw.Close() + + // Read the source file and write its contents to the gzip writer + _, err = io.Copy(gw, srcFile) + if err != nil { + return err + } + + // Delete the original (uncompressed) backup file + err = os.Remove(src) 
+ if err != nil { + return err + } + + return nil +} + +func WriteToFileWithCtx(ctx context.Context, cfg PluginConfig, log string) error { + FileWriteMutex.Lock() + defer FileWriteMutex.Unlock() + originalFileInfo, err := FileWriter.Stat() + if err != nil { + logger.Error("Failed to get file info", "error", err) + } + currentFileInfo, _ := os.Stat(cfg.LogPath) + if !os.SameFile(originalFileInfo, currentFileInfo) { + // The file has been rotated outside our control + logger.Info("Log file has been rotated or missing attempting to reopen it") + FileWriter.Close() + FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + FileInfo, err := FileWriter.Stat() + if err != nil { + return err + } + FileSize = FileInfo.Size() + logger.Info("Log file has been reopened successfully") + } + n, err := io.WriteString(&FileWriteCtx{Ctx: ctx, Writer: FileWriter}, log) + if err == nil { + FileSize += int64(n) + if FileSize > int64(cfg.LogRotate.MaxSize)*1024*1024 && cfg.LogRotate.Enabled { + logger.Debug("Rotating log file", "file", cfg.LogPath) + // Rotate the log file + cfg.LogRotate.rotateLogs(cfg) + } + } + return err +} + +func (s *FilePlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { + if _, ok := s.PluginConfigByName[notification.Name]; !ok { + return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) + } + cfg := s.PluginConfigByName[notification.Name] + + return &protobufs.Empty{}, WriteToFileWithCtx(ctx, cfg, notification.Text) +} + +func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { + d := PluginConfig{} + err := yaml.Unmarshal(config.Config, &d) + if err != nil { + logger.Error("Failed to unmarshal config", "error", err) + return &protobufs.Empty{}, err + } + FileWriteMutex = &sync.Mutex{} + FileWriter, err = os.OpenFile(d.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err 
!= nil { + logger.Error("Failed to open log file", "error", err) + return &protobufs.Empty{}, err + } + FileInfo, err := FileWriter.Stat() + if err != nil { + logger.Error("Failed to get file info", "error", err) + return &protobufs.Empty{}, err + } + FileSize = FileInfo.Size() + s.PluginConfigByName[d.Name] = d + logger.SetLevel(hclog.LevelFromString(d.LogLevel)) + return &protobufs.Empty{}, err +} + +func main() { + var handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "CROWDSEC_PLUGIN_KEY", + MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), + } + + sp := &FilePlugin{PluginConfigByName: make(map[string]PluginConfig)} + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: handshake, + Plugins: map[string]plugin.Plugin{ + "file": &protobufs.NotifierPlugin{ + Impl: sp, + }, + }, + GRPCServer: plugin.DefaultGRPCServer, + Logger: logger, + }) +} diff --git a/debian/install b/debian/install index 3153244b8e9..fa422cac8d9 100644 --- a/debian/install +++ b/debian/install @@ -11,3 +11,4 @@ cmd/notification-http/http.yaml etc/crowdsec/notifications/ cmd/notification-splunk/splunk.yaml etc/crowdsec/notifications/ cmd/notification-email/email.yaml etc/crowdsec/notifications/ cmd/notification-sentinel/sentinel.yaml etc/crowdsec/notifications/ +cmd/notification-file/file.yaml etc/crowdsec/notifications/ diff --git a/debian/rules b/debian/rules index 50daae08754..c11771282ea 100755 --- a/debian/rules +++ b/debian/rules @@ -31,6 +31,7 @@ override_dh_auto_install: install -m 551 cmd/notification-splunk/notification-splunk debian/crowdsec/usr/lib/crowdsec/plugins/ install -m 551 cmd/notification-email/notification-email debian/crowdsec/usr/lib/crowdsec/plugins/ install -m 551 cmd/notification-sentinel/notification-sentinel debian/crowdsec/usr/lib/crowdsec/plugins/ + install -m 551 cmd/notification-file/notification-file debian/crowdsec/usr/lib/crowdsec/plugins/ cp cmd/crowdsec/crowdsec debian/crowdsec/usr/bin cp cmd/crowdsec-cli/cscli 
debian/crowdsec/usr/bin diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index 0a20dc97deb..ab71b650d11 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -67,13 +67,14 @@ install -m 551 cmd/notification-http/notification-http %{buildroot}%{_libdir}/%{ install -m 551 cmd/notification-splunk/notification-splunk %{buildroot}%{_libdir}/%{name}/plugins/ install -m 551 cmd/notification-email/notification-email %{buildroot}%{_libdir}/%{name}/plugins/ install -m 551 cmd/notification-sentinel/notification-sentinel %{buildroot}%{_libdir}/%{name}/plugins/ +install -m 551 cmd/notification-file/notification-file %{buildroot}%{_libdir}/%{name}/plugins/ install -m 600 cmd/notification-slack/slack.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ install -m 600 cmd/notification-http/http.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ install -m 600 cmd/notification-splunk/splunk.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ install -m 600 cmd/notification-email/email.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ install -m 600 cmd/notification-sentinel/sentinel.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ - +install -m 600 cmd/notification-file/file.yaml %{buildroot}%{_sysconfdir}/crowdsec/notifications/ %clean rm -rf %{buildroot} @@ -88,6 +89,7 @@ rm -rf %{buildroot} %{_libdir}/%{name}/plugins/notification-splunk %{_libdir}/%{name}/plugins/notification-email %{_libdir}/%{name}/plugins/notification-sentinel +%{_libdir}/%{name}/plugins/notification-file %{_sysconfdir}/%{name}/patterns/linux-syslog %{_sysconfdir}/%{name}/patterns/ruby %{_sysconfdir}/%{name}/patterns/nginx @@ -123,6 +125,7 @@ rm -rf %{buildroot} %config(noreplace) %{_sysconfdir}/%{name}/notifications/splunk.yaml %config(noreplace) %{_sysconfdir}/%{name}/notifications/email.yaml %config(noreplace) %{_sysconfdir}/%{name}/notifications/sentinel.yaml +%config(noreplace) %{_sysconfdir}/%{name}/notifications/file.yaml %config(noreplace) 
%{_sysconfdir}/cron.daily/%{name} %{_unitdir}/%{name}.service diff --git a/wizard.sh b/wizard.sh index a3afc789bf5..226b4e0609b 100755 --- a/wizard.sh +++ b/wizard.sh @@ -82,12 +82,14 @@ SLACK_PLUGIN_BINARY="./cmd/notification-slack/notification-slack" SPLUNK_PLUGIN_BINARY="./cmd/notification-splunk/notification-splunk" EMAIL_PLUGIN_BINARY="./cmd/notification-email/notification-email" SENTINEL_PLUGIN_BINARY="./cmd/notification-sentinel/notification-sentinel" +FILE_PLUGIN_BINARY="./cmd/notification-file/notification-file" HTTP_PLUGIN_CONFIG="./cmd/notification-http/http.yaml" SLACK_PLUGIN_CONFIG="./cmd/notification-slack/slack.yaml" SPLUNK_PLUGIN_CONFIG="./cmd/notification-splunk/splunk.yaml" EMAIL_PLUGIN_CONFIG="./cmd/notification-email/email.yaml" SENTINEL_PLUGIN_CONFIG="./cmd/notification-sentinel/sentinel.yaml" +FILE_PLUGIN_CONFIG="./cmd/notification-file/file.yaml" BACKUP_DIR=$(mktemp -d) @@ -525,6 +527,7 @@ install_plugins(){ cp ${HTTP_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} cp ${EMAIL_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} cp ${SENTINEL_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} + cp ${FILE_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} if [[ ${DOCKER_MODE} == "false" ]]; then cp -n ${SLACK_PLUGIN_CONFIG} /etc/crowdsec/notifications/ @@ -532,6 +535,7 @@ install_plugins(){ cp -n ${HTTP_PLUGIN_CONFIG} /etc/crowdsec/notifications/ cp -n ${EMAIL_PLUGIN_CONFIG} /etc/crowdsec/notifications/ cp -n ${SENTINEL_PLUGIN_CONFIG} /etc/crowdsec/notifications/ + cp -n ${FILE_PLUGIN_CONFIG} /etc/crowdsec/notifications/ fi } From 41ec90ae8c6f38a11a99b2c0a07138717b4659af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 15 May 2024 10:37:25 +0200 Subject: [PATCH 129/581] make: remove unused targets (#2998) --- cmd/crowdsec-cli/Makefile | 14 ------------ cmd/crowdsec/Makefile | 45 --------------------------------------- wizard.sh | 1 - 3 files changed, 60 deletions(-) diff --git a/cmd/crowdsec-cli/Makefile b/cmd/crowdsec-cli/Makefile index 
392361ef82e..6d6e4da8dbd 100644 --- a/cmd/crowdsec-cli/Makefile +++ b/cmd/crowdsec-cli/Makefile @@ -8,8 +8,6 @@ GO = go GOBUILD = $(GO) build BINARY_NAME = cscli$(EXT) -PREFIX ?= "/" -BIN_PREFIX = $(PREFIX)"/usr/local/bin/" .PHONY: all all: clean build @@ -17,17 +15,5 @@ all: clean build build: clean $(GOBUILD) $(LD_OPTS) -o $(BINARY_NAME) -.PHONY: install -install: install-conf install-bin - -install-conf: - -install-bin: - @install -v -m 755 -D "$(BINARY_NAME)" "$(BIN_PREFIX)/$(BINARY_NAME)" || exit - -uninstall: - @$(RM) $(CSCLI_CONFIG) $(WIN_IGNORE_ERR) - @$(RM) $(BIN_PREFIX)$(BINARY_NAME) $(WIN_IGNORE_ERR) - clean: @$(RM) $(BINARY_NAME) $(WIN_IGNORE_ERR) diff --git a/cmd/crowdsec/Makefile b/cmd/crowdsec/Makefile index 7425d970ad1..39f807cab88 100644 --- a/cmd/crowdsec/Makefile +++ b/cmd/crowdsec/Makefile @@ -10,13 +10,6 @@ GOTEST = $(GO) test CROWDSEC_BIN = crowdsec$(EXT) # names longer than 15 chars break 'pgrep' -PREFIX ?= "/" -CFG_PREFIX = $(PREFIX)"/etc/crowdsec/config/" -BIN_PREFIX = $(PREFIX)"/usr/local/bin/" -DATA_PREFIX = $(PREFIX)"/var/run/crowdsec/" -PID_DIR = $(PREFIX)"/var/run/" - -SYSTEMD_PATH_FILE = "/etc/systemd/system/crowdsec.service" .PHONY: all all: clean test build @@ -29,41 +22,3 @@ test: clean: @$(RM) $(CROWDSEC_BIN) $(WIN_IGNORE_ERR) - -.PHONY: install -install: install-conf install-bin - -.PHONY: install-conf -install-conf: - mkdir -p $(DATA_PREFIX) || exit - (cd ../.. 
/ && find ./data -type f -exec install -Dm 755 "{}" "$(DATA_PREFIX){}" \; && cd ./cmd/crowdsec) || exit - (cd ../../config && find ./patterns -type f -exec install -Dm 755 "{}" "$(CFG_PREFIX){}" \; && cd ../cmd/crowdsec) || exit - mkdir -p "$(CFG_PREFIX)" || exit - mkdir -p "$(CFG_PREFIX)/parsers" || exit - mkdir -p "$(CFG_PREFIX)/scenarios" || exit - mkdir -p "$(CFG_PREFIX)/postoverflows" || exit - mkdir -p "$(CFG_PREFIX)/collections" || exit - mkdir -p "$(CFG_PREFIX)/patterns" || exit - install -v -m 755 -D ../../config/prod.yaml "$(CFG_PREFIX)" || exit - install -v -m 755 -D ../../config/dev.yaml "$(CFG_PREFIX)" || exit - install -v -m 755 -D ../../config/acquis.yaml "$(CFG_PREFIX)" || exit - install -v -m 755 -D ../../config/profiles.yaml "$(CFG_PREFIX)" || exit - install -v -m 755 -D ../../config/api.yaml "$(CFG_PREFIX)" || exit - mkdir -p $(PID_DIR) || exit - PID=$(PID_DIR) DATA=$(DATA_PREFIX)"/data/" CFG=$(CFG_PREFIX) envsubst < ../../config/prod.yaml > $(CFG_PREFIX)"/default.yaml" - -.PHONY: install-bin -install-bin: - install -v -m 755 -D "$(CROWDSEC_BIN)" "$(BIN_PREFIX)/$(CROWDSEC_BIN)" || exit - -.PHONY: systemd -systemd: install - CFG=$(CFG_PREFIX) PID=$(PID_DIR) BIN=$(BIN_PREFIX)"/"$(CROWDSEC_BIN) envsubst < ../../config/crowdsec.service > "$(SYSTEMD_PATH_FILE)" - systemctl daemon-reload - -.PHONY: uninstall -uninstall: - $(RM) $(CFG_PREFIX) $(WIN_IGNORE_ERR) - $(RM) $(DATA_PREFIX) $(WIN_IGNORE_ERR) - $(RM) "$(BIN_PREFIX)/$(CROWDSEC_BIN)" $(WIN_IGNORE_ERR) - $(RM) "$(SYSTEMD_PATH_FILE)" $(WIN_IGNORE_ERR) diff --git a/wizard.sh b/wizard.sh index 226b4e0609b..6e215365f6c 100755 --- a/wizard.sh +++ b/wizard.sh @@ -18,7 +18,6 @@ NC='\033[0m' SILENT="false" DOCKER_MODE="false" -CROWDSEC_RUN_DIR="/var/run" CROWDSEC_LIB_DIR="/var/lib/crowdsec" CROWDSEC_USR_DIR="/usr/local/lib/crowdsec" CROWDSEC_DATA_DIR="${CROWDSEC_LIB_DIR}/data" From ccab6e991001bbd3ac850ae30fd03d9237ae34df Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: 
Wed, 15 May 2024 10:38:36 +0200 Subject: [PATCH 130/581] bats: clean up extra output (#3008) --- test/bats/40_cold-logs.bats | 6 +++--- test/bats/40_live-ban.bats | 7 +++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/test/bats/40_cold-logs.bats b/test/bats/40_cold-logs.bats index 0e167d3d077..070a9eac5f1 100644 --- a/test/bats/40_cold-logs.bats +++ b/test/bats/40_cold-logs.bats @@ -14,9 +14,9 @@ setup_file() { # we reset config and data, and only run the daemon once for all the tests in this file ./instance-data load - cscli collections install crowdsecurity/sshd --error - cscli parsers install crowdsecurity/syslog-logs --error - cscli parsers install crowdsecurity/dateparse-enrich --error + cscli collections install crowdsecurity/sshd --error >/dev/null + cscli parsers install crowdsecurity/syslog-logs --error >/dev/null + cscli parsers install crowdsecurity/dateparse-enrich --error >/dev/null ./instance-crowdsec start } diff --git a/test/bats/40_live-ban.bats b/test/bats/40_live-ban.bats index 122ea05e41a..fb5fd1fd435 100644 --- a/test/bats/40_live-ban.bats +++ b/test/bats/40_live-ban.bats @@ -14,10 +14,9 @@ setup_file() { # we reset config and data, but run the daemon only in the tests that need it ./instance-data load - cscli collections install crowdsecurity/sshd --error - cscli parsers install crowdsecurity/syslog-logs --error - cscli parsers install crowdsecurity/dateparse-enrich --error - + cscli collections install crowdsecurity/sshd --error >/dev/null + cscli parsers install crowdsecurity/syslog-logs --error >/dev/null + cscli parsers install crowdsecurity/dateparse-enrich --error >/dev/null } teardown_file() { From cc63729b2cae9cda94786d8374097a7f5863167b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 15 May 2024 10:52:43 +0200 Subject: [PATCH 131/581] version: write to stdout, add missing newline (#3012) * pkg/cwversion cleanup - add missing newline between version and codename - add more 
information to "support dump" - write "cscli version" and "crowdsec -version" to stdout, not stderr * fix func test * lint --- cmd/crowdsec-cli/support.go | 2 +- cmd/crowdsec-cli/version.go | 8 +++++--- cmd/crowdsec/main.go | 2 +- pkg/cwversion/version.go | 33 ++++++++++++--------------------- test/bats/01_crowdsec.bats | 5 +++++ test/bats/01_cscli.bats | 20 ++++++++++---------- 6 files changed, 34 insertions(+), 36 deletions(-) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 54b2e7ad9ad..61fa2b55817 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -123,7 +123,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { func (cli *cliSupport) dumpVersion(zw *zip.Writer) { log.Info("Collecting version") - cli.writeToZip(zw, SUPPORT_VERSION_PATH, time.Now(), strings.NewReader(cwversion.ShowStr())) + cli.writeToZip(zw, SUPPORT_VERSION_PATH, time.Now(), strings.NewReader(cwversion.FullString())) } func (cli *cliSupport) dumpFeatures(zw *zip.Writer) { diff --git a/cmd/crowdsec-cli/version.go b/cmd/crowdsec-cli/version.go index de36c9be28f..7ec5c459968 100644 --- a/cmd/crowdsec-cli/version.go +++ b/cmd/crowdsec-cli/version.go @@ -1,6 +1,8 @@ package main import ( + "os" + "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/pkg/cwversion" @@ -12,14 +14,14 @@ func NewCLIVersion() *cliVersion { return &cliVersion{} } -func (cli cliVersion) NewCommand() *cobra.Command { +func (cliVersion) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "version", Short: "Display version", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, Run: func(_ *cobra.Command, _ []string) { - cwversion.Show() + _, _ = os.Stdout.WriteString(cwversion.FullString()) }, } diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 0d96692ba5f..26e39eb069c 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -369,7 +369,7 @@ func main() { } if flags.PrintVersion { - 
cwversion.Show() + os.Stdout.WriteString(cwversion.FullString()) os.Exit(0) } diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 6f85704d8e5..ac51567c2fc 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -3,7 +3,6 @@ package cwversion import ( "encoding/json" "fmt" - "log" "net/http" "runtime" "strings" @@ -30,44 +29,36 @@ func versionWithTag() string { ret := version.Version if !strings.HasSuffix(ret, version.Tag) { - ret += fmt.Sprintf("-%s", version.Tag) + ret += "-" + version.Tag } return ret } -func ShowStr() string { - ret := fmt.Sprintf("version: %s", versionWithTag()) +func FullString() string { + ret := fmt.Sprintf("version: %s\n", versionWithTag()) ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) ret += fmt.Sprintf("Platform: %s\n", System) + ret += fmt.Sprintf("libre2: %s\n", Libre2) + ret += fmt.Sprintf("Constraint_parser: %s\n", Constraint_parser) + ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) + ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) + ret += fmt.Sprintf("Constraint_acquis: %s\n", Constraint_acquis) return ret } -func Show() { - log.Printf("version: %s", versionWithTag()) - log.Printf("Codename: %s", Codename) - log.Printf("BuildDate: %s", version.BuildDate) - log.Printf("GoVersion: %s", version.GoVersion) - log.Printf("Platform: %s\n", System) - log.Printf("libre2: %s\n", Libre2) - log.Printf("Constraint_parser: %s", Constraint_parser) - log.Printf("Constraint_scenario: %s", Constraint_scenario) - log.Printf("Constraint_api: %s", Constraint_api) - log.Printf("Constraint_acquis: %s", Constraint_acquis) -} - func VersionStr() string { return fmt.Sprintf("%s-%s-%s", version.Version, System, version.Tag) } func VersionStrip() string { - version := strings.Split(version.Version, "~") - version = strings.Split(version[0], "-") + ret := 
strings.Split(version.Version, "~") + ret = strings.Split(ret[0], "-") - return version[0] + return ret[0] } func Satisfies(strvers string, constraint string) (bool, error) { @@ -90,7 +81,7 @@ func Satisfies(strvers string, constraint string) (bool, error) { // Latest return latest crowdsec version based on github func Latest() (string, error) { - latest := make(map[string]interface{}) + latest := make(map[string]any) resp, err := http.Get("https://version.crowdsec.net/latest") if err != nil { diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index d8d369eedf4..7a449ebd047 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -36,6 +36,11 @@ teardown() { rune -0 wait-for --err "argument provided but not defined: trololo" "$CROWDSEC" trololo } +@test "crowdsec -version" { + rune -0 "$CROWDSEC" -version + assert_output --partial "version:" +} + @test "crowdsec (no api and no agent)" { rune -0 wait-for \ --err "you must run at least the API Server or crowdsec" \ diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 33dd2e12ec2..8dfdf701a95 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -40,20 +40,20 @@ teardown() { @test "cscli version" { rune -0 cscli version - assert_stderr --partial "version:" - assert_stderr --partial "Codename:" - assert_stderr --partial "BuildDate:" - assert_stderr --partial "GoVersion:" - assert_stderr --partial "Platform:" - assert_stderr --partial "Constraint_parser:" - assert_stderr --partial "Constraint_scenario:" - assert_stderr --partial "Constraint_api:" - assert_stderr --partial "Constraint_acquis:" + assert_output --partial "version:" + assert_output --partial "Codename:" + assert_output --partial "BuildDate:" + assert_output --partial "GoVersion:" + assert_output --partial "Platform:" + assert_output --partial "Constraint_parser:" + assert_output --partial "Constraint_scenario:" + assert_output --partial "Constraint_api:" + assert_output --partial 
"Constraint_acquis:" # should work without configuration file rm "$CONFIG_YAML" rune -0 cscli version - assert_stderr --partial "version:" + assert_output --partial "version:" } @test "cscli help" { From 1b894a292ba837cac09eb8e5fde6d58231a69daf Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 15 May 2024 15:33:43 +0200 Subject: [PATCH 132/581] `GetActiveDecisionsCount()`and `GetActiveDecisionsTimeLeft()`expr helpers (#3013) --- pkg/database/decisions.go | 62 ++++++++ pkg/exprhelpers/expr_lib.go | 14 ++ pkg/exprhelpers/exprlib_test.go | 262 ++++++++++++++++++++++++++++++++ pkg/exprhelpers/helpers.go | 30 +++- 4 files changed, 367 insertions(+), 1 deletion(-) diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 20a49c79143..941fc5c7791 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -640,6 +640,68 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { return count, nil } +func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) { + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz, count int + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + + if err != nil { + return 0, fmt.Errorf("unable to convert '%s' to int: %s", decisionValue, err) + } + + contains := true + decisions := c.Ent.Decision.Query() + + decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + if err != nil { + return 0, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) + } + + decisions = decisions.Where(decision.UntilGT(time.Now().UTC())) + + count, err = decisions.Count(c.CTX) + if err != nil { + return 0, fmt.Errorf("fail to count decisions: %w", err) + } + + return count, nil +} + +func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.Duration, error) { + var err error + var start_ip, start_sfx, end_ip, end_sfx int64 + var ip_sz int + ip_sz, start_ip, start_sfx, 
end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + + if err != nil { + return 0, fmt.Errorf("unable to convert '%s' to int: %s", decisionValue, err) + } + + contains := true + decisions := c.Ent.Decision.Query().Where( + decision.UntilGT(time.Now().UTC()), + ) + + decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + if err != nil { + return 0, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) + } + + decisions = decisions.Order(ent.Desc(decision.FieldUntil)) + + decision, err := decisions.First(c.CTX) + if err != nil && !ent.IsNotFound(err) { + return 0, fmt.Errorf("fail to get decision: %w", err) + } + + if decision == nil { + return 0, nil + } + + return decision.Until.Sub(time.Now().UTC()), nil +} + func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Time) (int, error) { ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index 520799287eb..5041b234db1 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -231,6 +231,20 @@ var exprFuncs = []exprCustomFunc{ new(func(string) int), }, }, + { + name: "GetActiveDecisionsCount", + function: GetActiveDecisionsCount, + signature: []interface{}{ + new(func(string) int), + }, + }, + { + name: "GetActiveDecisionsTimeLeft", + function: GetActiveDecisionsTimeLeft, + signature: []interface{}{ + new(func(string) time.Duration), + }, + }, { name: "GetDecisionsSinceCount", function: GetDecisionsSinceCount, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 9d5a6556b25..38528083272 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -1118,6 +1118,268 @@ func TestGetDecisionsSinceCount(t *testing.T) { } } +func TestGetActiveDecisionsCount(t *testing.T) { + existingIP := "1.2.3.4" + unknownIP := "1.2.3.5" + + ip_sz, start_ip, start_sfx, end_ip, 
end_sfx, err := types.Addr2Ints(existingIP) + if err != nil { + t.Errorf("unable to convert '%s' to int: %s", existingIP, err) + } + + // Add sample data to DB + dbClient = getDBClient(t) + + decision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().UTC().Add(time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). + SaveX(context.Background()) + + if decision == nil { + require.Error(t, errors.Errorf("Failed to create sample decision")) + } + + expiredDecision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().UTC().Add(-time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). + SaveX(context.Background()) + + if expiredDecision == nil { + require.Error(t, errors.Errorf("Failed to create sample decision")) + } + + err = Init(dbClient) + require.NoError(t, err) + + tests := []struct { + name string + env map[string]interface{} + code string + result string + err string + }{ + { + name: "GetActiveDecisionsCount() test: existing IP count", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + }, + code: "Sprintf('%d', GetActiveDecisionsCount(Alert.GetValue()))", + result: "1", + err: "", + }, + { + name: "GetActiveDecisionsCount() test: unknown IP count", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &unknownIP, + }, + Decisions: []*models.Decision{ + { + Value: &unknownIP, + }, + }, + }, + }, + code: "Sprintf('%d', GetActiveDecisionsCount(Alert.GetValue()))", + result: "0", + err: "", + }, + } + + 
for _, test := range tests { + program, err := expr.Compile(test.code, GetExprOptions(test.env)...) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + +func TestGetActiveDecisionsTimeLeft(t *testing.T) { + existingIP := "1.2.3.4" + unknownIP := "1.2.3.5" + + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(existingIP) + if err != nil { + t.Errorf("unable to convert '%s' to int: %s", existingIP, err) + } + + // Add sample data to DB + dbClient = getDBClient(t) + + decision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().UTC().Add(time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). + SaveX(context.Background()) + + if decision == nil { + require.Error(t, errors.Errorf("Failed to create sample decision")) + } + + longerDecision := dbClient.Ent.Decision.Create(). + SetUntil(time.Now().UTC().Add(2 * time.Hour)). + SetScenario("crowdsec/test"). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetIPSize(int64(ip_sz)). + SetType("ban"). + SetScope("IP"). + SetValue(existingIP). + SetOrigin("CAPI"). 
+ SaveX(context.Background()) + + if longerDecision == nil { + require.Error(t, errors.Errorf("Failed to create sample decision")) + } + + err = Init(dbClient) + require.NoError(t, err) + + tests := []struct { + name string + env map[string]interface{} + code string + min float64 + max float64 + err string + }{ + { + name: "GetActiveDecisionsTimeLeft() test: existing IP time left", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + }, + code: "GetActiveDecisionsTimeLeft(Alert.GetValue())", + min: 7195, // 5 seconds margin to make sure the test doesn't fail randomly in the CI + max: 7200, + err: "", + }, + { + name: "GetActiveDecisionsTimeLeft() test: unknown IP time left", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &unknownIP, + }, + Decisions: []*models.Decision{ + { + Value: &unknownIP, + }, + }, + }, + }, + code: "GetActiveDecisionsTimeLeft(Alert.GetValue())", + min: 0, + max: 0, + err: "", + }, + { + name: "GetActiveDecisionsTimeLeft() test: existing IP and call time.Duration method", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &existingIP, + }, + Decisions: []*models.Decision{ + { + Value: &existingIP, + }, + }, + }, + }, + code: "GetActiveDecisionsTimeLeft(Alert.GetValue()).Hours()", + min: 2, + max: 2, + }, + { + name: "GetActiveDecisionsTimeLeft() test: unknown IP and call time.Duration method", + env: map[string]interface{}{ + "Alert": &models.Alert{ + Source: &models.Source{ + Value: &unknownIP, + }, + Decisions: []*models.Decision{ + { + Value: &unknownIP, + }, + }, + }, + }, + code: "GetActiveDecisionsTimeLeft(Alert.GetValue()).Hours()", + min: 0, + max: 0, + }, + } + + delta := 0.0001 + + for _, test := range tests { + program, err := expr.Compile(test.code, GetExprOptions(test.env)...) 
+ require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + switch o := output.(type) { + case time.Duration: + require.LessOrEqual(t, int(o.Seconds()), int(test.max)) + require.GreaterOrEqual(t, int(o.Seconds()), int(test.min)) + case float64: + require.LessOrEqual(t, o, test.max+delta) + require.GreaterOrEqual(t, o, test.min-delta) + default: + t.Fatalf("GetActiveDecisionsTimeLeft() should return a time.Duration or a float64") + } + } + +} + func TestParseUnixTime(t *testing.T) { tests := []struct { name string diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 79a621c7d35..e4e38e48474 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -550,7 +550,7 @@ func GetDecisionsSinceCount(params ...any) (any, error) { value := params[0].(string) since := params[1].(string) if dbClient == nil { - log.Error("No database config to call GetDecisionsCount()") + log.Error("No database config to call GetDecisionsSinceCount()") return 0, nil } sinceDuration, err := time.ParseDuration(since) @@ -567,6 +567,34 @@ func GetDecisionsSinceCount(params ...any) (any, error) { return count, nil } +func GetActiveDecisionsCount(params ...any) (any, error) { + value := params[0].(string) + if dbClient == nil { + log.Error("No database config to call GetActiveDecisionsCount()") + return 0, nil + } + count, err := dbClient.CountActiveDecisionsByValue(value) + if err != nil { + log.Errorf("Failed to get active decisions count from value '%s'", value) + return 0, err + } + return count, nil +} + +func GetActiveDecisionsTimeLeft(params ...any) (any, error) { + value := params[0].(string) + if dbClient == nil { + log.Error("No database config to call GetActiveDecisionsTimeLeft()") + return 0, nil + } + timeLeft, err := dbClient.GetActiveDecisionsTimeLeftByValue(value) + if err != nil { + log.Errorf("Failed to get active decisions time left from value '%s'", value) + return 0, err + } + return timeLeft, nil +} + 
// func LookupHost(value string) []string { func LookupHost(params ...any) (any, error) { value := params[0].(string) From ccf08e56d9be2e0a621803d6b203230a1747aea5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 16 May 2024 10:58:00 +0200 Subject: [PATCH 133/581] bats: no need for openssl, netcat; allow to prevent tests from running with a lock (#3016) * bats: lock/unlock instance data; replace openssl w/ cfssl; update dep list * concat pem without openssl :-/ * unused/unreachable code * lint * redundant {} (shellharden) --- test/README.md | 7 +++++-- test/bats/11_bouncers_tls.bats | 7 ++++--- test/bats/30_machines_tls.bats | 7 ++++--- test/bin/mock-http.py | 6 ++++-- test/bin/wait-for | 6 +++--- test/bin/wait-for-port | 4 ---- test/disable-capi | 2 +- test/enable-capi | 4 ++-- test/instance-crowdsec | 6 +++--- test/instance-data | 6 +++--- test/instance-db | 6 +++--- test/instance-mock-http | 10 +++++----- test/lib/config/config-local | 17 ++++++++++++++++- test/run-tests | 6 ++++-- 14 files changed, 57 insertions(+), 37 deletions(-) diff --git a/test/README.md b/test/README.md index 723ee5d3e9b..4ed132b2cfa 100644 --- a/test/README.md +++ b/test/README.md @@ -61,8 +61,6 @@ architectures. - `curl` - `daemonize` - `jq` - - `nc` - - `openssl` - `python3` ## Running all tests @@ -241,6 +239,11 @@ according to the specific needs of the group of tests in the file. crowdsec instance. Crowdsec must not be running while this operation is performed. + - instance-data lock/unlock + +When playing around with a local crowdsec installation, you can run "instance-data lock" +to prevent the bats suite from running, so it won't overwrite your configuration or data. + - `instance-crowdsec [ start | stop ]` Runs (or stops) crowdsec as a background process. 
PID and lockfiles are diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 84de3d2f488..6b4986d45d7 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -35,13 +35,14 @@ setup_file() { # Generate revoked client certs for cert_name in "revoked_1" "revoked_2"; do cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" - serial="$(openssl x509 -noout -serial -in "${tmpdir}/${cert_name}.pem" | cut -d '=' -f2)" - echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials_${cert_name}.txt" + cfssl certinfo -cert "${tmpdir}/${cert_name}.pem" | jq -r '.serial_number' > "${tmpdir}/serials_${cert_name}.txt" done # Generate separate CRL blocks and concatenate them for cert_name in "revoked_1" "revoked_2"; do - cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl_${cert_name}.pem" + echo '-----BEGIN X509 CRL-----' > "${tmpdir}/crl_${cert_name}.pem" + cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" >> "${tmpdir}/crl_${cert_name}.pem" + echo '-----END X509 CRL-----' >> "${tmpdir}/crl_${cert_name}.pem" done cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index b6d089d68e7..52231704558 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -38,13 +38,14 @@ setup_file() { # Generate revoked client cert for cert_name in "revoked_1" "revoked_2"; do cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" - serial="$(openssl x509 -noout -serial -in 
"${tmpdir}/${cert_name}.pem" | cut -d '=' -f2)" - echo "ibase=16; ${serial}" | bc >"${tmpdir}/serials_${cert_name}.txt" + cfssl certinfo -cert "${tmpdir}/${cert_name}.pem" | jq -r '.serial_number' > "${tmpdir}/serials_${cert_name}.txt" done # Generate separate CRL blocks and concatenate them for cert_name in "revoked_1" "revoked_2"; do - cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" | base64 -d | openssl crl -inform DER -out "${tmpdir}/crl_${cert_name}.pem" + echo '-----BEGIN X509 CRL-----' > "${tmpdir}/crl_${cert_name}.pem" + cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" >> "${tmpdir}/crl_${cert_name}.pem" + echo '-----END X509 CRL-----' >> "${tmpdir}/crl_${cert_name}.pem" done cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" diff --git a/test/bin/mock-http.py b/test/bin/mock-http.py index 3f26271b400..d11a4ebf717 100644 --- a/test/bin/mock-http.py +++ b/test/bin/mock-http.py @@ -6,6 +6,7 @@ from http.server import HTTPServer, BaseHTTPRequestHandler + class RequestHandler(BaseHTTPRequestHandler): def do_POST(self): request_path = self.path @@ -18,7 +19,7 @@ def do_POST(self): } print(json.dumps(log)) self.send_response(200) - self.send_header('Content-type','application/json') + self.send_header('Content-type', 'application/json') self.end_headers() self.wfile.write(json.dumps({}).encode()) self.wfile.flush() @@ -27,6 +28,7 @@ def do_POST(self): def log_message(self, format, *args): return + def main(argv): try: port = int(argv[1]) @@ -42,6 +44,6 @@ def main(argv): return 0 -if __name__ == "__main__" : +if __name__ == "__main__": logging.basicConfig(level=logging.INFO) sys.exit(main(sys.argv)) diff --git a/test/bin/wait-for b/test/bin/wait-for index 6c6fdd5ce2b..b226783d44b 100755 --- a/test/bin/wait-for +++ b/test/bin/wait-for @@ -39,7 +39,7 @@ async def monitor(cmd, args, want_out, want_err, timeout): status = None - async def 
read_stream(p, stream, outstream, pattern): + async def read_stream(stream, outstream, pattern): nonlocal status if stream is None: return @@ -84,8 +84,8 @@ async def monitor(cmd, args, want_out, want_err, timeout): await asyncio.wait_for( asyncio.wait([ asyncio.create_task(process.wait()), - asyncio.create_task(read_stream(process, process.stdout, sys.stdout, out_regex)), - asyncio.create_task(read_stream(process, process.stderr, sys.stderr, err_regex)) + asyncio.create_task(read_stream(process.stdout, sys.stdout, out_regex)), + asyncio.create_task(read_stream(process.stderr, sys.stderr, err_regex)) ]), timeout) if status is None: status = process.returncode diff --git a/test/bin/wait-for-port b/test/bin/wait-for-port index 15408b8e5a0..72f26bf409c 100755 --- a/test/bin/wait-for-port +++ b/test/bin/wait-for-port @@ -54,10 +54,6 @@ def main(argv): if not args.quiet: write_error(ex) sys.exit(1) - else: - sys.exit(0) - - sys.exit(1) if __name__ == "__main__": diff --git a/test/disable-capi b/test/disable-capi index f19bef5314c..b847accae48 100755 --- a/test/disable-capi +++ b/test/disable-capi @@ -5,4 +5,4 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # shellcheck disable=SC1091 . "${THIS_DIR}/.environment.sh" -yq e 'del(.api.server.online_client)' -i "${CONFIG_YAML}" +yq e 'del(.api.server.online_client)' -i "$CONFIG_YAML" diff --git a/test/enable-capi b/test/enable-capi index ddbf8764c44..59980e6a059 100755 --- a/test/enable-capi +++ b/test/enable-capi @@ -5,7 +5,7 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # shellcheck disable=SC1091 . 
"${THIS_DIR}/.environment.sh" -online_api_credentials="$(dirname "${CONFIG_YAML}")/online_api_credentials.yaml" +online_api_credentials="$(dirname "$CONFIG_YAML")/online_api_credentials.yaml" export online_api_credentials -yq e '.api.server.online_client.credentials_path=strenv(online_api_credentials)' -i "${CONFIG_YAML}" +yq e '.api.server.online_client.credentials_path=strenv(online_api_credentials)' -i "$CONFIG_YAML" diff --git a/test/instance-crowdsec b/test/instance-crowdsec index d87145c3881..f0cef729693 100755 --- a/test/instance-crowdsec +++ b/test/instance-crowdsec @@ -2,15 +2,15 @@ #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -cd "${THIS_DIR}" || exit 1 +cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . ./.environment.sh backend_script="./lib/init/crowdsec-${INIT_BACKEND}" -if [[ ! -x "${backend_script}" ]]; then +if [[ ! -x "$backend_script" ]]; then echo "unknown init system '${INIT_BACKEND}'" >&2 exit 1 fi -exec "${backend_script}" "$@" +exec "$backend_script" "$@" diff --git a/test/instance-data b/test/instance-data index 02742b4ec85..e4e76d3980a 100755 --- a/test/instance-data +++ b/test/instance-data @@ -2,15 +2,15 @@ #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -cd "${THIS_DIR}" || exit 1 +cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . ./.environment.sh backend_script="./lib/config/config-${CONFIG_BACKEND}" -if [[ ! -x "${backend_script}" ]]; then +if [[ ! -x "$backend_script" ]]; then echo "unknown config backend '${CONFIG_BACKEND}'" >&2 exit 1 fi -exec "${backend_script}" "$@" +exec "$backend_script" "$@" diff --git a/test/instance-db b/test/instance-db index fbbc18dc433..de09465bc32 100755 --- a/test/instance-db +++ b/test/instance-db @@ -2,7 +2,7 @@ #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -cd "${THIS_DIR}" || exit 1 +cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . 
./.environment.sh @@ -10,9 +10,9 @@ cd "${THIS_DIR}" || exit 1 backend_script="./lib/db/instance-${DB_BACKEND}" -if [[ ! -x "${backend_script}" ]]; then +if [[ ! -x "$backend_script" ]]; then echo "unknown database '${DB_BACKEND}'" >&2 exit 1 fi -exec "${backend_script}" "$@" +exec "$backend_script" "$@" diff --git a/test/instance-mock-http b/test/instance-mock-http index cca19b79e3e..b5a56d3489d 100755 --- a/test/instance-mock-http +++ b/test/instance-mock-http @@ -13,7 +13,7 @@ about() { #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -cd "${THIS_DIR}" +cd "$THIS_DIR" # shellcheck disable=SC1091 . ./.environment.sh @@ -31,7 +31,7 @@ DAEMON_PID=${PID_DIR}/mock-http.pid start_instance() { [[ $# -lt 1 ]] && about daemonize \ - -p "${DAEMON_PID}" \ + -p "$DAEMON_PID" \ -e "${LOG_DIR}/mock-http.err" \ -o "${LOG_DIR}/mock-http.out" \ /usr/bin/env python3 -u "${THIS_DIR}/bin/mock-http.py" "$1" @@ -40,10 +40,10 @@ start_instance() { } stop_instance() { - if [[ -f "${DAEMON_PID}" ]]; then + if [[ -f "$DAEMON_PID" ]]; then # terminate with extreme prejudice, all the application data will be thrown away anyway - kill -9 "$(cat "${DAEMON_PID}")" > /dev/null 2>&1 - rm -f -- "${DAEMON_PID}" + kill -9 "$(cat "$DAEMON_PID")" > /dev/null 2>&1 + rm -f -- "$DAEMON_PID" fi } diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 693b157f531..76bd4c3fbce 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -9,7 +9,7 @@ die() { } about() { - die "usage: ${script_name} [make | load | clean]" + die "usage: ${script_name} [make | load | lock | unlock | clean]" } #shellcheck disable=SC1007 @@ -134,7 +134,16 @@ make_init_data() { remove_init_data } +lock_init_data() { + touch "${LOCAL_INIT_DIR}/.lock" +} + +unlock_init_data() { + rm -f "${LOCAL_INIT_DIR}/.lock" +} + load_init_data() { + [[ -f "${LOCAL_INIT_DIR}/.lock" ]] && die "init data is locked" ./bin/assert-crowdsec-not-running || die "Cannot load 
fixture data." if [[ ! -f "${LOCAL_INIT_DIR}/init-config-data.tar" ]]; then @@ -164,6 +173,12 @@ case "$1" in load) load_init_data ;; + lock) + lock_init_data + ;; + unlock) + unlock_init_data + ;; clean) remove_init_data ;; diff --git a/test/run-tests b/test/run-tests index 21b7a7320c5..6fe3bd004e2 100755 --- a/test/run-tests +++ b/test/run-tests @@ -16,14 +16,16 @@ TEST_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) echo "Running tests..." echo "DB_BACKEND: ${DB_BACKEND}" -if [[ -z "${TEST_COVERAGE}" ]]; then +if [[ -z "$TEST_COVERAGE" ]]; then echo "Coverage report: no" else echo "Coverage report: yes" fi +[[ -f "$LOCAL_INIT_DIR/.lock" ]] && die "init data is locked: are you doing some manual test? if so, please finish what you are doing, run 'instance-data unlock' and retry" + dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" -if [[ "${DB_BACKEND}" != "${dump_backend}" ]]; then +if [[ "$DB_BACKEND" != "$dump_backend" ]]; then die "Can't run with backend '${DB_BACKEND}' because the test data was build with '${dump_backend}'" fi From b6253d567bbe50e6aa146e148a333c0f2e104e51 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 16 May 2024 11:13:39 +0200 Subject: [PATCH 134/581] simulation for local scenarios (#3010) --- pkg/cwhub/hub.go | 26 ++++++++++------ pkg/cwhub/leakybucket.go | 53 --------------------------------- pkg/cwhub/sync.go | 5 +++- pkg/leakybucket/manager_load.go | 16 ++++------ test/bats/50_simulation.bats | 13 ++++++++ 5 files changed, 40 insertions(+), 73 deletions(-) delete mode 100644 pkg/cwhub/leakybucket.go diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 87a6644bc72..e7d927f54b1 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -17,11 +17,12 @@ import ( // Hub is the main structure for the package. 
type Hub struct { - items HubItems // Items read from HubDir and InstallDir - local *csconfig.LocalHubCfg - remote *RemoteHubCfg - logger *logrus.Logger - Warnings []string // Warnings encountered during sync + items HubItems // Items read from HubDir and InstallDir + pathIndex map[string]*Item + local *csconfig.LocalHubCfg + remote *RemoteHubCfg + logger *logrus.Logger + Warnings []string // Warnings encountered during sync } // GetDataDir returns the data directory, where data sets are installed. @@ -43,9 +44,10 @@ func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, updateIndex bool, } hub := &Hub{ - local: local, - remote: remote, - logger: logger, + local: local, + remote: remote, + logger: logger, + pathIndex: make(map[string]*Item, 0), } if updateIndex { @@ -137,7 +139,7 @@ func (h *Hub) ItemStats() []string { } ret := []string{ - fmt.Sprintf("Loaded: %s", loaded), + "Loaded: " + loaded, } if local > 0 || tainted > 0 { @@ -169,6 +171,7 @@ func (h *Hub) addItem(item *Item) { } h.items[item.Type][item.Name] = item + h.pathIndex[item.State.LocalPath] = item } // GetItemMap returns the map of items for a given type. @@ -181,6 +184,11 @@ func (h *Hub) GetItem(itemType string, itemName string) *Item { return h.GetItemMap(itemType)[itemName] } +// GetItemByPath returns an item from hub based on its (absolute) local path. +func (h *Hub) GetItemByPath(itemPath string) *Item { + return h.pathIndex[itemPath] +} + // GetItemFQ returns an item from hub based on its type and name (type:author/name). func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) { // type and name are separated by a colon diff --git a/pkg/cwhub/leakybucket.go b/pkg/cwhub/leakybucket.go deleted file mode 100644 index 8143e9433ee..00000000000 --- a/pkg/cwhub/leakybucket.go +++ /dev/null @@ -1,53 +0,0 @@ -package cwhub - -// Resolve a symlink to find the hub item it points to. 
-// This file is used only by pkg/leakybucket - -import ( - "fmt" - "os" - "path/filepath" - "strings" -) - -// itemKey extracts the map key of an item (i.e. author/name) from its pathname. Follows a symlink if necessary. -func itemKey(itemPath string) (string, error) { - f, err := os.Lstat(itemPath) - if err != nil { - return "", fmt.Errorf("while performing lstat on %s: %w", itemPath, err) - } - - if f.Mode()&os.ModeSymlink == 0 { - // it's not a symlink, so the filename itsef should be the key - return filepath.Base(itemPath), nil - } - - // resolve the symlink to hub file - pathInHub, err := os.Readlink(itemPath) - if err != nil { - return "", fmt.Errorf("while reading symlink of %s: %w", itemPath, err) - } - - author := filepath.Base(filepath.Dir(pathInHub)) - - fname := filepath.Base(pathInHub) - fname = strings.TrimSuffix(fname, ".yaml") - fname = strings.TrimSuffix(fname, ".yml") - - return fmt.Sprintf("%s/%s", author, fname), nil -} - -// GetItemByPath retrieves an item from the hub index based on its local path. 
-func (h *Hub) GetItemByPath(itemType string, itemPath string) (*Item, error) { - itemKey, err := itemKey(itemPath) - if err != nil { - return nil, err - } - - item := h.GetItem(itemType, itemKey) - if item == nil { - return nil, fmt.Errorf("%s not found in %s", itemKey, itemType) - } - - return item, nil -} diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 42db255c883..fd5d6b81220 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -9,9 +9,10 @@ import ( "strings" "github.com/Masterminds/semver/v3" - "github.com/crowdsecurity/go-cs-lib/downloader" "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/downloader" ) func isYAMLFileName(path string) bool { @@ -271,6 +272,8 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { return err } + h.pathIndex[path] = item + return nil } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index bc259c18319..c94291100a4 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -253,7 +253,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str ok, err := cwversion.Satisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario) if err != nil { - return nil, nil, fmt.Errorf("failed to check version : %s", err) + return nil, nil, fmt.Errorf("failed to check version: %w", err) } if !ok { @@ -265,20 +265,16 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.BucketName = seed.Generate() bucketFactory.ret = response - hubItem, err := hub.GetItemByPath(cwhub.SCENARIOS, bucketFactory.Filename) - if err != nil { - log.Errorf("scenario %s (%s) couldn't be find in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) + hubItem := hub.GetItemByPath(bucketFactory.Filename) + if hubItem == nil { + log.Errorf("scenario %s (%s) could not be found in hub (ignore if in unit tests)", bucketFactory.Name, 
bucketFactory.Filename) } else { if cscfg.SimulationConfig != nil { bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name) } - if hubItem != nil { - bucketFactory.ScenarioVersion = hubItem.State.LocalVersion - bucketFactory.hash = hubItem.State.LocalHash - } else { - log.Errorf("scenario %s (%s) couldn't be find in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) - } + bucketFactory.ScenarioVersion = hubItem.State.LocalVersion + bucketFactory.hash = hubItem.State.LocalHash } bucketFactory.wgDumpState = buckets.wgDumpState diff --git a/test/bats/50_simulation.bats b/test/bats/50_simulation.bats index ab4145551ff..2dc93e62d06 100644 --- a/test/bats/50_simulation.bats +++ b/test/bats/50_simulation.bats @@ -62,6 +62,19 @@ setup() { assert_json '[]' } +@test "simulated local scenario: expect no decision" { + CONFIG_DIR=$(dirname "$CONFIG_YAML") + HUB_DIR=$(config_get '.config_paths.hub_dir') + rune -0 mkdir -p "$CONFIG_DIR"/scenarios + # replace an installed scenario with a local version + rune -0 cp -r "$HUB_DIR"/scenarios/crowdsecurity/ssh-bf.yaml "$CONFIG_DIR"/scenarios/ssh-bf2.yaml + rune -0 cscli scenarios remove crowdsecurity/ssh-bf --force --purge + rune -0 cscli simulation enable crowdsecurity/ssh-bf + fake_log | "$CROWDSEC" -dsn file:///dev/fd/0 -type syslog -no-api + rune -0 cscli decisions list --no-simu -o json + assert_json '[]' +} + @test "global simulation, listing non-simulated: expect no decision" { rune -0 cscli simulation disable crowdsecurity/ssh-bf rune -0 cscli simulation enable --global From 0ba05acc03b6a177fbd14acda6b97216bcbdb13d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 17 May 2024 10:07:28 +0200 Subject: [PATCH 135/581] tests: replace "docker-compose" -> "docker compose" (#3000) * replace "docker-compose" -> "docker compose" * obsolete doc note * bats: clean up extra output --- Makefile | 15 ++++++++++----- test/README.md | 7 ------- 
test/bats/50_simulation.bats | 6 +++--- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/Makefile b/Makefile index 3f271c54ca4..9e21abee02a 100644 --- a/Makefile +++ b/Makefile @@ -220,7 +220,7 @@ export AWS_ACCESS_KEY_ID=test export AWS_SECRET_ACCESS_KEY=test testenv: - @echo 'NOTE: You need Docker, docker-compose and run "make localstack" in a separate shell ("make localstack-stop" to terminate it)' + @echo 'NOTE: You need to run "make localstack" in a separate shell, "make localstack-stop" to terminate it' .PHONY: test test: testenv goversion ## Run unit tests with localstack @@ -230,14 +230,19 @@ test: testenv goversion ## Run unit tests with localstack go-acc: testenv goversion ## Run unit tests with localstack + coverage go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models -- $(LD_OPTS) +check_docker: + @if ! docker info > /dev/null 2>&1; then \ + echo "Could not run 'docker info': check that docker is running, and if you need to run this command with sudo."; \ + fi + # mock AWS services .PHONY: localstack -localstack: ## Run localstack containers (required for unit testing) - docker-compose -f test/localstack/docker-compose.yml up +localstack: check_docker ## Run localstack containers (required for unit testing) + docker compose -f test/localstack/docker-compose.yml up .PHONY: localstack-stop -localstack-stop: ## Stop localstack containers - docker-compose -f test/localstack/docker-compose.yml down +localstack-stop: check_docker ## Stop localstack containers + docker compose -f test/localstack/docker-compose.yml down # build vendor.tgz to be distributed with the release .PHONY: vendor diff --git a/test/README.md b/test/README.md index 4ed132b2cfa..f7b036e7905 100644 --- a/test/README.md +++ b/test/README.md @@ -415,10 +415,3 @@ different syntax. 
Check the heredocs (the </dev/null + cscli parsers install crowdsecurity/syslog-logs --error >/dev/null + cscli parsers install crowdsecurity/dateparse-enrich --error >/dev/null ./instance-crowdsec start } From 1a4ac9d2391c5c53f4038d8fc3cfb2550d126702 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 17 May 2024 14:26:10 +0200 Subject: [PATCH 136/581] replace log.Fatal with error return (#2996) * log.Fatal -> fmt.Errorf * lint --- pkg/apiserver/papi.go | 10 +++++----- pkg/cache/cache.go | 31 +++++++++++++++++++++---------- pkg/csprofiles/csprofiles.go | 3 ++- pkg/hubtest/hubtest_item.go | 5 +++-- pkg/leakybucket/manager_load.go | 6 +++--- pkg/parser/node.go | 2 +- 6 files changed, 35 insertions(+), 22 deletions(-) diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 8dbd1bb9641..169f1441df4 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -130,7 +130,7 @@ func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { message := &Message{} if err := json.Unmarshal([]byte(event.Data), message); err != nil { - return fmt.Errorf("polling papi message format is not compatible: %+v: %s", event.Data, err) + return fmt.Errorf("polling papi message format is not compatible: %+v: %w", event.Data, err) } if message.Header == nil { @@ -161,12 +161,12 @@ func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { req, err := http.NewRequest(http.MethodGet, papiCheckUrl, nil) if err != nil { - return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request : %s", err) + return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request: %w", err) } resp, err := httpClient.Do(req) if err != nil { - log.Fatalf("failed to get response : %s", err) + return PapiPermCheckSuccess{}, fmt.Errorf("failed to get response: %w", err) } defer resp.Body.Close() @@ -176,7 +176,7 @@ func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { err = json.NewDecoder(resp.Body).Decode(&errResp) if err != nil 
{ - return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response : %s", err) + return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response: %w", err) } return PapiPermCheckSuccess{}, fmt.Errorf("unable to query PAPI : %s (%d)", errResp.Error, resp.StatusCode) @@ -186,7 +186,7 @@ func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { err = json.NewDecoder(resp.Body).Decode(&respBody) if err != nil { - return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response : %s", err) + return PapiPermCheckSuccess{}, fmt.Errorf("failed to decode response: %w", err) } return respBody, nil diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 1fd65dc38c3..5b0dcbdc251 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -2,6 +2,7 @@ package cache import ( "errors" + "fmt" "time" "github.com/bluele/gcache" @@ -11,9 +12,11 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var Caches []gcache.Cache -var CacheNames []string -var CacheConfig []CacheCfg +var ( + Caches []gcache.Cache + CacheNames []string + CacheConfig []CacheCfg +) /*prometheus*/ var CacheMetrics = prometheus.NewGaugeVec( @@ -27,6 +30,7 @@ var CacheMetrics = prometheus.NewGaugeVec( // UpdateCacheMetrics is called directly by the prom handler func UpdateCacheMetrics() { CacheMetrics.Reset() + for i, name := range CacheNames { CacheMetrics.With(prometheus.Labels{"name": name, "type": CacheConfig[i].Strategy}).Set(float64(Caches[i].Len(false))) } @@ -42,27 +46,30 @@ type CacheCfg struct { } func CacheInit(cfg CacheCfg) error { - for _, name := range CacheNames { if name == cfg.Name { log.Infof("Cache %s already exists", cfg.Name) } } - //get a default logger + // get a default logger if cfg.LogLevel == nil { cfg.LogLevel = new(log.Level) *cfg.LogLevel = log.InfoLevel } - var clog = log.New() + + clog := log.New() + if err := types.ConfigureLogger(clog); err != nil { - log.Fatalf("While creating cache logger : %s", err) + return fmt.Errorf("while creating cache 
logger: %w", err) } + clog.SetLevel(*cfg.LogLevel) cfg.Logger = clog.WithFields(log.Fields{ "cache": cfg.Name, }) tmpCache := gcache.New(cfg.Size) + switch cfg.Strategy { case "LRU": tmpCache = tmpCache.LRU() @@ -73,7 +80,6 @@ func CacheInit(cfg CacheCfg) error { default: cfg.Strategy = "LRU" tmpCache = tmpCache.LRU() - } CTICache := tmpCache.Build() @@ -85,18 +91,20 @@ func CacheInit(cfg CacheCfg) error { } func SetKey(cacheName string, key string, value string, expiration *time.Duration) error { - for i, name := range CacheNames { if name == cacheName { if expiration == nil { expiration = &CacheConfig[i].TTL } + CacheConfig[i].Logger.Debugf("Setting key %s to %s with expiration %v", key, value, *expiration) + if err := Caches[i].SetWithExpire(key, value, *expiration); err != nil { CacheConfig[i].Logger.Warningf("While setting key %s in cache %s: %s", key, cacheName, err) } } } + return nil } @@ -104,17 +112,20 @@ func GetKey(cacheName string, key string) (string, error) { for i, name := range CacheNames { if name == cacheName { if value, err := Caches[i].Get(key); err != nil { - //do not warn or log if key not found + // do not warn or log if key not found if errors.Is(err, gcache.KeyNotFoundError) { return "", nil } CacheConfig[i].Logger.Warningf("While getting key %s in cache %s: %s", key, cacheName, err) + return "", err } else { return value.(string), nil } } } + log.Warningf("Cache %s not found", cacheName) + return "", nil } diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index 95fbb356f3d..42509eaceae 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -35,7 +35,7 @@ func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { xlog := log.New() if err := types.ConfigureLogger(xlog); err != nil { - log.Fatalf("While creating profiles-specific logger : %s", err) + return nil, fmt.Errorf("while configuring profiles-specific logger: %w", err) } xlog.SetLevel(log.InfoLevel) @@ -196,6 +196,7 @@ 
func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision decisions = append(decisions, subdecisions...) } else { Profile.Logger.Debugf("Profile %s filter is unsuccessful", Profile.Cfg.Name) + if Profile.Cfg.OnFailure == "break" { break } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 1a2b4863105..918a10f62e1 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -212,7 +212,7 @@ func (t *HubTestItem) InstallHub() error { // load installed hub hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, false, nil) if err != nil { - log.Fatal(err) + return err } // install data for parsers if needed @@ -327,7 +327,8 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { nucleiConfig := NucleiConfig{ Path: "nuclei", OutputDir: t.RuntimePath, - CmdLineOptions: []string{"-ev", // allow variables from environment + CmdLineOptions: []string{ + "-ev", // allow variables from environment "-nc", // no colors in output "-dresp", // dump response "-j", // json output diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index c94291100a4..3e47f1fc16f 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -308,7 +308,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Debug { clog := log.New() if err := types.ConfigureLogger(clog); err != nil { - log.Fatalf("While creating bucket-specific logger : %s", err) + return fmt.Errorf("while creating bucket-specific logger: %w", err) } clog.SetLevel(log.DebugLevel) @@ -462,7 +462,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac val, ok := buckets.Bucket_map.Load(k) if ok { - log.Fatalf("key %s already exists : %+v", k, val) + return fmt.Errorf("key %s already exists: %+v", k, val) } // find back our holder found := false @@ -502,7 +502,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac } if !found { - 
log.Fatalf("Unable to find holder for bucket %s : %s", k, spew.Sdump(v)) + return fmt.Errorf("unable to find holder for bucket %s: %s", k, spew.Sdump(v)) } } diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 0906f0b5143..244f361d6b8 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -447,7 +447,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { if n.Debug { clog := log.New() if err = types.ConfigureLogger(clog); err != nil { - log.Fatalf("While creating bucket-specific logger : %s", err) + return fmt.Errorf("while creating bucket-specific logger: %w", err) } clog.SetLevel(log.DebugLevel) From 20e44cd18ab0ec408108b83616303ff64218da34 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 17 May 2024 16:16:04 +0200 Subject: [PATCH 137/581] appsec: add files and cookies related zones (#2956) --- .../modules/appsec/appsec_rules_test.go | 128 ++++++++++++++++++ pkg/appsec/appsec_rule/modsecurity.go | 29 ++-- 2 files changed, 145 insertions(+), 12 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go index 3c48c50fabb..b25e4465f0f 100644 --- a/pkg/acquisition/modules/appsec/appsec_rules_test.go +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -230,6 +230,134 @@ func TestAppsecRuleMatches(t *testing.T) { require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) }, }, + { + name: "Basic matching in cookies", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"COOKIES"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Cookie": []string{"foo=toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode 
int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic matching in all cookies", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"COOKIES"}, + Match: appsec_rule.Match{Type: "regex", Value: "^tutu"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Cookie": []string{"foo=toto; bar=tutu"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic matching in cookie name", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"COOKIES_NAMES"}, + Match: appsec_rule.Match{Type: "regex", Value: "^tutu"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Cookie": []string{"bar=tutu; tututata=toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, 
types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic matching in multipart file name", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"FILES"}, + Match: appsec_rule.Match{Type: "regex", Value: "\\.php$"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Content-Type": []string{"multipart/form-data; boundary=boundary"}}, + Body: []byte(` +--boundary +Content-Disposition: form-data; name="foo"; filename="bar.php" +Content-Type: application/octet-stream + +toto +--boundary--`), + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, } for _, test := range tests { diff --git a/pkg/appsec/appsec_rule/modsecurity.go b/pkg/appsec/appsec_rule/modsecurity.go index a269384ccb9..03a840cf436 100644 --- a/pkg/appsec/appsec_rule/modsecurity.go +++ b/pkg/appsec/appsec_rule/modsecurity.go @@ -11,18 +11,23 @@ type ModsecurityRule struct { } var zonesMap map[string]string = map[string]string{ - "ARGS": "ARGS_GET", - "ARGS_NAMES": "ARGS_GET_NAMES", - "BODY_ARGS": "ARGS_POST", - "BODY_ARGS_NAMES": "ARGS_POST_NAMES", - "HEADERS_NAMES": 
"REQUEST_HEADERS_NAMES", - "HEADERS": "REQUEST_HEADERS", - "METHOD": "REQUEST_METHOD", - "PROTOCOL": "REQUEST_PROTOCOL", - "URI": "REQUEST_FILENAME", - "URI_FULL": "REQUEST_URI", - "RAW_BODY": "REQUEST_BODY", - "FILENAMES": "FILES", + "ARGS": "ARGS_GET", + "ARGS_NAMES": "ARGS_GET_NAMES", + "BODY_ARGS": "ARGS_POST", + "BODY_ARGS_NAMES": "ARGS_POST_NAMES", + "COOKIES": "REQUEST_COOKIES", + "COOKIES_NAMES": "REQUEST_COOKIES_NAMES", + "FILES": "FILES", + "FILES_NAMES": "FILES_NAMES", + "FILES_TOTAL_SIZE": "FILES_COMBINED_SIZE", + "HEADERS_NAMES": "REQUEST_HEADERS_NAMES", + "HEADERS": "REQUEST_HEADERS", + "METHOD": "REQUEST_METHOD", + "PROTOCOL": "REQUEST_PROTOCOL", + "URI": "REQUEST_FILENAME", + "URI_FULL": "REQUEST_URI", + "RAW_BODY": "REQUEST_BODY", + "FILENAMES": "FILES", } var transformMap map[string]string = map[string]string{ From 572b387fce5ad9f11e56e6e104a576e1b612bc57 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 24 May 2024 10:20:47 +0200 Subject: [PATCH 138/581] CI: improve code coverage report (#3029) --- .github/codecov-ignore-generated.sh | 11 ++ .github/codecov.yml | 129 +++++++++++++++++++++ .github/workflows/bats-sqlite-coverage.yml | 5 +- .github/workflows/bats.yml | 2 + 4 files changed, 146 insertions(+), 1 deletion(-) create mode 100755 .github/codecov-ignore-generated.sh diff --git a/.github/codecov-ignore-generated.sh b/.github/codecov-ignore-generated.sh new file mode 100755 index 00000000000..3c896d47be7 --- /dev/null +++ b/.github/codecov-ignore-generated.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# Run this from the repository root: +# +# .github/codecov-ignore-generated.sh >> .github/codecov.yml + +find . 
-name "*.go" | while read -r file; do + if head -n 1 "$file" | grep -q "Code generated by"; then + echo " - \"$file\"" + fi +done diff --git a/.github/codecov.yml b/.github/codecov.yml index 5f721427d7a..82598c15511 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -8,3 +8,132 @@ coverage: project: default: target: 0% + +# if a directory is ignored, there is no way to un-ignore files like pkg/models/helpers.go +# so we make a full list, manually updated - but it could be generated right before running codecov +ignore: + - "./pkg/modelscapi/success_response.go" + - "./pkg/modelscapi/get_decisions_stream_response_deleted.go" + - "./pkg/modelscapi/login_request.go" + - "./pkg/modelscapi/get_decisions_stream_response_links.go" + - "./pkg/modelscapi/login_response.go" + - "./pkg/modelscapi/add_signals_request_item.go" + - "./pkg/modelscapi/blocklist_link.go" + - "./pkg/modelscapi/get_decisions_stream_response_deleted_item.go" + - "./pkg/modelscapi/decisions_sync_request.go" + - "./pkg/modelscapi/get_decisions_stream_response.go" + - "./pkg/modelscapi/metrics_request_machines_item.go" + - "./pkg/modelscapi/metrics_request.go" + - "./pkg/modelscapi/get_decisions_stream_response_new.go" + - "./pkg/modelscapi/add_signals_request_item_decisions_item.go" + - "./pkg/modelscapi/metrics_request_bouncers_item.go" + - "./pkg/modelscapi/decisions_sync_request_item_decisions_item.go" + - "./pkg/modelscapi/decisions_delete_request_item.go" + - "./pkg/modelscapi/get_decisions_stream_response_new_item.go" + - "./pkg/modelscapi/decisions_sync_request_item.go" + - "./pkg/modelscapi/add_signals_request.go" + - "./pkg/modelscapi/reset_password_request.go" + - "./pkg/modelscapi/add_signals_request_item_decisions.go" + - "./pkg/modelscapi/decisions_sync_request_item_source.go" + - "./pkg/modelscapi/error_response.go" + - "./pkg/modelscapi/decisions_delete_request.go" + - "./pkg/modelscapi/decisions_sync_request_item_decisions.go" + - "./pkg/modelscapi/enroll_request.go" + - 
"./pkg/modelscapi/register_request.go" + - "./pkg/modelscapi/add_signals_request_item_source.go" + - "./pkg/models/alert.go" + - "./pkg/models/metrics_bouncer_info.go" + - "./pkg/models/add_signals_request_item.go" + - "./pkg/models/add_signals_request_item_decisions_item.go" + - "./pkg/models/get_alerts_response.go" + - "./pkg/models/watcher_auth_request.go" + - "./pkg/models/add_alerts_request.go" + - "./pkg/models/event.go" + - "./pkg/models/decisions_delete_request_item.go" + - "./pkg/models/meta.go" + - "./pkg/models/delete_alerts_response.go" + - "./pkg/models/topx_response.go" + - "./pkg/models/add_signals_request.go" + - "./pkg/models/delete_decision_response.go" + - "./pkg/models/get_decisions_response.go" + - "./pkg/models/add_signals_request_item_decisions.go" + - "./pkg/models/source.go" + - "./pkg/models/decisions_stream_response.go" + - "./pkg/models/error_response.go" + - "./pkg/models/decision.go" + - "./pkg/models/decisions_delete_request.go" + - "./pkg/models/flush_decision_response.go" + - "./pkg/models/watcher_auth_response.go" + - "./pkg/models/watcher_registration_request.go" + - "./pkg/models/metrics_agent_info.go" + - "./pkg/models/add_signals_request_item_source.go" + - "./pkg/models/add_alerts_response.go" + - "./pkg/models/metrics.go" + - "./pkg/protobufs/notifier.pb.go" + - "./pkg/database/ent/machine_delete.go" + - "./pkg/database/ent/decision_query.go" + - "./pkg/database/ent/meta_query.go" + - "./pkg/database/ent/machine_create.go" + - "./pkg/database/ent/alert.go" + - "./pkg/database/ent/event_update.go" + - "./pkg/database/ent/alert_create.go" + - "./pkg/database/ent/alert_query.go" + - "./pkg/database/ent/lock_create.go" + - "./pkg/database/ent/bouncer_update.go" + - "./pkg/database/ent/meta_update.go" + - "./pkg/database/ent/decision_create.go" + - "./pkg/database/ent/configitem_update.go" + - "./pkg/database/ent/machine_query.go" + - "./pkg/database/ent/client.go" + - "./pkg/database/ent/predicate/predicate.go" + - 
"./pkg/database/ent/lock/where.go" + - "./pkg/database/ent/lock/lock.go" + - "./pkg/database/ent/mutation.go" + - "./pkg/database/ent/migrate/migrate.go" + - "./pkg/database/ent/migrate/schema.go" + - "./pkg/database/ent/configitem.go" + - "./pkg/database/ent/event.go" + - "./pkg/database/ent/event_query.go" + - "./pkg/database/ent/lock_update.go" + - "./pkg/database/ent/meta.go" + - "./pkg/database/ent/configitem_query.go" + - "./pkg/database/ent/bouncer.go" + - "./pkg/database/ent/alert_update.go" + - "./pkg/database/ent/meta/meta.go" + - "./pkg/database/ent/meta/where.go" + - "./pkg/database/ent/decision_update.go" + - "./pkg/database/ent/alert_delete.go" + - "./pkg/database/ent/lock.go" + - "./pkg/database/ent/runtime/runtime.go" + - "./pkg/database/ent/alert/alert.go" + - "./pkg/database/ent/alert/where.go" + - "./pkg/database/ent/runtime.go" + - "./pkg/database/ent/bouncer/bouncer.go" + - "./pkg/database/ent/bouncer/where.go" + - "./pkg/database/ent/hook/hook.go" + - "./pkg/database/ent/configitem_create.go" + - "./pkg/database/ent/configitem_delete.go" + - "./pkg/database/ent/tx.go" + - "./pkg/database/ent/decision.go" + - "./pkg/database/ent/lock_delete.go" + - "./pkg/database/ent/decision_delete.go" + - "./pkg/database/ent/machine/where.go" + - "./pkg/database/ent/machine/machine.go" + - "./pkg/database/ent/event_create.go" + - "./pkg/database/ent/decision/where.go" + - "./pkg/database/ent/decision/decision.go" + - "./pkg/database/ent/enttest/enttest.go" + - "./pkg/database/ent/lock_query.go" + - "./pkg/database/ent/bouncer_create.go" + - "./pkg/database/ent/event_delete.go" + - "./pkg/database/ent/bouncer_delete.go" + - "./pkg/database/ent/event/event.go" + - "./pkg/database/ent/event/where.go" + - "./pkg/database/ent/machine.go" + - "./pkg/database/ent/ent.go" + - "./pkg/database/ent/meta_create.go" + - "./pkg/database/ent/bouncer_query.go" + - "./pkg/database/ent/meta_delete.go" + - "./pkg/database/ent/machine_update.go" + - 
"./pkg/database/ent/configitem/configitem.go" + - "./pkg/database/ent/configitem/where.go" diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 4f724656ed6..0d9906d11f0 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -2,6 +2,9 @@ name: (sub) Bats / sqlite + coverage on: workflow_call: + secrets: + CODECOV_TOKEN: + required: true env: TEST_COVERAGE: true @@ -76,7 +79,7 @@ jobs: run: for file in $(find ./test/local/var/log -type f); do echo ">>>>> $file"; cat $file; echo; done if: ${{ always() }} - - name: Upload crowdsec coverage to codecov + - name: Upload bats coverage to codecov uses: codecov/codecov-action@v4 with: files: ./coverage-bats.out diff --git a/.github/workflows/bats.yml b/.github/workflows/bats.yml index 0ce8cf041ed..59976bad87d 100644 --- a/.github/workflows/bats.yml +++ b/.github/workflows/bats.yml @@ -28,6 +28,8 @@ on: jobs: sqlite: uses: ./.github/workflows/bats-sqlite-coverage.yml + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} # Jobs for Postgres (and sometimes MySQL) can have failing tests on GitHub # CI, but they pass when run on devs' machines or in the release checks. 
We From 816608daf42d9de8d6447f823599302fb45f328d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 24 May 2024 11:17:52 +0200 Subject: [PATCH 139/581] cscli: faster table rendering (#3027) * cscli: use go-pretty for tables * lint --- cmd/crowdsec-cli/bouncers_table.go | 3 +- cmd/crowdsec-cli/console_table.go | 3 +- cmd/crowdsec-cli/hubtest_table.go | 3 +- cmd/crowdsec-cli/machines_table.go | 3 +- cmd/crowdsec-cli/metrics_table.go | 9 +- cmd/crowdsec-cli/notifications_table.go | 3 +- cmd/crowdsec-cli/prettytable.go | 147 ++++++++++++++++++++++++ cmd/crowdsec-cli/table/align.go | 12 ++ cmd/crowdsec-cli/tables.go | 77 ++----------- cmd/crowdsec-cli/utils_table.go | 3 +- go.mod | 4 +- go.sum | 8 +- 12 files changed, 183 insertions(+), 92 deletions(-) create mode 100644 cmd/crowdsec-cli/prettytable.go create mode 100644 cmd/crowdsec-cli/table/align.go diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go index 5fe48b49047..417eb9e8e0b 100644 --- a/cmd/crowdsec-cli/bouncers_table.go +++ b/cmd/crowdsec-cli/bouncers_table.go @@ -4,8 +4,7 @@ import ( "io" "time" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go index 8f7ebb2100c..4623f3bb62a 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/console_table.go @@ -3,8 +3,7 @@ package main import ( "io" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/hubtest_table.go index e6c5ee80abd..1b76e75263e 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/hubtest_table.go @@ -4,8 +4,7 @@ 
import ( "fmt" "io" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go index 120929ea654..18e16bbde3a 100644 --- a/cmd/crowdsec-cli/machines_table.go +++ b/cmd/crowdsec-cli/machines_table.go @@ -4,8 +4,7 @@ import ( "io" "time" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index 689929500ad..f42d5bdaf91 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -7,16 +7,17 @@ import ( "sort" "strconv" - "github.com/aquasecurity/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" ) // ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. 
var ErrNilTable = errors.New("nil table") -func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int) int { +func lapiMetricsToTable(t *Table, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count // sort keys to keep consistent order when printing machineKeys := []string{} @@ -54,7 +55,7 @@ func lapiMetricsToTable(t *table.Table, stats map[string]map[string]map[string]i return numRows } -func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { +func wlMetricsToTable(t *Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -92,7 +93,7 @@ func wlMetricsToTable(t *table.Table, stats map[string]map[string]map[string]int return numRows, nil } -func metricsToTable(t *table.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { +func metricsToTable(t *Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/notifications_table.go index 19d11cea741..b96c8ca4783 100644 --- a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/notifications_table.go @@ -5,8 +5,7 @@ import ( "sort" "strings" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/cmd/crowdsec-cli/prettytable.go b/cmd/crowdsec-cli/prettytable.go new file mode 100644 index 00000000000..0ce7e9755f9 --- /dev/null +++ b/cmd/crowdsec-cli/prettytable.go @@ -0,0 +1,147 @@ +package main + +// transisional file to keep (minimal) backwards compatibility with the old table +// we can migrate the code to the new dependency later, it can already use the Writer interface + +import ( + "fmt" + "io" + + "github.com/jedib0t/go-pretty/v6/table" + 
"github.com/jedib0t/go-pretty/v6/text" +) + +type Table struct { + Writer table.Writer + output io.Writer + align []text.Align + alignHeader []text.Align +} + +func newTable(out io.Writer) *Table { + if out == nil { + panic("newTable: out is nil") + } + + t := table.NewWriter() + + // colorize output, use unicode box characters + fancy := shouldWeColorize() + + color := table.ColorOptions{} + + if fancy { + color.Header = text.Colors{text.Italic} + color.Border = text.Colors{text.FgHiBlack} + color.Separator = text.Colors{text.FgHiBlack} + } + + // no upper/lower case transformations + format := table.FormatOptions{} + + box := table.StyleBoxDefault + if fancy { + box = table.StyleBoxRounded + } + + style := table.Style{ + Box: box, + Color: color, + Format: format, + HTML: table.DefaultHTMLOptions, + Options: table.OptionsDefault, + Title: table.TitleOptionsDefault, + } + + t.SetStyle(style) + + return &Table{ + Writer: t, + output: out, + align: make([]text.Align, 0), + alignHeader: make([]text.Align, 0), + } +} + +func newLightTable(output io.Writer) *Table { + t := newTable(output) + s := t.Writer.Style() + s.Box.Left = "" + s.Box.LeftSeparator = "" + s.Box.TopLeft = "" + s.Box.BottomLeft = "" + s.Box.Right = "" + s.Box.RightSeparator = "" + s.Box.TopRight = "" + s.Box.BottomRight = "" + s.Options.SeparateRows = false + s.Options.SeparateFooter = false + s.Options.SeparateHeader = true + s.Options.SeparateColumns = false + + return t +} + +// +// wrapper methods for backwards compatibility +// + +// setColumnConfigs must be called right before rendering, +// to allow for setting the alignment like the old API +func (t *Table) setColumnConfigs() { + configs := []table.ColumnConfig{} + // the go-pretty table does not expose the names or number of columns + for i := 0; i < len(t.align); i++ { + configs = append(configs, table.ColumnConfig{ + Number: i + 1, + AlignHeader: t.alignHeader[i], + Align: t.align[i], + WidthMax: 60, + WidthMaxEnforcer: text.WrapSoft, + }) 
+ } + t.Writer.SetColumnConfigs(configs) +} + +func (t *Table) Render() { + // change default options for backwards compatibility. + // we do this late to allow changing the alignment like the old API + t.setColumnConfigs() + fmt.Fprintln(t.output, t.Writer.Render()) +} + +func (t *Table) SetHeaders(str ...string) { + row := table.Row{} + t.align = make([]text.Align, len(str)) + t.alignHeader = make([]text.Align, len(str)) + + for i, v := range str { + row = append(row, v) + t.align[i] = text.AlignLeft + t.alignHeader[i] = text.AlignCenter + } + + t.Writer.AppendHeader(row) +} + +func (t *Table) AddRow(str ...string) { + row := table.Row{} + for _, v := range str { + row = append(row, v) + } + + t.Writer.AppendRow(row) +} + +func (t *Table) SetRowLines(rowLines bool) { + t.Writer.Style().Options.SeparateRows = rowLines +} + +func (t *Table) SetAlignment(align ...text.Align) { + // align can be shorter than t.align, it will leave the default value + copy(t.align, align) +} + +func (t *Table) SetHeaderAlignment(align ...text.Align) { + copy(t.alignHeader, align) +} diff --git a/cmd/crowdsec-cli/table/align.go b/cmd/crowdsec-cli/table/align.go new file mode 100644 index 00000000000..e0582007c57 --- /dev/null +++ b/cmd/crowdsec-cli/table/align.go @@ -0,0 +1,12 @@ +package table + +import ( + "github.com/jedib0t/go-pretty/v6/text" +) + +// temporary, backward compatibility only + +const ( + AlignLeft = text.AlignLeft + AlignRight = text.AlignRight +) diff --git a/cmd/crowdsec-cli/tables.go b/cmd/crowdsec-cli/tables.go index 2c3173d0b0b..e6dba0c2644 100644 --- a/cmd/crowdsec-cli/tables.go +++ b/cmd/crowdsec-cli/tables.go @@ -5,91 +5,28 @@ import ( "io" "os" - "github.com/aquasecurity/table" isatty "github.com/mattn/go-isatty" ) func shouldWeColorize() bool { - if csConfig.Cscli.Color == "yes" { + switch csConfig.Cscli.Color { + case "yes": return true - } - if csConfig.Cscli.Color == "no" { + case "no": return false + default: + return isatty.IsTerminal(os.Stdout.Fd()) 
|| isatty.IsCygwinTerminal(os.Stdout.Fd()) } - return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) -} - -func newTable(out io.Writer) *table.Table { - if out == nil { - panic("newTable: out is nil") - } - t := table.New(out) - if shouldWeColorize() { - t.SetLineStyle(table.StyleBrightBlack) - t.SetHeaderStyle(table.StyleItalic) - } - - if shouldWeColorize() { - t.SetDividers(table.UnicodeRoundedDividers) - } else { - t.SetDividers(table.ASCIIDividers) - } - - return t -} - -func newLightTable(out io.Writer) *table.Table { - if out == nil { - panic("newTable: out is nil") - } - t := newTable(out) - t.SetRowLines(false) - t.SetBorderLeft(false) - t.SetBorderRight(false) - // This leaves three spaces between columns: - // left padding, invisible border, right padding - // There is no way to make two spaces without - // a SetColumnLines() method, but it's close enough. - t.SetPadding(1) - - if shouldWeColorize() { - t.SetDividers(table.Dividers{ - ALL: "─", - NES: "─", - NSW: "─", - NEW: "─", - ESW: "─", - NE: "─", - NW: "─", - SW: "─", - ES: "─", - EW: "─", - NS: " ", - }) - } else { - t.SetDividers(table.Dividers{ - ALL: "-", - NES: "-", - NSW: "-", - NEW: "-", - ESW: "-", - NE: "-", - NW: "-", - SW: "-", - ES: "-", - EW: "-", - NS: " ", - }) - } - return t } func renderTableTitle(out io.Writer, title string) { if out == nil { panic("renderTableTitle: out is nil") } + if title == "" { return } + fmt.Fprintln(out, title) } diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go index 23bcff4e5c6..d7d26a65c12 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/utils_table.go @@ -5,8 +5,7 @@ import ( "io" "strconv" - "github.com/aquasecurity/table" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) diff --git a/go.mod b/go.mod index 49cfe05ad39..93f3e4cd806 100644 --- a/go.mod +++ b/go.mod @@ 
-15,7 +15,6 @@ require ( github.com/alexliesenfeld/health v0.8.0 github.com/antonmedv/expr v1.15.3 github.com/appleboy/gin-jwt/v2 v2.9.2 - github.com/aquasecurity/table v1.8.0 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 github.com/beevik/etree v1.3.0 @@ -58,6 +57,7 @@ require ( github.com/ivanpirog/coloredcobra v1.0.1 github.com/jackc/pgx/v4 v4.18.2 github.com/jarcoal/httpmock v1.1.0 + github.com/jedib0t/go-pretty/v6 v6.5.9 github.com/jszwec/csvutil v1.5.1 github.com/lithammer/dedent v1.1.0 github.com/mattn/go-isatty v0.0.20 @@ -155,7 +155,7 @@ require ( github.com/magefile/mage v1.15.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-runewidth v0.0.13 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mitchellh/copystructure v1.2.0 // indirect diff --git a/go.sum b/go.sum index 05b38f68384..310cbd85ab7 100644 --- a/go.sum +++ b/go.sum @@ -47,8 +47,6 @@ github.com/appleboy/gin-jwt/v2 v2.9.2 h1:GeS3lm9mb9HMmj7+GNjYUtpp3V1DAQ1TkUFa5po github.com/appleboy/gin-jwt/v2 v2.9.2/go.mod h1:mxGjKt9Lrx9Xusy1SrnmsCJMZG6UJwmdHN9bN27/QDw= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= -github.com/aquasecurity/table v1.8.0 h1:9ntpSwrUfjrM6/YviArlx/ZBGd6ix8W+MtojQcM7tv0= -github.com/aquasecurity/table v1.8.0/go.mod h1:eqOmvjjB7AhXFgFqpJUEE/ietg7RrMSJZXyTN8E/wZw= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod 
h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= @@ -406,6 +404,8 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v1.1.0 h1:F47ChZj1Y2zFsCXxNkBPwNNKnAyOATcdQibk0qEdVCE= github.com/jarcoal/httpmock v1.1.0/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jedib0t/go-pretty/v6 v6.5.9 h1:ACteMBRrrmm1gMsXe9PSTOClQ63IXDUt03H5U+UV8OU= +github.com/jedib0t/go-pretty/v6 v6.5.9/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -491,8 +491,8 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= -github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= From 45b403ecd7ff754b8ad45ba904b4e90ab3f941af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: 
Fri, 24 May 2024 14:37:46 +0200 Subject: [PATCH 140/581] fix nil deref w/ malformed cti config (#3028) --- cmd/crowdsec/serve.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 9da3d80106a..a27622a641a 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -334,7 +334,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { log.Warningln("Exprhelpers loaded without database client.") } - if cConfig.API.CTI != nil && *cConfig.API.CTI.Enabled { + if cConfig.API.CTI != nil && cConfig.API.CTI.Enabled != nil && *cConfig.API.CTI.Enabled { log.Infof("Crowdsec CTI helper enabled") if err := exprhelpers.InitCrowdsecCTI(cConfig.API.CTI.Key, cConfig.API.CTI.CacheTimeout, cConfig.API.CTI.CacheSize, cConfig.API.CTI.LogLevel); err != nil { From 09afcbe93a5a36ce25553f7ebc8c42cee0b98098 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 24 May 2024 14:42:17 +0200 Subject: [PATCH 141/581] appsec: respect on_success parameter in hooks (#3017) --- .../modules/appsec/appsec_hooks_test.go | 198 +++++++++++++++++- pkg/appsec/appsec.go | 34 ++- 2 files changed, 221 insertions(+), 11 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index 3cb2fcfde29..65fba33ae81 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -274,6 +274,64 @@ func TestAppsecOnMatchHooks(t *testing.T) { require.Equal(t, appsec.BanRemediation, responses[0].Action) }, }, + { + name: "on_match: on_success break", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"CancelEvent()"}, OnSuccess: "break"}, + 
{Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.APPSEC, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.BanRemediation, responses[0].Action) + }, + }, + { + name: "on_match: on_success continue", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"CancelEvent()"}, OnSuccess: "continue"}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.APPSEC, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -286,7 +344,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { tests := []appsecRuleTest{ { - name: "Basic on_load hook to disable inband rule", + name: "Basic pre_eval hook to disable inband rule", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -314,7 +372,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "Basic on_load fails to disable rule", + name: "Basic pre_eval 
fails to disable rule", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -349,7 +407,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : disable inband by tag", + name: "pre_eval : disable inband by tag", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -377,7 +435,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : disable inband by ID", + name: "pre_eval : disable inband by ID", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -405,7 +463,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : disable inband by name", + name: "pre_eval : disable inband by name", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -433,7 +491,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : outofband default behavior", + name: "pre_eval : outofband default behavior", expected_load_ok: true, outofband_rules: []appsec_rule.CustomRule{ { @@ -464,7 +522,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : set remediation by tag", + name: "pre_eval : set remediation by tag", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -491,7 +549,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : set remediation by name", + name: "pre_eval : set remediation by name", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -518,7 +576,7 @@ func TestAppsecPreEvalHooks(t *testing.T) { }, }, { - name: "on_load : set remediation by ID", + name: "pre_eval : set remediation by ID", expected_load_ok: true, inband_rules: []appsec_rule.CustomRule{ { @@ -546,6 +604,62 @@ func TestAppsecPreEvalHooks(t *testing.T) { require.Equal(t, http.StatusForbidden, appsecResponse.HTTPStatus) }, }, + { + name: "pre_eval : on_success continue", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: 
[]string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "1==1", Apply: []string{"SetRemediationByName('rulez', 'foobar')"}, OnSuccess: "continue"}, + {Filter: "1==1", Apply: []string{"SetRemediationByName('rulez', 'foobar2')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar2", responses[0].Action) + }, + }, + { + name: "pre_eval : on_success break", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rulez", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + pre_eval: []appsec.Hook{ + {Filter: "1==1", Apply: []string{"SetRemediationByName('rulez', 'foobar')"}, OnSuccess: "break"}, + {Filter: "1==1", Apply: []string{"SetRemediationByName('rulez', 'foobar2')"}}, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Len(t, responses, 1) + require.Equal(t, "foobar", responses[0].Action) + }, + }, } for _, test := range tests { @@ -705,6 +819,72 @@ func TestOnMatchRemediationHooks(t *testing.T) { require.Equal(t, http.StatusForbidden, statusCode) }, }, + { + name: "on_match: on_success break", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: 
[]string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}, OnSuccess: "break"}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.CaptchaRemediation, appsecResponse.Action) + require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusForbidden, statusCode) + }, + }, + { + name: "on_match: on_success continue", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", "SetReturnCode(418)"}, OnSuccess: "continue"}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.BanRemediation, appsecResponse.Action) 
+ require.Equal(t, http.StatusTeapot, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusForbidden, statusCode) + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 554fc3b7123..2c971fb36c5 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -259,6 +259,9 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { //load hooks for _, hook := range wc.OnLoad { + if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { + return nil, fmt.Errorf("invalid 'on_success' for on_load hook : %s", hook.OnSuccess) + } err := hook.Build(hookOnLoad) if err != nil { return nil, fmt.Errorf("unable to build on_load hook : %s", err) @@ -267,6 +270,9 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { } for _, hook := range wc.PreEval { + if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { + return nil, fmt.Errorf("invalid 'on_success' for pre_eval hook : %s", hook.OnSuccess) + } err := hook.Build(hookPreEval) if err != nil { return nil, fmt.Errorf("unable to build pre_eval hook : %s", err) @@ -275,6 +281,9 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { } for _, hook := range wc.PostEval { + if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { + return nil, fmt.Errorf("invalid 'on_success' for post_eval hook : %s", hook.OnSuccess) + } err := hook.Build(hookPostEval) if err != nil { return nil, fmt.Errorf("unable to build post_eval hook : %s", err) @@ -283,6 +292,9 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { } for _, hook := range wc.OnMatch { + if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { + return nil, fmt.Errorf("invalid 'on_success' for on_match hook : %s", hook.OnSuccess) + } err := hook.Build(hookOnMatch) if err != nil { return nil, fmt.Errorf("unable to build on_match 
hook : %s", err) @@ -302,6 +314,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { } func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { + has_match := false for _, rule := range w.CompiledOnLoad { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetOnLoadEnv(w), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -318,6 +331,7 @@ func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetOnLoadEnv(w), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -332,12 +346,15 @@ func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { default: } } + if has_match && rule.OnSuccess == "break" { + break + } } return nil } func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt types.Event) error { - + has_match := false for _, rule := range w.CompiledOnMatch { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetOnMatchEnv(w, request, evt), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -354,6 +371,7 @@ func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt ty w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetOnMatchEnv(w, request, evt), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -368,12 +386,15 @@ func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt ty default: } } + if has_match && rule.OnSuccess == "break" { + break + } } return nil } func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error { - w.Logger.Debugf("processing %d pre_eval rules", len(w.CompiledPreEval)) + has_match := false for _, rule := range w.CompiledPreEval { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, 
GetPreEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -390,6 +411,7 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } // here means there is no filter or the filter matched for _, applyExpr := range rule.ApplyExpr { @@ -405,12 +427,16 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error default: } } + if has_match && rule.OnSuccess == "break" { + break + } } return nil } func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error { + has_match := false for _, rule := range w.CompiledPostEval { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetPostEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) @@ -427,6 +453,7 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } // here means there is no filter or the filter matched for _, applyExpr := range rule.ApplyExpr { @@ -444,6 +471,9 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error default: } } + if has_match && rule.OnSuccess == "break" { + break + } } return nil From f06e3e78ab48d48dc910d83bbbcd966ff19e5e77 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 24 May 2024 14:43:12 +0200 Subject: [PATCH 142/581] jwt transport: fix retry on unauthorized from CAPI(#3006) --- pkg/apiclient/auth_jwt.go | 103 +++++++++++++++++++--------------- pkg/apiclient/client.go | 7 +++ pkg/apiclient/retry_config.go | 33 +++++++++++ 3 files changed, 98 insertions(+), 45 deletions(-) create mode 100644 pkg/apiclient/retry_config.go diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 6ee17fa5e72..b202e382842 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -26,6 +26,7 @@ type JWTTransport struct { URL 
*url.URL VersionPrefix string UserAgent string + RetryConfig *RetryConfig // Transport is the underlying HTTP transport to use when making requests. // It will default to http.DefaultTransport if nil. Transport http.RoundTripper @@ -165,36 +166,67 @@ func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) // RoundTrip implements the RoundTripper interface. func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { - req, err := t.prepareRequest(req) - if err != nil { - return nil, err - } - if log.GetLevel() >= log.TraceLevel { - // requestToDump := cloneRequest(req) - dump, _ := httputil.DumpRequest(req, true) - log.Tracef("req-jwt: %s", string(dump)) - } + var resp *http.Response + attemptsCount := make(map[int]int) - // Make the HTTP request. - resp, err := t.transport().RoundTrip(req) - if log.GetLevel() >= log.TraceLevel { - dump, _ := httputil.DumpResponse(resp, true) - log.Tracef("resp-jwt: %s (err:%v)", string(dump), err) - } + for { + if log.GetLevel() >= log.TraceLevel { + // requestToDump := cloneRequest(req) + dump, _ := httputil.DumpRequest(req, true) + log.Tracef("req-jwt: %s", string(dump)) + } + // Make the HTTP request. + clonedReq := cloneRequest(req) - if err != nil { - // we had an error (network error for example, or 401 because token is refused), reset the token? - t.ResetToken() + clonedReq, err := t.prepareRequest(clonedReq) + if err != nil { + return nil, err + } - return resp, fmt.Errorf("performing jwt auth: %w", err) - } + resp, err = t.transport().RoundTrip(clonedReq) + if log.GetLevel() >= log.TraceLevel { + dump, _ := httputil.DumpResponse(resp, true) + log.Tracef("resp-jwt: %s (err:%v)", string(dump), err) + } - if resp != nil { - log.Debugf("resp-jwt: %d", resp.StatusCode) - } + if err != nil { + // we had an error (network error for example), reset the token? 
+ t.ResetToken() + return resp, fmt.Errorf("performing jwt auth: %w", err) + } + + if resp != nil { + log.Debugf("resp-jwt: %d", resp.StatusCode) + } + config, shouldRetry := t.RetryConfig.StatusCodeConfig[resp.StatusCode] + if !shouldRetry { + break + } + + if attemptsCount[resp.StatusCode] >= config.MaxAttempts { + log.Infof("max attempts reached for status code %d", resp.StatusCode) + break + } + + if config.InvalidateToken { + log.Debugf("invalidating token for status code %d", resp.StatusCode) + t.ResetToken() + } + + log.Debugf("retrying request to %s", req.URL.String()) + attemptsCount[resp.StatusCode]++ + log.Infof("attempt %d out of %d", attemptsCount[resp.StatusCode], config.MaxAttempts) + + if config.Backoff { + backoff := 2*attemptsCount[resp.StatusCode] + 5 + log.Infof("retrying in %d seconds (attempt %d of %d)", backoff, attemptsCount[resp.StatusCode], config.MaxAttempts) + time.Sleep(time.Duration(backoff) * time.Second) + } + } return resp, nil + } func (t *JWTTransport) Client() *http.Client { @@ -211,27 +243,8 @@ func (t *JWTTransport) ResetToken() { // transport() returns a round tripper that retries once when the status is unauthorized, // and 5 times when the infrastructure is overloaded. 
func (t *JWTTransport) transport() http.RoundTripper { - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - return &retryRoundTripper{ - next: &retryRoundTripper{ - next: transport, - maxAttempts: 5, - withBackOff: true, - retryStatusCodes: []int{http.StatusTooManyRequests, http.StatusServiceUnavailable, http.StatusGatewayTimeout}, - }, - maxAttempts: 2, - withBackOff: false, - retryStatusCodes: []int{http.StatusUnauthorized, http.StatusForbidden}, - onBeforeRequest: func(attempt int) { - // reset the token only in the second attempt as this is when we know we had a 401 or 403 - // the second attempt is supposed to refresh the token - if attempt > 0 { - t.ResetToken() - } - }, + if t.Transport != nil { + return t.Transport } + return http.DefaultTransport } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index e0e521d6a6f..b702829efd3 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -72,6 +72,13 @@ func NewClient(config *Config) (*ApiClient, error) { UserAgent: config.UserAgent, VersionPrefix: config.VersionPrefix, UpdateScenario: config.UpdateScenario, + RetryConfig: NewRetryConfig( + WithStatusCodeConfig(http.StatusUnauthorized, 2, false, true), + WithStatusCodeConfig(http.StatusForbidden, 2, false, true), + WithStatusCodeConfig(http.StatusTooManyRequests, 5, true, false), + WithStatusCodeConfig(http.StatusServiceUnavailable, 5, true, false), + WithStatusCodeConfig(http.StatusGatewayTimeout, 5, true, false), + ), } transport, baseURL := createTransport(config.URL) diff --git a/pkg/apiclient/retry_config.go b/pkg/apiclient/retry_config.go new file mode 100644 index 00000000000..8a0d1096f84 --- /dev/null +++ b/pkg/apiclient/retry_config.go @@ -0,0 +1,33 @@ +package apiclient + +type StatusCodeConfig struct { + MaxAttempts int + Backoff bool + InvalidateToken bool +} + +type RetryConfig struct { + StatusCodeConfig map[int]StatusCodeConfig +} + +type RetryConfigOption func(*RetryConfig) + 
+func NewRetryConfig(options ...RetryConfigOption) *RetryConfig { + rc := &RetryConfig{ + StatusCodeConfig: make(map[int]StatusCodeConfig), + } + for _, opt := range options { + opt(rc) + } + return rc +} + +func WithStatusCodeConfig(statusCode int, maxAttempts int, backOff bool, invalidateToken bool) RetryConfigOption { + return func(rc *RetryConfig) { + rc.StatusCodeConfig[statusCode] = StatusCodeConfig{ + MaxAttempts: maxAttempts, + Backoff: backOff, + InvalidateToken: invalidateToken, + } + } +} From 9088f31b7d046be2f5a26d64e21314400f5b28b7 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 24 May 2024 14:27:25 +0100 Subject: [PATCH 143/581] enhance: container discovery via labels (#2959) * wip: attempt to autodiscover via labels * wip: remove labels dep on docker acquistion * wip: remove labels dep on docker acquistion * wip: add debug * wip: try fix parser maps * wip: remove redundant pointer * wip: add debug * wip: cant type assert * wip: reinstate debug * wip: reinstate debug * wip: reinstate debug * wip: oops * wip: add a debug * wip: fix labels * wip: remove redundant paramter * wip: rename config option to be more self declarative * wip: update log wording * wip: the if check was not correct * wip: me lost * fix: add checks to typecast and log useful information * add tests for parseLabels * return nil instead of pointer to empty struct * simplify EvalContainer return value --------- Co-authored-by: Sebastien Blot --- pkg/acquisition/acquisition.go | 5 +- pkg/acquisition/modules/docker/docker.go | 75 ++++++++++++++++--- pkg/acquisition/modules/docker/docker_test.go | 52 +++++++++++++ pkg/acquisition/modules/docker/utils.go | 38 ++++++++++ 4 files changed, 158 insertions(+), 12 deletions(-) create mode 100644 pkg/acquisition/modules/docker/utils.go diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 677bf664e31..ab7d954cac1 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -235,7 
+235,10 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig log.Debugf("skipping empty item in %s", acquisFile) continue } - return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) + if sub.Source != "docker" { + //docker is the only source that can be empty + return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) + } } if sub.Source == "" { return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx) diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 9f1febf2cb7..3a694b99d76 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -41,7 +41,7 @@ type DockerConfiguration struct { ContainerID []string `yaml:"container_id"` ContainerNameRegexp []string `yaml:"container_name_regexp"` ContainerIDRegexp []string `yaml:"container_id_regexp"` - ForceInotify bool `yaml:"force_inotify"` + UseContainerLabels bool `yaml:"use_container_labels"` configuration.DataSourceCommonCfg `yaml:",inline"` } @@ -87,10 +87,14 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { d.logger.Tracef("DockerAcquisition configuration: %+v", d.Config) } - if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 { + if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 && !d.Config.UseContainerLabels { return fmt.Errorf("no containers names or containers ID configuration provided") } + if d.Config.UseContainerLabels && (len(d.Config.ContainerName) > 0 || len(d.Config.ContainerID) > 0 || len(d.Config.ContainerIDRegexp) > 0 || len(d.Config.ContainerNameRegexp) > 0) { + return fmt.Errorf("use_container_labels and container_name, container_id, container_id_regexp, 
container_name_regexp are mutually exclusive") + } + d.CheckIntervalDuration, err = time.ParseDuration(d.Config.CheckInterval) if err != nil { return fmt.Errorf("parsing 'check_interval' parameters: %s", d.CheckIntervalDuration) @@ -293,7 +297,7 @@ func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er d.logger.Debugf("container with id %s is already being read from", container.ID) continue } - if containerConfig, ok := d.EvalContainer(container); ok { + if containerConfig := d.EvalContainer(container); containerConfig != nil { d.logger.Infof("reading logs from container %s", containerConfig.Name) d.logger.Debugf("logs options: %+v", *d.containerLogsOptions) dockerReader, err := d.Client.ContainerLogs(context.Background(), containerConfig.ID, *d.containerLogsOptions) @@ -375,10 +379,18 @@ func (d *DockerSource) getContainerTTY(containerId string) bool { return containerDetails.Config.Tty } -func (d *DockerSource) EvalContainer(container dockerTypes.Container) (*ContainerConfig, bool) { +func (d *DockerSource) getContainerLabels(containerId string) map[string]interface{} { + containerDetails, err := d.Client.ContainerInspect(context.Background(), containerId) + if err != nil { + return map[string]interface{}{} + } + return parseLabels(containerDetails.Config.Labels) +} + +func (d *DockerSource) EvalContainer(container dockerTypes.Container) *ContainerConfig { for _, containerID := range d.Config.ContainerID { if containerID == container.ID { - return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } @@ -388,7 +400,7 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) (*Containe name = name[1:] } if name == containerName { - return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: 
d.getContainerTTY(container.ID)}, true + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } @@ -396,20 +408,61 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) (*Containe for _, cont := range d.compiledContainerID { if matched := cont.MatchString(container.ID); matched { - return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } for _, cont := range d.compiledContainerName { for _, name := range container.Names { if matched := cont.MatchString(name); matched { - return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)}, true + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } } - return &ContainerConfig{}, false + if d.Config.UseContainerLabels { + parsedLabels := d.getContainerLabels(container.ID) + if len(parsedLabels) == 0 { + d.logger.Tracef("container has no 'crowdsec' labels set, ignoring container: %s", container.ID) + return nil + } + if _, ok := parsedLabels["enable"]; !ok { + d.logger.Errorf("container has 'crowdsec' labels set but no 'crowdsec.enable' key found") + return nil + } + enable, ok := parsedLabels["enable"].(string) + if !ok { + d.logger.Error("container has 'crowdsec.enable' label set but it's not a string") + return nil + } + if strings.ToLower(enable) != "true" { + d.logger.Debugf("container has 'crowdsec.enable' label not set to true ignoring container: %s", container.ID) + return nil + } + if _, ok = parsedLabels["labels"]; !ok { + d.logger.Error("container has 'crowdsec.enable' label set to true but no 'labels' keys found") + return nil + } + labelsTypeCast, ok := 
parsedLabels["labels"].(map[string]interface{}) + if !ok { + d.logger.Error("container has 'crowdsec.enable' label set to true but 'labels' is not a map") + return nil + } + d.logger.Debugf("container labels %+v", labelsTypeCast) + labels := make(map[string]string) + for k, v := range labelsTypeCast { + if v, ok := v.(string); ok { + log.Debugf("label %s is a string with value %s", k, v) + labels[k] = v + continue + } + d.logger.Errorf("label %s is not a string", k) + } + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(container.ID)} + } + + return nil } func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { @@ -449,7 +502,7 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha if _, ok := d.runningContainerState[container.ID]; ok { continue } - if containerConfig, ok := d.EvalContainer(container); ok { + if containerConfig := d.EvalContainer(container); containerConfig != nil { monitChan <- containerConfig } } @@ -522,7 +575,7 @@ func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types } l := types.Line{} l.Raw = line - l.Labels = d.Config.Labels + l.Labels = container.Labels l.Time = time.Now().UTC() l.Src = container.Name l.Process = true diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index 6c010f895d3..c2624229afd 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -341,3 +341,55 @@ func TestOneShot(t *testing.T) { } } } + +func TestParseLabels(t *testing.T) { + tests := []struct { + name string + labels map[string]string + expected map[string]interface{} + }{ + { + name: "bad label", + labels: map[string]string{"crowdsecfoo": "bar"}, + expected: map[string]interface{}{}, + }, + { + name: "simple label", + labels: map[string]string{"crowdsec.bar": "baz"}, + expected: 
map[string]interface{}{"bar": "baz"}, + }, + { + name: "multiple simple labels", + labels: map[string]string{"crowdsec.bar": "baz", "crowdsec.foo": "bar"}, + expected: map[string]interface{}{"bar": "baz", "foo": "bar"}, + }, + { + name: "multiple simple labels 2", + labels: map[string]string{"crowdsec.bar": "baz", "bla": "foo"}, + expected: map[string]interface{}{"bar": "baz"}, + }, + { + name: "end with dot", + labels: map[string]string{"crowdsec.bar.": "baz"}, + expected: map[string]interface{}{}, + }, + { + name: "consecutive dots", + labels: map[string]string{"crowdsec......bar": "baz"}, + expected: map[string]interface{}{}, + }, + { + name: "crowdsec labels", + labels: map[string]string{"crowdsec.labels.type": "nginx"}, + expected: map[string]interface{}{"labels": map[string]interface{}{"type": "nginx"}}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + labels := parseLabels(test.labels) + assert.Equal(t, test.expected, labels) + }) + } + +} diff --git a/pkg/acquisition/modules/docker/utils.go b/pkg/acquisition/modules/docker/utils.go new file mode 100644 index 00000000000..5439163e5b9 --- /dev/null +++ b/pkg/acquisition/modules/docker/utils.go @@ -0,0 +1,38 @@ +package dockeracquisition + +import ( + "strings" +) + +func parseLabels(labels map[string]string) map[string]interface{} { + result := make(map[string]interface{}) + for key, value := range labels { + parseKeyToMap(result, key, value) + } + return result +} + +func parseKeyToMap(m map[string]interface{}, key string, value string) { + if !strings.HasPrefix(key, "crowdsec") { + return + } + parts := strings.Split(key, ".") + + if len(parts) < 2 || parts[0] != "crowdsec" { + return + } + + for i := 0; i < len(parts); i++ { + if parts[i] == "" { + return + } + } + + for i := 1; i < len(parts)-1; i++ { + if _, ok := m[parts[i]]; !ok { + m[parts[i]] = make(map[string]interface{}) + } + m = m[parts[i]].(map[string]interface{}) + } + m[parts[len(parts)-1]] = value +} From 
f3341c13823ce7ab0c6b9123f4e9ed3d4af0d226 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 27 May 2024 10:15:38 +0200 Subject: [PATCH 144/581] Appsec: properly populate event (#2943) --- cmd/crowdsec-cli/alerts.go | 1 + cmd/crowdsec/crowdsec.go | 8 ++ cmd/crowdsec/serve.go | 3 + pkg/acquisition/modules/appsec/utils.go | 161 +++++++++++++++++++----- pkg/alertcontext/alertcontext.go | 16 +-- pkg/exprhelpers/expr_lib.go | 23 ++++ pkg/exprhelpers/geoip.go | 63 ++++++++++ pkg/exprhelpers/helpers.go | 42 +++++++ pkg/parser/enrich.go | 21 +--- pkg/parser/enrich_date.go | 6 +- pkg/parser/enrich_date_test.go | 2 +- pkg/parser/enrich_dns.go | 6 +- pkg/parser/enrich_geoip.go | 98 +++++---------- pkg/parser/enrich_unmarshal.go | 6 +- pkg/parser/node.go | 7 +- pkg/parser/node_test.go | 2 +- pkg/parser/parsing_test.go | 6 +- pkg/parser/runtime.go | 2 +- pkg/parser/unix_parser.go | 2 +- 19 files changed, 333 insertions(+), 142 deletions(-) create mode 100644 pkg/exprhelpers/geoip.go diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index d31c99e1b12..4cc4a992c43 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -493,6 +493,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { switch cfg.Cscli.Output { case "human": if err := cli.displayOneAlert(alert, details); err != nil { + log.Warnf("unable to display alert with id %s: %s", alertID, err) continue } case "json": diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 8f07d165f6b..d226e3a5796 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -19,6 +19,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" "github.com/crowdsecurity/crowdsec/pkg/parser" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -32,6 +33,13 @@ func 
initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, [] return nil, nil, fmt.Errorf("while loading context: %w", err) } + err = exprhelpers.GeoIPInit(hub.GetDataDir()) + + if err != nil { + //GeoIP databases are not mandatory, do not make crowdsec fail if they are not present + log.Warnf("unable to initialize GeoIP: %s", err) + } + // Start loading configs csParsers := parser.NewParsers(hub) if csParsers, err = parser.LoadParsers(cConfig, csParsers); err != nil { diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index a27622a641a..497215d74a1 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -177,6 +177,9 @@ func ShutdownCrowdsecRoutines() error { // He's dead, Jim. crowdsecTomb.Kill(nil) + // close the potential geoips reader we have to avoid leaking ressources on reload + exprhelpers.GeoIPClose() + return reterr } diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 7600617965a..02ded2a2437 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -1,20 +1,46 @@ package appsecacquisition import ( - "encoding/json" "fmt" + "net" + "slices" + "strconv" "time" "github.com/crowdsecurity/coraza/v3/collection" "github.com/crowdsecurity/coraza/v3/types/variables" + "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/oschwald/geoip2-golang" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" ) +var appsecMetaKeys = []string{ + "id", + "name", + "method", + "uri", + "matched_zones", + "msg", +} + +func appendMeta(meta models.Meta, key string, value string) models.Meta { + if value == "" { + return meta + } + + meta = append(meta, &models.MetaItems0{ + Key: key, + 
Value: value, + }) + return meta +} + func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { //if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI if !inEvt.Appsec.HasInBandMatches { @@ -23,48 +49,127 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { evt := types.Event{} evt.Type = types.APPSEC evt.Process = true + sourceIP := inEvt.Parsed["source_ip"] source := models.Source{ - Value: ptr.Of(inEvt.Parsed["source_ip"]), - IP: inEvt.Parsed["source_ip"], + Value: &sourceIP, + IP: sourceIP, Scope: ptr.Of(types.Ip), } + asndata, err := exprhelpers.GeoIPASNEnrich(sourceIP) + + if err != nil { + log.Errorf("Unable to enrich ip '%s' for ASN: %s", sourceIP, err) + } else if asndata != nil { + record := asndata.(*geoip2.ASN) + source.AsName = record.AutonomousSystemOrganization + source.AsNumber = fmt.Sprintf("%d", record.AutonomousSystemNumber) + } + + cityData, err := exprhelpers.GeoIPEnrich(sourceIP) + if err != nil { + log.Errorf("Unable to enrich ip '%s' for geo data: %s", sourceIP, err) + } else if cityData != nil { + record := cityData.(*geoip2.City) + source.Cn = record.Country.IsoCode + source.Latitude = float32(record.Location.Latitude) + source.Longitude = float32(record.Location.Longitude) + } + + rangeData, err := exprhelpers.GeoIPRangeEnrich(sourceIP) + if err != nil { + log.Errorf("Unable to enrich ip '%s' for range: %s", sourceIP, err) + } else if rangeData != nil { + record := rangeData.(*net.IPNet) + source.Range = record.String() + } + evt.Overflow.Sources = make(map[string]models.Source) - evt.Overflow.Sources["ip"] = source + evt.Overflow.Sources[sourceIP] = source alert := models.Alert{} alert.Capacity = ptr.Of(int32(1)) - alert.Events = make([]*models.Event, 0) - alert.Meta = make(models.Meta, 0) - for _, key := range []string{"target_uri", "method"} { + alert.Events = make([]*models.Event, len(evt.Appsec.GetRuleIDs())) - valueByte, err := 
json.Marshal([]string{inEvt.Parsed[key]}) - if err != nil { - log.Debugf("unable to serialize key %s", key) + now := ptr.Of(time.Now().UTC().Format(time.RFC3339)) + + tmpAppsecContext := make(map[string][]string) + + for _, matched_rule := range inEvt.Appsec.MatchedRules { + evtRule := models.Event{} + + evtRule.Timestamp = now + + evtRule.Meta = make(models.Meta, 0) + + for _, key := range appsecMetaKeys { + + if tmpAppsecContext[key] == nil { + tmpAppsecContext[key] = make([]string, 0) + } + + switch value := matched_rule[key].(type) { + case string: + evtRule.Meta = appendMeta(evtRule.Meta, key, value) + if value != "" && !slices.Contains(tmpAppsecContext[key], value) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], value) + } + case int: + val := strconv.Itoa(value) + evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], val) + } + case []string: + for _, v := range value { + evtRule.Meta = appendMeta(evtRule.Meta, key, v) + if v != "" && !slices.Contains(tmpAppsecContext[key], v) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], v) + } + } + case []int: + for _, v := range value { + val := strconv.Itoa(v) + evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], val) + } + + } + default: + val := fmt.Sprintf("%v", value) + evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { + tmpAppsecContext[key] = append(tmpAppsecContext[key], val) + } + + } + } + alert.Events = append(alert.Events, &evtRule) + } + + metas := make([]*models.MetaItems0, 0) + + for key, values := range tmpAppsecContext { + if len(values) == 0 { continue } + valueStr, err := alertcontext.TruncateContext(values, alertcontext.MaxContextValueLen) + if err != nil { + log.Warningf(err.Error()) 
+ } + meta := models.MetaItems0{ Key: key, - Value: string(valueByte), - } - alert.Meta = append(alert.Meta, &meta) - } - matchedZones := inEvt.Appsec.GetMatchedZones() - if matchedZones != nil { - valueByte, err := json.Marshal(matchedZones) - if err != nil { - log.Debugf("unable to serialize key matched_zones") - } else { - meta := models.MetaItems0{ - Key: "matched_zones", - Value: string(valueByte), - } - alert.Meta = append(alert.Meta, &meta) + Value: valueStr, } + metas = append(metas, &meta) } - alert.EventsCount = ptr.Of(int32(1)) + alert.Meta = metas + + alert.EventsCount = ptr.Of(int32(len(alert.Events))) alert.Leakspeed = ptr.Of("") alert.Scenario = ptr.Of(inEvt.Appsec.MatchedRules.GetName()) alert.ScenarioHash = ptr.Of(inEvt.Appsec.MatchedRules.GetHash()) @@ -200,7 +305,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR }) for _, rule := range req.Tx.MatchedRules() { - if rule.Message() == "" || rule.DisruptiveAction() == "pass" || rule.DisruptiveAction() == "allow" { + if rule.Message() == "" { r.logger.Tracef("discarding rule %d (action: %s)", rule.Rule().ID(), rule.DisruptiveAction()) continue } @@ -242,7 +347,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR corazaRule := map[string]interface{}{ "id": rule.Rule().ID(), - "uri": evt.Parsed["uri"], + "uri": evt.Parsed["target_uri"], "rule_type": kind, "method": evt.Parsed["method"], "disruptive": rule.Disruptive(), diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 8b0648ca0eb..9946d694363 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -16,7 +16,7 @@ import ( ) const ( - maxContextValueLen = 4000 + MaxContextValueLen = 4000 ) var alertContext = Context{} @@ -46,13 +46,13 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { } if valueLength == 0 { - clog.Debugf("No console context value length provided, using default: %d", 
maxContextValueLen) - valueLength = maxContextValueLen + clog.Debugf("No console context value length provided, using default: %d", MaxContextValueLen) + valueLength = MaxContextValueLen } - if valueLength > maxContextValueLen { - clog.Debugf("Provided console context value length (%d) is higher than the maximum, using default: %d", valueLength, maxContextValueLen) - valueLength = maxContextValueLen + if valueLength > MaxContextValueLen { + clog.Debugf("Provided console context value length (%d) is higher than the maximum, using default: %d", valueLength, MaxContextValueLen) + valueLength = MaxContextValueLen } alertContext = Context{ @@ -85,7 +85,7 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { return nil } -func truncate(values []string, contextValueLen int) (string, error) { +func TruncateContext(values []string, contextValueLen int) (string, error) { valueByte, err := json.Marshal(values) if err != nil { return "", fmt.Errorf("unable to dump metas: %w", err) @@ -159,7 +159,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { continue } - valueStr, err := truncate(values, alertContext.ContextValueLen) + valueStr, err := TruncateContext(values, alertContext.ContextValueLen) if err != nil { log.Warningf(err.Error()) } diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index 5041b234db1..19b25e25895 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -1,9 +1,11 @@ package exprhelpers import ( + "net" "time" "github.com/crowdsecurity/crowdsec/pkg/cticlient" + "github.com/oschwald/geoip2-golang" ) type exprCustomFunc struct { @@ -469,6 +471,27 @@ var exprFuncs = []exprCustomFunc{ new(func(string) bool), }, }, + { + name: "GeoIPEnrich", + function: GeoIPEnrich, + signature: []interface{}{ + new(func(string) *geoip2.City), + }, + }, + { + name: "GeoIPASNEnrich", + function: GeoIPASNEnrich, + signature: []interface{}{ + new(func(string) *geoip2.ASN), + }, + }, + { + 
name: "GeoIPRangeEnrich", + function: GeoIPRangeEnrich, + signature: []interface{}{ + new(func(string) *net.IPNet), + }, + }, } //go 1.20 "CutPrefix": strings.CutPrefix, diff --git a/pkg/exprhelpers/geoip.go b/pkg/exprhelpers/geoip.go new file mode 100644 index 00000000000..fb0c344d884 --- /dev/null +++ b/pkg/exprhelpers/geoip.go @@ -0,0 +1,63 @@ +package exprhelpers + +import ( + "net" +) + +func GeoIPEnrich(params ...any) (any, error) { + if geoIPCityReader == nil { + return nil, nil + } + + ip := params[0].(string) + + parsedIP := net.ParseIP(ip) + + city, err := geoIPCityReader.City(parsedIP) + + if err != nil { + return nil, err + } + + return city, nil +} + +func GeoIPASNEnrich(params ...any) (any, error) { + if geoIPASNReader == nil { + return nil, nil + } + + ip := params[0].(string) + + parsedIP := net.ParseIP(ip) + asn, err := geoIPASNReader.ASN(parsedIP) + + if err != nil { + return nil, err + } + + return asn, nil +} + +func GeoIPRangeEnrich(params ...any) (any, error) { + if geoIPRangeReader == nil { + return nil, nil + } + + ip := params[0].(string) + + var dummy interface{} + + parsedIP := net.ParseIP(ip) + rangeIP, ok, err := geoIPRangeReader.LookupNetwork(parsedIP, &dummy) + + if err != nil { + return nil, err + } + + if !ok { + return nil, nil + } + + return rangeIP, nil +} diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index e4e38e48474..575425ef83e 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -20,6 +20,8 @@ import ( "github.com/c-robinson/iplib" "github.com/cespare/xxhash/v2" "github.com/davecgh/go-spew/spew" + "github.com/oschwald/geoip2-golang" + "github.com/oschwald/maxminddb-golang" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "github.com/umahmood/haversine" @@ -55,6 +57,10 @@ var exprFunctionOptions []expr.Option var keyValuePattern = regexp.MustCompile(`(?P[^=\s]+)=(?:"(?P[^"\\]*(?:\\.[^"\\]*)*)"|(?P[^=\s]+)|\s*)`) +var geoIPCityReader *geoip2.Reader 
+var geoIPASNReader *geoip2.Reader +var geoIPRangeReader *maxminddb.Reader + func GetExprOptions(ctx map[string]interface{}) []expr.Option { if len(exprFunctionOptions) == 0 { exprFunctionOptions = []expr.Option{} @@ -72,6 +78,42 @@ func GetExprOptions(ctx map[string]interface{}) []expr.Option { return ret } +func GeoIPInit(datadir string) error { + var err error + + geoIPCityReader, err = geoip2.Open(filepath.Join(datadir, "GeoLite2-City.mmdb")) + if err != nil { + log.Errorf("unable to open GeoLite2-City.mmdb : %s", err) + return err + } + + geoIPASNReader, err = geoip2.Open(filepath.Join(datadir, "GeoLite2-ASN.mmdb")) + if err != nil { + log.Errorf("unable to open GeoLite2-ASN.mmdb : %s", err) + return err + } + + geoIPRangeReader, err = maxminddb.Open(filepath.Join(datadir, "GeoLite2-ASN.mmdb")) + if err != nil { + log.Errorf("unable to open GeoLite2-ASN.mmdb : %s", err) + return err + } + + return nil +} + +func GeoIPClose() { + if geoIPCityReader != nil { + geoIPCityReader.Close() + } + if geoIPASNReader != nil { + geoIPASNReader.Close() + } + if geoIPRangeReader != nil { + geoIPRangeReader.Close() + } +} + func Init(databaseClient *database.Client) error { dataFile = make(map[string][]string) dataFileRegex = make(map[string][]*regexp.Regexp) diff --git a/pkg/parser/enrich.go b/pkg/parser/enrich.go index 5180b9a5fb9..661410d20d3 100644 --- a/pkg/parser/enrich.go +++ b/pkg/parser/enrich.go @@ -7,7 +7,7 @@ import ( ) /* should be part of a package shared with enrich/geoip.go */ -type EnrichFunc func(string, *types.Event, interface{}, *log.Entry) (map[string]string, error) +type EnrichFunc func(string, *types.Event, *log.Entry) (map[string]string, error) type InitFunc func(map[string]string) (interface{}, error) type EnricherCtx struct { @@ -16,59 +16,42 @@ type EnricherCtx struct { type Enricher struct { Name string - InitFunc InitFunc EnrichFunc EnrichFunc - Ctx interface{} } /* mimic plugin loading */ -func Loadplugin(path string) (EnricherCtx, error) { +func 
Loadplugin() (EnricherCtx, error) { enricherCtx := EnricherCtx{} enricherCtx.Registered = make(map[string]*Enricher) - enricherConfig := map[string]string{"datadir": path} - EnrichersList := []*Enricher{ { Name: "GeoIpCity", - InitFunc: GeoIPCityInit, EnrichFunc: GeoIpCity, }, { Name: "GeoIpASN", - InitFunc: GeoIPASNInit, EnrichFunc: GeoIpASN, }, { Name: "IpToRange", - InitFunc: IpToRangeInit, EnrichFunc: IpToRange, }, { Name: "reverse_dns", - InitFunc: reverseDNSInit, EnrichFunc: reverse_dns, }, { Name: "ParseDate", - InitFunc: parseDateInit, EnrichFunc: ParseDate, }, { Name: "UnmarshalJSON", - InitFunc: unmarshalInit, EnrichFunc: unmarshalJSON, }, } for _, enricher := range EnrichersList { - log.Debugf("Initiating enricher '%s'", enricher.Name) - pluginCtx, err := enricher.InitFunc(enricherConfig) - if err != nil { - log.Errorf("unable to register plugin '%s': %v", enricher.Name, err) - continue - } - enricher.Ctx = pluginCtx log.Infof("Successfully registered enricher '%s'", enricher.Name) enricherCtx.Registered[enricher.Name] = enricher } diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go index 20828af9037..748a466d7c3 100644 --- a/pkg/parser/enrich_date.go +++ b/pkg/parser/enrich_date.go @@ -56,7 +56,7 @@ func GenDateParse(date string) (string, time.Time) { return "", time.Time{} } -func ParseDate(in string, p *types.Event, x interface{}, plog *log.Entry) (map[string]string, error) { +func ParseDate(in string, p *types.Event, plog *log.Entry) (map[string]string, error) { var ret = make(map[string]string) var strDate string @@ -105,7 +105,3 @@ func ParseDate(in string, p *types.Event, x interface{}, plog *log.Entry) (map[s return ret, nil } - -func parseDateInit(cfg map[string]string) (interface{}, error) { - return nil, nil -} diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 084ded52573..085ef5ca342 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -48,7 +48,7 @@ func 
TestDateParse(t *testing.T) { for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { - strTime, err := ParseDate(tt.evt.StrTime, &tt.evt, nil, logger) + strTime, err := ParseDate(tt.evt.StrTime, &tt.evt, logger) cstest.RequireErrorContains(t, err, tt.expectedErr) if tt.expectedErr != "" { return diff --git a/pkg/parser/enrich_dns.go b/pkg/parser/enrich_dns.go index f622e6c359a..1ff5b0f4f16 100644 --- a/pkg/parser/enrich_dns.go +++ b/pkg/parser/enrich_dns.go @@ -11,7 +11,7 @@ import ( /* All plugins must export a list of function pointers for exported symbols */ //var ExportedFuncs = []string{"reverse_dns"} -func reverse_dns(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { +func reverse_dns(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { ret := make(map[string]string) if field == "" { return nil, nil @@ -25,7 +25,3 @@ func reverse_dns(field string, p *types.Event, ctx interface{}, plog *log.Entry) ret["reverse_dns"] = rets[0] return ret, nil } - -func reverseDNSInit(cfg map[string]string) (interface{}, error) { - return nil, nil -} diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 0a263c82793..5e1fdbfc437 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -6,53 +6,53 @@ import ( "strconv" "github.com/oschwald/geoip2-golang" - "github.com/oschwald/maxminddb-golang" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) -func IpToRange(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { - var dummy interface{} - ret := make(map[string]string) - +func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { if field == "" { return nil, nil } - ip := net.ParseIP(field) - if ip == nil { - plog.Infof("Can't parse ip %s, no range enrich", field) - return nil, nil - } - net, ok, err := 
ctx.(*maxminddb.Reader).LookupNetwork(ip, &dummy) + + r, err := exprhelpers.GeoIPRangeEnrich(field) + if err != nil { - plog.Errorf("Failed to fetch network for %s : %v", ip.String(), err) - return nil, nil + plog.Errorf("Unable to enrich ip '%s'", field) + return nil, nil //nolint:nilerr } - if !ok { - plog.Debugf("Unable to find range of %s", ip.String()) + + if r == nil { + plog.Warnf("No range found for ip '%s'", field) return nil, nil } - ret["SourceRange"] = net.String() + + record := r.(*net.IPNet) + + ret := make(map[string]string) + ret["SourceRange"] = record.String() + return ret, nil } -func GeoIpASN(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { - ret := make(map[string]string) +func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { if field == "" { return nil, nil } - ip := net.ParseIP(field) - if ip == nil { - plog.Infof("Can't parse ip %s, no ASN enrich", ip) - return nil, nil - } - record, err := ctx.(*geoip2.Reader).ASN(ip) + r, err := exprhelpers.GeoIPASNEnrich(field) + if err != nil { plog.Errorf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr } + + record := r.(*geoip2.ASN) + + ret := make(map[string]string) + ret["ASNNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber) ret["ASNumber"] = fmt.Sprintf("%d", record.AutonomousSystemNumber) ret["ASNOrg"] = record.AutonomousSystemOrganization @@ -62,21 +62,21 @@ func GeoIpASN(field string, p *types.Event, ctx interface{}, plog *log.Entry) (m return ret, nil } -func GeoIpCity(field string, p *types.Event, ctx interface{}, plog *log.Entry) (map[string]string, error) { - ret := make(map[string]string) +func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { if field == "" { return nil, nil } - ip := net.ParseIP(field) - if ip == nil { - plog.Infof("Can't parse ip %s, no City enrich", ip) - return nil, nil - } - record, err := ctx.(*geoip2.Reader).City(ip) + + r, 
err := exprhelpers.GeoIPEnrich(field) + if err != nil { - plog.Debugf("Unable to enrich ip '%s'", ip) + plog.Errorf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr } + + record := r.(*geoip2.City) + ret := make(map[string]string) + if record.Country.IsoCode != "" { ret["IsoCode"] = record.Country.IsoCode ret["IsInEU"] = strconv.FormatBool(record.Country.IsInEuropeanUnion) @@ -88,7 +88,7 @@ func GeoIpCity(field string, p *types.Event, ctx interface{}, plog *log.Entry) ( ret["IsInEU"] = strconv.FormatBool(record.RepresentedCountry.IsInEuropeanUnion) } else { ret["IsoCode"] = "" - ret["IsInEU"] = strconv.FormatBool(false) + ret["IsInEU"] = "false" } ret["Latitude"] = fmt.Sprintf("%f", record.Location.Latitude) @@ -98,33 +98,3 @@ func GeoIpCity(field string, p *types.Event, ctx interface{}, plog *log.Entry) ( return ret, nil } - -func GeoIPCityInit(cfg map[string]string) (interface{}, error) { - dbCityReader, err := geoip2.Open(cfg["datadir"] + "/GeoLite2-City.mmdb") - if err != nil { - log.Debugf("couldn't open geoip : %v", err) - return nil, err - } - - return dbCityReader, nil -} - -func GeoIPASNInit(cfg map[string]string) (interface{}, error) { - dbASReader, err := geoip2.Open(cfg["datadir"] + "/GeoLite2-ASN.mmdb") - if err != nil { - log.Debugf("couldn't open geoip : %v", err) - return nil, err - } - - return dbASReader, nil -} - -func IpToRangeInit(cfg map[string]string) (interface{}, error) { - ipToRangeReader, err := maxminddb.Open(cfg["datadir"] + "/GeoLite2-ASN.mmdb") - if err != nil { - log.Debugf("couldn't open geoip : %v", err) - return nil, err - } - - return ipToRangeReader, nil -} diff --git a/pkg/parser/enrich_unmarshal.go b/pkg/parser/enrich_unmarshal.go index dce9c75d466..7ff91b70aea 100644 --- a/pkg/parser/enrich_unmarshal.go +++ b/pkg/parser/enrich_unmarshal.go @@ -8,7 +8,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func unmarshalJSON(field string, p *types.Event, ctx interface{}, plog *log.Entry) 
(map[string]string, error) { +func unmarshalJSON(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { err := json.Unmarshal([]byte(p.Line.Raw), &p.Unmarshaled) if err != nil { plog.Errorf("could not unmarshal JSON: %s", err) @@ -17,7 +17,3 @@ func unmarshalJSON(field string, p *types.Event, ctx interface{}, plog *log.Entr plog.Tracef("unmarshaled JSON: %+v", p.Unmarshaled) return nil, nil } - -func unmarshalInit(cfg map[string]string) (interface{}, error) { - return nil, nil -} diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 244f361d6b8..11ffb8aa7fa 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -64,8 +64,9 @@ type Node struct { Data []*types.DataSource `yaml:"data,omitempty"` } -func (n *Node) validate(pctx *UnixParserCtx, ectx EnricherCtx) error { - // stage is being set automagically +func (n *Node) validate(ectx EnricherCtx) error { + + //stage is being set automagically if n.Stage == "" { return errors.New("stage needs to be an existing stage") } @@ -635,7 +636,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { return errors.New("Node is empty") } - if err := n.validate(pctx, ectx); err != nil { + if err := n.validate(ectx); err != nil { return err } diff --git a/pkg/parser/node_test.go b/pkg/parser/node_test.go index d85aa82a8ae..be12176bff9 100644 --- a/pkg/parser/node_test.go +++ b/pkg/parser/node_test.go @@ -56,7 +56,7 @@ func TestParserConfigs(t *testing.T) { t.Fatalf("Compile: (%d/%d) expected error", idx+1, len(CfgTests)) } - err = CfgTests[idx].NodeCfg.validate(pctx, EnricherCtx{}) + err = CfgTests[idx].NodeCfg.validate(EnricherCtx{}) if CfgTests[idx].Valid == true && err != nil { t.Fatalf("Valid: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index d009bd0c515..d97dce8d20f 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -152,7 +152,11 @@ func prepTests() 
(*UnixParserCtx, EnricherCtx, error) { //Load enrichment datadir := "./test_data/" - ectx, err = Loadplugin(datadir) + err = exprhelpers.GeoIPInit(datadir) + if err != nil { + log.Fatalf("unable to initialize GeoIP: %s", err) + } + ectx, err = Loadplugin() if err != nil { log.Fatalf("failed to load plugin geoip : %v", err) } diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index afdf88dc873..1596ef5ffd9 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -155,7 +155,7 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error { /*still way too hackish, but : inject all the results in enriched, and */ if enricherPlugin, ok := n.EnrichFunctions.Registered[static.Method]; ok { clog.Tracef("Found method '%s'", static.Method) - ret, err := enricherPlugin.EnrichFunc(value, event, enricherPlugin.Ctx, n.Logger.WithField("method", static.Method)) + ret, err := enricherPlugin.EnrichFunc(value, event, n.Logger.WithField("method", static.Method)) if err != nil { clog.Errorf("method '%s' returned an error : %v", static.Method, err) } diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go index 720bac3d1fe..280d122ecc1 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -117,7 +117,7 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { */ log.Infof("Loading enrich plugins") - parsers.EnricherCtx, err = Loadplugin(cConfig.ConfigPaths.DataDir) + parsers.EnricherCtx, err = Loadplugin() if err != nil { return parsers, fmt.Errorf("failed to load enrich plugin : %v", err) } From 7d6514c7cc457331d3707d1bd2dc92af76a5b2e8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 30 May 2024 09:45:43 +0200 Subject: [PATCH 145/581] update user agent with version and platform information (#3035) * update user agent with version and platform information * remove unused import * user agent: omit tag correctly if git is dirty --- Makefile | 2 +- 
cmd/crowdsec-cli/alerts.go | 5 ++--- cmd/crowdsec-cli/capi.go | 7 +++---- cmd/crowdsec-cli/console.go | 4 ++-- cmd/crowdsec-cli/decisions.go | 5 ++--- cmd/crowdsec-cli/lapi.go | 7 +++---- cmd/crowdsec-cli/notifications.go | 4 ++-- cmd/crowdsec/lapiclient.go | 5 ++--- .../loki/internal/lokiclient/loki_client.go | 2 +- pkg/apiclient/alerts_service_test.go | 10 +++++----- pkg/apiclient/auth_service_test.go | 11 +++++------ pkg/apiclient/client_http_test.go | 8 ++++---- pkg/apiclient/client_test.go | 17 +++++++++-------- pkg/apiclient/decisions_service_test.go | 7 +++---- pkg/apiserver/apic.go | 4 ++-- pkg/apiserver/apic_metrics_test.go | 6 ++---- pkg/apiserver/apic_test.go | 15 ++++++++------- pkg/cwhub/cwhub.go | 4 ++-- pkg/cwversion/version.go | 13 +++++++------ pkg/metabase/api.go | 6 +++--- 20 files changed, 68 insertions(+), 74 deletions(-) diff --git a/Makefile b/Makefile index 9e21abee02a..324be04683d 100644 --- a/Makefile +++ b/Makefile @@ -78,7 +78,7 @@ LD_OPTS_VARS= \ -X '$(GO_MODULE_NAME)/pkg/csconfig.defaultDataDir=$(DEFAULT_DATADIR)' ifneq (,$(DOCKER_BUILD)) -LD_OPTS_VARS += -X '$(GO_MODULE_NAME)/pkg/cwversion.System=docker' +LD_OPTS_VARS += -X 'github.com/crowdsecurity/go-cs-lib/version.System=docker' endif GO_TAGS := netgo,osusergo,sqlite_omit_load_extension diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 4cc4a992c43..d32af6ae56c 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -19,10 +19,9 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -209,7 +208,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { cli.client, err = 
apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index b89d9c7edb0..f5940d71b66 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -12,12 +12,11 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -81,7 +80,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: capiUser, Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiurl, VersionPrefix: CAPIURLPrefix, }, nil) @@ -173,7 +172,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Client, err = apiclient.NewDefaultClient(apiURL, CAPIURLPrefix, - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil) if err != nil { return fmt.Errorf("init default client: %w", err) diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 4aba689aa05..149b7656884 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -18,12 +18,12 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -147,7 +147,7 @@ After running this command your will need to validate the enrollment in the weba MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, Scenarios: scenarios, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v3", }) diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 3fb790633b5..9b9159561be 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -17,9 +17,8 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -150,7 +149,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { Client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiurl, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 7cffd7ffc7f..0d7ebc64431 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -15,13 +15,12 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" 
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/parser" @@ -53,7 +52,7 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Client, err = apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil) if err != nil { return fmt.Errorf("init default client: %w", err) @@ -120,7 +119,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: lapiUser, Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiurl, VersionPrefix: LAPIURLPrefix, }, nil) diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index cb102df6928..84dd6b941c9 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -23,13 +23,13 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csprofiles" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -462,7 +462,7 @@ func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Al client, err := apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec/lapiclient.go 
b/cmd/crowdsec/lapiclient.go index f12aea5ac0d..ae23850eb0a 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -8,10 +8,9 @@ import ( "github.com/go-openapi/strfmt" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -47,7 +46,7 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. MachineID: credentials.Login, Password: password, Scenarios: installedScenariosAndAppsecRules, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index d2af4e8af28..be14939c44d 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -321,6 +321,6 @@ func NewLokiClient(config Config) *LokiClient { if config.Username != "" || config.Password != "" { headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) } - headers["User-Agent"] = "Crowdsec " + cwversion.VersionStr() + headers["User-Agent"] = cwversion.UserAgent() return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config, requestHeaders: headers} } diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 31a947556bb..12ef2d295f4 100644 --- a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -13,8 +13,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/version" + 
"github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -35,7 +35,7 @@ func TestAlertsListAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -214,7 +214,7 @@ func TestAlertsGetAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -388,7 +388,7 @@ func TestAlertsCreateAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -430,7 +430,7 @@ func TestAlertsDeleteAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index f5de827a121..3e887149a98 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -14,8 +14,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/crowdsecurity/go-cs-lib/version" - + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -88,7 +87,7 @@ func TestWatcherRegister(t *testing.T) { clientconfig := Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", } @@ -126,7 +125,7 @@ func TestWatcherAuth(t 
*testing.T) { clientConfig := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -207,7 +206,7 @@ func TestWatcherUnregister(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -261,7 +260,7 @@ func TestWatcherEnroll(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index a7582eaf437..4bdfe1d0da5 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -2,7 +2,6 @@ package apiclient import ( "context" - "fmt" "net/http" "net/url" "testing" @@ -11,7 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestNewRequestInvalid(t *testing.T) { @@ -25,7 +25,7 @@ func TestNewRequestInvalid(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -57,7 +57,7 @@ func TestNewRequestTimeout(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git 
a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index d3296c4b67f..2adba170584 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -17,7 +17,8 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) /*this is a ripoff of google/go-github approach : @@ -96,7 +97,7 @@ func TestNewClientOk(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -133,7 +134,7 @@ func TestNewClientOk_UnixSocket(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -171,7 +172,7 @@ func TestNewClientKo(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -249,7 +250,7 @@ func TestNewClientRegisterKO(t *testing.T) { _, err = RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -280,7 +281,7 @@ func TestNewClientRegisterOK(t *testing.T) { client, err := RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -313,7 +314,7 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { client, err := RegisterClient(&Config{ MachineID: 
"test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -343,7 +344,7 @@ func TestNewClientBadAnswer(t *testing.T) { _, err = RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index fb2fb7342f7..97b5aa26482 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -2,7 +2,6 @@ package apiclient import ( "context" - "fmt" "net/http" "net/url" "testing" @@ -13,8 +12,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/modelscapi" ) @@ -403,7 +402,7 @@ func TestDeleteDecisions(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -504,7 +503,7 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { // client, err := NewClient(&Config{ // MachineID: "test_login", // Password: "test_password", -// UserAgent: fmt.Sprintf("crowdsec/%s", cwversion.VersionStr()), +// UserAgent: cwversion.UserAgent(), // URL: apiURL, // VersionPrefix: "v1", // }) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 3f646071b0e..b6c560c0a5d 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -20,10 +20,10 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" - 
"github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" @@ -215,7 +215,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ MachineID: config.Credentials.Login, Password: password, - UserAgent: fmt.Sprintf("crowdsec/%s", version.String()), + UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v3", diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 529dd6c6839..f3b9b352316 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -2,7 +2,6 @@ package apiserver import ( "context" - "fmt" "net/url" "testing" "time" @@ -11,9 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestAPICSendMetrics(t *testing.T) { @@ -73,7 +71,7 @@ func TestAPICSendMetrics(t *testing.T) { apiClient, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 74c627cd020..c5a39455ff9 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -26,6 +26,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" 
"github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" @@ -675,7 +676,7 @@ func TestAPICWhitelists(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -816,7 +817,7 @@ func TestAPICPullTop(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -904,7 +905,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -996,7 +997,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -1023,7 +1024,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -1105,7 +1106,7 @@ func TestAPICPush(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) @@ -1164,7 +1165,7 @@ func TestAPICPull(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - fmt.Sprintf("crowdsec/%s", version.String()), + cwversion.UserAgent(), nil, ) require.NoError(t, err) diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 0496834e472..0a9cc443ce0 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) // hubTransport wraps a Transport to set a custom User-Agent. 
@@ -17,7 +17,7 @@ type hubTransport struct { } func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { - req.Header.Set("User-Agent", "crowdsec/"+version.String()) + req.Header.Set("User-Agent", cwversion.UserAgent()) return t.RoundTripper.RoundTrip(req) } diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index ac51567c2fc..d13767e665b 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "net/http" - "runtime" "strings" goversion "github.com/hashicorp/go-version" @@ -14,7 +13,6 @@ import ( var ( Codename string // = "SoumSoum" - System = runtime.GOOS // = "linux" Libre2 = "WebAssembly" ) @@ -26,9 +24,10 @@ const ( ) func versionWithTag() string { + // if the version number already contains the tag, don't duplicate it ret := version.Version - if !strings.HasSuffix(ret, version.Tag) { + if !strings.HasSuffix(ret, version.Tag) && !strings.HasSuffix(ret, "g" + version.Tag + "-dirty") { ret += "-" + version.Tag } @@ -40,8 +39,9 @@ func FullString() string { ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) - ret += fmt.Sprintf("Platform: %s\n", System) + ret += fmt.Sprintf("Platform: %s\n", version.System) ret += fmt.Sprintf("libre2: %s\n", Libre2) + ret += fmt.Sprintf("User-Agent: %s\n", UserAgent()) ret += fmt.Sprintf("Constraint_parser: %s\n", Constraint_parser) ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) @@ -50,10 +50,11 @@ func FullString() string { return ret } -func VersionStr() string { - return fmt.Sprintf("%s-%s-%s", version.Version, System, version.Tag) +func UserAgent() string { + return "crowdsec/" + versionWithTag() + "-" + version.System } +// VersionStrip remove the tag from the version string, used to match with a hub branch func VersionStrip() string { ret := 
strings.Split(version.Version, "~") ret = strings.Split(ret[0], "-") diff --git a/pkg/metabase/api.go b/pkg/metabase/api.go index bded4c9e83d..387e8d151e0 100644 --- a/pkg/metabase/api.go +++ b/pkg/metabase/api.go @@ -6,10 +6,10 @@ import ( "net/http" "time" - "github.com/crowdsecurity/go-cs-lib/version" - "github.com/dghubble/sling" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) type MBClient struct { @@ -38,7 +38,7 @@ var ( func NewMBClient(url string) (*MBClient, error) { httpClient := &http.Client{Timeout: 20 * time.Second} return &MBClient{ - CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", fmt.Sprintf("crowdsec/%s", version.String())), + CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", cwversion.UserAgent()), Client: httpClient, }, nil } From 16bfab86c8e0de6631b8fcea99b1da009affb902 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 31 May 2024 09:38:43 +0200 Subject: [PATCH 146/581] check type assertion in geoip enrichers (#3040) --- pkg/parser/enrich_geoip.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 5e1fdbfc437..58732129af8 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -29,7 +29,11 @@ func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string return nil, nil } - record := r.(*net.IPNet) + record, ok := r.(*net.IPNet) + + if !ok { + return nil, nil + } ret := make(map[string]string) ret["SourceRange"] = record.String() @@ -49,7 +53,11 @@ func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, return nil, nil //nolint:nilerr } - record := r.(*geoip2.ASN) + record, ok := r.(*geoip2.ASN) + + if !ok { + return nil, nil + } ret := make(map[string]string) @@ -74,7 +82,12 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string return nil, nil //nolint:nilerr } - record := 
r.(*geoip2.City) + record, ok := r.(*geoip2.City) + + if !ok { + return nil, nil + } + ret := make(map[string]string) if record.Country.IsoCode != "" { From 02e2c8aed74e4cc62632cd70cd0c76f007190dc6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 15:00:38 +0200 Subject: [PATCH 147/581] deps: use ent 0.13.1 (#3023) --- go.mod | 10 +++++----- go.sum | 23 ++++++++++++----------- pkg/database/ent/alert.go | 8 +++----- pkg/database/ent/decision.go | 8 +++----- pkg/database/ent/event.go | 8 +++----- pkg/database/ent/generate.go | 2 +- pkg/database/ent/meta.go | 8 +++----- pkg/database/ent/runtime/runtime.go | 4 ++-- 8 files changed, 32 insertions(+), 39 deletions(-) diff --git a/go.mod b/go.mod index 93f3e4cd806..1bc63a470df 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ go 1.22 // toolchain go1.21.3 require ( - entgo.io/ent v0.12.5 + entgo.io/ent v0.13.1 github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 @@ -83,7 +83,7 @@ require ( github.com/wasilibs/go-re2 v1.3.0 github.com/xhit/go-simple-mail/v2 v2.16.0 golang.org/x/crypto v0.22.0 - golang.org/x/mod v0.11.0 + golang.org/x/mod v0.15.0 golang.org/x/sys v0.19.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 @@ -96,7 +96,7 @@ require ( ) require ( - ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 // indirect + ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 // indirect @@ -130,7 +130,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.1.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // 
indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect @@ -204,7 +204,7 @@ require ( golang.org/x/sync v0.6.0 // indirect golang.org/x/term v0.19.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 // indirect + golang.org/x/tools v0.18.0 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect diff --git a/go.sum b/go.sum index 310cbd85ab7..ba4e6267bb9 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,9 @@ -ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935 h1:JnYs/y8RJ3+MiIUp+3RgyyeO48VHLAZimqiaZYnMKk8= -ariga.io/atlas v0.14.1-0.20230918065911-83ad451a4935/go.mod h1:isZrlzJ5cpoCoKFoY9knZug7Lq4pP1cm8g3XciLZ0Pw= +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= bitbucket.org/creachadair/stringset v0.0.9 h1:L4vld9nzPt90UZNrXjNelTshD74ps4P5NGs3Iq6yN3o= bitbucket.org/creachadair/stringset v0.0.9/go.mod h1:t+4WcQ4+PXTa8aQdNKe40ZP6iwesoMFWAxPGd3UGjyY= -entgo.io/ent v0.12.5 h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4= -entgo.io/ent v0.12.5/go.mod h1:Y3JVAjtlIk8xVZYSn3t3mf8xlZIn5SAOXZQxD6kKI+Q= +entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE= +entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -311,8 +311,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -451,8 +452,8 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.3.0 h1:jX8FDLfW4ThVXctBNZ+3cIWnCSnrACDV73r76dy0aQQ= github.com/leodido/go-urn v1.3.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -767,8 +768,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -888,8 +889,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901 h1:0wxTF6pSjIIhNt7mo9GvjDfzyCOiWhmICgtO/Ah948s= -golang.org/x/tools v0.8.1-0.20230428195545-5283a0178901/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/database/ent/alert.go 
b/pkg/database/ent/alert.go index 6da9f0efe76..8bfe0badc09 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -89,12 +89,10 @@ type AlertEdges struct { // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e AlertEdges) OwnerOrErr() (*Machine, error) { - if e.loadedTypes[0] { - if e.Owner == nil { - // Edge was loaded but was not found. - return nil, &NotFoundError{label: machine.Label} - } + if e.Owner != nil { return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: machine.Label} } return nil, &NotLoadedError{edge: "owner"} } diff --git a/pkg/database/ent/decision.go b/pkg/database/ent/decision.go index 1cc0df4c784..4a6dc728509 100644 --- a/pkg/database/ent/decision.go +++ b/pkg/database/ent/decision.go @@ -68,12 +68,10 @@ type DecisionEdges struct { // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e DecisionEdges) OwnerOrErr() (*Alert, error) { - if e.loadedTypes[0] { - if e.Owner == nil { - // Edge was loaded but was not found. - return nil, &NotFoundError{label: alert.Label} - } + if e.Owner != nil { return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: alert.Label} } return nil, &NotLoadedError{edge: "owner"} } diff --git a/pkg/database/ent/event.go b/pkg/database/ent/event.go index 10e6d01c9d5..b57f1f34ac9 100644 --- a/pkg/database/ent/event.go +++ b/pkg/database/ent/event.go @@ -46,12 +46,10 @@ type EventEdges struct { // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e EventEdges) OwnerOrErr() (*Alert, error) { - if e.loadedTypes[0] { - if e.Owner == nil { - // Edge was loaded but was not found. 
- return nil, &NotFoundError{label: alert.Label} - } + if e.Owner != nil { return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: alert.Label} } return nil, &NotLoadedError{edge: "owner"} } diff --git a/pkg/database/ent/generate.go b/pkg/database/ent/generate.go index 5f4b39eec90..8ada999d7ab 100644 --- a/pkg/database/ent/generate.go +++ b/pkg/database/ent/generate.go @@ -1,4 +1,4 @@ package ent -//go:generate go run -mod=mod entgo.io/ent/cmd/ent@v0.12.5 generate ./schema +//go:generate go run -mod=mod entgo.io/ent/cmd/ent@v0.13.1 generate ./schema diff --git a/pkg/database/ent/meta.go b/pkg/database/ent/meta.go index 768358ca2bf..7e29627957c 100644 --- a/pkg/database/ent/meta.go +++ b/pkg/database/ent/meta.go @@ -46,12 +46,10 @@ type MetaEdges struct { // OwnerOrErr returns the Owner value or an error if the edge // was not loaded in eager-loading, or loaded but was not found. func (e MetaEdges) OwnerOrErr() (*Alert, error) { - if e.loadedTypes[0] { - if e.Owner == nil { - // Edge was loaded but was not found. - return nil, &NotFoundError{label: alert.Label} - } + if e.Owner != nil { return e.Owner, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: alert.Label} } return nil, &NotLoadedError{edge: "owner"} } diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go index d10a2fb5459..9cb9d96258a 100644 --- a/pkg/database/ent/runtime/runtime.go +++ b/pkg/database/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go const ( - Version = "v0.12.5" // Version of ent codegen. - Sum = "h1:KREM5E4CSoej4zeGa88Ou/gfturAnpUv0mzAjch1sj4=" // Sum of ent codegen. + Version = "v0.13.1" // Version of ent codegen. + Sum = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen. 
) From 6dbc5fd522889f0441b6705bebe61c46f74833fe Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 15:19:48 +0200 Subject: [PATCH 148/581] db: mark immutable columns / remove unused (#3024) * db: mark immutable columns * db: drop unused column * lint --- pkg/database/ent/alert_update.go | 1022 ------------------------- pkg/database/ent/bouncer.go | 13 +- pkg/database/ent/bouncer/bouncer.go | 10 - pkg/database/ent/bouncer/where.go | 55 -- pkg/database/ent/bouncer_create.go | 22 - pkg/database/ent/bouncer_update.go | 120 --- pkg/database/ent/configitem_update.go | 34 - pkg/database/ent/decision_update.go | 580 -------------- pkg/database/ent/event_update.go | 94 --- pkg/database/ent/lock_update.go | 35 - pkg/database/ent/meta_update.go | 128 ---- pkg/database/ent/migrate/schema.go | 1 - pkg/database/ent/mutation.go | 75 +- pkg/database/ent/runtime.go | 8 +- pkg/database/ent/schema/alert.go | 43 +- pkg/database/ent/schema/bouncer.go | 7 +- pkg/database/ent/schema/config.go | 3 +- pkg/database/ent/schema/decision.go | 25 +- pkg/database/ent/schema/event.go | 5 +- pkg/database/ent/schema/lock.go | 3 +- pkg/database/ent/schema/meta.go | 7 +- 21 files changed, 54 insertions(+), 2236 deletions(-) diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index 8b88c35c7d7..48ce221ac82 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -38,442 +38,6 @@ func (au *AlertUpdate) SetUpdatedAt(t time.Time) *AlertUpdate { return au } -// SetScenario sets the "scenario" field. -func (au *AlertUpdate) SetScenario(s string) *AlertUpdate { - au.mutation.SetScenario(s) - return au -} - -// SetNillableScenario sets the "scenario" field if the given value is not nil. -func (au *AlertUpdate) SetNillableScenario(s *string) *AlertUpdate { - if s != nil { - au.SetScenario(*s) - } - return au -} - -// SetBucketId sets the "bucketId" field. 
-func (au *AlertUpdate) SetBucketId(s string) *AlertUpdate { - au.mutation.SetBucketId(s) - return au -} - -// SetNillableBucketId sets the "bucketId" field if the given value is not nil. -func (au *AlertUpdate) SetNillableBucketId(s *string) *AlertUpdate { - if s != nil { - au.SetBucketId(*s) - } - return au -} - -// ClearBucketId clears the value of the "bucketId" field. -func (au *AlertUpdate) ClearBucketId() *AlertUpdate { - au.mutation.ClearBucketId() - return au -} - -// SetMessage sets the "message" field. -func (au *AlertUpdate) SetMessage(s string) *AlertUpdate { - au.mutation.SetMessage(s) - return au -} - -// SetNillableMessage sets the "message" field if the given value is not nil. -func (au *AlertUpdate) SetNillableMessage(s *string) *AlertUpdate { - if s != nil { - au.SetMessage(*s) - } - return au -} - -// ClearMessage clears the value of the "message" field. -func (au *AlertUpdate) ClearMessage() *AlertUpdate { - au.mutation.ClearMessage() - return au -} - -// SetEventsCount sets the "eventsCount" field. -func (au *AlertUpdate) SetEventsCount(i int32) *AlertUpdate { - au.mutation.ResetEventsCount() - au.mutation.SetEventsCount(i) - return au -} - -// SetNillableEventsCount sets the "eventsCount" field if the given value is not nil. -func (au *AlertUpdate) SetNillableEventsCount(i *int32) *AlertUpdate { - if i != nil { - au.SetEventsCount(*i) - } - return au -} - -// AddEventsCount adds i to the "eventsCount" field. -func (au *AlertUpdate) AddEventsCount(i int32) *AlertUpdate { - au.mutation.AddEventsCount(i) - return au -} - -// ClearEventsCount clears the value of the "eventsCount" field. -func (au *AlertUpdate) ClearEventsCount() *AlertUpdate { - au.mutation.ClearEventsCount() - return au -} - -// SetStartedAt sets the "startedAt" field. -func (au *AlertUpdate) SetStartedAt(t time.Time) *AlertUpdate { - au.mutation.SetStartedAt(t) - return au -} - -// SetNillableStartedAt sets the "startedAt" field if the given value is not nil. 
-func (au *AlertUpdate) SetNillableStartedAt(t *time.Time) *AlertUpdate { - if t != nil { - au.SetStartedAt(*t) - } - return au -} - -// ClearStartedAt clears the value of the "startedAt" field. -func (au *AlertUpdate) ClearStartedAt() *AlertUpdate { - au.mutation.ClearStartedAt() - return au -} - -// SetStoppedAt sets the "stoppedAt" field. -func (au *AlertUpdate) SetStoppedAt(t time.Time) *AlertUpdate { - au.mutation.SetStoppedAt(t) - return au -} - -// SetNillableStoppedAt sets the "stoppedAt" field if the given value is not nil. -func (au *AlertUpdate) SetNillableStoppedAt(t *time.Time) *AlertUpdate { - if t != nil { - au.SetStoppedAt(*t) - } - return au -} - -// ClearStoppedAt clears the value of the "stoppedAt" field. -func (au *AlertUpdate) ClearStoppedAt() *AlertUpdate { - au.mutation.ClearStoppedAt() - return au -} - -// SetSourceIp sets the "sourceIp" field. -func (au *AlertUpdate) SetSourceIp(s string) *AlertUpdate { - au.mutation.SetSourceIp(s) - return au -} - -// SetNillableSourceIp sets the "sourceIp" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceIp(s *string) *AlertUpdate { - if s != nil { - au.SetSourceIp(*s) - } - return au -} - -// ClearSourceIp clears the value of the "sourceIp" field. -func (au *AlertUpdate) ClearSourceIp() *AlertUpdate { - au.mutation.ClearSourceIp() - return au -} - -// SetSourceRange sets the "sourceRange" field. -func (au *AlertUpdate) SetSourceRange(s string) *AlertUpdate { - au.mutation.SetSourceRange(s) - return au -} - -// SetNillableSourceRange sets the "sourceRange" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceRange(s *string) *AlertUpdate { - if s != nil { - au.SetSourceRange(*s) - } - return au -} - -// ClearSourceRange clears the value of the "sourceRange" field. -func (au *AlertUpdate) ClearSourceRange() *AlertUpdate { - au.mutation.ClearSourceRange() - return au -} - -// SetSourceAsNumber sets the "sourceAsNumber" field. 
-func (au *AlertUpdate) SetSourceAsNumber(s string) *AlertUpdate { - au.mutation.SetSourceAsNumber(s) - return au -} - -// SetNillableSourceAsNumber sets the "sourceAsNumber" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceAsNumber(s *string) *AlertUpdate { - if s != nil { - au.SetSourceAsNumber(*s) - } - return au -} - -// ClearSourceAsNumber clears the value of the "sourceAsNumber" field. -func (au *AlertUpdate) ClearSourceAsNumber() *AlertUpdate { - au.mutation.ClearSourceAsNumber() - return au -} - -// SetSourceAsName sets the "sourceAsName" field. -func (au *AlertUpdate) SetSourceAsName(s string) *AlertUpdate { - au.mutation.SetSourceAsName(s) - return au -} - -// SetNillableSourceAsName sets the "sourceAsName" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceAsName(s *string) *AlertUpdate { - if s != nil { - au.SetSourceAsName(*s) - } - return au -} - -// ClearSourceAsName clears the value of the "sourceAsName" field. -func (au *AlertUpdate) ClearSourceAsName() *AlertUpdate { - au.mutation.ClearSourceAsName() - return au -} - -// SetSourceCountry sets the "sourceCountry" field. -func (au *AlertUpdate) SetSourceCountry(s string) *AlertUpdate { - au.mutation.SetSourceCountry(s) - return au -} - -// SetNillableSourceCountry sets the "sourceCountry" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceCountry(s *string) *AlertUpdate { - if s != nil { - au.SetSourceCountry(*s) - } - return au -} - -// ClearSourceCountry clears the value of the "sourceCountry" field. -func (au *AlertUpdate) ClearSourceCountry() *AlertUpdate { - au.mutation.ClearSourceCountry() - return au -} - -// SetSourceLatitude sets the "sourceLatitude" field. -func (au *AlertUpdate) SetSourceLatitude(f float32) *AlertUpdate { - au.mutation.ResetSourceLatitude() - au.mutation.SetSourceLatitude(f) - return au -} - -// SetNillableSourceLatitude sets the "sourceLatitude" field if the given value is not nil. 
-func (au *AlertUpdate) SetNillableSourceLatitude(f *float32) *AlertUpdate { - if f != nil { - au.SetSourceLatitude(*f) - } - return au -} - -// AddSourceLatitude adds f to the "sourceLatitude" field. -func (au *AlertUpdate) AddSourceLatitude(f float32) *AlertUpdate { - au.mutation.AddSourceLatitude(f) - return au -} - -// ClearSourceLatitude clears the value of the "sourceLatitude" field. -func (au *AlertUpdate) ClearSourceLatitude() *AlertUpdate { - au.mutation.ClearSourceLatitude() - return au -} - -// SetSourceLongitude sets the "sourceLongitude" field. -func (au *AlertUpdate) SetSourceLongitude(f float32) *AlertUpdate { - au.mutation.ResetSourceLongitude() - au.mutation.SetSourceLongitude(f) - return au -} - -// SetNillableSourceLongitude sets the "sourceLongitude" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceLongitude(f *float32) *AlertUpdate { - if f != nil { - au.SetSourceLongitude(*f) - } - return au -} - -// AddSourceLongitude adds f to the "sourceLongitude" field. -func (au *AlertUpdate) AddSourceLongitude(f float32) *AlertUpdate { - au.mutation.AddSourceLongitude(f) - return au -} - -// ClearSourceLongitude clears the value of the "sourceLongitude" field. -func (au *AlertUpdate) ClearSourceLongitude() *AlertUpdate { - au.mutation.ClearSourceLongitude() - return au -} - -// SetSourceScope sets the "sourceScope" field. -func (au *AlertUpdate) SetSourceScope(s string) *AlertUpdate { - au.mutation.SetSourceScope(s) - return au -} - -// SetNillableSourceScope sets the "sourceScope" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceScope(s *string) *AlertUpdate { - if s != nil { - au.SetSourceScope(*s) - } - return au -} - -// ClearSourceScope clears the value of the "sourceScope" field. -func (au *AlertUpdate) ClearSourceScope() *AlertUpdate { - au.mutation.ClearSourceScope() - return au -} - -// SetSourceValue sets the "sourceValue" field. 
-func (au *AlertUpdate) SetSourceValue(s string) *AlertUpdate { - au.mutation.SetSourceValue(s) - return au -} - -// SetNillableSourceValue sets the "sourceValue" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSourceValue(s *string) *AlertUpdate { - if s != nil { - au.SetSourceValue(*s) - } - return au -} - -// ClearSourceValue clears the value of the "sourceValue" field. -func (au *AlertUpdate) ClearSourceValue() *AlertUpdate { - au.mutation.ClearSourceValue() - return au -} - -// SetCapacity sets the "capacity" field. -func (au *AlertUpdate) SetCapacity(i int32) *AlertUpdate { - au.mutation.ResetCapacity() - au.mutation.SetCapacity(i) - return au -} - -// SetNillableCapacity sets the "capacity" field if the given value is not nil. -func (au *AlertUpdate) SetNillableCapacity(i *int32) *AlertUpdate { - if i != nil { - au.SetCapacity(*i) - } - return au -} - -// AddCapacity adds i to the "capacity" field. -func (au *AlertUpdate) AddCapacity(i int32) *AlertUpdate { - au.mutation.AddCapacity(i) - return au -} - -// ClearCapacity clears the value of the "capacity" field. -func (au *AlertUpdate) ClearCapacity() *AlertUpdate { - au.mutation.ClearCapacity() - return au -} - -// SetLeakSpeed sets the "leakSpeed" field. -func (au *AlertUpdate) SetLeakSpeed(s string) *AlertUpdate { - au.mutation.SetLeakSpeed(s) - return au -} - -// SetNillableLeakSpeed sets the "leakSpeed" field if the given value is not nil. -func (au *AlertUpdate) SetNillableLeakSpeed(s *string) *AlertUpdate { - if s != nil { - au.SetLeakSpeed(*s) - } - return au -} - -// ClearLeakSpeed clears the value of the "leakSpeed" field. -func (au *AlertUpdate) ClearLeakSpeed() *AlertUpdate { - au.mutation.ClearLeakSpeed() - return au -} - -// SetScenarioVersion sets the "scenarioVersion" field. 
-func (au *AlertUpdate) SetScenarioVersion(s string) *AlertUpdate { - au.mutation.SetScenarioVersion(s) - return au -} - -// SetNillableScenarioVersion sets the "scenarioVersion" field if the given value is not nil. -func (au *AlertUpdate) SetNillableScenarioVersion(s *string) *AlertUpdate { - if s != nil { - au.SetScenarioVersion(*s) - } - return au -} - -// ClearScenarioVersion clears the value of the "scenarioVersion" field. -func (au *AlertUpdate) ClearScenarioVersion() *AlertUpdate { - au.mutation.ClearScenarioVersion() - return au -} - -// SetScenarioHash sets the "scenarioHash" field. -func (au *AlertUpdate) SetScenarioHash(s string) *AlertUpdate { - au.mutation.SetScenarioHash(s) - return au -} - -// SetNillableScenarioHash sets the "scenarioHash" field if the given value is not nil. -func (au *AlertUpdate) SetNillableScenarioHash(s *string) *AlertUpdate { - if s != nil { - au.SetScenarioHash(*s) - } - return au -} - -// ClearScenarioHash clears the value of the "scenarioHash" field. -func (au *AlertUpdate) ClearScenarioHash() *AlertUpdate { - au.mutation.ClearScenarioHash() - return au -} - -// SetSimulated sets the "simulated" field. -func (au *AlertUpdate) SetSimulated(b bool) *AlertUpdate { - au.mutation.SetSimulated(b) - return au -} - -// SetNillableSimulated sets the "simulated" field if the given value is not nil. -func (au *AlertUpdate) SetNillableSimulated(b *bool) *AlertUpdate { - if b != nil { - au.SetSimulated(*b) - } - return au -} - -// SetUUID sets the "uuid" field. -func (au *AlertUpdate) SetUUID(s string) *AlertUpdate { - au.mutation.SetUUID(s) - return au -} - -// SetNillableUUID sets the "uuid" field if the given value is not nil. -func (au *AlertUpdate) SetNillableUUID(s *string) *AlertUpdate { - if s != nil { - au.SetUUID(*s) - } - return au -} - -// ClearUUID clears the value of the "uuid" field. 
-func (au *AlertUpdate) ClearUUID() *AlertUpdate { - au.mutation.ClearUUID() - return au -} - // SetOwnerID sets the "owner" edge to the Machine entity by ID. func (au *AlertUpdate) SetOwnerID(id int) *AlertUpdate { au.mutation.SetOwnerID(id) @@ -660,135 +224,60 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := au.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := au.mutation.Scenario(); ok { - _spec.SetField(alert.FieldScenario, field.TypeString, value) - } - if value, ok := au.mutation.BucketId(); ok { - _spec.SetField(alert.FieldBucketId, field.TypeString, value) - } if au.mutation.BucketIdCleared() { _spec.ClearField(alert.FieldBucketId, field.TypeString) } - if value, ok := au.mutation.Message(); ok { - _spec.SetField(alert.FieldMessage, field.TypeString, value) - } if au.mutation.MessageCleared() { _spec.ClearField(alert.FieldMessage, field.TypeString) } - if value, ok := au.mutation.EventsCount(); ok { - _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) - } - if value, ok := au.mutation.AddedEventsCount(); ok { - _spec.AddField(alert.FieldEventsCount, field.TypeInt32, value) - } if au.mutation.EventsCountCleared() { _spec.ClearField(alert.FieldEventsCount, field.TypeInt32) } - if value, ok := au.mutation.StartedAt(); ok { - _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) - } if au.mutation.StartedAtCleared() { _spec.ClearField(alert.FieldStartedAt, field.TypeTime) } - if value, ok := au.mutation.StoppedAt(); ok { - _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) - } if au.mutation.StoppedAtCleared() { _spec.ClearField(alert.FieldStoppedAt, field.TypeTime) } - if value, ok := au.mutation.SourceIp(); ok { - _spec.SetField(alert.FieldSourceIp, field.TypeString, value) - } if au.mutation.SourceIpCleared() { _spec.ClearField(alert.FieldSourceIp, field.TypeString) } - if value, ok := au.mutation.SourceRange(); ok { - 
_spec.SetField(alert.FieldSourceRange, field.TypeString, value) - } if au.mutation.SourceRangeCleared() { _spec.ClearField(alert.FieldSourceRange, field.TypeString) } - if value, ok := au.mutation.SourceAsNumber(); ok { - _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) - } if au.mutation.SourceAsNumberCleared() { _spec.ClearField(alert.FieldSourceAsNumber, field.TypeString) } - if value, ok := au.mutation.SourceAsName(); ok { - _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) - } if au.mutation.SourceAsNameCleared() { _spec.ClearField(alert.FieldSourceAsName, field.TypeString) } - if value, ok := au.mutation.SourceCountry(); ok { - _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) - } if au.mutation.SourceCountryCleared() { _spec.ClearField(alert.FieldSourceCountry, field.TypeString) } - if value, ok := au.mutation.SourceLatitude(); ok { - _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) - } - if value, ok := au.mutation.AddedSourceLatitude(); ok { - _spec.AddField(alert.FieldSourceLatitude, field.TypeFloat32, value) - } if au.mutation.SourceLatitudeCleared() { _spec.ClearField(alert.FieldSourceLatitude, field.TypeFloat32) } - if value, ok := au.mutation.SourceLongitude(); ok { - _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) - } - if value, ok := au.mutation.AddedSourceLongitude(); ok { - _spec.AddField(alert.FieldSourceLongitude, field.TypeFloat32, value) - } if au.mutation.SourceLongitudeCleared() { _spec.ClearField(alert.FieldSourceLongitude, field.TypeFloat32) } - if value, ok := au.mutation.SourceScope(); ok { - _spec.SetField(alert.FieldSourceScope, field.TypeString, value) - } if au.mutation.SourceScopeCleared() { _spec.ClearField(alert.FieldSourceScope, field.TypeString) } - if value, ok := au.mutation.SourceValue(); ok { - _spec.SetField(alert.FieldSourceValue, field.TypeString, value) - } if au.mutation.SourceValueCleared() { 
_spec.ClearField(alert.FieldSourceValue, field.TypeString) } - if value, ok := au.mutation.Capacity(); ok { - _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) - } - if value, ok := au.mutation.AddedCapacity(); ok { - _spec.AddField(alert.FieldCapacity, field.TypeInt32, value) - } if au.mutation.CapacityCleared() { _spec.ClearField(alert.FieldCapacity, field.TypeInt32) } - if value, ok := au.mutation.LeakSpeed(); ok { - _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) - } if au.mutation.LeakSpeedCleared() { _spec.ClearField(alert.FieldLeakSpeed, field.TypeString) } - if value, ok := au.mutation.ScenarioVersion(); ok { - _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) - } if au.mutation.ScenarioVersionCleared() { _spec.ClearField(alert.FieldScenarioVersion, field.TypeString) } - if value, ok := au.mutation.ScenarioHash(); ok { - _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) - } if au.mutation.ScenarioHashCleared() { _spec.ClearField(alert.FieldScenarioHash, field.TypeString) } - if value, ok := au.mutation.Simulated(); ok { - _spec.SetField(alert.FieldSimulated, field.TypeBool, value) - } - if value, ok := au.mutation.UUID(); ok { - _spec.SetField(alert.FieldUUID, field.TypeString, value) - } if au.mutation.UUIDCleared() { _spec.ClearField(alert.FieldUUID, field.TypeString) } @@ -982,442 +471,6 @@ func (auo *AlertUpdateOne) SetUpdatedAt(t time.Time) *AlertUpdateOne { return auo } -// SetScenario sets the "scenario" field. -func (auo *AlertUpdateOne) SetScenario(s string) *AlertUpdateOne { - auo.mutation.SetScenario(s) - return auo -} - -// SetNillableScenario sets the "scenario" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableScenario(s *string) *AlertUpdateOne { - if s != nil { - auo.SetScenario(*s) - } - return auo -} - -// SetBucketId sets the "bucketId" field. 
-func (auo *AlertUpdateOne) SetBucketId(s string) *AlertUpdateOne { - auo.mutation.SetBucketId(s) - return auo -} - -// SetNillableBucketId sets the "bucketId" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableBucketId(s *string) *AlertUpdateOne { - if s != nil { - auo.SetBucketId(*s) - } - return auo -} - -// ClearBucketId clears the value of the "bucketId" field. -func (auo *AlertUpdateOne) ClearBucketId() *AlertUpdateOne { - auo.mutation.ClearBucketId() - return auo -} - -// SetMessage sets the "message" field. -func (auo *AlertUpdateOne) SetMessage(s string) *AlertUpdateOne { - auo.mutation.SetMessage(s) - return auo -} - -// SetNillableMessage sets the "message" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableMessage(s *string) *AlertUpdateOne { - if s != nil { - auo.SetMessage(*s) - } - return auo -} - -// ClearMessage clears the value of the "message" field. -func (auo *AlertUpdateOne) ClearMessage() *AlertUpdateOne { - auo.mutation.ClearMessage() - return auo -} - -// SetEventsCount sets the "eventsCount" field. -func (auo *AlertUpdateOne) SetEventsCount(i int32) *AlertUpdateOne { - auo.mutation.ResetEventsCount() - auo.mutation.SetEventsCount(i) - return auo -} - -// SetNillableEventsCount sets the "eventsCount" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableEventsCount(i *int32) *AlertUpdateOne { - if i != nil { - auo.SetEventsCount(*i) - } - return auo -} - -// AddEventsCount adds i to the "eventsCount" field. -func (auo *AlertUpdateOne) AddEventsCount(i int32) *AlertUpdateOne { - auo.mutation.AddEventsCount(i) - return auo -} - -// ClearEventsCount clears the value of the "eventsCount" field. -func (auo *AlertUpdateOne) ClearEventsCount() *AlertUpdateOne { - auo.mutation.ClearEventsCount() - return auo -} - -// SetStartedAt sets the "startedAt" field. 
-func (auo *AlertUpdateOne) SetStartedAt(t time.Time) *AlertUpdateOne { - auo.mutation.SetStartedAt(t) - return auo -} - -// SetNillableStartedAt sets the "startedAt" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableStartedAt(t *time.Time) *AlertUpdateOne { - if t != nil { - auo.SetStartedAt(*t) - } - return auo -} - -// ClearStartedAt clears the value of the "startedAt" field. -func (auo *AlertUpdateOne) ClearStartedAt() *AlertUpdateOne { - auo.mutation.ClearStartedAt() - return auo -} - -// SetStoppedAt sets the "stoppedAt" field. -func (auo *AlertUpdateOne) SetStoppedAt(t time.Time) *AlertUpdateOne { - auo.mutation.SetStoppedAt(t) - return auo -} - -// SetNillableStoppedAt sets the "stoppedAt" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableStoppedAt(t *time.Time) *AlertUpdateOne { - if t != nil { - auo.SetStoppedAt(*t) - } - return auo -} - -// ClearStoppedAt clears the value of the "stoppedAt" field. -func (auo *AlertUpdateOne) ClearStoppedAt() *AlertUpdateOne { - auo.mutation.ClearStoppedAt() - return auo -} - -// SetSourceIp sets the "sourceIp" field. -func (auo *AlertUpdateOne) SetSourceIp(s string) *AlertUpdateOne { - auo.mutation.SetSourceIp(s) - return auo -} - -// SetNillableSourceIp sets the "sourceIp" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceIp(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceIp(*s) - } - return auo -} - -// ClearSourceIp clears the value of the "sourceIp" field. -func (auo *AlertUpdateOne) ClearSourceIp() *AlertUpdateOne { - auo.mutation.ClearSourceIp() - return auo -} - -// SetSourceRange sets the "sourceRange" field. -func (auo *AlertUpdateOne) SetSourceRange(s string) *AlertUpdateOne { - auo.mutation.SetSourceRange(s) - return auo -} - -// SetNillableSourceRange sets the "sourceRange" field if the given value is not nil. 
-func (auo *AlertUpdateOne) SetNillableSourceRange(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceRange(*s) - } - return auo -} - -// ClearSourceRange clears the value of the "sourceRange" field. -func (auo *AlertUpdateOne) ClearSourceRange() *AlertUpdateOne { - auo.mutation.ClearSourceRange() - return auo -} - -// SetSourceAsNumber sets the "sourceAsNumber" field. -func (auo *AlertUpdateOne) SetSourceAsNumber(s string) *AlertUpdateOne { - auo.mutation.SetSourceAsNumber(s) - return auo -} - -// SetNillableSourceAsNumber sets the "sourceAsNumber" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceAsNumber(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceAsNumber(*s) - } - return auo -} - -// ClearSourceAsNumber clears the value of the "sourceAsNumber" field. -func (auo *AlertUpdateOne) ClearSourceAsNumber() *AlertUpdateOne { - auo.mutation.ClearSourceAsNumber() - return auo -} - -// SetSourceAsName sets the "sourceAsName" field. -func (auo *AlertUpdateOne) SetSourceAsName(s string) *AlertUpdateOne { - auo.mutation.SetSourceAsName(s) - return auo -} - -// SetNillableSourceAsName sets the "sourceAsName" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceAsName(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceAsName(*s) - } - return auo -} - -// ClearSourceAsName clears the value of the "sourceAsName" field. -func (auo *AlertUpdateOne) ClearSourceAsName() *AlertUpdateOne { - auo.mutation.ClearSourceAsName() - return auo -} - -// SetSourceCountry sets the "sourceCountry" field. -func (auo *AlertUpdateOne) SetSourceCountry(s string) *AlertUpdateOne { - auo.mutation.SetSourceCountry(s) - return auo -} - -// SetNillableSourceCountry sets the "sourceCountry" field if the given value is not nil. 
-func (auo *AlertUpdateOne) SetNillableSourceCountry(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceCountry(*s) - } - return auo -} - -// ClearSourceCountry clears the value of the "sourceCountry" field. -func (auo *AlertUpdateOne) ClearSourceCountry() *AlertUpdateOne { - auo.mutation.ClearSourceCountry() - return auo -} - -// SetSourceLatitude sets the "sourceLatitude" field. -func (auo *AlertUpdateOne) SetSourceLatitude(f float32) *AlertUpdateOne { - auo.mutation.ResetSourceLatitude() - auo.mutation.SetSourceLatitude(f) - return auo -} - -// SetNillableSourceLatitude sets the "sourceLatitude" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceLatitude(f *float32) *AlertUpdateOne { - if f != nil { - auo.SetSourceLatitude(*f) - } - return auo -} - -// AddSourceLatitude adds f to the "sourceLatitude" field. -func (auo *AlertUpdateOne) AddSourceLatitude(f float32) *AlertUpdateOne { - auo.mutation.AddSourceLatitude(f) - return auo -} - -// ClearSourceLatitude clears the value of the "sourceLatitude" field. -func (auo *AlertUpdateOne) ClearSourceLatitude() *AlertUpdateOne { - auo.mutation.ClearSourceLatitude() - return auo -} - -// SetSourceLongitude sets the "sourceLongitude" field. -func (auo *AlertUpdateOne) SetSourceLongitude(f float32) *AlertUpdateOne { - auo.mutation.ResetSourceLongitude() - auo.mutation.SetSourceLongitude(f) - return auo -} - -// SetNillableSourceLongitude sets the "sourceLongitude" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceLongitude(f *float32) *AlertUpdateOne { - if f != nil { - auo.SetSourceLongitude(*f) - } - return auo -} - -// AddSourceLongitude adds f to the "sourceLongitude" field. -func (auo *AlertUpdateOne) AddSourceLongitude(f float32) *AlertUpdateOne { - auo.mutation.AddSourceLongitude(f) - return auo -} - -// ClearSourceLongitude clears the value of the "sourceLongitude" field. 
-func (auo *AlertUpdateOne) ClearSourceLongitude() *AlertUpdateOne { - auo.mutation.ClearSourceLongitude() - return auo -} - -// SetSourceScope sets the "sourceScope" field. -func (auo *AlertUpdateOne) SetSourceScope(s string) *AlertUpdateOne { - auo.mutation.SetSourceScope(s) - return auo -} - -// SetNillableSourceScope sets the "sourceScope" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceScope(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceScope(*s) - } - return auo -} - -// ClearSourceScope clears the value of the "sourceScope" field. -func (auo *AlertUpdateOne) ClearSourceScope() *AlertUpdateOne { - auo.mutation.ClearSourceScope() - return auo -} - -// SetSourceValue sets the "sourceValue" field. -func (auo *AlertUpdateOne) SetSourceValue(s string) *AlertUpdateOne { - auo.mutation.SetSourceValue(s) - return auo -} - -// SetNillableSourceValue sets the "sourceValue" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSourceValue(s *string) *AlertUpdateOne { - if s != nil { - auo.SetSourceValue(*s) - } - return auo -} - -// ClearSourceValue clears the value of the "sourceValue" field. -func (auo *AlertUpdateOne) ClearSourceValue() *AlertUpdateOne { - auo.mutation.ClearSourceValue() - return auo -} - -// SetCapacity sets the "capacity" field. -func (auo *AlertUpdateOne) SetCapacity(i int32) *AlertUpdateOne { - auo.mutation.ResetCapacity() - auo.mutation.SetCapacity(i) - return auo -} - -// SetNillableCapacity sets the "capacity" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableCapacity(i *int32) *AlertUpdateOne { - if i != nil { - auo.SetCapacity(*i) - } - return auo -} - -// AddCapacity adds i to the "capacity" field. -func (auo *AlertUpdateOne) AddCapacity(i int32) *AlertUpdateOne { - auo.mutation.AddCapacity(i) - return auo -} - -// ClearCapacity clears the value of the "capacity" field. 
-func (auo *AlertUpdateOne) ClearCapacity() *AlertUpdateOne { - auo.mutation.ClearCapacity() - return auo -} - -// SetLeakSpeed sets the "leakSpeed" field. -func (auo *AlertUpdateOne) SetLeakSpeed(s string) *AlertUpdateOne { - auo.mutation.SetLeakSpeed(s) - return auo -} - -// SetNillableLeakSpeed sets the "leakSpeed" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableLeakSpeed(s *string) *AlertUpdateOne { - if s != nil { - auo.SetLeakSpeed(*s) - } - return auo -} - -// ClearLeakSpeed clears the value of the "leakSpeed" field. -func (auo *AlertUpdateOne) ClearLeakSpeed() *AlertUpdateOne { - auo.mutation.ClearLeakSpeed() - return auo -} - -// SetScenarioVersion sets the "scenarioVersion" field. -func (auo *AlertUpdateOne) SetScenarioVersion(s string) *AlertUpdateOne { - auo.mutation.SetScenarioVersion(s) - return auo -} - -// SetNillableScenarioVersion sets the "scenarioVersion" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableScenarioVersion(s *string) *AlertUpdateOne { - if s != nil { - auo.SetScenarioVersion(*s) - } - return auo -} - -// ClearScenarioVersion clears the value of the "scenarioVersion" field. -func (auo *AlertUpdateOne) ClearScenarioVersion() *AlertUpdateOne { - auo.mutation.ClearScenarioVersion() - return auo -} - -// SetScenarioHash sets the "scenarioHash" field. -func (auo *AlertUpdateOne) SetScenarioHash(s string) *AlertUpdateOne { - auo.mutation.SetScenarioHash(s) - return auo -} - -// SetNillableScenarioHash sets the "scenarioHash" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableScenarioHash(s *string) *AlertUpdateOne { - if s != nil { - auo.SetScenarioHash(*s) - } - return auo -} - -// ClearScenarioHash clears the value of the "scenarioHash" field. -func (auo *AlertUpdateOne) ClearScenarioHash() *AlertUpdateOne { - auo.mutation.ClearScenarioHash() - return auo -} - -// SetSimulated sets the "simulated" field. 
-func (auo *AlertUpdateOne) SetSimulated(b bool) *AlertUpdateOne { - auo.mutation.SetSimulated(b) - return auo -} - -// SetNillableSimulated sets the "simulated" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableSimulated(b *bool) *AlertUpdateOne { - if b != nil { - auo.SetSimulated(*b) - } - return auo -} - -// SetUUID sets the "uuid" field. -func (auo *AlertUpdateOne) SetUUID(s string) *AlertUpdateOne { - auo.mutation.SetUUID(s) - return auo -} - -// SetNillableUUID sets the "uuid" field if the given value is not nil. -func (auo *AlertUpdateOne) SetNillableUUID(s *string) *AlertUpdateOne { - if s != nil { - auo.SetUUID(*s) - } - return auo -} - -// ClearUUID clears the value of the "uuid" field. -func (auo *AlertUpdateOne) ClearUUID() *AlertUpdateOne { - auo.mutation.ClearUUID() - return auo -} - // SetOwnerID sets the "owner" edge to the Machine entity by ID. func (auo *AlertUpdateOne) SetOwnerID(id int) *AlertUpdateOne { auo.mutation.SetOwnerID(id) @@ -1634,135 +687,60 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error if value, ok := auo.mutation.UpdatedAt(); ok { _spec.SetField(alert.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := auo.mutation.Scenario(); ok { - _spec.SetField(alert.FieldScenario, field.TypeString, value) - } - if value, ok := auo.mutation.BucketId(); ok { - _spec.SetField(alert.FieldBucketId, field.TypeString, value) - } if auo.mutation.BucketIdCleared() { _spec.ClearField(alert.FieldBucketId, field.TypeString) } - if value, ok := auo.mutation.Message(); ok { - _spec.SetField(alert.FieldMessage, field.TypeString, value) - } if auo.mutation.MessageCleared() { _spec.ClearField(alert.FieldMessage, field.TypeString) } - if value, ok := auo.mutation.EventsCount(); ok { - _spec.SetField(alert.FieldEventsCount, field.TypeInt32, value) - } - if value, ok := auo.mutation.AddedEventsCount(); ok { - _spec.AddField(alert.FieldEventsCount, field.TypeInt32, value) - } if 
auo.mutation.EventsCountCleared() { _spec.ClearField(alert.FieldEventsCount, field.TypeInt32) } - if value, ok := auo.mutation.StartedAt(); ok { - _spec.SetField(alert.FieldStartedAt, field.TypeTime, value) - } if auo.mutation.StartedAtCleared() { _spec.ClearField(alert.FieldStartedAt, field.TypeTime) } - if value, ok := auo.mutation.StoppedAt(); ok { - _spec.SetField(alert.FieldStoppedAt, field.TypeTime, value) - } if auo.mutation.StoppedAtCleared() { _spec.ClearField(alert.FieldStoppedAt, field.TypeTime) } - if value, ok := auo.mutation.SourceIp(); ok { - _spec.SetField(alert.FieldSourceIp, field.TypeString, value) - } if auo.mutation.SourceIpCleared() { _spec.ClearField(alert.FieldSourceIp, field.TypeString) } - if value, ok := auo.mutation.SourceRange(); ok { - _spec.SetField(alert.FieldSourceRange, field.TypeString, value) - } if auo.mutation.SourceRangeCleared() { _spec.ClearField(alert.FieldSourceRange, field.TypeString) } - if value, ok := auo.mutation.SourceAsNumber(); ok { - _spec.SetField(alert.FieldSourceAsNumber, field.TypeString, value) - } if auo.mutation.SourceAsNumberCleared() { _spec.ClearField(alert.FieldSourceAsNumber, field.TypeString) } - if value, ok := auo.mutation.SourceAsName(); ok { - _spec.SetField(alert.FieldSourceAsName, field.TypeString, value) - } if auo.mutation.SourceAsNameCleared() { _spec.ClearField(alert.FieldSourceAsName, field.TypeString) } - if value, ok := auo.mutation.SourceCountry(); ok { - _spec.SetField(alert.FieldSourceCountry, field.TypeString, value) - } if auo.mutation.SourceCountryCleared() { _spec.ClearField(alert.FieldSourceCountry, field.TypeString) } - if value, ok := auo.mutation.SourceLatitude(); ok { - _spec.SetField(alert.FieldSourceLatitude, field.TypeFloat32, value) - } - if value, ok := auo.mutation.AddedSourceLatitude(); ok { - _spec.AddField(alert.FieldSourceLatitude, field.TypeFloat32, value) - } if auo.mutation.SourceLatitudeCleared() { _spec.ClearField(alert.FieldSourceLatitude, field.TypeFloat32) } 
- if value, ok := auo.mutation.SourceLongitude(); ok { - _spec.SetField(alert.FieldSourceLongitude, field.TypeFloat32, value) - } - if value, ok := auo.mutation.AddedSourceLongitude(); ok { - _spec.AddField(alert.FieldSourceLongitude, field.TypeFloat32, value) - } if auo.mutation.SourceLongitudeCleared() { _spec.ClearField(alert.FieldSourceLongitude, field.TypeFloat32) } - if value, ok := auo.mutation.SourceScope(); ok { - _spec.SetField(alert.FieldSourceScope, field.TypeString, value) - } if auo.mutation.SourceScopeCleared() { _spec.ClearField(alert.FieldSourceScope, field.TypeString) } - if value, ok := auo.mutation.SourceValue(); ok { - _spec.SetField(alert.FieldSourceValue, field.TypeString, value) - } if auo.mutation.SourceValueCleared() { _spec.ClearField(alert.FieldSourceValue, field.TypeString) } - if value, ok := auo.mutation.Capacity(); ok { - _spec.SetField(alert.FieldCapacity, field.TypeInt32, value) - } - if value, ok := auo.mutation.AddedCapacity(); ok { - _spec.AddField(alert.FieldCapacity, field.TypeInt32, value) - } if auo.mutation.CapacityCleared() { _spec.ClearField(alert.FieldCapacity, field.TypeInt32) } - if value, ok := auo.mutation.LeakSpeed(); ok { - _spec.SetField(alert.FieldLeakSpeed, field.TypeString, value) - } if auo.mutation.LeakSpeedCleared() { _spec.ClearField(alert.FieldLeakSpeed, field.TypeString) } - if value, ok := auo.mutation.ScenarioVersion(); ok { - _spec.SetField(alert.FieldScenarioVersion, field.TypeString, value) - } if auo.mutation.ScenarioVersionCleared() { _spec.ClearField(alert.FieldScenarioVersion, field.TypeString) } - if value, ok := auo.mutation.ScenarioHash(); ok { - _spec.SetField(alert.FieldScenarioHash, field.TypeString, value) - } if auo.mutation.ScenarioHashCleared() { _spec.ClearField(alert.FieldScenarioHash, field.TypeString) } - if value, ok := auo.mutation.Simulated(); ok { - _spec.SetField(alert.FieldSimulated, field.TypeBool, value) - } - if value, ok := auo.mutation.UUID(); ok { - 
_spec.SetField(alert.FieldUUID, field.TypeString, value) - } if auo.mutation.UUIDCleared() { _spec.ClearField(alert.FieldUUID, field.TypeString) } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 7a3b280f53d..973442bfa66 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -33,8 +33,6 @@ type Bouncer struct { Type string `json:"type"` // Version holds the value of the "version" field. Version string `json:"version"` - // Until holds the value of the "until" field. - Until time.Time `json:"until"` // LastPull holds the value of the "last_pull" field. LastPull time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. @@ -53,7 +51,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullInt64) case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType: values[i] = new(sql.NullString) - case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldUntil, bouncer.FieldLastPull: + case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldLastPull: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -124,12 +122,6 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.Version = value.String } - case bouncer.FieldUntil: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field until", values[i]) - } else if value.Valid { - b.Until = value.Time - } case bouncer.FieldLastPull: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field last_pull", values[i]) @@ -201,9 +193,6 @@ func (b *Bouncer) String() string { builder.WriteString("version=") builder.WriteString(b.Version) builder.WriteString(", ") - builder.WriteString("until=") - builder.WriteString(b.Until.Format(time.ANSIC)) - builder.WriteString(", ") 
builder.WriteString("last_pull=") builder.WriteString(b.LastPull.Format(time.ANSIC)) builder.WriteString(", ") diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 7683c07752b..3f201347e40 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -29,8 +29,6 @@ const ( FieldType = "type" // FieldVersion holds the string denoting the version field in the database. FieldVersion = "version" - // FieldUntil holds the string denoting the until field in the database. - FieldUntil = "until" // FieldLastPull holds the string denoting the last_pull field in the database. FieldLastPull = "last_pull" // FieldAuthType holds the string denoting the auth_type field in the database. @@ -50,7 +48,6 @@ var Columns = []string{ FieldIPAddress, FieldType, FieldVersion, - FieldUntil, FieldLastPull, FieldAuthType, } @@ -74,8 +71,6 @@ var ( UpdateDefaultUpdatedAt func() time.Time // DefaultIPAddress holds the default value on creation for the "ip_address" field. DefaultIPAddress string - // DefaultUntil holds the default value on creation for the "until" field. - DefaultUntil func() time.Time // DefaultLastPull holds the default value on creation for the "last_pull" field. DefaultLastPull func() time.Time // DefaultAuthType holds the default value on creation for the "auth_type" field. @@ -130,11 +125,6 @@ func ByVersion(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldVersion, opts...).ToFunc() } -// ByUntil orders the results by the until field. -func ByUntil(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUntil, opts...).ToFunc() -} - // ByLastPull orders the results by the last_pull field. 
func ByLastPull(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldLastPull, opts...).ToFunc() diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index ee28d7bb6ff..86079794fee 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -94,11 +94,6 @@ func Version(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldVersion, v)) } -// Until applies equality check predicate on the "until" field. It's identical to UntilEQ. -func Until(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldEQ(FieldUntil, v)) -} - // LastPull applies equality check predicate on the "last_pull" field. It's identical to LastPullEQ. func LastPull(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldLastPull, v)) @@ -554,56 +549,6 @@ func VersionContainsFold(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldContainsFold(FieldVersion, v)) } -// UntilEQ applies the EQ predicate on the "until" field. -func UntilEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldEQ(FieldUntil, v)) -} - -// UntilNEQ applies the NEQ predicate on the "until" field. -func UntilNEQ(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldNEQ(FieldUntil, v)) -} - -// UntilIn applies the In predicate on the "until" field. -func UntilIn(vs ...time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldIn(FieldUntil, vs...)) -} - -// UntilNotIn applies the NotIn predicate on the "until" field. -func UntilNotIn(vs ...time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotIn(FieldUntil, vs...)) -} - -// UntilGT applies the GT predicate on the "until" field. -func UntilGT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldGT(FieldUntil, v)) -} - -// UntilGTE applies the GTE predicate on the "until" field. 
-func UntilGTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldGTE(FieldUntil, v)) -} - -// UntilLT applies the LT predicate on the "until" field. -func UntilLT(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldLT(FieldUntil, v)) -} - -// UntilLTE applies the LTE predicate on the "until" field. -func UntilLTE(v time.Time) predicate.Bouncer { - return predicate.Bouncer(sql.FieldLTE(FieldUntil, v)) -} - -// UntilIsNil applies the IsNil predicate on the "until" field. -func UntilIsNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldIsNull(FieldUntil)) -} - -// UntilNotNil applies the NotNil predicate on the "until" field. -func UntilNotNil() predicate.Bouncer { - return predicate.Bouncer(sql.FieldNotNull(FieldUntil)) -} - // LastPullEQ applies the EQ predicate on the "last_pull" field. func LastPullEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldLastPull, v)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index ceff4db583e..7a4b3d9b013 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -108,20 +108,6 @@ func (bc *BouncerCreate) SetNillableVersion(s *string) *BouncerCreate { return bc } -// SetUntil sets the "until" field. -func (bc *BouncerCreate) SetUntil(t time.Time) *BouncerCreate { - bc.mutation.SetUntil(t) - return bc -} - -// SetNillableUntil sets the "until" field if the given value is not nil. -func (bc *BouncerCreate) SetNillableUntil(t *time.Time) *BouncerCreate { - if t != nil { - bc.SetUntil(*t) - } - return bc -} - // SetLastPull sets the "last_pull" field. 
func (bc *BouncerCreate) SetLastPull(t time.Time) *BouncerCreate { bc.mutation.SetLastPull(t) @@ -197,10 +183,6 @@ func (bc *BouncerCreate) defaults() { v := bouncer.DefaultIPAddress bc.mutation.SetIPAddress(v) } - if _, ok := bc.mutation.Until(); !ok { - v := bouncer.DefaultUntil() - bc.mutation.SetUntil(v) - } if _, ok := bc.mutation.LastPull(); !ok { v := bouncer.DefaultLastPull() bc.mutation.SetLastPull(v) @@ -292,10 +274,6 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { _spec.SetField(bouncer.FieldVersion, field.TypeString, value) _node.Version = value } - if value, ok := bc.mutation.Until(); ok { - _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) - _node.Until = value - } if value, ok := bc.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) _node.LastPull = value diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index 76968cf5202..1dc5aa080c3 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -28,40 +28,12 @@ func (bu *BouncerUpdate) Where(ps ...predicate.Bouncer) *BouncerUpdate { return bu } -// SetCreatedAt sets the "created_at" field. -func (bu *BouncerUpdate) SetCreatedAt(t time.Time) *BouncerUpdate { - bu.mutation.SetCreatedAt(t) - return bu -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (bu *BouncerUpdate) SetNillableCreatedAt(t *time.Time) *BouncerUpdate { - if t != nil { - bu.SetCreatedAt(*t) - } - return bu -} - // SetUpdatedAt sets the "updated_at" field. func (bu *BouncerUpdate) SetUpdatedAt(t time.Time) *BouncerUpdate { bu.mutation.SetUpdatedAt(t) return bu } -// SetName sets the "name" field. -func (bu *BouncerUpdate) SetName(s string) *BouncerUpdate { - bu.mutation.SetName(s) - return bu -} - -// SetNillableName sets the "name" field if the given value is not nil. 
-func (bu *BouncerUpdate) SetNillableName(s *string) *BouncerUpdate { - if s != nil { - bu.SetName(*s) - } - return bu -} - // SetAPIKey sets the "api_key" field. func (bu *BouncerUpdate) SetAPIKey(s string) *BouncerUpdate { bu.mutation.SetAPIKey(s) @@ -150,26 +122,6 @@ func (bu *BouncerUpdate) ClearVersion() *BouncerUpdate { return bu } -// SetUntil sets the "until" field. -func (bu *BouncerUpdate) SetUntil(t time.Time) *BouncerUpdate { - bu.mutation.SetUntil(t) - return bu -} - -// SetNillableUntil sets the "until" field if the given value is not nil. -func (bu *BouncerUpdate) SetNillableUntil(t *time.Time) *BouncerUpdate { - if t != nil { - bu.SetUntil(*t) - } - return bu -} - -// ClearUntil clears the value of the "until" field. -func (bu *BouncerUpdate) ClearUntil() *BouncerUpdate { - bu.mutation.ClearUntil() - return bu -} - // SetLastPull sets the "last_pull" field. func (bu *BouncerUpdate) SetLastPull(t time.Time) *BouncerUpdate { bu.mutation.SetLastPull(t) @@ -248,15 +200,9 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := bu.mutation.CreatedAt(); ok { - _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) - } if value, ok := bu.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := bu.mutation.Name(); ok { - _spec.SetField(bouncer.FieldName, field.TypeString, value) - } if value, ok := bu.mutation.APIKey(); ok { _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) } @@ -281,12 +227,6 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if bu.mutation.VersionCleared() { _spec.ClearField(bouncer.FieldVersion, field.TypeString) } - if value, ok := bu.mutation.Until(); ok { - _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) - } - if bu.mutation.UntilCleared() { - _spec.ClearField(bouncer.FieldUntil, field.TypeTime) - } if value, ok := bu.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, 
field.TypeTime, value) } @@ -313,40 +253,12 @@ type BouncerUpdateOne struct { mutation *BouncerMutation } -// SetCreatedAt sets the "created_at" field. -func (buo *BouncerUpdateOne) SetCreatedAt(t time.Time) *BouncerUpdateOne { - buo.mutation.SetCreatedAt(t) - return buo -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (buo *BouncerUpdateOne) SetNillableCreatedAt(t *time.Time) *BouncerUpdateOne { - if t != nil { - buo.SetCreatedAt(*t) - } - return buo -} - // SetUpdatedAt sets the "updated_at" field. func (buo *BouncerUpdateOne) SetUpdatedAt(t time.Time) *BouncerUpdateOne { buo.mutation.SetUpdatedAt(t) return buo } -// SetName sets the "name" field. -func (buo *BouncerUpdateOne) SetName(s string) *BouncerUpdateOne { - buo.mutation.SetName(s) - return buo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (buo *BouncerUpdateOne) SetNillableName(s *string) *BouncerUpdateOne { - if s != nil { - buo.SetName(*s) - } - return buo -} - // SetAPIKey sets the "api_key" field. func (buo *BouncerUpdateOne) SetAPIKey(s string) *BouncerUpdateOne { buo.mutation.SetAPIKey(s) @@ -435,26 +347,6 @@ func (buo *BouncerUpdateOne) ClearVersion() *BouncerUpdateOne { return buo } -// SetUntil sets the "until" field. -func (buo *BouncerUpdateOne) SetUntil(t time.Time) *BouncerUpdateOne { - buo.mutation.SetUntil(t) - return buo -} - -// SetNillableUntil sets the "until" field if the given value is not nil. -func (buo *BouncerUpdateOne) SetNillableUntil(t *time.Time) *BouncerUpdateOne { - if t != nil { - buo.SetUntil(*t) - } - return buo -} - -// ClearUntil clears the value of the "until" field. -func (buo *BouncerUpdateOne) ClearUntil() *BouncerUpdateOne { - buo.mutation.ClearUntil() - return buo -} - // SetLastPull sets the "last_pull" field. 
func (buo *BouncerUpdateOne) SetLastPull(t time.Time) *BouncerUpdateOne { buo.mutation.SetLastPull(t) @@ -563,15 +455,9 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e } } } - if value, ok := buo.mutation.CreatedAt(); ok { - _spec.SetField(bouncer.FieldCreatedAt, field.TypeTime, value) - } if value, ok := buo.mutation.UpdatedAt(); ok { _spec.SetField(bouncer.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := buo.mutation.Name(); ok { - _spec.SetField(bouncer.FieldName, field.TypeString, value) - } if value, ok := buo.mutation.APIKey(); ok { _spec.SetField(bouncer.FieldAPIKey, field.TypeString, value) } @@ -596,12 +482,6 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if buo.mutation.VersionCleared() { _spec.ClearField(bouncer.FieldVersion, field.TypeString) } - if value, ok := buo.mutation.Until(); ok { - _spec.SetField(bouncer.FieldUntil, field.TypeTime, value) - } - if buo.mutation.UntilCleared() { - _spec.ClearField(bouncer.FieldUntil, field.TypeTime) - } if value, ok := buo.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } diff --git a/pkg/database/ent/configitem_update.go b/pkg/database/ent/configitem_update.go index d4f1f15d23a..82309459e76 100644 --- a/pkg/database/ent/configitem_update.go +++ b/pkg/database/ent/configitem_update.go @@ -34,20 +34,6 @@ func (ciu *ConfigItemUpdate) SetUpdatedAt(t time.Time) *ConfigItemUpdate { return ciu } -// SetName sets the "name" field. -func (ciu *ConfigItemUpdate) SetName(s string) *ConfigItemUpdate { - ciu.mutation.SetName(s) - return ciu -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (ciu *ConfigItemUpdate) SetNillableName(s *string) *ConfigItemUpdate { - if s != nil { - ciu.SetName(*s) - } - return ciu -} - // SetValue sets the "value" field. 
func (ciu *ConfigItemUpdate) SetValue(s string) *ConfigItemUpdate { ciu.mutation.SetValue(s) @@ -115,9 +101,6 @@ func (ciu *ConfigItemUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := ciu.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := ciu.mutation.Name(); ok { - _spec.SetField(configitem.FieldName, field.TypeString, value) - } if value, ok := ciu.mutation.Value(); ok { _spec.SetField(configitem.FieldValue, field.TypeString, value) } @@ -147,20 +130,6 @@ func (ciuo *ConfigItemUpdateOne) SetUpdatedAt(t time.Time) *ConfigItemUpdateOne return ciuo } -// SetName sets the "name" field. -func (ciuo *ConfigItemUpdateOne) SetName(s string) *ConfigItemUpdateOne { - ciuo.mutation.SetName(s) - return ciuo -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (ciuo *ConfigItemUpdateOne) SetNillableName(s *string) *ConfigItemUpdateOne { - if s != nil { - ciuo.SetName(*s) - } - return ciuo -} - // SetValue sets the "value" field. func (ciuo *ConfigItemUpdateOne) SetValue(s string) *ConfigItemUpdateOne { ciuo.mutation.SetValue(s) @@ -258,9 +227,6 @@ func (ciuo *ConfigItemUpdateOne) sqlSave(ctx context.Context) (_node *ConfigItem if value, ok := ciuo.mutation.UpdatedAt(); ok { _spec.SetField(configitem.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := ciuo.mutation.Name(); ok { - _spec.SetField(configitem.FieldName, field.TypeString, value) - } if value, ok := ciuo.mutation.Value(); ok { _spec.SetField(configitem.FieldValue, field.TypeString, value) } diff --git a/pkg/database/ent/decision_update.go b/pkg/database/ent/decision_update.go index 1bcb42f8c1f..68d0eb4ace7 100644 --- a/pkg/database/ent/decision_update.go +++ b/pkg/database/ent/decision_update.go @@ -55,245 +55,6 @@ func (du *DecisionUpdate) ClearUntil() *DecisionUpdate { return du } -// SetScenario sets the "scenario" field. 
-func (du *DecisionUpdate) SetScenario(s string) *DecisionUpdate { - du.mutation.SetScenario(s) - return du -} - -// SetNillableScenario sets the "scenario" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableScenario(s *string) *DecisionUpdate { - if s != nil { - du.SetScenario(*s) - } - return du -} - -// SetType sets the "type" field. -func (du *DecisionUpdate) SetType(s string) *DecisionUpdate { - du.mutation.SetType(s) - return du -} - -// SetNillableType sets the "type" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableType(s *string) *DecisionUpdate { - if s != nil { - du.SetType(*s) - } - return du -} - -// SetStartIP sets the "start_ip" field. -func (du *DecisionUpdate) SetStartIP(i int64) *DecisionUpdate { - du.mutation.ResetStartIP() - du.mutation.SetStartIP(i) - return du -} - -// SetNillableStartIP sets the "start_ip" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableStartIP(i *int64) *DecisionUpdate { - if i != nil { - du.SetStartIP(*i) - } - return du -} - -// AddStartIP adds i to the "start_ip" field. -func (du *DecisionUpdate) AddStartIP(i int64) *DecisionUpdate { - du.mutation.AddStartIP(i) - return du -} - -// ClearStartIP clears the value of the "start_ip" field. -func (du *DecisionUpdate) ClearStartIP() *DecisionUpdate { - du.mutation.ClearStartIP() - return du -} - -// SetEndIP sets the "end_ip" field. -func (du *DecisionUpdate) SetEndIP(i int64) *DecisionUpdate { - du.mutation.ResetEndIP() - du.mutation.SetEndIP(i) - return du -} - -// SetNillableEndIP sets the "end_ip" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableEndIP(i *int64) *DecisionUpdate { - if i != nil { - du.SetEndIP(*i) - } - return du -} - -// AddEndIP adds i to the "end_ip" field. -func (du *DecisionUpdate) AddEndIP(i int64) *DecisionUpdate { - du.mutation.AddEndIP(i) - return du -} - -// ClearEndIP clears the value of the "end_ip" field. 
-func (du *DecisionUpdate) ClearEndIP() *DecisionUpdate { - du.mutation.ClearEndIP() - return du -} - -// SetStartSuffix sets the "start_suffix" field. -func (du *DecisionUpdate) SetStartSuffix(i int64) *DecisionUpdate { - du.mutation.ResetStartSuffix() - du.mutation.SetStartSuffix(i) - return du -} - -// SetNillableStartSuffix sets the "start_suffix" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableStartSuffix(i *int64) *DecisionUpdate { - if i != nil { - du.SetStartSuffix(*i) - } - return du -} - -// AddStartSuffix adds i to the "start_suffix" field. -func (du *DecisionUpdate) AddStartSuffix(i int64) *DecisionUpdate { - du.mutation.AddStartSuffix(i) - return du -} - -// ClearStartSuffix clears the value of the "start_suffix" field. -func (du *DecisionUpdate) ClearStartSuffix() *DecisionUpdate { - du.mutation.ClearStartSuffix() - return du -} - -// SetEndSuffix sets the "end_suffix" field. -func (du *DecisionUpdate) SetEndSuffix(i int64) *DecisionUpdate { - du.mutation.ResetEndSuffix() - du.mutation.SetEndSuffix(i) - return du -} - -// SetNillableEndSuffix sets the "end_suffix" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableEndSuffix(i *int64) *DecisionUpdate { - if i != nil { - du.SetEndSuffix(*i) - } - return du -} - -// AddEndSuffix adds i to the "end_suffix" field. -func (du *DecisionUpdate) AddEndSuffix(i int64) *DecisionUpdate { - du.mutation.AddEndSuffix(i) - return du -} - -// ClearEndSuffix clears the value of the "end_suffix" field. -func (du *DecisionUpdate) ClearEndSuffix() *DecisionUpdate { - du.mutation.ClearEndSuffix() - return du -} - -// SetIPSize sets the "ip_size" field. -func (du *DecisionUpdate) SetIPSize(i int64) *DecisionUpdate { - du.mutation.ResetIPSize() - du.mutation.SetIPSize(i) - return du -} - -// SetNillableIPSize sets the "ip_size" field if the given value is not nil. 
-func (du *DecisionUpdate) SetNillableIPSize(i *int64) *DecisionUpdate { - if i != nil { - du.SetIPSize(*i) - } - return du -} - -// AddIPSize adds i to the "ip_size" field. -func (du *DecisionUpdate) AddIPSize(i int64) *DecisionUpdate { - du.mutation.AddIPSize(i) - return du -} - -// ClearIPSize clears the value of the "ip_size" field. -func (du *DecisionUpdate) ClearIPSize() *DecisionUpdate { - du.mutation.ClearIPSize() - return du -} - -// SetScope sets the "scope" field. -func (du *DecisionUpdate) SetScope(s string) *DecisionUpdate { - du.mutation.SetScope(s) - return du -} - -// SetNillableScope sets the "scope" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableScope(s *string) *DecisionUpdate { - if s != nil { - du.SetScope(*s) - } - return du -} - -// SetValue sets the "value" field. -func (du *DecisionUpdate) SetValue(s string) *DecisionUpdate { - du.mutation.SetValue(s) - return du -} - -// SetNillableValue sets the "value" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableValue(s *string) *DecisionUpdate { - if s != nil { - du.SetValue(*s) - } - return du -} - -// SetOrigin sets the "origin" field. -func (du *DecisionUpdate) SetOrigin(s string) *DecisionUpdate { - du.mutation.SetOrigin(s) - return du -} - -// SetNillableOrigin sets the "origin" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableOrigin(s *string) *DecisionUpdate { - if s != nil { - du.SetOrigin(*s) - } - return du -} - -// SetSimulated sets the "simulated" field. -func (du *DecisionUpdate) SetSimulated(b bool) *DecisionUpdate { - du.mutation.SetSimulated(b) - return du -} - -// SetNillableSimulated sets the "simulated" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableSimulated(b *bool) *DecisionUpdate { - if b != nil { - du.SetSimulated(*b) - } - return du -} - -// SetUUID sets the "uuid" field. 
-func (du *DecisionUpdate) SetUUID(s string) *DecisionUpdate { - du.mutation.SetUUID(s) - return du -} - -// SetNillableUUID sets the "uuid" field if the given value is not nil. -func (du *DecisionUpdate) SetNillableUUID(s *string) *DecisionUpdate { - if s != nil { - du.SetUUID(*s) - } - return du -} - -// ClearUUID clears the value of the "uuid" field. -func (du *DecisionUpdate) ClearUUID() *DecisionUpdate { - du.mutation.ClearUUID() - return du -} - // SetAlertDecisions sets the "alert_decisions" field. func (du *DecisionUpdate) SetAlertDecisions(i int) *DecisionUpdate { du.mutation.SetAlertDecisions(i) @@ -398,72 +159,21 @@ func (du *DecisionUpdate) sqlSave(ctx context.Context) (n int, err error) { if du.mutation.UntilCleared() { _spec.ClearField(decision.FieldUntil, field.TypeTime) } - if value, ok := du.mutation.Scenario(); ok { - _spec.SetField(decision.FieldScenario, field.TypeString, value) - } - if value, ok := du.mutation.GetType(); ok { - _spec.SetField(decision.FieldType, field.TypeString, value) - } - if value, ok := du.mutation.StartIP(); ok { - _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedStartIP(); ok { - _spec.AddField(decision.FieldStartIP, field.TypeInt64, value) - } if du.mutation.StartIPCleared() { _spec.ClearField(decision.FieldStartIP, field.TypeInt64) } - if value, ok := du.mutation.EndIP(); ok { - _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedEndIP(); ok { - _spec.AddField(decision.FieldEndIP, field.TypeInt64, value) - } if du.mutation.EndIPCleared() { _spec.ClearField(decision.FieldEndIP, field.TypeInt64) } - if value, ok := du.mutation.StartSuffix(); ok { - _spec.SetField(decision.FieldStartSuffix, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedStartSuffix(); ok { - _spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value) - } if du.mutation.StartSuffixCleared() { _spec.ClearField(decision.FieldStartSuffix, 
field.TypeInt64) } - if value, ok := du.mutation.EndSuffix(); ok { - _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedEndSuffix(); ok { - _spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value) - } if du.mutation.EndSuffixCleared() { _spec.ClearField(decision.FieldEndSuffix, field.TypeInt64) } - if value, ok := du.mutation.IPSize(); ok { - _spec.SetField(decision.FieldIPSize, field.TypeInt64, value) - } - if value, ok := du.mutation.AddedIPSize(); ok { - _spec.AddField(decision.FieldIPSize, field.TypeInt64, value) - } if du.mutation.IPSizeCleared() { _spec.ClearField(decision.FieldIPSize, field.TypeInt64) } - if value, ok := du.mutation.Scope(); ok { - _spec.SetField(decision.FieldScope, field.TypeString, value) - } - if value, ok := du.mutation.Value(); ok { - _spec.SetField(decision.FieldValue, field.TypeString, value) - } - if value, ok := du.mutation.Origin(); ok { - _spec.SetField(decision.FieldOrigin, field.TypeString, value) - } - if value, ok := du.mutation.Simulated(); ok { - _spec.SetField(decision.FieldSimulated, field.TypeBool, value) - } - if value, ok := du.mutation.UUID(); ok { - _spec.SetField(decision.FieldUUID, field.TypeString, value) - } if du.mutation.UUIDCleared() { _spec.ClearField(decision.FieldUUID, field.TypeString) } @@ -542,245 +252,6 @@ func (duo *DecisionUpdateOne) ClearUntil() *DecisionUpdateOne { return duo } -// SetScenario sets the "scenario" field. -func (duo *DecisionUpdateOne) SetScenario(s string) *DecisionUpdateOne { - duo.mutation.SetScenario(s) - return duo -} - -// SetNillableScenario sets the "scenario" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableScenario(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetScenario(*s) - } - return duo -} - -// SetType sets the "type" field. 
-func (duo *DecisionUpdateOne) SetType(s string) *DecisionUpdateOne { - duo.mutation.SetType(s) - return duo -} - -// SetNillableType sets the "type" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableType(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetType(*s) - } - return duo -} - -// SetStartIP sets the "start_ip" field. -func (duo *DecisionUpdateOne) SetStartIP(i int64) *DecisionUpdateOne { - duo.mutation.ResetStartIP() - duo.mutation.SetStartIP(i) - return duo -} - -// SetNillableStartIP sets the "start_ip" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableStartIP(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetStartIP(*i) - } - return duo -} - -// AddStartIP adds i to the "start_ip" field. -func (duo *DecisionUpdateOne) AddStartIP(i int64) *DecisionUpdateOne { - duo.mutation.AddStartIP(i) - return duo -} - -// ClearStartIP clears the value of the "start_ip" field. -func (duo *DecisionUpdateOne) ClearStartIP() *DecisionUpdateOne { - duo.mutation.ClearStartIP() - return duo -} - -// SetEndIP sets the "end_ip" field. -func (duo *DecisionUpdateOne) SetEndIP(i int64) *DecisionUpdateOne { - duo.mutation.ResetEndIP() - duo.mutation.SetEndIP(i) - return duo -} - -// SetNillableEndIP sets the "end_ip" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableEndIP(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetEndIP(*i) - } - return duo -} - -// AddEndIP adds i to the "end_ip" field. -func (duo *DecisionUpdateOne) AddEndIP(i int64) *DecisionUpdateOne { - duo.mutation.AddEndIP(i) - return duo -} - -// ClearEndIP clears the value of the "end_ip" field. -func (duo *DecisionUpdateOne) ClearEndIP() *DecisionUpdateOne { - duo.mutation.ClearEndIP() - return duo -} - -// SetStartSuffix sets the "start_suffix" field. 
-func (duo *DecisionUpdateOne) SetStartSuffix(i int64) *DecisionUpdateOne { - duo.mutation.ResetStartSuffix() - duo.mutation.SetStartSuffix(i) - return duo -} - -// SetNillableStartSuffix sets the "start_suffix" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableStartSuffix(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetStartSuffix(*i) - } - return duo -} - -// AddStartSuffix adds i to the "start_suffix" field. -func (duo *DecisionUpdateOne) AddStartSuffix(i int64) *DecisionUpdateOne { - duo.mutation.AddStartSuffix(i) - return duo -} - -// ClearStartSuffix clears the value of the "start_suffix" field. -func (duo *DecisionUpdateOne) ClearStartSuffix() *DecisionUpdateOne { - duo.mutation.ClearStartSuffix() - return duo -} - -// SetEndSuffix sets the "end_suffix" field. -func (duo *DecisionUpdateOne) SetEndSuffix(i int64) *DecisionUpdateOne { - duo.mutation.ResetEndSuffix() - duo.mutation.SetEndSuffix(i) - return duo -} - -// SetNillableEndSuffix sets the "end_suffix" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableEndSuffix(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetEndSuffix(*i) - } - return duo -} - -// AddEndSuffix adds i to the "end_suffix" field. -func (duo *DecisionUpdateOne) AddEndSuffix(i int64) *DecisionUpdateOne { - duo.mutation.AddEndSuffix(i) - return duo -} - -// ClearEndSuffix clears the value of the "end_suffix" field. -func (duo *DecisionUpdateOne) ClearEndSuffix() *DecisionUpdateOne { - duo.mutation.ClearEndSuffix() - return duo -} - -// SetIPSize sets the "ip_size" field. -func (duo *DecisionUpdateOne) SetIPSize(i int64) *DecisionUpdateOne { - duo.mutation.ResetIPSize() - duo.mutation.SetIPSize(i) - return duo -} - -// SetNillableIPSize sets the "ip_size" field if the given value is not nil. 
-func (duo *DecisionUpdateOne) SetNillableIPSize(i *int64) *DecisionUpdateOne { - if i != nil { - duo.SetIPSize(*i) - } - return duo -} - -// AddIPSize adds i to the "ip_size" field. -func (duo *DecisionUpdateOne) AddIPSize(i int64) *DecisionUpdateOne { - duo.mutation.AddIPSize(i) - return duo -} - -// ClearIPSize clears the value of the "ip_size" field. -func (duo *DecisionUpdateOne) ClearIPSize() *DecisionUpdateOne { - duo.mutation.ClearIPSize() - return duo -} - -// SetScope sets the "scope" field. -func (duo *DecisionUpdateOne) SetScope(s string) *DecisionUpdateOne { - duo.mutation.SetScope(s) - return duo -} - -// SetNillableScope sets the "scope" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableScope(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetScope(*s) - } - return duo -} - -// SetValue sets the "value" field. -func (duo *DecisionUpdateOne) SetValue(s string) *DecisionUpdateOne { - duo.mutation.SetValue(s) - return duo -} - -// SetNillableValue sets the "value" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableValue(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetValue(*s) - } - return duo -} - -// SetOrigin sets the "origin" field. -func (duo *DecisionUpdateOne) SetOrigin(s string) *DecisionUpdateOne { - duo.mutation.SetOrigin(s) - return duo -} - -// SetNillableOrigin sets the "origin" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableOrigin(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetOrigin(*s) - } - return duo -} - -// SetSimulated sets the "simulated" field. -func (duo *DecisionUpdateOne) SetSimulated(b bool) *DecisionUpdateOne { - duo.mutation.SetSimulated(b) - return duo -} - -// SetNillableSimulated sets the "simulated" field if the given value is not nil. 
-func (duo *DecisionUpdateOne) SetNillableSimulated(b *bool) *DecisionUpdateOne { - if b != nil { - duo.SetSimulated(*b) - } - return duo -} - -// SetUUID sets the "uuid" field. -func (duo *DecisionUpdateOne) SetUUID(s string) *DecisionUpdateOne { - duo.mutation.SetUUID(s) - return duo -} - -// SetNillableUUID sets the "uuid" field if the given value is not nil. -func (duo *DecisionUpdateOne) SetNillableUUID(s *string) *DecisionUpdateOne { - if s != nil { - duo.SetUUID(*s) - } - return duo -} - -// ClearUUID clears the value of the "uuid" field. -func (duo *DecisionUpdateOne) ClearUUID() *DecisionUpdateOne { - duo.mutation.ClearUUID() - return duo -} - // SetAlertDecisions sets the "alert_decisions" field. func (duo *DecisionUpdateOne) SetAlertDecisions(i int) *DecisionUpdateOne { duo.mutation.SetAlertDecisions(i) @@ -915,72 +386,21 @@ func (duo *DecisionUpdateOne) sqlSave(ctx context.Context) (_node *Decision, err if duo.mutation.UntilCleared() { _spec.ClearField(decision.FieldUntil, field.TypeTime) } - if value, ok := duo.mutation.Scenario(); ok { - _spec.SetField(decision.FieldScenario, field.TypeString, value) - } - if value, ok := duo.mutation.GetType(); ok { - _spec.SetField(decision.FieldType, field.TypeString, value) - } - if value, ok := duo.mutation.StartIP(); ok { - _spec.SetField(decision.FieldStartIP, field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedStartIP(); ok { - _spec.AddField(decision.FieldStartIP, field.TypeInt64, value) - } if duo.mutation.StartIPCleared() { _spec.ClearField(decision.FieldStartIP, field.TypeInt64) } - if value, ok := duo.mutation.EndIP(); ok { - _spec.SetField(decision.FieldEndIP, field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedEndIP(); ok { - _spec.AddField(decision.FieldEndIP, field.TypeInt64, value) - } if duo.mutation.EndIPCleared() { _spec.ClearField(decision.FieldEndIP, field.TypeInt64) } - if value, ok := duo.mutation.StartSuffix(); ok { - _spec.SetField(decision.FieldStartSuffix, 
field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedStartSuffix(); ok { - _spec.AddField(decision.FieldStartSuffix, field.TypeInt64, value) - } if duo.mutation.StartSuffixCleared() { _spec.ClearField(decision.FieldStartSuffix, field.TypeInt64) } - if value, ok := duo.mutation.EndSuffix(); ok { - _spec.SetField(decision.FieldEndSuffix, field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedEndSuffix(); ok { - _spec.AddField(decision.FieldEndSuffix, field.TypeInt64, value) - } if duo.mutation.EndSuffixCleared() { _spec.ClearField(decision.FieldEndSuffix, field.TypeInt64) } - if value, ok := duo.mutation.IPSize(); ok { - _spec.SetField(decision.FieldIPSize, field.TypeInt64, value) - } - if value, ok := duo.mutation.AddedIPSize(); ok { - _spec.AddField(decision.FieldIPSize, field.TypeInt64, value) - } if duo.mutation.IPSizeCleared() { _spec.ClearField(decision.FieldIPSize, field.TypeInt64) } - if value, ok := duo.mutation.Scope(); ok { - _spec.SetField(decision.FieldScope, field.TypeString, value) - } - if value, ok := duo.mutation.Value(); ok { - _spec.SetField(decision.FieldValue, field.TypeString, value) - } - if value, ok := duo.mutation.Origin(); ok { - _spec.SetField(decision.FieldOrigin, field.TypeString, value) - } - if value, ok := duo.mutation.Simulated(); ok { - _spec.SetField(decision.FieldSimulated, field.TypeBool, value) - } - if value, ok := duo.mutation.UUID(); ok { - _spec.SetField(decision.FieldUUID, field.TypeString, value) - } if duo.mutation.UUIDCleared() { _spec.ClearField(decision.FieldUUID, field.TypeString) } diff --git a/pkg/database/ent/event_update.go b/pkg/database/ent/event_update.go index 0bc8a7f9243..c2f5c6cddb1 100644 --- a/pkg/database/ent/event_update.go +++ b/pkg/database/ent/event_update.go @@ -35,34 +35,6 @@ func (eu *EventUpdate) SetUpdatedAt(t time.Time) *EventUpdate { return eu } -// SetTime sets the "time" field. 
-func (eu *EventUpdate) SetTime(t time.Time) *EventUpdate { - eu.mutation.SetTime(t) - return eu -} - -// SetNillableTime sets the "time" field if the given value is not nil. -func (eu *EventUpdate) SetNillableTime(t *time.Time) *EventUpdate { - if t != nil { - eu.SetTime(*t) - } - return eu -} - -// SetSerialized sets the "serialized" field. -func (eu *EventUpdate) SetSerialized(s string) *EventUpdate { - eu.mutation.SetSerialized(s) - return eu -} - -// SetNillableSerialized sets the "serialized" field if the given value is not nil. -func (eu *EventUpdate) SetNillableSerialized(s *string) *EventUpdate { - if s != nil { - eu.SetSerialized(*s) - } - return eu -} - // SetAlertEvents sets the "alert_events" field. func (eu *EventUpdate) SetAlertEvents(i int) *EventUpdate { eu.mutation.SetAlertEvents(i) @@ -149,20 +121,7 @@ func (eu *EventUpdate) defaults() { } } -// check runs all checks and user-defined validators on the builder. -func (eu *EventUpdate) check() error { - if v, ok := eu.mutation.Serialized(); ok { - if err := event.SerializedValidator(v); err != nil { - return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "Event.serialized": %w`, err)} - } - } - return nil -} - func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := eu.check(); err != nil { - return n, err - } _spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) if ps := eu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -174,12 +133,6 @@ func (eu *EventUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := eu.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := eu.mutation.Time(); ok { - _spec.SetField(event.FieldTime, field.TypeTime, value) - } - if value, ok := eu.mutation.Serialized(); ok { - _spec.SetField(event.FieldSerialized, field.TypeString, value) - } 
if eu.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -235,34 +188,6 @@ func (euo *EventUpdateOne) SetUpdatedAt(t time.Time) *EventUpdateOne { return euo } -// SetTime sets the "time" field. -func (euo *EventUpdateOne) SetTime(t time.Time) *EventUpdateOne { - euo.mutation.SetTime(t) - return euo -} - -// SetNillableTime sets the "time" field if the given value is not nil. -func (euo *EventUpdateOne) SetNillableTime(t *time.Time) *EventUpdateOne { - if t != nil { - euo.SetTime(*t) - } - return euo -} - -// SetSerialized sets the "serialized" field. -func (euo *EventUpdateOne) SetSerialized(s string) *EventUpdateOne { - euo.mutation.SetSerialized(s) - return euo -} - -// SetNillableSerialized sets the "serialized" field if the given value is not nil. -func (euo *EventUpdateOne) SetNillableSerialized(s *string) *EventUpdateOne { - if s != nil { - euo.SetSerialized(*s) - } - return euo -} - // SetAlertEvents sets the "alert_events" field. func (euo *EventUpdateOne) SetAlertEvents(i int) *EventUpdateOne { euo.mutation.SetAlertEvents(i) @@ -362,20 +287,7 @@ func (euo *EventUpdateOne) defaults() { } } -// check runs all checks and user-defined validators on the builder. 
-func (euo *EventUpdateOne) check() error { - if v, ok := euo.mutation.Serialized(); ok { - if err := event.SerializedValidator(v); err != nil { - return &ValidationError{Name: "serialized", err: fmt.Errorf(`ent: validator failed for field "Event.serialized": %w`, err)} - } - } - return nil -} - func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error) { - if err := euo.check(); err != nil { - return _node, err - } _spec := sqlgraph.NewUpdateSpec(event.Table, event.Columns, sqlgraph.NewFieldSpec(event.FieldID, field.TypeInt)) id, ok := euo.mutation.ID() if !ok { @@ -404,12 +316,6 @@ func (euo *EventUpdateOne) sqlSave(ctx context.Context) (_node *Event, err error if value, ok := euo.mutation.UpdatedAt(); ok { _spec.SetField(event.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := euo.mutation.Time(); ok { - _spec.SetField(event.FieldTime, field.TypeTime, value) - } - if value, ok := euo.mutation.Serialized(); ok { - _spec.SetField(event.FieldSerialized, field.TypeString, value) - } if euo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/pkg/database/ent/lock_update.go b/pkg/database/ent/lock_update.go index 988363abd17..934e68c0762 100644 --- a/pkg/database/ent/lock_update.go +++ b/pkg/database/ent/lock_update.go @@ -6,7 +6,6 @@ import ( "context" "errors" "fmt" - "time" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" @@ -28,20 +27,6 @@ func (lu *LockUpdate) Where(ps ...predicate.Lock) *LockUpdate { return lu } -// SetCreatedAt sets the "created_at" field. -func (lu *LockUpdate) SetCreatedAt(t time.Time) *LockUpdate { - lu.mutation.SetCreatedAt(t) - return lu -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (lu *LockUpdate) SetNillableCreatedAt(t *time.Time) *LockUpdate { - if t != nil { - lu.SetCreatedAt(*t) - } - return lu -} - // Mutation returns the LockMutation object of the builder. 
func (lu *LockUpdate) Mutation() *LockMutation { return lu.mutation @@ -83,9 +68,6 @@ func (lu *LockUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := lu.mutation.CreatedAt(); ok { - _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) - } if n, err = sqlgraph.UpdateNodes(ctx, lu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{lock.Label} @@ -106,20 +88,6 @@ type LockUpdateOne struct { mutation *LockMutation } -// SetCreatedAt sets the "created_at" field. -func (luo *LockUpdateOne) SetCreatedAt(t time.Time) *LockUpdateOne { - luo.mutation.SetCreatedAt(t) - return luo -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (luo *LockUpdateOne) SetNillableCreatedAt(t *time.Time) *LockUpdateOne { - if t != nil { - luo.SetCreatedAt(*t) - } - return luo -} - // Mutation returns the LockMutation object of the builder. func (luo *LockUpdateOne) Mutation() *LockMutation { return luo.mutation @@ -191,9 +159,6 @@ func (luo *LockUpdateOne) sqlSave(ctx context.Context) (_node *Lock, err error) } } } - if value, ok := luo.mutation.CreatedAt(); ok { - _spec.SetField(lock.FieldCreatedAt, field.TypeTime, value) - } _node = &Lock{config: luo.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/pkg/database/ent/meta_update.go b/pkg/database/ent/meta_update.go index 76567c5eff7..bdf622eb6c3 100644 --- a/pkg/database/ent/meta_update.go +++ b/pkg/database/ent/meta_update.go @@ -29,54 +29,12 @@ func (mu *MetaUpdate) Where(ps ...predicate.Meta) *MetaUpdate { return mu } -// SetCreatedAt sets the "created_at" field. -func (mu *MetaUpdate) SetCreatedAt(t time.Time) *MetaUpdate { - mu.mutation.SetCreatedAt(t) - return mu -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. 
-func (mu *MetaUpdate) SetNillableCreatedAt(t *time.Time) *MetaUpdate { - if t != nil { - mu.SetCreatedAt(*t) - } - return mu -} - // SetUpdatedAt sets the "updated_at" field. func (mu *MetaUpdate) SetUpdatedAt(t time.Time) *MetaUpdate { mu.mutation.SetUpdatedAt(t) return mu } -// SetKey sets the "key" field. -func (mu *MetaUpdate) SetKey(s string) *MetaUpdate { - mu.mutation.SetKey(s) - return mu -} - -// SetNillableKey sets the "key" field if the given value is not nil. -func (mu *MetaUpdate) SetNillableKey(s *string) *MetaUpdate { - if s != nil { - mu.SetKey(*s) - } - return mu -} - -// SetValue sets the "value" field. -func (mu *MetaUpdate) SetValue(s string) *MetaUpdate { - mu.mutation.SetValue(s) - return mu -} - -// SetNillableValue sets the "value" field if the given value is not nil. -func (mu *MetaUpdate) SetNillableValue(s *string) *MetaUpdate { - if s != nil { - mu.SetValue(*s) - } - return mu -} - // SetAlertMetas sets the "alert_metas" field. func (mu *MetaUpdate) SetAlertMetas(i int) *MetaUpdate { mu.mutation.SetAlertMetas(i) @@ -163,20 +121,7 @@ func (mu *MetaUpdate) defaults() { } } -// check runs all checks and user-defined validators on the builder. 
-func (mu *MetaUpdate) check() error { - if v, ok := mu.mutation.Value(); ok { - if err := meta.ValueValidator(v); err != nil { - return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Meta.value": %w`, err)} - } - } - return nil -} - func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { - if err := mu.check(); err != nil { - return n, err - } _spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) if ps := mu.mutation.predicates; len(ps) > 0 { _spec.Predicate = func(selector *sql.Selector) { @@ -185,18 +130,9 @@ func (mu *MetaUpdate) sqlSave(ctx context.Context) (n int, err error) { } } } - if value, ok := mu.mutation.CreatedAt(); ok { - _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) - } if value, ok := mu.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := mu.mutation.Key(); ok { - _spec.SetField(meta.FieldKey, field.TypeString, value) - } - if value, ok := mu.mutation.Value(); ok { - _spec.SetField(meta.FieldValue, field.TypeString, value) - } if mu.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -246,54 +182,12 @@ type MetaUpdateOne struct { mutation *MetaMutation } -// SetCreatedAt sets the "created_at" field. -func (muo *MetaUpdateOne) SetCreatedAt(t time.Time) *MetaUpdateOne { - muo.mutation.SetCreatedAt(t) - return muo -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (muo *MetaUpdateOne) SetNillableCreatedAt(t *time.Time) *MetaUpdateOne { - if t != nil { - muo.SetCreatedAt(*t) - } - return muo -} - // SetUpdatedAt sets the "updated_at" field. func (muo *MetaUpdateOne) SetUpdatedAt(t time.Time) *MetaUpdateOne { muo.mutation.SetUpdatedAt(t) return muo } -// SetKey sets the "key" field. 
-func (muo *MetaUpdateOne) SetKey(s string) *MetaUpdateOne { - muo.mutation.SetKey(s) - return muo -} - -// SetNillableKey sets the "key" field if the given value is not nil. -func (muo *MetaUpdateOne) SetNillableKey(s *string) *MetaUpdateOne { - if s != nil { - muo.SetKey(*s) - } - return muo -} - -// SetValue sets the "value" field. -func (muo *MetaUpdateOne) SetValue(s string) *MetaUpdateOne { - muo.mutation.SetValue(s) - return muo -} - -// SetNillableValue sets the "value" field if the given value is not nil. -func (muo *MetaUpdateOne) SetNillableValue(s *string) *MetaUpdateOne { - if s != nil { - muo.SetValue(*s) - } - return muo -} - // SetAlertMetas sets the "alert_metas" field. func (muo *MetaUpdateOne) SetAlertMetas(i int) *MetaUpdateOne { muo.mutation.SetAlertMetas(i) @@ -393,20 +287,7 @@ func (muo *MetaUpdateOne) defaults() { } } -// check runs all checks and user-defined validators on the builder. -func (muo *MetaUpdateOne) check() error { - if v, ok := muo.mutation.Value(); ok { - if err := meta.ValueValidator(v); err != nil { - return &ValidationError{Name: "value", err: fmt.Errorf(`ent: validator failed for field "Meta.value": %w`, err)} - } - } - return nil -} - func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) { - if err := muo.check(); err != nil { - return _node, err - } _spec := sqlgraph.NewUpdateSpec(meta.Table, meta.Columns, sqlgraph.NewFieldSpec(meta.FieldID, field.TypeInt)) id, ok := muo.mutation.ID() if !ok { @@ -432,18 +313,9 @@ func (muo *MetaUpdateOne) sqlSave(ctx context.Context) (_node *Meta, err error) } } } - if value, ok := muo.mutation.CreatedAt(); ok { - _spec.SetField(meta.FieldCreatedAt, field.TypeTime, value) - } if value, ok := muo.mutation.UpdatedAt(); ok { _spec.SetField(meta.FieldUpdatedAt, field.TypeTime, value) } - if value, ok := muo.mutation.Key(); ok { - _spec.SetField(meta.FieldKey, field.TypeString, value) - } - if value, ok := muo.mutation.Value(); ok { - 
_spec.SetField(meta.FieldValue, field.TypeString, value) - } if muo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index e5d43d42314..b0e7f990f6e 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -68,7 +68,6 @@ var ( {Name: "ip_address", Type: field.TypeString, Nullable: true, Default: ""}, {Name: "type", Type: field.TypeString, Nullable: true}, {Name: "version", Type: field.TypeString, Nullable: true}, - {Name: "until", Type: field.TypeTime, Nullable: true}, {Name: "last_pull", Type: field.TypeTime}, {Name: "auth_type", Type: field.TypeString, Default: "api-key"}, } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index aed004fb7a6..b88154324bb 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -2390,7 +2390,6 @@ type BouncerMutation struct { ip_address *string _type *string version *string - until *time.Time last_pull *time.Time auth_type *string clearedFields map[string]struct{} @@ -2824,55 +2823,6 @@ func (m *BouncerMutation) ResetVersion() { delete(m.clearedFields, bouncer.FieldVersion) } -// SetUntil sets the "until" field. -func (m *BouncerMutation) SetUntil(t time.Time) { - m.until = &t -} - -// Until returns the value of the "until" field in the mutation. -func (m *BouncerMutation) Until() (r time.Time, exists bool) { - v := m.until - if v == nil { - return - } - return *v, true -} - -// OldUntil returns the old "until" field's value of the Bouncer entity. -// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BouncerMutation) OldUntil(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUntil is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUntil requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUntil: %w", err) - } - return oldValue.Until, nil -} - -// ClearUntil clears the value of the "until" field. -func (m *BouncerMutation) ClearUntil() { - m.until = nil - m.clearedFields[bouncer.FieldUntil] = struct{}{} -} - -// UntilCleared returns if the "until" field was cleared in this mutation. -func (m *BouncerMutation) UntilCleared() bool { - _, ok := m.clearedFields[bouncer.FieldUntil] - return ok -} - -// ResetUntil resets all changes to the "until" field. -func (m *BouncerMutation) ResetUntil() { - m.until = nil - delete(m.clearedFields, bouncer.FieldUntil) -} - // SetLastPull sets the "last_pull" field. func (m *BouncerMutation) SetLastPull(t time.Time) { m.last_pull = &t @@ -2979,7 +2929,7 @@ func (m *BouncerMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *BouncerMutation) Fields() []string { - fields := make([]string, 0, 11) + fields := make([]string, 0, 10) if m.created_at != nil { fields = append(fields, bouncer.FieldCreatedAt) } @@ -3004,9 +2954,6 @@ func (m *BouncerMutation) Fields() []string { if m.version != nil { fields = append(fields, bouncer.FieldVersion) } - if m.until != nil { - fields = append(fields, bouncer.FieldUntil) - } if m.last_pull != nil { fields = append(fields, bouncer.FieldLastPull) } @@ -3037,8 +2984,6 @@ func (m *BouncerMutation) Field(name string) (ent.Value, bool) { return m.GetType() case bouncer.FieldVersion: return m.Version() - case bouncer.FieldUntil: - return m.Until() case bouncer.FieldLastPull: return m.LastPull() case bouncer.FieldAuthType: @@ -3068,8 +3013,6 @@ func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldType(ctx) case bouncer.FieldVersion: return m.OldVersion(ctx) - case bouncer.FieldUntil: - return m.OldUntil(ctx) case bouncer.FieldLastPull: return m.OldLastPull(ctx) case bouncer.FieldAuthType: @@ -3139,13 +3082,6 @@ func (m *BouncerMutation) SetField(name string, value ent.Value) error { } m.SetVersion(v) return nil - case bouncer.FieldUntil: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUntil(v) - return nil case bouncer.FieldLastPull: v, ok := value.(time.Time) if !ok { @@ -3199,9 +3135,6 @@ func (m *BouncerMutation) ClearedFields() []string { if m.FieldCleared(bouncer.FieldVersion) { fields = append(fields, bouncer.FieldVersion) } - if m.FieldCleared(bouncer.FieldUntil) { - fields = append(fields, bouncer.FieldUntil) - } return fields } @@ -3225,9 +3158,6 @@ func (m *BouncerMutation) ClearField(name string) error { case bouncer.FieldVersion: m.ClearVersion() return nil - case bouncer.FieldUntil: - m.ClearUntil() - return nil } return fmt.Errorf("unknown Bouncer nullable field %s", name) } @@ -3260,9 +3190,6 @@ func (m *BouncerMutation) 
ResetField(name string) error { case bouncer.FieldVersion: m.ResetVersion() return nil - case bouncer.FieldUntil: - m.ResetUntil() - return nil case bouncer.FieldLastPull: m.ResetLastPull() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 1c5b3460cce..c593cd89fcb 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -72,16 +72,12 @@ func init() { bouncerDescIPAddress := bouncerFields[5].Descriptor() // bouncer.DefaultIPAddress holds the default value on creation for the ip_address field. bouncer.DefaultIPAddress = bouncerDescIPAddress.Default.(string) - // bouncerDescUntil is the schema descriptor for until field. - bouncerDescUntil := bouncerFields[8].Descriptor() - // bouncer.DefaultUntil holds the default value on creation for the until field. - bouncer.DefaultUntil = bouncerDescUntil.Default.(func() time.Time) // bouncerDescLastPull is the schema descriptor for last_pull field. - bouncerDescLastPull := bouncerFields[9].Descriptor() + bouncerDescLastPull := bouncerFields[8].Descriptor() // bouncer.DefaultLastPull holds the default value on creation for the last_pull field. bouncer.DefaultLastPull = bouncerDescLastPull.Default.(func() time.Time) // bouncerDescAuthType is the schema descriptor for auth_type field. - bouncerDescAuthType := bouncerFields[10].Descriptor() + bouncerDescAuthType := bouncerFields[9].Descriptor() // bouncer.DefaultAuthType holds the default value on creation for the auth_type field. 
bouncer.DefaultAuthType = bouncerDescAuthType.Default.(string) configitemFields := schema.ConfigItem{}.Fields() diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go index bda7cc7d0b9..343979e3db7 100644 --- a/pkg/database/ent/schema/alert.go +++ b/pkg/database/ent/schema/alert.go @@ -6,6 +6,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -23,34 +24,34 @@ func (Alert) Fields() []ent.Field { field.Time("updated_at"). Default(types.UtcNow). UpdateDefault(types.UtcNow), - field.String("scenario"), - field.String("bucketId").Default("").Optional(), - field.String("message").Default("").Optional(), - field.Int32("eventsCount").Default(0).Optional(), - field.Time("startedAt").Default(types.UtcNow).Optional(), - field.Time("stoppedAt").Default(types.UtcNow).Optional(), + field.String("scenario").Immutable(), + field.String("bucketId").Default("").Optional().Immutable(), + field.String("message").Default("").Optional().Immutable(), + field.Int32("eventsCount").Default(0).Optional().Immutable(), + field.Time("startedAt").Default(types.UtcNow).Optional().Immutable(), + field.Time("stoppedAt").Default(types.UtcNow).Optional().Immutable(), field.String("sourceIp"). - Optional(), + Optional().Immutable(), field.String("sourceRange"). - Optional(), + Optional().Immutable(), field.String("sourceAsNumber"). - Optional(), + Optional().Immutable(), field.String("sourceAsName"). - Optional(), + Optional().Immutable(), field.String("sourceCountry"). - Optional(), + Optional().Immutable(), field.Float32("sourceLatitude"). - Optional(), + Optional().Immutable(), field.Float32("sourceLongitude"). 
- Optional(), - field.String("sourceScope").Optional(), - field.String("sourceValue").Optional(), - field.Int32("capacity").Optional(), - field.String("leakSpeed").Optional(), - field.String("scenarioVersion").Optional(), - field.String("scenarioHash").Optional(), - field.Bool("simulated").Default(false), - field.String("uuid").Optional(), //this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each alert + Optional().Immutable(), + field.String("sourceScope").Optional().Immutable(), + field.String("sourceValue").Optional().Immutable(), + field.Int32("capacity").Optional().Immutable(), + field.String("leakSpeed").Optional().Immutable(), + field.String("scenarioVersion").Optional().Immutable(), + field.String("scenarioHash").Optional().Immutable(), + field.Bool("simulated").Default(false).Immutable(), + field.String("uuid").Optional().Immutable(), // this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each alert } } diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 18efecb3f03..acaa86008f5 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -3,6 +3,7 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -16,17 +17,17 @@ func (Bouncer) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). Default(types.UtcNow). - StructTag(`json:"created_at"`), + StructTag(`json:"created_at"`). + Immutable(), field.Time("updated_at"). Default(types.UtcNow). 
UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), - field.String("name").Unique().StructTag(`json:"name"`), + field.String("name").Unique().StructTag(`json:"name"`).Immutable(), field.String("api_key").Sensitive(), // hash of api_key field.Bool("revoked").StructTag(`json:"revoked"`), field.String("ip_address").Default("").Optional().StructTag(`json:"ip_address"`), field.String("type").Optional().StructTag(`json:"type"`), field.String("version").Optional().StructTag(`json:"version"`), - field.Time("until").Default(types.UtcNow).Optional().StructTag(`json:"until"`), field.Time("last_pull"). Default(types.UtcNow).StructTag(`json:"last_pull"`), field.String("auth_type").StructTag(`json:"auth_type"`).Default(types.ApiKeyAuthType), diff --git a/pkg/database/ent/schema/config.go b/pkg/database/ent/schema/config.go index 036c55908ba..d526db25a8d 100644 --- a/pkg/database/ent/schema/config.go +++ b/pkg/database/ent/schema/config.go @@ -3,6 +3,7 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -20,7 +21,7 @@ func (ConfigItem) Fields() []ent.Field { field.Time("updated_at"). Default(types.UtcNow). 
UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), - field.String("name").Unique().StructTag(`json:"name"`), + field.String("name").Unique().StructTag(`json:"name"`).Immutable(), field.String("value").StructTag(`json:"value"`), // a json object } } diff --git a/pkg/database/ent/schema/decision.go b/pkg/database/ent/schema/decision.go index d5193910146..4089be38096 100644 --- a/pkg/database/ent/schema/decision.go +++ b/pkg/database/ent/schema/decision.go @@ -6,6 +6,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -26,18 +27,18 @@ func (Decision) Fields() []ent.Field { field.Time("until").Nillable().Optional().SchemaType(map[string]string{ dialect.MySQL: "datetime", }), - field.String("scenario"), - field.String("type"), - field.Int64("start_ip").Optional(), - field.Int64("end_ip").Optional(), - field.Int64("start_suffix").Optional(), - field.Int64("end_suffix").Optional(), - field.Int64("ip_size").Optional(), - field.String("scope"), - field.String("value"), - field.String("origin"), - field.Bool("simulated").Default(false), - field.String("uuid").Optional(), //this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each decision + field.String("scenario").Immutable(), + field.String("type").Immutable(), + field.Int64("start_ip").Optional().Immutable(), + field.Int64("end_ip").Optional().Immutable(), + field.Int64("start_suffix").Optional().Immutable(), + field.Int64("end_suffix").Optional().Immutable(), + field.Int64("ip_size").Optional().Immutable(), + field.String("scope").Immutable(), + field.String("value").Immutable(), + field.String("origin").Immutable(), + field.Bool("simulated").Default(false).Immutable(), + field.String("uuid").Optional().Immutable(), // this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each decision field.Int("alert_decisions").Optional(), } } diff --git a/pkg/database/ent/schema/event.go 
b/pkg/database/ent/schema/event.go index f982ebe9653..107f68e5274 100644 --- a/pkg/database/ent/schema/event.go +++ b/pkg/database/ent/schema/event.go @@ -5,6 +5,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -22,8 +23,8 @@ func (Event) Fields() []ent.Field { field.Time("updated_at"). Default(types.UtcNow). UpdateDefault(types.UtcNow), - field.Time("time"), - field.String("serialized").MaxLen(8191), + field.Time("time").Immutable(), + field.String("serialized").MaxLen(8191).Immutable(), field.Int("alert_events").Optional(), } } diff --git a/pkg/database/ent/schema/lock.go b/pkg/database/ent/schema/lock.go index 0d49bac1bf6..a287e2b59ad 100644 --- a/pkg/database/ent/schema/lock.go +++ b/pkg/database/ent/schema/lock.go @@ -3,6 +3,7 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -13,7 +14,7 @@ type Lock struct { func (Lock) Fields() []ent.Field { return []ent.Field{ field.String("name").Unique().Immutable().StructTag(`json:"name"`), - field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`), + field.Time("created_at").Default(types.UtcNow).StructTag(`json:"created_at"`).Immutable(), } } diff --git a/pkg/database/ent/schema/meta.go b/pkg/database/ent/schema/meta.go index 877fffa8a2e..a87010cd8a3 100644 --- a/pkg/database/ent/schema/meta.go +++ b/pkg/database/ent/schema/meta.go @@ -5,6 +5,7 @@ import ( "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" "entgo.io/ent/schema/index" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -17,12 +18,12 @@ type Meta struct { func (Meta) Fields() []ent.Field { return []ent.Field{ field.Time("created_at"). - Default(types.UtcNow), + Default(types.UtcNow).Immutable(), field.Time("updated_at"). Default(types.UtcNow). 
UpdateDefault(types.UtcNow), - field.String("key"), - field.String("value").MaxLen(4095), + field.String("key").Immutable(), + field.String("value").MaxLen(4095).Immutable(), field.Int("alert_metas").Optional(), } } From b8ee31ade232f85388880702a24c5edbde3ced63 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 15:44:30 +0200 Subject: [PATCH 149/581] pkg/database: simplify flush agents/bouncers (#3026) * pkg/database: simplify flush agents/bouncers * lint --- pkg/database/flush.go | 153 +++++++++++++++++++++--------------------- 1 file changed, 75 insertions(+), 78 deletions(-) diff --git a/pkg/database/flush.go b/pkg/database/flush.go index ad4a912de84..56e42715b2c 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -17,22 +17,25 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) - func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { maxItems := 0 maxAge := "" + if config.MaxItems != nil && *config.MaxItems <= 0 { return nil, errors.New("max_items can't be zero or negative number") } + if config.MaxItems != nil { maxItems = *config.MaxItems } + if config.MaxAge != nil && *config.MaxAge != "" { maxAge = *config.MaxAge } // Init & Start cronjob every minute for alerts scheduler := gocron.NewScheduler(time.UTC) + job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems) if err != nil { return nil, fmt.Errorf("while starting FlushAlerts scheduler: %w", err) @@ -46,38 +49,48 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched if err != nil { return nil, fmt.Errorf("while parsing agents cert auto-delete duration: %w", err) } + config.AgentsGC.CertDuration = &duration } + if config.AgentsGC.LoginPassword != nil { duration, err := ParseDuration(*config.AgentsGC.LoginPassword) if err != nil { return nil, fmt.Errorf("while parsing agents login/password auto-delete duration: %w", err) } + 
config.AgentsGC.LoginPasswordDuration = &duration } + if config.AgentsGC.Api != nil { log.Warning("agents auto-delete for API auth is not supported (use cert or login_password)") } } + if config.BouncersGC != nil { if config.BouncersGC.Cert != nil { duration, err := ParseDuration(*config.BouncersGC.Cert) if err != nil { return nil, fmt.Errorf("while parsing bouncers cert auto-delete duration: %w", err) } + config.BouncersGC.CertDuration = &duration } + if config.BouncersGC.Api != nil { duration, err := ParseDuration(*config.BouncersGC.Api) if err != nil { return nil, fmt.Errorf("while parsing bouncers api auto-delete duration: %w", err) } + config.BouncersGC.ApiDuration = &duration } + if config.BouncersGC.LoginPassword != nil { log.Warning("bouncers auto-delete for login/password auth is not supported (use cert or api)") } } + baJob, err := scheduler.Every(1).Minute().Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) @@ -89,7 +102,6 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched return scheduler, nil } - func (c *Client) FlushOrphans() { /* While it has only been linked to some very corner-case bug : https://github.com/crowdsecurity/crowdsec/issues/778 */ /* We want to take care of orphaned events for which the parent alert/decision has been deleted */ @@ -98,6 +110,7 @@ func (c *Client) FlushOrphans() { c.Log.Warningf("error while deleting orphan events: %s", err) return } + if eventsCount > 0 { c.Log.Infof("%d deleted orphan events", eventsCount) } @@ -109,103 +122,77 @@ func (c *Client) FlushOrphans() { c.Log.Warningf("error while deleting orphan decisions: %s", err) return } + if eventsCount > 0 { c.Log.Infof("%d deleted orphan decisions", eventsCount) } } -func (c *Client) flushBouncers(bouncersCfg *csconfig.AuthGCCfg) { - if bouncersCfg == nil { +func (c *Client) flushBouncers(authType string, duration 
*time.Duration) { + if duration == nil { return } - if bouncersCfg.ApiDuration != nil { - log.Debug("trying to delete old bouncers from api") + count, err := c.Ent.Bouncer.Delete().Where( + bouncer.LastPullLTE(time.Now().UTC().Add(-*duration)), + ).Where( + bouncer.AuthTypeEQ(authType), + ).Exec(c.CTX) - deletionCount, err := c.Ent.Bouncer.Delete().Where( - bouncer.LastPullLTE(time.Now().UTC().Add(-*bouncersCfg.ApiDuration)), - ).Where( - bouncer.AuthTypeEQ(types.ApiKeyAuthType), - ).Exec(c.CTX) - if err != nil { - c.Log.Errorf("while auto-deleting expired bouncers (api key): %s", err) - } else if deletionCount > 0 { - c.Log.Infof("deleted %d expired bouncers (api auth)", deletionCount) - } + if err != nil { + c.Log.Errorf("while auto-deleting expired bouncers (%s): %s", authType, err) + return } - if bouncersCfg.CertDuration != nil { - log.Debug("trying to delete old bouncers from cert") - - deletionCount, err := c.Ent.Bouncer.Delete().Where( - bouncer.LastPullLTE(time.Now().UTC().Add(-*bouncersCfg.CertDuration)), - ).Where( - bouncer.AuthTypeEQ(types.TlsAuthType), - ).Exec(c.CTX) - if err != nil { - c.Log.Errorf("while auto-deleting expired bouncers (api key): %s", err) - } else if deletionCount > 0 { - c.Log.Infof("deleted %d expired bouncers (api auth)", deletionCount) - } + if count > 0 { + c.Log.Infof("deleted %d expired bouncers (%s)", count, authType) } } -func (c *Client) flushAgents(agentsCfg *csconfig.AuthGCCfg) { - if agentsCfg == nil { +func (c *Client) flushAgents(authType string, duration *time.Duration) { + if duration == nil { return } - if agentsCfg.CertDuration != nil { - log.Debug("trying to delete old agents from cert") - - deletionCount, err := c.Ent.Machine.Delete().Where( - machine.LastHeartbeatLTE(time.Now().UTC().Add(-*agentsCfg.CertDuration)), - ).Where( - machine.Not(machine.HasAlerts()), - ).Where( - machine.AuthTypeEQ(types.TlsAuthType), - ).Exec(c.CTX) - log.Debugf("deleted %d entries", deletionCount) - if err != nil { - 
c.Log.Errorf("while auto-deleting expired machine (cert): %s", err) - } else if deletionCount > 0 { - c.Log.Infof("deleted %d expired machine (cert auth)", deletionCount) - } + count, err := c.Ent.Machine.Delete().Where( + machine.LastHeartbeatLTE(time.Now().UTC().Add(-*duration)), + machine.Not(machine.HasAlerts()), + machine.AuthTypeEQ(authType), + ).Exec(c.CTX) + + if err != nil { + c.Log.Errorf("while auto-deleting expired machines (%s): %s", authType, err) + return } - if agentsCfg.LoginPasswordDuration != nil { - log.Debug("trying to delete old agents from password") - - deletionCount, err := c.Ent.Machine.Delete().Where( - machine.LastHeartbeatLTE(time.Now().UTC().Add(-*agentsCfg.LoginPasswordDuration)), - ).Where( - machine.Not(machine.HasAlerts()), - ).Where( - machine.AuthTypeEQ(types.PasswordAuthType), - ).Exec(c.CTX) - log.Debugf("deleted %d entries", deletionCount) - if err != nil { - c.Log.Errorf("while auto-deleting expired machine (password): %s", err) - } else if deletionCount > 0 { - c.Log.Infof("deleted %d expired machine (password auth)", deletionCount) - } + if count > 0 { + c.Log.Infof("deleted %d expired machines (%s auth)", count, authType) } } func (c *Client) FlushAgentsAndBouncers(agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error { log.Debug("starting FlushAgentsAndBouncers") - c.flushBouncers(bouncersCfg) - c.flushAgents(agentsCfg) + if agentsCfg != nil { + c.flushAgents(types.TlsAuthType, agentsCfg.CertDuration) + c.flushAgents(types.PasswordAuthType, agentsCfg.LoginPasswordDuration) + } + + if bouncersCfg != nil { + c.flushBouncers(types.TlsAuthType, bouncersCfg.CertDuration) + c.flushBouncers(types.ApiKeyAuthType, bouncersCfg.ApiDuration) + } return nil } func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { - var deletedByAge int - var deletedByNbItem int - var totalAlerts int - var err error + var ( + deletedByAge int + deletedByNbItem int + totalAlerts int + err error + ) if !c.CanFlush { 
c.Log.Debug("a list is being imported, flushing later") @@ -215,6 +202,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { c.Log.Debug("Flushing orphan alerts") c.FlushOrphans() c.Log.Debug("Done flushing orphan alerts") + totalAlerts, err = c.TotalAlerts() if err != nil { c.Log.Warningf("FlushAlerts (max items count): %s", err) @@ -222,10 +210,12 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { } c.Log.Debugf("FlushAlerts (Total alerts): %d", totalAlerts) + if MaxAge != "" { filter := map[string][]string{ "created_before": {MaxAge}, } + nbDeleted, err := c.DeleteAlertWithFilter(filter) if err != nil { c.Log.Warningf("FlushAlerts (max age): %s", err) @@ -235,19 +225,21 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { c.Log.Debugf("FlushAlerts (deleted max age alerts): %d", nbDeleted) deletedByAge = nbDeleted } + if MaxItems > 0 { - //We get the highest id for the alerts - //We subtract MaxItems to avoid deleting alerts that are not old enough - //This gives us the oldest alert that we want to keep - //We then delete all the alerts with an id lower than this one - //We can do this because the id is auto-increment, and the database won't reuse the same id twice + // We get the highest id for the alerts + // We subtract MaxItems to avoid deleting alerts that are not old enough + // This gives us the oldest alert that we want to keep + // We then delete all the alerts with an id lower than this one + // We can do this because the id is auto-increment, and the database won't reuse the same id twice lastAlert, err := c.QueryAlertWithFilter(map[string][]string{ "sort": {"DESC"}, "limit": {"1"}, - //we do not care about fetching the edges, we just want the id + // we do not care about fetching the edges, we just want the id "with_decisions": {"false"}, }) c.Log.Debugf("FlushAlerts (last alert): %+v", lastAlert) + if err != nil { c.Log.Errorf("FlushAlerts: could not get last alert: %s", err) return fmt.Errorf("could 
not get last alert: %w", err) @@ -259,7 +251,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { c.Log.Debugf("FlushAlerts (max id): %d", maxid) if maxid > 0 { - //This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted + // This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX) if err != nil { @@ -269,11 +261,16 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { } } } + if deletedByNbItem > 0 { - c.Log.Infof("flushed %d/%d alerts because the max number of alerts has been reached (%d max)", deletedByNbItem, totalAlerts, MaxItems) + c.Log.Infof("flushed %d/%d alerts because the max number of alerts has been reached (%d max)", + deletedByNbItem, totalAlerts, MaxItems) } + if deletedByAge > 0 { - c.Log.Infof("flushed %d/%d alerts because they were created %s ago or more", deletedByAge, totalAlerts, MaxAge) + c.Log.Infof("flushed %d/%d alerts because they were created %s ago or more", + deletedByAge, totalAlerts, MaxAge) } + return nil } From 95bc5880f4ad65c799cc6d85d4ae54bcd9f95d36 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Fri, 31 May 2024 15:56:13 +0200 Subject: [PATCH 150/581] update vagrant image for fc39, fc40 and ubuntu24.04 (#3042) * update test distribution * add skip file to avoid mysql tests * add ubuntu 24.04 --- test/ansible/vagrant/{fedora-33 => fedora-37}/skip | 0 test/ansible/vagrant/{fedora-34 => fedora-38}/skip | 0 .../vagrant/{fedora-33 => fedora-39}/Vagrantfile | 3 +-- test/ansible/vagrant/fedora-39/skip | 9 +++++++++ .../vagrant/{fedora-34 => fedora-40}/Vagrantfile | 3 +-- test/ansible/vagrant/fedora-40/skip | 9 +++++++++ test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile | 10 ++++++++++ 7 files changed, 30 insertions(+), 4 deletions(-) rename test/ansible/vagrant/{fedora-33 => fedora-37}/skip (100%) mode 
change 100755 => 100644 rename test/ansible/vagrant/{fedora-34 => fedora-38}/skip (100%) mode change 100755 => 100644 rename test/ansible/vagrant/{fedora-33 => fedora-39}/Vagrantfile (69%) create mode 100644 test/ansible/vagrant/fedora-39/skip rename test/ansible/vagrant/{fedora-34 => fedora-40}/Vagrantfile (69%) create mode 100644 test/ansible/vagrant/fedora-40/skip create mode 100644 test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile diff --git a/test/ansible/vagrant/fedora-33/skip b/test/ansible/vagrant/fedora-37/skip old mode 100755 new mode 100644 similarity index 100% rename from test/ansible/vagrant/fedora-33/skip rename to test/ansible/vagrant/fedora-37/skip diff --git a/test/ansible/vagrant/fedora-34/skip b/test/ansible/vagrant/fedora-38/skip old mode 100755 new mode 100644 similarity index 100% rename from test/ansible/vagrant/fedora-34/skip rename to test/ansible/vagrant/fedora-38/skip diff --git a/test/ansible/vagrant/fedora-33/Vagrantfile b/test/ansible/vagrant/fedora-39/Vagrantfile similarity index 69% rename from test/ansible/vagrant/fedora-33/Vagrantfile rename to test/ansible/vagrant/fedora-39/Vagrantfile index df6f06944ae..ec03661fe39 100644 --- a/test/ansible/vagrant/fedora-33/Vagrantfile +++ b/test/ansible/vagrant/fedora-39/Vagrantfile @@ -1,8 +1,7 @@ # frozen_string_literal: true Vagrant.configure('2') do |config| - # config.vm.box = "fedora/33-cloud-base" - config.vm.box = 'generic/fedora33' + config.vm.box = "fedora/39-cloud-base" config.vm.provision "shell", inline: <<-SHELL SHELL end diff --git a/test/ansible/vagrant/fedora-39/skip b/test/ansible/vagrant/fedora-39/skip new file mode 100644 index 00000000000..4f1a9063d2b --- /dev/null +++ b/test/ansible/vagrant/fedora-39/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/test/ansible/vagrant/fedora-34/Vagrantfile b/test/ansible/vagrant/fedora-40/Vagrantfile 
similarity index 69% rename from test/ansible/vagrant/fedora-34/Vagrantfile rename to test/ansible/vagrant/fedora-40/Vagrantfile index db2db8d0879..ec03661fe39 100644 --- a/test/ansible/vagrant/fedora-34/Vagrantfile +++ b/test/ansible/vagrant/fedora-40/Vagrantfile @@ -1,8 +1,7 @@ # frozen_string_literal: true Vagrant.configure('2') do |config| - # config.vm.box = "fedora/34-cloud-base" - config.vm.box = 'generic/fedora34' + config.vm.box = "fedora/39-cloud-base" config.vm.provision "shell", inline: <<-SHELL SHELL end diff --git a/test/ansible/vagrant/fedora-40/skip b/test/ansible/vagrant/fedora-40/skip new file mode 100644 index 00000000000..4f1a9063d2b --- /dev/null +++ b/test/ansible/vagrant/fedora-40/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile b/test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile new file mode 100644 index 00000000000..52490900fd8 --- /dev/null +++ b/test/ansible/vagrant/ubuntu-24-04-noble/Vagrantfile @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = 'alvistack/ubuntu-24.04' + config.vm.provision "shell", inline: <<-SHELL + SHELL +end + +common = '../common' +load common if File.exist?(common) From 599a5a49a1733057e6537bf6f4148124fe3e605a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 16:08:06 +0200 Subject: [PATCH 151/581] pkg/dumps.DumpTree: split to reduce complexity (#3001) * pkg/dumps.DumpTree: split to reduce complexity * lint --- .golangci.yml | 8 ++-- pkg/dumps/parser_dump.go | 84 +++++++++++++++++++++++++--------------- 2 files changed, 57 insertions(+), 35 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c044903d0f2..544cebbac6a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -3,7 +3,7 @@ linters-settings: cyclop: # lower this 
after refactoring - max-complexity: 48 + max-complexity: 45 gci: sections: @@ -18,11 +18,11 @@ linters-settings: gocognit: # lower this after refactoring - min-complexity: 145 + min-complexity: 128 gocyclo: # lower this after refactoring - min-complexity: 48 + min-complexity: 45 funlen: # Checks the number of lines in a function. @@ -55,7 +55,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 28 + min-complexity: 24 nlreturn: block-size: 5 diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index 227f96315d4..7d4e09ecf1a 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -92,40 +92,59 @@ func LoadParserDump(filepath string) (*ParserResults, error) { return &pdump, nil } -func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpOpts) { +type tree struct { // note : we can use line -> time as the unique identifier (of acquisition) - state := make(map[time.Time]map[string]map[string]ParserResult) - assoc := make(map[time.Time]string, 0) - parser_order := make(map[string][]string) + state map[time.Time]map[string]map[string]ParserResult + assoc map[time.Time]string + parserOrder map[string][]string +} + +func newTree() *tree { + return &tree{ + state: make(map[time.Time]map[string]map[string]ParserResult), + assoc: make(map[time.Time]string), + parserOrder: make(map[string][]string), + } +} +func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpOpts) { + t := newTree() + t.processEvents(parserResults) + t.processBuckets(bucketPour) + t.displayResults(opts) +} + +func (t *tree) processEvents(parserResults ParserResults) { for stage, parsers := range parserResults { // let's process parsers in the order according to idx - parser_order[stage] = make([]string, len(parsers)) + t.parserOrder[stage] = make([]string, len(parsers)) for pname, parser := range parsers { if len(parser) > 0 { - parser_order[stage][parser[0].Idx-1] = pname + 
t.parserOrder[stage][parser[0].Idx-1] = pname } } - for _, parser := range parser_order[stage] { + for _, parser := range t.parserOrder[stage] { results := parsers[parser] for _, parserRes := range results { evt := parserRes.Evt - if _, ok := state[evt.Line.Time]; !ok { - state[evt.Line.Time] = make(map[string]map[string]ParserResult) - assoc[evt.Line.Time] = evt.Line.Raw + if _, ok := t.state[evt.Line.Time]; !ok { + t.state[evt.Line.Time] = make(map[string]map[string]ParserResult) + t.assoc[evt.Line.Time] = evt.Line.Raw } - if _, ok := state[evt.Line.Time][stage]; !ok { - state[evt.Line.Time][stage] = make(map[string]ParserResult) + if _, ok := t.state[evt.Line.Time][stage]; !ok { + t.state[evt.Line.Time][stage] = make(map[string]ParserResult) } - state[evt.Line.Time][stage][parser] = ParserResult{Evt: evt, Success: parserRes.Success} + t.state[evt.Line.Time][stage][parser] = ParserResult{Evt: evt, Success: parserRes.Success} } } } +} +func (t *tree) processBuckets(bucketPour BucketPourInfo) { for bname, evtlist := range bucketPour { for _, evt := range evtlist { if evt.Line.Raw == "" { @@ -133,38 +152,41 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO } // it might be bucket overflow being reprocessed, skip this - if _, ok := state[evt.Line.Time]; !ok { - state[evt.Line.Time] = make(map[string]map[string]ParserResult) - assoc[evt.Line.Time] = evt.Line.Raw + if _, ok := t.state[evt.Line.Time]; !ok { + t.state[evt.Line.Time] = make(map[string]map[string]ParserResult) + t.assoc[evt.Line.Time] = evt.Line.Raw } // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered - if _, ok := state[evt.Line.Time]["buckets"]; !ok { - state[evt.Line.Time]["buckets"] = make(map[string]ParserResult) + if _, ok := t.state[evt.Line.Time]["buckets"]; !ok { + t.state[evt.Line.Time]["buckets"] = 
make(map[string]ParserResult) } - state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true} + t.state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true} } } +} +func (t *tree) displayResults(opts DumpOpts) { yellow := color.New(color.FgYellow).SprintFunc() red := color.New(color.FgRed).SprintFunc() green := color.New(color.FgGreen).SprintFunc() whitelistReason := "" + // get each line - for tstamp, rawstr := range assoc { + for tstamp, rawstr := range t.assoc { if opts.SkipOk { - if _, ok := state[tstamp]["buckets"]["OK"]; ok { + if _, ok := t.state[tstamp]["buckets"]["OK"]; ok { continue } } fmt.Printf("line: %s\n", rawstr) - skeys := make([]string, 0, len(state[tstamp])) + skeys := make([]string, 0, len(t.state[tstamp])) - for k := range state[tstamp] { + for k := range t.state[tstamp] { // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if k == "buckets" { @@ -180,18 +202,18 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO var prevItem types.Event for _, stage := range skeys { - parsers := state[tstamp][stage] + parsers := t.state[tstamp][stage] sep := "├" presep := "|" fmt.Printf("\t%s %s\n", sep, stage) - for idx, parser := range parser_order[stage] { + for idx, parser := range t.parserOrder[stage] { res := parsers[parser].Success sep := "├" - if idx == len(parser_order[stage])-1 { + if idx == len(t.parserOrder[stage])-1 { sep = "└" } @@ -278,12 +300,12 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO sep := "└" - if len(state[tstamp]["buckets"]) > 0 { + if len(t.state[tstamp]["buckets"]) > 0 { sep = "├" } // did the event enter the bucket pour phase ? 
- if _, ok := state[tstamp]["buckets"]["OK"]; ok { + if _, ok := t.state[tstamp]["buckets"]["OK"]; ok { fmt.Printf("\t%s-------- parser success %s\n", sep, emoji.GreenCircle) } else if whitelistReason != "" { fmt.Printf("\t%s-------- parser success, ignored by whitelist (%s) %s\n", sep, whitelistReason, emoji.GreenCircle) @@ -292,13 +314,13 @@ func DumpTree(parserResults ParserResults, bucketPour BucketPourInfo, opts DumpO } // now print bucket info - if len(state[tstamp]["buckets"]) > 0 { + if len(t.state[tstamp]["buckets"]) > 0 { fmt.Printf("\t├ Scenarios\n") } - bnames := make([]string, 0, len(state[tstamp]["buckets"])) + bnames := make([]string, 0, len(t.state[tstamp]["buckets"])) - for k := range state[tstamp]["buckets"] { + for k := range t.state[tstamp]["buckets"] { // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered if k == "OK" { From 8501f459c360c30bb7dae895c8c4a29cf803a339 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 16:24:33 +0200 Subject: [PATCH 152/581] db,lapi: round durations and timestamps to 1 second (#3015) * cscli/lapi: round decision duration to 1 second * db: round created, updated... 
timestamps to 1 second --- pkg/apiserver/controllers/v1/alerts.go | 2 +- pkg/apiserver/controllers/v1/decisions.go | 2 +- pkg/types/utils.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 19dbf8d0cfa..7483e8dcdf9 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -80,7 +80,7 @@ func FormatOneAlert(alert *ent.Alert) *models.Alert { } for _, decisionItem := range alert.Edges.Decisions { - duration := decisionItem.Until.Sub(time.Now().UTC()).String() + duration := decisionItem.Until.Sub(time.Now().UTC()).Round(time.Second).String() outputAlert.Decisions = append(outputAlert.Decisions, &models.Decision{ Duration: &duration, // transform into time.Time ? Scenario: &decisionItem.Scenario, diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index f3c6a7bba26..5cc7628458c 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -20,7 +20,7 @@ func FormatDecisions(decisions []*ent.Decision) []*models.Decision { var results []*models.Decision for _, dbDecision := range decisions { - duration := dbDecision.Until.Sub(time.Now().UTC()).String() + duration := dbDecision.Until.Sub(time.Now().UTC()).Round(time.Second).String() decision := models.Decision{ ID: int64(dbDecision.ID), Duration: &duration, diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 712d44ba12d..384dd00367e 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -66,7 +66,7 @@ func ConfigureLogger(clog *log.Logger) error { } func UtcNow() time.Time { - return time.Now().UTC() + return time.Now().UTC().Round(time.Second) } func IsNetworkFS(path string) (bool, string, error) { From 89d02e94582d9dbc6ad52961429e8da289df8638 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 16:45:47 +0200 Subject: [PATCH 
153/581] CI/lint: enable some "revive" rules (#3041) argument-limit Warns when a function receives more parameters than the maximum set by the rule's configuration. bare-return Warns on bare (a.k.a. naked) returns context-as-argument By convention, context.Context should be the first parameter of a function. duplicated-imports It is possible to unintentionally import the same package twice. modifies-parameter A function that modifies its parameters can be hard to understand. It can also be misleading if the arguments are passed by value by the caller. redundant-import-alias This rule warns on redundant import aliases. unreachable-code This rule spots and proposes to remove unreachable code. --- .golangci.yml | 22 +++++++------------ cmd/crowdsec/metrics.go | 2 +- pkg/acquisition/modules/file/file_test.go | 2 +- .../loki/internal/lokiclient/loki_client.go | 4 ++-- pkg/acquisition/modules/loki/loki.go | 2 +- pkg/apiserver/apiserver.go | 2 +- pkg/apiserver/controllers/controller.go | 2 +- pkg/appsec/request.go | 3 +-- pkg/csplugin/hclog_adapter.go | 2 +- 9 files changed, 17 insertions(+), 24 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 544cebbac6a..34393fce56e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -118,10 +118,6 @@ linters-settings: rules: - name: add-constant disabled: true - - name: argument-limit - disabled: true - - name: bare-return - disabled: true - name: blank-imports disabled: true - name: bool-literal-in-expr @@ -132,16 +128,12 @@ linters-settings: disabled: true - name: confusing-results disabled: true - - name: context-as-argument - disabled: true - name: cyclomatic disabled: true - name: deep-exit disabled: true - name: defer disabled: true - - name: duplicated-imports - disabled: true - name: early-return disabled: true - name: empty-block @@ -176,16 +168,12 @@ linters-settings: disabled: true - name: max-public-structs disabled: true - - name: modifies-parameter - disabled: true - name: optimize-operands-order disabled: true 
- name: nested-structs disabled: true - name: package-comments disabled: true - - name: redundant-import-alias - disabled: true - name: struct-tag disabled: true - name: superfluous-else @@ -212,8 +200,6 @@ linters-settings: - "fmt.Println" - name: unnecessary-stmt disabled: true - - name: unreachable-code - disabled: true - name: unused-parameter disabled: true - name: unused-receiver @@ -506,3 +492,11 @@ issues: path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + - linters: + - revive + path: pkg/metabase/metabase.go + + - linters: + - revive + path: cmd/crowdsec-cli/copyfile.go + diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index cc0c118b436..d3c6e172091 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -12,7 +12,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/version" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/cache" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index ad5fe8bfabd..0873b837a3f 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -416,8 +416,8 @@ force_inotify: true`, testPattern), for i := 0; i < 5; i++ { _, err = fmt.Fprintf(fd, "%d\n", i) if err != nil { - t.Fatalf("could not write test file : %s", err) os.Remove("test_files/stream.log") + t.Fatalf("could not write test file : %s", err) } } diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index be14939c44d..6437a951504 100644 
--- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -106,7 +106,7 @@ func (lc *LokiClient) decreaseTicker(ticker *time.Ticker) { } } -func (lc *LokiClient) queryRange(uri string, ctx context.Context, c chan *LokiQueryRangeResponse, infinite bool) error { +func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQueryRangeResponse, infinite bool) error { lc.currentTickerInterval = 100 * time.Millisecond ticker := time.NewTicker(lc.currentTickerInterval) defer ticker.Stop() @@ -296,7 +296,7 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ lc.Logger.Infof("Connecting to %s", url) lc.t.Go(func() error { - return lc.queryRange(url, ctx, c, infinite) + return lc.queryRange(ctx, url, c, infinite) }) return c } diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 52b87eb95d3..15c454723ee 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -19,7 +19,7 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - lokiclient "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki/internal/lokiclient" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki/internal/lokiclient" "github.com/crowdsecurity/crowdsec/pkg/types" ) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 6592c8bbf76..056f74a7b31 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -21,7 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" - v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" 
"github.com/crowdsecurity/crowdsec/pkg/database" diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index bab1965123e..51f359244be 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -9,7 +9,7 @@ import ( "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" - v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 66b5d797fd7..d0e3632abda 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -12,7 +12,6 @@ import ( "regexp" "github.com/google/uuid" - "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" ) @@ -276,7 +275,7 @@ func (r *ReqDumpFilter) ToJSON() error { } // Generate a ParsedRequest from a http.Request. 
ParsedRequest can be consumed by the App security Engine -func NewParsedRequestFromRequest(r *http.Request, logger *logrus.Entry) (ParsedRequest, error) { +func NewParsedRequestFromRequest(r *http.Request, logger *log.Entry) (ParsedRequest, error) { var err error contentLength := r.ContentLength if contentLength < 0 { diff --git a/pkg/csplugin/hclog_adapter.go b/pkg/csplugin/hclog_adapter.go index 9550e4b4539..58190684ebc 100644 --- a/pkg/csplugin/hclog_adapter.go +++ b/pkg/csplugin/hclog_adapter.go @@ -230,5 +230,5 @@ func safeString(str fmt.Stringer) (s string) { }() s = str.String() - return + return //nolint:revive // bare return for the defer } From c1e858cbcaf17b0b4f61991c3b268f43dfe1559a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 May 2024 19:15:05 +0200 Subject: [PATCH 154/581] cscli: refactor alerts/console/decisions/setup (#3003) * cscli alerts: extract list(), inspect() * cscli console: extract enroll() * cscli decisions: extract list() * support dump: don't use global * cscli setup: split command constructors * lint --- cmd/crowdsec-cli/alerts.go | 239 ++++++++++++++++++---------------- cmd/crowdsec-cli/console.go | 178 +++++++++++++------------ cmd/crowdsec-cli/decisions.go | 177 +++++++++++++------------ cmd/crowdsec-cli/main.go | 2 +- cmd/crowdsec-cli/setup.go | 153 +++++++++++----------- cmd/crowdsec-cli/support.go | 13 +- 6 files changed, 407 insertions(+), 355 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index d32af6ae56c..9f4c5093afc 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -109,7 +109,8 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi return nil } -var alertTemplate = ` +func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) error { + alertTemplate := ` ################################################################################################ - ID : {{.ID}} @@ 
-127,7 +128,6 @@ var alertTemplate = ` ` -func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) error { tmpl, err := template.New("alert").Parse(alertTemplate) if err != nil { return err @@ -228,6 +228,92 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { return cmd } +func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { + if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, + alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { + return err + } + + if limit != nil { + alertListFilter.Limit = limit + } + + if *alertListFilter.Until == "" { + alertListFilter.Until = nil + } else if strings.HasSuffix(*alertListFilter.Until, "d") { + /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ + realDuration := strings.TrimSuffix(*alertListFilter.Until, "d") + + days, err := strconv.Atoi(realDuration) + if err != nil { + return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until) + } + + *alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h") + } + + if *alertListFilter.Since == "" { + alertListFilter.Since = nil + } else if strings.HasSuffix(*alertListFilter.Since, "d") { + // time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier + realDuration := strings.TrimSuffix(*alertListFilter.Since, "d") + + days, err := strconv.Atoi(realDuration) + if err != nil { + return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since) + } + + *alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h") + } + + if *alertListFilter.IncludeCAPI { + *alertListFilter.Limit = 0 + } + + if *alertListFilter.TypeEquals == "" { + alertListFilter.TypeEquals = nil + } + + if *alertListFilter.ScopeEquals == "" { + alertListFilter.ScopeEquals = nil + } + + if *alertListFilter.ValueEquals == 
"" { + alertListFilter.ValueEquals = nil + } + + if *alertListFilter.ScenarioEquals == "" { + alertListFilter.ScenarioEquals = nil + } + + if *alertListFilter.IPEquals == "" { + alertListFilter.IPEquals = nil + } + + if *alertListFilter.RangeEquals == "" { + alertListFilter.RangeEquals = nil + } + + if *alertListFilter.OriginEquals == "" { + alertListFilter.OriginEquals = nil + } + + if contained != nil && *contained { + alertListFilter.Contains = new(bool) + } + + alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) + if err != nil { + return fmt.Errorf("unable to list alerts: %w", err) + } + + if err = cli.alertsToTable(alerts, printMachine); err != nil { + return fmt.Errorf("unable to list alerts: %w", err) + } + + return nil +} + func (cli *cliAlerts) NewListCmd() *cobra.Command { alertListFilter := apiclient.AlertsListOpts{ ScopeEquals: new(string), @@ -259,81 +345,7 @@ cscli alerts list --type ban`, Long: `List alerts with optional filters`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, - alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { - printHelp(cmd) - return err - } - if limit != nil { - alertListFilter.Limit = limit - } - - if *alertListFilter.Until == "" { - alertListFilter.Until = nil - } else if strings.HasSuffix(*alertListFilter.Until, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*alertListFilter.Until, "d") - days, err := strconv.Atoi(realDuration) - if err != nil { - printHelp(cmd) - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until) - } - *alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h") - } - if *alertListFilter.Since == "" { - alertListFilter.Since = nil - } else if strings.HasSuffix(*alertListFilter.Since, "d") { - 
/*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*alertListFilter.Since, "d") - days, err := strconv.Atoi(realDuration) - if err != nil { - printHelp(cmd) - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since) - } - *alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h") - } - - if *alertListFilter.IncludeCAPI { - *alertListFilter.Limit = 0 - } - - if *alertListFilter.TypeEquals == "" { - alertListFilter.TypeEquals = nil - } - if *alertListFilter.ScopeEquals == "" { - alertListFilter.ScopeEquals = nil - } - if *alertListFilter.ValueEquals == "" { - alertListFilter.ValueEquals = nil - } - if *alertListFilter.ScenarioEquals == "" { - alertListFilter.ScenarioEquals = nil - } - if *alertListFilter.IPEquals == "" { - alertListFilter.IPEquals = nil - } - if *alertListFilter.RangeEquals == "" { - alertListFilter.RangeEquals = nil - } - - if *alertListFilter.OriginEquals == "" { - alertListFilter.OriginEquals = nil - } - - if contained != nil && *contained { - alertListFilter.Contains = new(bool) - } - - alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) - if err != nil { - return fmt.Errorf("unable to list alerts: %w", err) - } - - if err = cli.alertsToTable(alerts, printMachine); err != nil { - return fmt.Errorf("unable to list alerts: %w", err) - } - - return nil + return cli.list(alertListFilter, limit, contained, printMachine) }, } @@ -466,6 +478,46 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return cmd } +func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { + cfg := cli.cfg() + + for _, alertID := range alertIDs { + id, err := strconv.Atoi(alertID) + if err != nil { + return fmt.Errorf("bad alert id %s", alertID) + } + + alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) + if err != nil { + return fmt.Errorf("can't find alert with id %s: %w", alertID, err) + } + 
+ switch cfg.Cscli.Output { + case "human": + if err := cli.displayOneAlert(alert, details); err != nil { + log.Warnf("unable to display alert with id %s: %s", alertID, err) + continue + } + case "json": + data, err := json.MarshalIndent(alert, "", " ") + if err != nil { + return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + } + + fmt.Printf("%s\n", string(data)) + case "raw": + data, err := yaml.Marshal(alert) + if err != nil { + return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + } + + fmt.Println(string(data)) + } + } + + return nil +} + func (cli *cliAlerts) NewInspectCmd() *cobra.Command { var details bool @@ -475,42 +527,11 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { Example: `cscli alerts inspect 123`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - cfg := cli.cfg() if len(args) == 0 { printHelp(cmd) return errors.New("missing alert_id") } - for _, alertID := range args { - id, err := strconv.Atoi(alertID) - if err != nil { - return fmt.Errorf("bad alert id %s", alertID) - } - alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) - if err != nil { - return fmt.Errorf("can't find alert with id %s: %w", alertID, err) - } - switch cfg.Cscli.Output { - case "human": - if err := cli.displayOneAlert(alert, details); err != nil { - log.Warnf("unable to display alert with id %s: %s", alertID, err) - continue - } - case "json": - data, err := json.MarshalIndent(alert, "", " ") - if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) - } - fmt.Printf("%s\n", string(data)) - case "raw": - data, err := yaml.Marshal(alert) - if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) - } - fmt.Println(string(data)) - } - } - - return nil + return cli.inspect(details, args...) 
}, } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 149b7656884..eab24a5d709 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -67,6 +67,99 @@ func (cli *cliConsole) NewCommand() *cobra.Command { return cmd } +func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []string, opts []string) error { + cfg := cli.cfg() + password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) + + apiURL, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) + if err != nil { + return fmt.Errorf("could not parse CAPI URL: %w", err) + } + + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + + scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) + if err != nil { + return fmt.Errorf("failed to get installed scenarios: %w", err) + } + + if len(scenarios) == 0 { + scenarios = make([]string, 0) + } + + enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} + + if len(opts) != 0 { + for _, opt := range opts { + valid := false + + if opt == "all" { + enableOpts = csconfig.CONSOLE_CONFIGS + break + } + + for _, availableOpt := range csconfig.CONSOLE_CONFIGS { + if opt == availableOpt { + valid = true + enable := true + + for _, enabledOpt := range enableOpts { + if opt == enabledOpt { + enable = false + continue + } + } + + if enable { + enableOpts = append(enableOpts, opt) + } + + break + } + } + + if !valid { + return fmt.Errorf("option %s doesn't exist", opt) + } + } + } + + c, _ := apiclient.NewClient(&apiclient.Config{ + MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, + Password: password, + Scenarios: scenarios, + UserAgent: cwversion.UserAgent(), + URL: apiURL, + VersionPrefix: "v3", + }) + + resp, err := c.Auth.EnrollWatcher(context.Background(), key, name, tags, overwrite) + if err != nil { + return fmt.Errorf("could not enroll instance: %w", err) + } + + if resp.Response.StatusCode == 200 && 
!overwrite { + log.Warning("Instance already enrolled. You can use '--overwrite' to force enroll") + return nil + } + + if err := cli.setConsoleOpts(enableOpts, true); err != nil { + return err + } + + for _, opt := range enableOpts { + log.Infof("Enabled %s : %s", opt, csconfig.CONSOLE_CONFIGS_HELP[opt]) + } + + log.Info("Watcher successfully enrolled. Visit https://app.crowdsec.net to accept it.") + log.Info("Please restart crowdsec after accepting the enrollment.") + + return nil +} + func (cli *cliConsole) newEnrollCmd() *cobra.Command { name := "" overwrite := false @@ -90,90 +183,7 @@ After running this command your will need to validate the enrollment in the weba Args: cobra.ExactArgs(1), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { - cfg := cli.cfg() - password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) - - apiURL, err := url.Parse(cfg.API.Server.OnlineClient.Credentials.URL) - if err != nil { - return fmt.Errorf("could not parse CAPI URL: %w", err) - } - - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("failed to get installed scenarios: %w", err) - } - - if len(scenarios) == 0 { - scenarios = make([]string, 0) - } - - enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} - if len(opts) != 0 { - for _, opt := range opts { - valid := false - if opt == "all" { - enableOpts = csconfig.CONSOLE_CONFIGS - break - } - for _, availableOpt := range csconfig.CONSOLE_CONFIGS { - if opt == availableOpt { - valid = true - enable := true - for _, enabledOpt := range enableOpts { - if opt == enabledOpt { - enable = false - continue - } - } - if enable { - enableOpts = append(enableOpts, opt) - } - - break - } - } - if !valid { - return fmt.Errorf("option %s doesn't exist", opt) - } - } - } - - c, _ := apiclient.NewClient(&apiclient.Config{ - 
MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, - Password: password, - Scenarios: scenarios, - UserAgent: cwversion.UserAgent(), - URL: apiURL, - VersionPrefix: "v3", - }) - - resp, err := c.Auth.EnrollWatcher(context.Background(), args[0], name, tags, overwrite) - if err != nil { - return fmt.Errorf("could not enroll instance: %w", err) - } - - if resp.Response.StatusCode == 200 && !overwrite { - log.Warning("Instance already enrolled. You can use '--overwrite' to force enroll") - return nil - } - - if err := cli.setConsoleOpts(enableOpts, true); err != nil { - return err - } - - for _, opt := range enableOpts { - log.Infof("Enabled %s : %s", opt, csconfig.CONSOLE_CONFIGS_HELP[opt]) - } - - log.Info("Watcher successfully enrolled. Visit https://app.crowdsec.net to accept it.") - log.Info("Please restart crowdsec after accepting the enrollment.") - - return nil + return cli.enroll(args[0], name, overwrite, tags, opts) }, } diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 9b9159561be..de3bf73dca2 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -169,8 +169,99 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { return cmd } +func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { + var err error + /*take care of shorthand options*/ + if err = manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil { + return err + } + + filter.ActiveDecisionEquals = new(bool) + *filter.ActiveDecisionEquals = true + + if NoSimu != nil && *NoSimu { + filter.IncludeSimulated = new(bool) + } + /* nullify the empty entries to avoid bad filter */ + if *filter.Until == "" { + filter.Until = nil + } else if strings.HasSuffix(*filter.Until, "d") { + /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ + realDuration := 
strings.TrimSuffix(*filter.Until, "d") + + days, err := strconv.Atoi(realDuration) + if err != nil { + return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until) + } + + *filter.Until = fmt.Sprintf("%d%s", days*24, "h") + } + + if *filter.Since == "" { + filter.Since = nil + } else if strings.HasSuffix(*filter.Since, "d") { + /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ + realDuration := strings.TrimSuffix(*filter.Since, "d") + + days, err := strconv.Atoi(realDuration) + if err != nil { + return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Since) + } + + *filter.Since = fmt.Sprintf("%d%s", days*24, "h") + } + + if *filter.IncludeCAPI { + *filter.Limit = 0 + } + + if *filter.TypeEquals == "" { + filter.TypeEquals = nil + } + + if *filter.ValueEquals == "" { + filter.ValueEquals = nil + } + + if *filter.ScopeEquals == "" { + filter.ScopeEquals = nil + } + + if *filter.ScenarioEquals == "" { + filter.ScenarioEquals = nil + } + + if *filter.IPEquals == "" { + filter.IPEquals = nil + } + + if *filter.RangeEquals == "" { + filter.RangeEquals = nil + } + + if *filter.OriginEquals == "" { + filter.OriginEquals = nil + } + + if contained != nil && *contained { + filter.Contains = new(bool) + } + + alerts, _, err := Client.Alerts.List(context.Background(), filter) + if err != nil { + return fmt.Errorf("unable to retrieve decisions: %w", err) + } + + err = cli.decisionsToTable(alerts, printMachine) + if err != nil { + return fmt.Errorf("unable to print decisions: %w", err) + } + + return nil +} + func (cli *cliDecisions) newListCmd() *cobra.Command { - var filter = apiclient.AlertsListOpts{ + filter := apiclient.AlertsListOpts{ ValueEquals: new(string), ScopeEquals: new(string), ScenarioEquals: new(string), @@ -200,83 +291,7 @@ cscli decisions list --origin lists --scenario list_name Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd 
*cobra.Command, _ []string) error { - var err error - /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil { - return err - } - filter.ActiveDecisionEquals = new(bool) - *filter.ActiveDecisionEquals = true - if NoSimu != nil && *NoSimu { - filter.IncludeSimulated = new(bool) - } - /* nullify the empty entries to avoid bad filter */ - if *filter.Until == "" { - filter.Until = nil - } else if strings.HasSuffix(*filter.Until, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*filter.Until, "d") - days, err := strconv.Atoi(realDuration) - if err != nil { - printHelp(cmd) - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until) - } - *filter.Until = fmt.Sprintf("%d%s", days*24, "h") - } - - if *filter.Since == "" { - filter.Since = nil - } else if strings.HasSuffix(*filter.Since, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*filter.Since, "d") - days, err := strconv.Atoi(realDuration) - if err != nil { - printHelp(cmd) - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Since) - } - *filter.Since = fmt.Sprintf("%d%s", days*24, "h") - } - if *filter.IncludeCAPI { - *filter.Limit = 0 - } - if *filter.TypeEquals == "" { - filter.TypeEquals = nil - } - if *filter.ValueEquals == "" { - filter.ValueEquals = nil - } - if *filter.ScopeEquals == "" { - filter.ScopeEquals = nil - } - if *filter.ScenarioEquals == "" { - filter.ScenarioEquals = nil - } - if *filter.IPEquals == "" { - filter.IPEquals = nil - } - if *filter.RangeEquals == "" { - filter.RangeEquals = nil - } - - if *filter.OriginEquals == "" { - filter.OriginEquals = nil - } - - if contained != nil && *contained { - filter.Contains = new(bool) - } - - 
alerts, _, err := Client.Alerts.List(context.Background(), filter) - if err != nil { - return fmt.Errorf("unable to retrieve decisions: %w", err) - } - - err = cli.decisionsToTable(alerts, printMachine) - if err != nil { - return fmt.Errorf("unable to print decisions: %w", err) - } - - return nil + return cli.list(filter, NoSimu, contained, printMachine) }, } cmd.Flags().SortFlags = false @@ -489,15 +504,15 @@ cscli decisions delete --origin lists --scenario list_name if delDecisionID == "" { decisions, _, err = Client.Decisions.Delete(context.Background(), delFilter) if err != nil { - return fmt.Errorf("unable to delete decisions: %v", err) + return fmt.Errorf("unable to delete decisions: %w", err) } } else { if _, err = strconv.Atoi(delDecisionID); err != nil { - return fmt.Errorf("id '%s' is not an integer: %v", delDecisionID, err) + return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) } decisions, _, err = Client.Decisions.DeleteOne(context.Background(), delDecisionID) if err != nil { - return fmt.Errorf("unable to delete decision: %v", err) + return fmt.Errorf("unable to delete decision: %w", err) } } log.Infof("%s decision(s) deleted", decisions.NbDeleted) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 3881818123f..622fe50a09d 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -269,7 +269,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) if fflag.CscliSetup.IsEnabled() { - cmd.AddCommand(NewSetupCmd()) + cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) } return cmd diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index ba3670848d8..8a8c5a6c665 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -17,9 +17,18 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/setup" ) -// NewSetupCmd defines the "cscli setup" command. 
-func NewSetupCmd() *cobra.Command { - cmdSetup := &cobra.Command{ +type cliSetup struct { + cfg configGetter +} + +func NewCLISetup(cfg configGetter) *cliSetup { + return &cliSetup{ + cfg: cfg, + } +} + +func (cli *cliSetup) NewCommand() *cobra.Command { + cmd := &cobra.Command{ Use: "setup", Short: "Tools to configure crowdsec", Long: "Manage hub configuration and service detection", @@ -27,83 +36,79 @@ func NewSetupCmd() *cobra.Command { DisableAutoGenTag: true, } - // - // cscli setup detect - // - { - cmdSetupDetect := &cobra.Command{ - Use: "detect", - Short: "detect running services, generate a setup file", - DisableAutoGenTag: true, - RunE: runSetupDetect, - } + cmd.AddCommand(cli.NewDetectCmd()) + cmd.AddCommand(cli.NewInstallHubCmd()) + cmd.AddCommand(cli.NewDataSourcesCmd()) + cmd.AddCommand(cli.NewValidateCmd()) - defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") - - flags := cmdSetupDetect.Flags() - flags.String("detect-config", defaultServiceDetect, "path to service detection configuration") - flags.Bool("list-supported-services", false, "do not detect; only print supported services") - flags.StringSlice("force-unit", nil, "force detection of a systemd unit (can be repeated)") - flags.StringSlice("force-process", nil, "force detection of a running process (can be repeated)") - flags.StringSlice("skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") - flags.String("force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") - flags.String("force-os-id", "", "override OS.ID=[debian | ubuntu | , redhat...]") - flags.String("force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") - flags.Bool("snub-systemd", false, "don't use systemd, even if available") - flags.Bool("yaml", false, "output yaml, not json") - cmdSetup.AddCommand(cmdSetupDetect) - } - - // - // cscli setup install-hub - // - { - cmdSetupInstallHub := &cobra.Command{ - Use: 
"install-hub [setup_file] [flags]", - Short: "install items from a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: runSetupInstallHub, - } + return cmd +} - flags := cmdSetupInstallHub.Flags() - flags.Bool("dry-run", false, "don't install anything; print out what would have been") - cmdSetup.AddCommand(cmdSetupInstallHub) - } - - // - // cscli setup datasources - // - { - cmdSetupDataSources := &cobra.Command{ - Use: "datasources [setup_file] [flags]", - Short: "generate datasource (acquisition) configuration from a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: runSetupDataSources, - } +func (cli *cliSetup) NewDetectCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "detect", + Short: "detect running services, generate a setup file", + DisableAutoGenTag: true, + RunE: runSetupDetect, + } - flags := cmdSetupDataSources.Flags() - flags.String("to-dir", "", "write the configuration to a directory, in multiple files") - cmdSetup.AddCommand(cmdSetupDataSources) - } - - // - // cscli setup validate - // - { - cmdSetupValidate := &cobra.Command{ - Use: "validate [setup_file]", - Short: "validate a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: runSetupValidate, - } + defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") + + flags := cmd.Flags() + flags.String("detect-config", defaultServiceDetect, "path to service detection configuration") + flags.Bool("list-supported-services", false, "do not detect; only print supported services") + flags.StringSlice("force-unit", nil, "force detection of a systemd unit (can be repeated)") + flags.StringSlice("force-process", nil, "force detection of a running process (can be repeated)") + flags.StringSlice("skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") + flags.String("force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") + flags.String("force-os-id", 
"", "override OS.ID=[debian | ubuntu | , redhat...]") + flags.String("force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") + flags.Bool("snub-systemd", false, "don't use systemd, even if available") + flags.Bool("yaml", false, "output yaml, not json") + + return cmd +} - cmdSetup.AddCommand(cmdSetupValidate) +func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "install-hub [setup_file] [flags]", + Short: "install items from a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: runSetupInstallHub, + } + + flags := cmd.Flags() + flags.Bool("dry-run", false, "don't install anything; print out what would have been") + + return cmd +} + +func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "datasources [setup_file] [flags]", + Short: "generate datasource (acquisition) configuration from a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: runSetupDataSources, + } + + flags := cmd.Flags() + flags.String("to-dir", "", "write the configuration to a directory, in multiple files") + + return cmd +} + +func (cli *cliSetup) NewValidateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "validate [setup_file]", + Short: "validate a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: runSetupValidate, } - return cmdSetup + return cmd } func runSetupDetect(cmd *cobra.Command, args []string) error { diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 61fa2b55817..41e23dc9654 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -294,7 
+295,7 @@ func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { return nil } -func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, endpoint string) error { +func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, prometheusCfg csconfig.PrometheusCfg, endpoint string) error { log.Infof("Collecting pprof/%s data", endpoint) ctx, cancel := context.WithTimeout(ctx, 120*time.Second) @@ -306,8 +307,8 @@ func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, endpoint s fmt.Sprintf( "http://%s/debug/pprof/%s?debug=1", net.JoinHostPort( - csConfig.Prometheus.ListenAddr, - strconv.Itoa(csConfig.Prometheus.ListenPort), + prometheusCfg.ListenAddr, + strconv.Itoa(prometheusCfg.ListenPort), ), endpoint, ), @@ -538,15 +539,15 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { // call pprof separately, one might fail for timeout - if err = cli.dumpPprof(ctx, zipWriter, "goroutine"); err != nil { + if err = cli.dumpPprof(ctx, zipWriter, *cfg.Prometheus, "goroutine"); err != nil { log.Warnf("could not collect pprof goroutine data: %s", err) } - if err = cli.dumpPprof(ctx, zipWriter, "heap"); err != nil { + if err = cli.dumpPprof(ctx, zipWriter, *cfg.Prometheus, "heap"); err != nil { log.Warnf("could not collect pprof heap data: %s", err) } - if err = cli.dumpPprof(ctx, zipWriter, "profile"); err != nil { + if err = cli.dumpPprof(ctx, zipWriter, *cfg.Prometheus, "profile"); err != nil { log.Warnf("could not collect pprof cpu data: %s", err) } From 9823ed61b694d7b65f79dfe354679593908d8272 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 3 Jun 2024 12:21:00 +0200 Subject: [PATCH 155/581] cscli: log.Fatal -> return err (#3049) * cscli: log.Fatal -> return err * lint --- cmd/crowdsec-cli/alerts_table.go | 8 ++++++ cmd/crowdsec-cli/capi.go | 10 ++------ cmd/crowdsec-cli/decisions_import.go | 13 +++++----- cmd/crowdsec-cli/doc.go | 2 +- cmd/crowdsec-cli/flag.go | 14 
+++++----- cmd/crowdsec-cli/hub.go | 2 +- cmd/crowdsec-cli/itemcli.go | 26 +++++++++---------- cmd/crowdsec-cli/main.go | 38 ++++++++++++++++++---------- cmd/crowdsec-cli/notifications.go | 3 +++ cmd/crowdsec-cli/papi.go | 2 +- 10 files changed, 66 insertions(+), 52 deletions(-) diff --git a/cmd/crowdsec-cli/alerts_table.go b/cmd/crowdsec-cli/alerts_table.go index ec457f3723e..fbde4d2aaa9 100644 --- a/cmd/crowdsec-cli/alerts_table.go +++ b/cmd/crowdsec-cli/alerts_table.go @@ -15,10 +15,12 @@ import ( func alertsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { t := newTable(out) t.SetRowLines(false) + header := []string{"ID", "value", "reason", "country", "as", "decisions", "created_at"} if printMachine { header = append(header, "machine") } + t.SetHeaders(header...) for _, alertItem := range *alerts { @@ -54,20 +56,25 @@ func alertDecisionsTable(out io.Writer, alert *models.Alert) { t := newTable(out) t.SetRowLines(false) t.SetHeaders("ID", "scope:value", "action", "expiration", "created_at") + for _, decision := range alert.Decisions { parsedDuration, err := time.ParseDuration(*decision.Duration) if err != nil { log.Error(err) } + expire := time.Now().UTC().Add(parsedDuration) if time.Now().UTC().After(expire) { continue } + foundActive = true scopeAndValue := *decision.Scope + if *decision.Value != "" { scopeAndValue += ":" + *decision.Value } + t.AddRow( strconv.Itoa(int(decision.ID)), scopeAndValue, @@ -76,6 +83,7 @@ func alertDecisionsTable(out io.Writer, alert *models.Alert) { alert.CreatedAt, ) } + if foundActive { fmt.Printf(" - Active Decisions :\n") t.Render() // Send output diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index f5940d71b66..7b1613c5463 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -48,11 +48,7 @@ func (cli *cliCapi) NewCommand() *cobra.Command { return err } - if err := require.CAPI(cfg); err != nil { - return err - } - - return nil + return require.CAPI(cfg) }, } @@ 
-147,9 +143,7 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { cmd.Flags().StringVarP(&outputFile, "file", "f", "", "output file destination") cmd.Flags().StringVar(&capiUserPrefix, "schmilblick", "", "set a schmilblick (use in tests only)") - if err := cmd.Flags().MarkHidden("schmilblick"); err != nil { - log.Fatalf("failed to hide flag: %s", err) - } + _ = cmd.Flags().MarkHidden("schmilblick") return cmd } diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/decisions_import.go index 8c36bd5dc92..1df9d1ae4a1 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/decisions_import.go @@ -46,7 +46,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("unable to parse values: '%s'", err) + return nil, fmt.Errorf("unable to parse values: '%w'", err) } case "json": log.Infof("Parsing json") @@ -58,7 +58,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { log.Infof("Parsing csv") if err := csvutil.Unmarshal(content, &ret); err != nil { - return nil, fmt.Errorf("unable to parse csv: '%s'", err) + return nil, fmt.Errorf("unable to parse csv: '%w'", err) } default: return nil, fmt.Errorf("invalid format '%s', expected one of 'json', 'csv', 'values'", format) @@ -67,8 +67,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { return ret, nil } - -func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { +func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { flags := cmd.Flags() input, err := flags.GetString("input") @@ -146,13 +145,13 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } else { fin, err = os.Open(input) if err != nil { - return fmt.Errorf("unable to open %s: %s", input, err) + return fmt.Errorf("unable to open %s: %w", input, err) } } content, err = io.ReadAll(fin) if err != nil { - return 
fmt.Errorf("unable to read from %s: %s", input, err) + return fmt.Errorf("unable to read from %s: %w", input, err) } decisionsListRaw, err := parseDecisionList(content, format) @@ -243,7 +242,7 @@ func (cli *cliDecisions) newImportCmd() *cobra.Command { Long: "expected format:\n" + "csv : any of duration,reason,scope,type,value, with a header line\n" + "json :" + "`{" + `"duration" : "24h", "reason" : "my_scenario", "scope" : "ip", "type" : "ban", "value" : "x.y.z.z"` + "}`", - Args: cobra.NoArgs, + Args: cobra.NoArgs, DisableAutoGenTag: true, Example: `decisions.csv: duration,scope,value diff --git a/cmd/crowdsec-cli/doc.go b/cmd/crowdsec-cli/doc.go index 4b1d50d1583..db1e642115e 100644 --- a/cmd/crowdsec-cli/doc.go +++ b/cmd/crowdsec-cli/doc.go @@ -24,7 +24,7 @@ func (cli cliDoc) NewCommand(rootCmd *cobra.Command) *cobra.Command { DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { if err := doc.GenMarkdownTreeCustom(rootCmd, "./doc/", cli.filePrepender, cli.linkHandler); err != nil { - return fmt.Errorf("failed to generate cobra doc: %s", err) + return fmt.Errorf("failed to generate cobra doc: %w", err) } return nil }, diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/flag.go index e85f33d4467..1780d08e5f7 100644 --- a/cmd/crowdsec-cli/flag.go +++ b/cmd/crowdsec-cli/flag.go @@ -9,21 +9,21 @@ import ( type MachinePassword string func (p *MachinePassword) String() string { - return string(*p) + return string(*p) } func (p *MachinePassword) Set(v string) error { // a password can't be more than 72 characters // due to bcrypt limitations - if len(v) > 72 { - return errors.New("password too long (max 72 characters)") - } + if len(v) > 72 { + return errors.New("password too long (max 72 characters)") + } - *p = MachinePassword(v) + *p = MachinePassword(v) - return nil + return nil } func (p *MachinePassword) Type() string { - return "string" + return "string" } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 
62eb894d8f2..318dd018e06 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -type cliHub struct{ +type cliHub struct { cfg configGetter } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index a72cfa735c5..e1a908f3492 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -44,8 +44,8 @@ type cliItem struct { func (cli cliItem) NewCommand() *cobra.Command { cmd := &cobra.Command{ - Use: coalesce.String(cli.help.use, fmt.Sprintf("%s [item]...", cli.name)), - Short: coalesce.String(cli.help.short, fmt.Sprintf("Manage hub %s", cli.name)), + Use: coalesce.String(cli.help.use, cli.name+" [item]..."), + Short: coalesce.String(cli.help.short, "Manage hub "+cli.name), Long: cli.help.long, Example: cli.help.example, Args: cobra.MinimumNArgs(1), @@ -106,7 +106,7 @@ func (cli cliItem) newInstallCmd() *cobra.Command { cmd := &cobra.Command{ Use: coalesce.String(cli.installHelp.use, "install [item]..."), - Short: coalesce.String(cli.installHelp.short, fmt.Sprintf("Install given %s", cli.oneOrMore)), + Short: coalesce.String(cli.installHelp.short, "Install given "+cli.oneOrMore), Long: coalesce.String(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), Example: cli.installHelp.example, Args: cobra.MinimumNArgs(1), @@ -122,7 +122,7 @@ func (cli cliItem) newInstallCmd() *cobra.Command { flags := cmd.Flags() flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") - flags.BoolVar(&ignoreError, "ignore", false, fmt.Sprintf("Ignore errors when installing multiple %s", cli.name)) + flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) return cmd } @@ -232,8 +232,8 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { cmd := 
&cobra.Command{ Use: coalesce.String(cli.removeHelp.use, "remove [item]..."), - Short: coalesce.String(cli.removeHelp.short, fmt.Sprintf("Remove given %s", cli.oneOrMore)), - Long: coalesce.String(cli.removeHelp.long, fmt.Sprintf("Remove one or more %s", cli.name)), + Short: coalesce.String(cli.removeHelp.short, "Remove given "+cli.oneOrMore), + Long: coalesce.String(cli.removeHelp.long, "Remove one or more "+cli.name), Example: cli.removeHelp.example, Aliases: []string{"delete"}, DisableAutoGenTag: true, @@ -248,7 +248,7 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { flags := cmd.Flags() flags.BoolVar(&purge, "purge", false, "Delete source file too") flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") - flags.BoolVar(&all, "all", false, fmt.Sprintf("Remove all the %s", cli.name)) + flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) return cmd } @@ -328,7 +328,7 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { cmd := &cobra.Command{ Use: coalesce.String(cli.upgradeHelp.use, "upgrade [item]..."), - Short: coalesce.String(cli.upgradeHelp.short, fmt.Sprintf("Upgrade given %s", cli.oneOrMore)), + Short: coalesce.String(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), Long: coalesce.String(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), Example: cli.upgradeHelp.example, DisableAutoGenTag: true, @@ -341,7 +341,7 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { } flags := cmd.Flags() - flags.BoolVarP(&all, "all", "a", false, fmt.Sprintf("Upgrade all the %s", cli.name)) + flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") return cmd @@ -405,8 +405,8 @@ func (cli cliItem) newInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: coalesce.String(cli.inspectHelp.use, "inspect [item]..."), - Short: coalesce.String(cli.inspectHelp.short, 
fmt.Sprintf("Inspect given %s", cli.oneOrMore)), - Long: coalesce.String(cli.inspectHelp.long, fmt.Sprintf("Inspect the state of one or more %s", cli.name)), + Short: coalesce.String(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), + Long: coalesce.String(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), Example: cli.inspectHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, @@ -454,8 +454,8 @@ func (cli cliItem) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: coalesce.String(cli.listHelp.use, "list [item... | -a]"), - Short: coalesce.String(cli.listHelp.short, fmt.Sprintf("List %s", cli.oneOrMore)), - Long: coalesce.String(cli.listHelp.long, fmt.Sprintf("List of installed/available/specified %s", cli.name)), + Short: coalesce.String(cli.listHelp.short, "List "+cli.oneOrMore), + Long: coalesce.String(cli.listHelp.long, "List of installed/available/specified "+cli.name), Example: cli.listHelp.example, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 622fe50a09d..a712960810a 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -98,14 +98,14 @@ func loadConfigFor(command string) (*csconfig.Config, string, error) { } // initialize is called before the subcommand is executed. 
-func (cli *cliRoot) initialize() { +func (cli *cliRoot) initialize() error { var err error log.SetLevel(cli.wantedLogLevel()) csConfig, mergedConfig, err = loadConfigFor(os.Args[1]) if err != nil { - log.Fatal(err) + return err } // recap of the enabled feature flags, because logging @@ -127,7 +127,7 @@ func (cli *cliRoot) initialize() { } if csConfig.Cscli.Output != "human" && csConfig.Cscli.Output != "json" && csConfig.Cscli.Output != "raw" { - log.Fatalf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) + return fmt.Errorf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) } log.SetFormatter(&log.TextFormatter{DisableTimestamp: true}) @@ -143,9 +143,11 @@ func (cli *cliRoot) initialize() { csConfig.Cscli.Color = cli.outputColor if cli.outputColor != "yes" && cli.outputColor != "no" && cli.outputColor != "auto" { - log.Fatalf("output color %s unknown", cli.outputColor) + return fmt.Errorf("output color '%s' not supported: must be one of yes, no, auto", cli.outputColor) } } + + return nil } // list of valid subcommands for the shell completion @@ -174,17 +176,17 @@ func (cli *cliRoot) colorize(cmd *cobra.Command) { cmd.SetOut(color.Output) } -func (cli *cliRoot) NewCommand() *cobra.Command { +func (cli *cliRoot) NewCommand() (*cobra.Command, error) { // set the formatter asap and worry about level later logFormatter := &log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true} log.SetFormatter(logFormatter) if err := fflag.RegisterAllFeatures(); err != nil { - log.Fatalf("failed to register features: %s", err) + return nil, fmt.Errorf("failed to register features: %w", err) } if err := csconfig.LoadFeatureFlagsEnv(log.StandardLogger()); err != nil { - log.Fatalf("failed to set feature flags from env: %s", err) + return nil, fmt.Errorf("failed to set feature flags from env: %w", err) } cmd := &cobra.Command{ @@ -217,9 +219,7 @@ It is meant to allow you to manage bans, 
parsers/scenarios/etc, api and generall pflags.BoolVar(&cli.logTrace, "trace", false, "Set logging to trace") pflags.StringVar(&cli.flagBranch, "branch", "", "Override hub branch on github") - if err := pflags.MarkHidden("branch"); err != nil { - log.Fatalf("failed to hide flag: %s", err) - } + _ = pflags.MarkHidden("branch") // Look for "-c /path/to/config.yaml" // This duplicates the logic in cobra, but we need to do it before @@ -233,11 +233,17 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall } if err := csconfig.LoadFeatureFlagsFile(ConfigFilePath, log.StandardLogger()); err != nil { - log.Fatal(err) + return nil, err } if len(os.Args) > 1 { - cobra.OnInitialize(cli.initialize) + cobra.OnInitialize( + func() { + if err := cli.initialize(); err != nil { + log.Fatal(err) + } + }, + ) } cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) @@ -272,11 +278,15 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) } - return cmd + return cmd, nil } func main() { - cmd := newCliRoot().NewCommand() + cmd, err := newCliRoot().NewCommand() + if err != nil { + log.Fatal(err) + } + if err := cmd.Execute(); err != nil { log.Fatal(err) } diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index 84dd6b941c9..d678bf0e306 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -250,12 +250,15 @@ func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args [ if err != nil { return nil, cobra.ShellCompDirectiveError } + var ret []string + for k := range ncfgs { if strings.Contains(k, toComplete) && !slices.Contains(args, k) { ret = append(ret, k) } } + return ret, cobra.ShellCompDirectiveNoFileComp } diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index 558409b2d4d..bea03c12848 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -142,7 +142,7 @@ func (cli 
*cliPapi) NewSyncCmd() *cobra.Command { apic.Shutdown() papi.Shutdown() t.Wait() - time.Sleep(5 * time.Second) //FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done + time.Sleep(5 * time.Second) // FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done return nil }, From 7313d491450974f8595db6afe209a2bcccddb70a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Jun 2024 09:47:25 +0200 Subject: [PATCH 156/581] enable linter: revive(bool-literal-in-expr) (#2983) --- .golangci.yml | 2 -- pkg/acquisition/modules/docker/docker_test.go | 4 ++-- pkg/dumps/parser_dump.go | 2 +- pkg/leakybucket/buckets_test.go | 2 +- pkg/leakybucket/overflows.go | 2 +- pkg/parser/node_test.go | 8 ++++---- pkg/parser/parsing_test.go | 4 ++-- 7 files changed, 11 insertions(+), 13 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 34393fce56e..61278185633 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -120,8 +120,6 @@ linters-settings: disabled: true - name: blank-imports disabled: true - - name: bool-literal-in-expr - disabled: true - name: cognitive-complexity disabled: true - name: comment-spacings diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index c2624229afd..bcf0966a2d1 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -228,7 +228,7 @@ container_name_regexp: } func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error) { - if readLogs == true { + if readLogs { return []dockerTypes.Container{}, nil } @@ -243,7 +243,7 @@ func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes } func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerTypes.ContainerLogsOptions) (io.ReadCloser, error) { - if 
readLogs == true { + if readLogs { return io.NopCloser(strings.NewReader("")), nil } diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index 7d4e09ecf1a..d43f3cdc1b9 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -235,7 +235,7 @@ func (t *tree) displayResults(opts DumpOpts) { case "update": detailsDisplay += fmt.Sprintf("\t%s\t\t%s %s evt.%s : %s -> %s\n", presep, sep, change.Type, strings.Join(change.Path, "."), change.From, yellow(change.To)) - if change.Path[0] == "Whitelisted" && change.To == true { + if change.Path[0] == "Whitelisted" && change.To == true { //nolint:revive whitelisted = true if whitelistReason == "" { diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 9e7205e8613..ad0d1a79879 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -88,7 +88,7 @@ func TestBucket(t *testing.T) { // we want to avoid the death of the tomb because all existing buckets have been destroyed. 
func watchTomb(tomb *tomb.Tomb) { for { - if tomb.Alive() == false { + if !tomb.Alive() { log.Warning("Tomb is dead") break } diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 8092ef35e77..3ee067177ef 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -341,7 +341,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { srcCopy := srcValue newApiAlert.Source = &srcCopy - if v, ok := leaky.BucketConfig.Labels["remediation"]; ok && v == true { + if v, ok := leaky.BucketConfig.Labels["remediation"]; ok && v == true { //nolint:revive newApiAlert.Remediation = true } diff --git a/pkg/parser/node_test.go b/pkg/parser/node_test.go index be12176bff9..76d35a9ffb0 100644 --- a/pkg/parser/node_test.go +++ b/pkg/parser/node_test.go @@ -49,18 +49,18 @@ func TestParserConfigs(t *testing.T) { } for idx := range CfgTests { err := CfgTests[idx].NodeCfg.compile(pctx, EnricherCtx{}) - if CfgTests[idx].Compiles == true && err != nil { + if CfgTests[idx].Compiles && err != nil { t.Fatalf("Compile: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } - if CfgTests[idx].Compiles == false && err == nil { + if !CfgTests[idx].Compiles && err == nil { t.Fatalf("Compile: (%d/%d) expected error", idx+1, len(CfgTests)) } err = CfgTests[idx].NodeCfg.validate(EnricherCtx{}) - if CfgTests[idx].Valid == true && err != nil { + if CfgTests[idx].Valid && err != nil { t.Fatalf("Valid: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } - if CfgTests[idx].Valid == false && err == nil { + if !CfgTests[idx].Valid && err == nil { t.Fatalf("Valid: (%d/%d) expected error", idx+1, len(CfgTests)) } } diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index d97dce8d20f..902a3cccbc7 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -130,7 +130,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing b.ResetTimer() } for n := 0; n < 
count; n++ { - if testFile(tests, *pctx, pnodes) != true { + if !testFile(tests, *pctx, pnodes) { return errors.New("test failed !") } } @@ -308,7 +308,7 @@ reCheck: for ridx, result := range results { for eidx, expected := range testSet.Results { explain, match := matchEvent(expected, result, debug) - if match == true { + if match { log.Infof("expected %d/%d matches result %d/%d", eidx, len(testSet.Results), ridx, len(results)) if len(explain) > 0 { log.Printf("-> %s", explain[len(explain)-1]) From fba4237fbd2fdbe4751b6880b95001af31c2ace0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Jun 2024 09:48:21 +0200 Subject: [PATCH 157/581] lint: revive/useless-break (#3050) --- cmd/crowdsec/parse.go | 5 +---- pkg/csconfig/simulation.go | 3 +-- pkg/csplugin/broker.go | 3 +-- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go index 53c9ee65d4f..39eedfc858e 100644 --- a/cmd/crowdsec/parse.go +++ b/cmd/crowdsec/parse.go @@ -11,12 +11,11 @@ import ( ) func runParse(input chan types.Event, output chan types.Event, parserCTX parser.UnixParserCtx, nodes []parser.Node) error { -LOOP: for { select { case <-parsersTomb.Dying(): log.Infof("Killing parser routines") - break LOOP + return nil case event := <-input: if !event.Process { continue @@ -55,6 +54,4 @@ LOOP: output <- parsed } } - - return nil } diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index bf121ef56f9..947b47e3c1e 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -26,8 +26,7 @@ func (s *SimulationConfig) IsSimulated(scenario string) bool { for _, excluded := range s.Exclusions { if excluded == scenario { - simulated = !simulated - break + return !simulated } } diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index b5c86f224ab..f6629b2609e 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -103,7 +103,6 @@ func (pb *PluginBroker) Kill() { func 
(pb *PluginBroker) Run(pluginTomb *tomb.Tomb) { //we get signaled via the channel when notifications need to be delivered to plugin (via the watcher) pb.watcher.Start(&tomb.Tomb{}) -loop: for { select { case profileAlert := <-pb.PluginChannel: @@ -137,7 +136,7 @@ loop: case <-pb.watcher.tomb.Dead(): log.Info("killing all plugins") pb.Kill() - break loop + return case pluginName := <-pb.watcher.PluginEvents: // this can be run in goroutine, but then locks will be needed pluginMutex.Lock() From 9101de95d8f88df8a3cc77cdedeb062fa167040e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Jun 2024 13:19:46 +0200 Subject: [PATCH 158/581] pkg/database: refactor & rename "soft delete" to "expire" (#3025) * pkg/database: replace parameter with explicit methods * merge methods with recusion * rename "soft delete" -> "expire" * lint --- pkg/apiserver/apic.go | 8 +- pkg/apiserver/controllers/v1/decisions.go | 4 +- pkg/apiserver/papi_cmd.go | 29 +++-- pkg/database/alerts.go | 12 +- pkg/database/decisions.go | 147 ++++++++++++++-------- 5 files changed, 124 insertions(+), 76 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index b6c560c0a5d..68dc94367e2 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -432,9 +432,9 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet filter["scopes"] = []string{*decision.Scope} } - dbCliRet, _, err := a.dbClient.SoftDeleteDecisionsWithFilter(filter) + dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(filter) if err != nil { - return 0, fmt.Errorf("deleting decisions error: %w", err) + return 0, fmt.Errorf("expiring decisions error: %w", err) } dbCliDel, err := strconv.Atoi(dbCliRet) @@ -464,9 +464,9 @@ func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisi filter["scopes"] = []string{*scope} } - dbCliRet, _, err := a.dbClient.SoftDeleteDecisionsWithFilter(filter) + dbCliRet, _, err := 
a.dbClient.ExpireDecisionsWithFilter(filter) if err != nil { - return 0, fmt.Errorf("deleting decisions error: %w", err) + return 0, fmt.Errorf("expiring decisions error: %w", err) } dbCliDel, err := strconv.Atoi(dbCliRet) diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 5cc7628458c..543c832095a 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -91,7 +91,7 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { return } - nbDeleted, deletedFromDB, err := c.DBClient.SoftDeleteDecisionByID(decisionID) + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionByID(decisionID) if err != nil { c.HandleDBErrors(gctx, err) @@ -113,7 +113,7 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { } func (c *Controller) DeleteDecisions(gctx *gin.Context) { - nbDeleted, deletedFromDB, err := c.DBClient.SoftDeleteDecisionsWithFilter(gctx.Request.URL.Query()) + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionsWithFilter(gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index fb76223b9a0..a1137161698 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -63,10 +63,10 @@ func DecisionCmd(message *Message, p *Papi, sync bool) error { filter := make(map[string][]string) filter["uuid"] = UUIDs - _, deletedDecisions, err := p.DBClient.SoftDeleteDecisionsWithFilter(filter) + _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(filter) if err != nil { - return fmt.Errorf("unable to delete decisions %+v: %w", UUIDs, err) + return fmt.Errorf("unable to expire decisions %+v: %w", UUIDs, err) } decisions := make([]*models.Decision, 0) @@ -130,12 +130,13 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { alert.Scenario = ptr.Of("") alert.Source = &models.Source{} - //if we're setting Source.Scope to types.ConsoleOrigin, it 
messes up the alert's value + // if we're setting Source.Scope to types.ConsoleOrigin, it messes up the alert's value if len(alert.Decisions) >= 1 { alert.Source.Scope = alert.Decisions[0].Scope alert.Source.Value = alert.Decisions[0].Value } else { log.Warningf("No decision found in alert for Polling API (%s : %s)", message.Header.Source.User, message.Header.Message) + alert.Source.Scope = ptr.Of(types.ConsoleOrigin) alert.Source.Value = &message.Header.Source.User } @@ -150,7 +151,7 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { log.Infof("Adding decision for '%s' with UUID: %s", *decision.Value, decision.UUID) } - //use a different method : alert and/or decision might already be partially present in the database + // use a different method: alert and/or decision might already be partially present in the database _, err = p.DBClient.CreateOrUpdateAlert("", alert) if err != nil { log.Errorf("Failed to create alerts in DB: %s", err) @@ -172,31 +173,33 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { } switch message.Header.OperationCmd { - case "blocklist_unsubscribe": data, err := json.Marshal(message.Data) if err != nil { return err } + unsubscribeMsg := listUnsubscribe{} if err := json.Unmarshal(data, &unsubscribeMsg); err != nil { - return fmt.Errorf("message for '%s' contains bad data format: %s", message.Header.OperationType, err) + return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) } + if unsubscribeMsg.Name == "" { return fmt.Errorf("message for '%s' contains bad data format: missing blocklist name", message.Header.OperationType) } + p.Logger.Infof("Received blocklist_unsubscribe command from PAPI, unsubscribing from blocklist %s", unsubscribeMsg.Name) filter := make(map[string][]string) filter["origin"] = []string{types.ListOrigin} filter["scenario"] = []string{unsubscribeMsg.Name} - _, deletedDecisions, err := p.DBClient.SoftDeleteDecisionsWithFilter(filter) + _, 
deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(filter) if err != nil { - return fmt.Errorf("unable to delete decisions for list %s : %w", unsubscribeMsg.Name, err) + return fmt.Errorf("unable to expire decisions for list %s : %w", unsubscribeMsg.Name, err) } - p.Logger.Infof("deleted %d decisions for list %s", len(deletedDecisions), unsubscribeMsg.Name) + p.Logger.Infof("deleted %d decisions for list %s", len(deletedDecisions), unsubscribeMsg.Name) case "reauth": p.Logger.Infof("Received reauth command from PAPI, resetting token") p.apiClient.GetClient().Transport.(*apiclient.JWTTransport).ResetToken() @@ -205,19 +208,23 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { if err != nil { return err } + forcePullMsg := forcePull{} + if err := json.Unmarshal(data, &forcePullMsg); err != nil { - return fmt.Errorf("message for '%s' contains bad data format: %s", message.Header.OperationType, err) + return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) } if forcePullMsg.Blocklist == nil { p.Logger.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") + err = p.apic.PullTop(true) if err != nil { - return fmt.Errorf("failed to force pull operation: %s", err) + return fmt.Errorf("failed to force pull operation: %w", err) } } else { p.Logger.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) + err = p.apic.PullBlocklist(&modelscapi.BlocklistLink{ Name: &forcePullMsg.Blocklist.Name, URL: &forcePullMsg.Blocklist.Url, diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 602846eb31a..37e9397709e 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -27,10 +27,10 @@ import ( ) const ( - paginationSize = 100 // used to queryAlert to avoid 'too many SQL variable' - defaultLimit = 100 // default limit of element to returns when query alerts - bulkSize = 50 // bulk size when create alerts - 
maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered + paginationSize = 100 // used to queryAlert to avoid 'too many SQL variable' + defaultLimit = 100 // default limit of element to returns when query alerts + alertCreateBulkSize = 50 // bulk size when create alerts + maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered ) func formatAlertCN(source models.Source) string { @@ -796,7 +796,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str c.Log.Debugf("writing %d items", len(alertList)) - alertChunks := slicetools.Chunks(alertList, bulkSize) + alertChunks := slicetools.Chunks(alertList, alertCreateBulkSize) alertIDs := []string{} for _, alertChunk := range alertChunks { @@ -1117,7 +1117,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, if limit == 0 { limit, err = alerts.Count(c.CTX) if err != nil { - return nil, fmt.Errorf("unable to count nb alerts: %s", err) + return nil, fmt.Errorf("unable to count nb alerts: %w", err) } } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 941fc5c7791..294515d603e 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -17,6 +17,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +const decisionDeleteBulkSize = 256 // scientifically proven to be the best value for bulk delete + type DecisionsByScenario struct { Scenario string Count int @@ -109,23 +111,25 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] query = query.Where(decision.IDGT(id)) } } + query, err = applyStartIpEndIpFilter(query, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return nil, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) } + return query, nil } + func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { query := 
c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) - //Allow a bouncer to ask for non-deduplicated results + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) } query, err := BuildDecisionRequestWithFilter(query, filters) - if err != nil { c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") @@ -138,6 +142,7 @@ func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*e c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") } + return data, nil } @@ -145,7 +150,7 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), ) - //Allow a bouncer to ask for non-deduplicated results + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) } @@ -158,11 +163,13 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get expired decisions with filters") } + data, err := query.All(c.CTX) if err != nil { c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions") } + return data, nil } @@ -170,8 +177,8 @@ func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) - query, err := BuildDecisionRequestWithFilter(query, make(map[string][]string)) + query, err := BuildDecisionRequestWithFilter(query, make(map[string][]string)) if err != nil { 
c.Log.Warningf("QueryDecisionCountByScenario : %s", err) return nil, errors.Wrap(QueryFail, "count all decisions with filters") @@ -180,7 +187,6 @@ func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) var r []*DecisionsByScenario err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(c.CTX, &r) - if err != nil { c.Log.Warningf("QueryDecisionCountByScenario : %s", err) return nil, errors.Wrap(QueryFail, "count all decisions with filters") @@ -253,10 +259,11 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since time.Time, filters decision.UntilLT(time.Now().UTC()), decision.UntilGT(since), ) - //Allow a bouncer to ask for non-deduplicated results + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) } + query, err := BuildDecisionRequestWithFilter(query, filters) if err != nil { c.Log.Warningf("QueryExpiredDecisionsSinceWithFilters : %s", err) @@ -309,7 +316,7 @@ func (c *Client) DeleteDecisionById(decisionID int) ([]*ent.Decision, error) { return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) } - count, err := c.BulkDeleteDecisions(toDelete, false) + count, err := c.DeleteDecisions(toDelete) c.Log.Debugf("deleted %d decisions", count) return toDelete, err @@ -424,7 +431,7 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } - count, err := c.BulkDeleteDecisions(toDelete, false) + count, err := c.DeleteDecisions(toDelete) if err != nil { c.Log.Warningf("While deleting decisions : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") @@ -433,8 +440,8 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, return strconv.Itoa(count), toDelete, nil } -// 
SoftDeleteDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items -func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { +// ExpireDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items +func (c *Client) ExpireDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int @@ -545,64 +552,98 @@ func (c *Client) SoftDeleteDecisionsWithFilter(filter map[string][]string) (stri DecisionsToDelete, err := decisions.All(c.CTX) if err != nil { - c.Log.Warningf("SoftDeleteDecisionsWithFilter : %s", err) - return "0", nil, errors.Wrap(DeleteFail, "soft delete decisions with provided filter") + c.Log.Warningf("ExpireDecisionsWithFilter : %s", err) + return "0", nil, errors.Wrap(DeleteFail, "expire decisions with provided filter") } - count, err := c.BulkDeleteDecisions(DecisionsToDelete, true) + count, err := c.ExpireDecisions(DecisionsToDelete) if err != nil { - return "0", nil, errors.Wrapf(DeleteFail, "soft delete decisions with provided filter : %s", err) + return "0", nil, errors.Wrapf(DeleteFail, "expire decisions with provided filter : %s", err) } return strconv.Itoa(count), DecisionsToDelete, err } -// BulkDeleteDecisions sets the expiration of a bulk of decisions to now() or hard deletes them. 
-// We are doing it this way so we can return impacted decisions for sync with CAPI/PAPI -func (c *Client) BulkDeleteDecisions(decisionsToDelete []*ent.Decision, softDelete bool) (int, error) { - const bulkSize = 256 // scientifically proven to be the best value for bulk delete +func decisionIDs(decisions []*ent.Decision) []int { + ids := make([]int, len(decisions)) + for i, d := range decisions { + ids[i] = d.ID + } - var ( - nbUpdates int - err error - totalUpdates = 0 - ) + return ids +} - idsToDelete := make([]int, len(decisionsToDelete)) - for i, decision := range decisionsToDelete { - idsToDelete[i] = decision.ID +// ExpireDecisions sets the expiration of a list of decisions to now() +// It returns the number of impacted decisions for the CAPI/PAPI +func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { + if len(decisions) <= decisionDeleteBulkSize { + ids := decisionIDs(decisions) + + rows, err := c.Ent.Decision.Update().Where( + decision.IDIn(ids...), + ).SetUntil(time.Now().UTC()).Save(c.CTX) + if err != nil { + return 0, fmt.Errorf("expire decisions with provided filter: %w", err) + } + + return rows, nil } - for _, chunk := range slicetools.Chunks(idsToDelete, bulkSize) { - if softDelete { - nbUpdates, err = c.Ent.Decision.Update().Where( - decision.IDIn(chunk...), - ).SetUntil(time.Now().UTC()).Save(c.CTX) - if err != nil { - return totalUpdates, fmt.Errorf("soft delete decisions with provided filter: %w", err) - } - } else { - nbUpdates, err = c.Ent.Decision.Delete().Where( - decision.IDIn(chunk...), - ).Exec(c.CTX) - if err != nil { - return totalUpdates, fmt.Errorf("hard delete decisions with provided filter: %w", err) - } + // big batch, let's split it and recurse + + total := 0 + + for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { + rows, err := c.ExpireDecisions(chunk) + if err != nil { + return total, err } - totalUpdates += nbUpdates + total += rows } - return totalUpdates, nil + return total, nil } 
-// SoftDeleteDecisionByID set the expiration of a decision to now() -func (c *Client) SoftDeleteDecisionByID(decisionID int) (int, []*ent.Decision, error) { +// DeleteDecisions removes a list of decisions from the database +// It returns the number of impacted decisions for the CAPI/PAPI +func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { + if len(decisions) < decisionDeleteBulkSize { + ids := decisionIDs(decisions) + + rows, err := c.Ent.Decision.Delete().Where( + decision.IDIn(ids...), + ).Exec(c.CTX) + if err != nil { + return 0, fmt.Errorf("hard delete decisions with provided filter: %w", err) + } + + return rows, nil + } + + // big batch, let's split it and recurse + + tot := 0 + + for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { + rows, err := c.DeleteDecisions(chunk) + if err != nil { + return tot, err + } + + tot += rows + } + + return tot, nil +} + +// ExpireDecision set the expiration of a decision to now() +func (c *Client) ExpireDecisionByID(decisionID int) (int, []*ent.Decision, error) { toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) // XXX: do we want 500 or 404 here? 
if err != nil || len(toUpdate) == 0 { - c.Log.Warningf("SoftDeleteDecisionByID : %v (nb soft deleted: %d)", err, len(toUpdate)) + c.Log.Warningf("ExpireDecisionByID : %v (nb expired: %d)", err, len(toUpdate)) return 0, nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) } @@ -610,7 +651,8 @@ func (c *Client) SoftDeleteDecisionByID(decisionID int) (int, []*ent.Decision, e return 0, nil, ItemNotFound } - count, err := c.BulkDeleteDecisions(toUpdate, true) + count, err := c.ExpireDecisions(toUpdate) + return count, toUpdate, err } @@ -618,8 +660,8 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) } @@ -644,10 +686,10 @@ func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { - return 0, fmt.Errorf("unable to convert '%s' to int: %s", decisionValue, err) + return 0, fmt.Errorf("unable to convert '%s' to int: %w", decisionValue, err) } contains := true @@ -672,10 +714,10 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { - return 0, fmt.Errorf("unable to convert '%s' to int: %s", 
decisionValue, err) + return 0, fmt.Errorf("unable to convert '%s' to int: %w", decisionValue, err) } contains := true @@ -704,7 +746,6 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Time) (int, error) { ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) - if err != nil { return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) } From d614ecbdcf8dbc88dfbcf505fde0f7de030474f7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Jun 2024 15:09:19 +0200 Subject: [PATCH 159/581] CI/bats: test remediation api via socket (#3052) --- test/bats/97_ipv4_single.bats | 19 ++++--------- test/bats/97_ipv6_single.bats | 23 ++++++--------- test/bats/98_ipv4_range.bats | 23 ++++++--------- test/bats/98_ipv6_range.bats | 31 ++++++++------------- test/bats/99_lapi-stream-mode-scenario.bats | 29 ++++++++----------- test/bats/99_lapi-stream-mode-scopes.bats | 15 +++------- test/bats/99_lapi-stream-mode.bats | 15 +++------- test/lib/setup_file.sh | 14 ++++++++++ 8 files changed, 67 insertions(+), 102 deletions(-) diff --git a/test/bats/97_ipv4_single.bats b/test/bats/97_ipv4_single.bats index 1ada1c4646b..f02d9ebd503 100644 --- a/test/bats/97_ipv4_single.bats +++ b/test/bats/97_ipv4_single.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -22,11 +20,6 @@ setup() { if is_db_mysql; then sleep 0.3; fi } -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - #---------- @test "cli - first decisions list: must be empty" { @@ -37,7 +30,7 @@ api() { } @test "API - first decisions list: must be empty" { - rune -0 api '/v1/decisions' + rune -0 lapi-get '/v1/decisions' 
assert_output 'null' } @@ -53,7 +46,7 @@ api() { } @test "API - all decisions" { - rune -0 api '/v1/decisions' + rune -0 lapi-get '/v1/decisions' rune -0 jq -c '[ . | length, .[0].value ]' <(output) assert_output '[1,"1.2.3.4"]' } @@ -67,7 +60,7 @@ api() { } @test "API - decision for 1.2.3.4" { - rune -0 api '/v1/decisions?ip=1.2.3.4' + rune -0 lapi-get '/v1/decisions?ip=1.2.3.4' rune -0 jq -r '.[0].value' <(output) assert_output '1.2.3.4' } @@ -78,7 +71,7 @@ api() { } @test "API - decision for 1.2.3.5" { - rune -0 api '/v1/decisions?ip=1.2.3.5' + rune -0 lapi-get '/v1/decisions?ip=1.2.3.5' assert_output 'null' } @@ -90,7 +83,7 @@ api() { } @test "API - decision for 1.2.3.0/24" { - rune -0 api '/v1/decisions?range=1.2.3.0/24' + rune -0 lapi-get '/v1/decisions?range=1.2.3.0/24' assert_output 'null' } @@ -101,7 +94,7 @@ api() { } @test "API - decisions where IP in 1.2.3.0/24" { - rune -0 api '/v1/decisions?range=1.2.3.0/24&contains=false' + rune -0 lapi-get '/v1/decisions?range=1.2.3.0/24&contains=false' rune -0 jq -r '.[0].value' <(output) assert_output '1.2.3.4' } diff --git a/test/bats/97_ipv6_single.bats b/test/bats/97_ipv6_single.bats index 982976d70ed..3cb1cedd0c2 100644 --- a/test/bats/97_ipv6_single.bats +++ b/test/bats/97_ipv6_single.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -22,11 +20,6 @@ setup() { if is_db_mysql; then sleep 0.5; fi } -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - #---------- @test "cli - first decisions list: must be empty" { @@ -48,7 +41,7 @@ api() { } @test "API - all decisions" { - rune -0 api "/v1/decisions" + rune -0 lapi-get "/v1/decisions" rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -60,7 +53,7 @@ api() { } @test "API - decisions for ip 
1111:2222:3333:4444:5555:6666:7777:888" { - rune -0 api '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' + rune -0 lapi-get '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -71,7 +64,7 @@ api() { } @test "API - decisions for ip 1211:2222:3333:4444:5555:6666:7777:888" { - rune -0 api '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' + rune -0 lapi-get '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' assert_output 'null' } @@ -81,7 +74,7 @@ api() { } @test "API - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8887" { - rune -0 api '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' + rune -0 lapi-get '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' assert_output 'null' } @@ -91,7 +84,7 @@ api() { } @test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' + rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' assert_output 'null' } @@ -102,7 +95,7 @@ api() { } @test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&&contains=false' + rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&&contains=false' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -113,7 +106,7 @@ api() { } @test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/64" { - rune -0 api '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' + rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' assert_output 'null' } @@ -124,7 +117,7 @@ api() { } @test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/64" { - rune -0 api 
'/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&&contains=false' + rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&&contains=false' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } diff --git a/test/bats/98_ipv4_range.bats b/test/bats/98_ipv4_range.bats index b0f6f482944..a58f144b885 100644 --- a/test/bats/98_ipv4_range.bats +++ b/test/bats/98_ipv4_range.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -22,11 +20,6 @@ setup() { if is_db_mysql; then sleep 0.3; fi } -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - #---------- @test "cli - first decisions list: must be empty" { @@ -48,7 +41,7 @@ api() { } @test "API - all decisions" { - rune -0 api '/v1/decisions' + rune -0 lapi-get '/v1/decisions' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -62,7 +55,7 @@ api() { } @test "API - decisions for ip 4.4.4." { - rune -0 api '/v1/decisions?ip=4.4.4.3' + rune -0 lapi-get '/v1/decisions?ip=4.4.4.3' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -73,7 +66,7 @@ api() { } @test "API - decisions for ip contained in 4.4.4." { - rune -0 api '/v1/decisions?ip=4.4.4.4&contains=false' + rune -0 lapi-get '/v1/decisions?ip=4.4.4.4&contains=false' assert_output 'null' } @@ -83,7 +76,7 @@ api() { } @test "API - decisions for ip 5.4.4." 
{ - rune -0 api '/v1/decisions?ip=5.4.4.3' + rune -0 lapi-get '/v1/decisions?ip=5.4.4.3' assert_output 'null' } @@ -93,7 +86,7 @@ api() { } @test "API - decisions for range 4.4.0.0/1" { - rune -0 api '/v1/decisions?range=4.4.0.0/16' + rune -0 lapi-get '/v1/decisions?range=4.4.0.0/16' assert_output 'null' } @@ -104,7 +97,7 @@ api() { } @test "API - decisions for ip/range in 4.4.0.0/1" { - rune -0 api '/v1/decisions?range=4.4.0.0/16&contains=false' + rune -0 lapi-get '/v1/decisions?range=4.4.0.0/16&contains=false' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -118,7 +111,7 @@ api() { } @test "API - decisions for range 4.4.4.2/2" { - rune -0 api '/v1/decisions?range=4.4.4.2/28' + rune -0 lapi-get '/v1/decisions?range=4.4.4.2/28' rune -0 jq -r '.[].value' <(output) assert_output '4.4.4.0/24' } @@ -129,6 +122,6 @@ api() { } @test "API - decisions for range 4.4.3.2/2" { - rune -0 api '/v1/decisions?range=4.4.3.2/28' + rune -0 lapi-get '/v1/decisions?range=4.4.3.2/28' assert_output 'null' } diff --git a/test/bats/98_ipv6_range.bats b/test/bats/98_ipv6_range.bats index d3c347583da..065f32b74ba 100644 --- a/test/bats/98_ipv6_range.bats +++ b/test/bats/98_ipv6_range.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -22,11 +20,6 @@ setup() { if is_db_mysql; then sleep 0.3; fi } -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - #---------- @test "cli - first decisions list: must be empty" { @@ -48,7 +41,7 @@ api() { } @test "API - all decisions (2)" { - rune -0 api '/v1/decisions' + rune -0 lapi-get '/v1/decisions' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -62,7 +55,7 @@ api() { } @test "API - decisions for ip aaaa:2222:3333:4444:5555:6666:7777:8888" { - rune -0 api 
'/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' + rune -0 lapi-get '/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -73,7 +66,7 @@ api() { } @test "API - decisions for ip aaaa:2222:3333:4445:5555:6666:7777:8888" { - rune -0 api '/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' + rune -0 lapi-get '/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' assert_output 'null' } @@ -83,7 +76,7 @@ api() { } @test "API - decisions for ip aaa1:2222:3333:4444:5555:6666:7777:8887" { - rune -0 api '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' + rune -0 lapi-get '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' assert_output 'null' } @@ -96,7 +89,7 @@ api() { } @test "API - decisions for range aaaa:2222:3333:4444:5555::/80" { - rune -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -108,7 +101,7 @@ api() { } @test "API - decisions for range aaaa:2222:3333:4441:5555::/80" { - rune -0 api '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' assert_output 'null' } @@ -118,7 +111,7 @@ api() { } @test "API - decisions for range aaa1:2222:3333:4444:5555::/80" { - rune -0 api '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' + rune -0 lapi-get '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' assert_output 'null' } @@ -130,7 +123,7 @@ api() { } @test "API - decisions for range aaaa:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' assert_output 'null' } @@ -141,7 +134,7 @@ api() { } @test "API - decisions for ip/range in aaaa:2222:3333:4444:5555:6666:7777:8888/48" { - 
rune -0 api '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -152,7 +145,7 @@ api() { } @test "API - decisions for ip/range in aaaa:2222:3333:4445:5555:6666:7777:8888/48" { - rune -0 api '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' + rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' assert_output 'null' } @@ -170,7 +163,7 @@ api() { } @test "API - decisions for ip in bbbb:db8:0000:0000:0000:6fff:ffff:ffff" { - rune -0 api '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' + rune -0 lapi-get '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' rune -0 jq -r '.[].value' <(output) assert_output 'bbbb:db8::/81' } @@ -181,7 +174,7 @@ api() { } @test "API - decisions for ip in bbbb:db8:0000:0000:0000:8fff:ffff:ffff" { - rune -0 api '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' + rune -0 lapi-get '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' assert_output 'null' } diff --git a/test/bats/99_lapi-stream-mode-scenario.bats b/test/bats/99_lapi-stream-mode-scenario.bats index 9b4d562f3c9..1cd44c1ae80 100644 --- a/test/bats/99_lapi-stream-mode-scenario.bats +++ b/test/bats/99_lapi-stream-mode-scenario.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -24,11 +22,6 @@ setup() { #---------- -api() { - URI="$1" - curl -s -H "X-Api-Key:${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - output_new_decisions() { jq -c '.new | map(select(.origin!="CAPI")) | .[] | del(.id) | (.. | .duration?) 
|= capture("(?[[:digit:]]+h[[:digit:]]+m)").d' <(output) | sort } @@ -62,7 +55,7 @@ output_new_decisions() { } @test "test startup" { - rune -0 api "/v1/decisions/stream?startup=true" + rune -0 lapi-get "/v1/decisions/stream?startup=true" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -71,7 +64,7 @@ output_new_decisions() { } @test "test startup with scenarios containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -80,7 +73,7 @@ output_new_decisions() { } @test "test startup with multiple scenarios containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -89,12 +82,12 @@ output_new_decisions() { } @test "test startup with unknown scenarios containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=unknown" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=unknown" assert_output '{"deleted":null,"new":null}' } @test "test startup with scenarios containing and not containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT 
{"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -103,7 +96,7 @@ output_new_decisions() { } @test "test startup with scenarios containing and not containing 2" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -111,7 +104,7 @@ output_new_decisions() { } @test "test startup with scenarios not containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -120,7 +113,7 @@ output_new_decisions() { } @test "test startup with multiple scenarios not containing" { - rune -0 api "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -128,7 +121,7 @@ output_new_decisions() { } @test "test startup with origins parameter" { - rune -0 api "/v1/decisions/stream?startup=true&origins=another_origin" + rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=another_origin" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"1h59m","origin":"another_origin","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -137,7 +130,7 @@ output_new_decisions() { } 
@test "test startup with multiple origins parameter" { - rune -0 api "/v1/decisions/stream?startup=true&origins=another_origin,test" + rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=another_origin,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -146,7 +139,7 @@ output_new_decisions() { } @test "test startup with unknown origins" { - rune -0 api "/v1/decisions/stream?startup=true&origins=unknown" + rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=unknown" assert_output '{"deleted":null,"new":null}' } diff --git a/test/bats/99_lapi-stream-mode-scopes.bats b/test/bats/99_lapi-stream-mode-scopes.bats index a1d01c489e6..a9ed494e69c 100644 --- a/test/bats/99_lapi-stream-mode-scopes.bats +++ b/test/bats/99_lapi-stream-mode-scopes.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -23,11 +21,6 @@ setup() { #---------- -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - @test "adding decisions for multiple scopes" { rune -0 cscli decisions add -i '1.2.3.6' assert_stderr --partial 'Decision successfully added' @@ -36,28 +29,28 @@ api() { } @test "stream start (implicit ip scope)" { - rune -0 api "/v1/decisions/stream?startup=true" + rune -0 lapi-get "/v1/decisions/stream?startup=true" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' refute_output --partial 'toto' } @test "stream start (explicit ip scope)" { - rune -0 api "/v1/decisions/stream?startup=true&scopes=ip" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=ip" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' refute_output --partial 'toto' } @test "stream start (user scope)" { - rune -0 api 
"/v1/decisions/stream?startup=true&scopes=user" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=user" rune -0 jq -r '.new' <(output) refute_output --partial '1.2.3.6' assert_output --partial 'toto' } @test "stream start (user+ip scope)" { - rune -0 api "/v1/decisions/stream?startup=true&scopes=user,ip" + rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=user,ip" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' assert_output --partial 'toto' diff --git a/test/bats/99_lapi-stream-mode.bats b/test/bats/99_lapi-stream-mode.bats index 08ddde42c5f..c683a6150f4 100644 --- a/test/bats/99_lapi-stream-mode.bats +++ b/test/bats/99_lapi-stream-mode.bats @@ -9,8 +9,6 @@ setup_file() { ./instance-crowdsec start API_KEY=$(cscli bouncers add testbouncer -o raw) export API_KEY - CROWDSEC_API_URL="http://localhost:8080" - export CROWDSEC_API_URL } teardown_file() { @@ -23,11 +21,6 @@ setup() { #---------- -api() { - URI="$1" - curl -s -H "X-Api-Key: ${API_KEY}" "${CROWDSEC_API_URL}${URI}" -} - @test "adding decisions for multiple ips" { rune -0 cscli decisions add -i '1111:2222:3333:4444:5555:6666:7777:8888' assert_stderr --partial 'Decision successfully added' @@ -38,7 +31,7 @@ api() { } @test "stream start" { - rune -0 api "/v1/decisions/stream?startup=true" + rune -0 lapi-get "/v1/decisions/stream?startup=true" if is_db_mysql; then sleep 3; fi rune -0 jq -r '.new' <(output) assert_output --partial '1111:2222:3333:4444:5555:6666:7777:8888' @@ -49,7 +42,7 @@ api() { @test "stream cont (add)" { rune -0 cscli decisions add -i '1.2.3.5' if is_db_mysql; then sleep 3; fi - rune -0 api "/v1/decisions/stream" + rune -0 lapi-get "/v1/decisions/stream" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.5' } @@ -57,13 +50,13 @@ api() { @test "stream cont (del)" { rune -0 cscli decisions delete -i '1.2.3.4' if is_db_mysql; then sleep 3; fi - rune -0 api "/v1/decisions/stream" + rune -0 lapi-get "/v1/decisions/stream" rune -0 jq -r '.deleted' 
<(output) assert_output --partial '1.2.3.4' } @test "stream restart" { - rune -0 api "/v1/decisions/stream?startup=true" + rune -0 lapi-get "/v1/decisions/stream?startup=true" api_out=${output} rune -0 jq -r '.deleted' <(output) assert_output --partial '1.2.3.4' diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 1aca32fa6d0..3e6db0f12ff 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -276,3 +276,17 @@ rune() { run --separate-stderr "$@" } export -f rune + +# call the lapi through unix socket with an API_KEY (authenticates as a bouncer) +lapi-get() { + [[ -z "$1" ]] && { fail "lapi-get: missing path"; } + [[ -z "$API_KEY" ]] && { fail "lapi-get: missing API_KEY"; } + local socket + socket=$(config_get '.api.server.listen_socket') + [[ -z "$socket" ]] && { fail "lapi-get: missing .api.server.listen_socket"; } + + # curl needs a fake hostname when using a unix socket + curl -s -f -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$1" +} +export -f lapi-get + From f0f27ee5cbb13fa192e103238f818aade84640d7 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Tue, 4 Jun 2024 22:39:13 +0200 Subject: [PATCH 160/581] add decently new version of python for centos 7 and 8 for testing (#3053) * add decently new version of python for centos 7 and 8 * update ansible requirements --- test/ansible/provision_dependencies.yml | 47 +++++++++++++++++++++++++ test/ansible/requirements.yml | 2 +- test/ansible/vars/python.yml | 1 + 3 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 test/ansible/vars/python.yml diff --git a/test/ansible/provision_dependencies.yml b/test/ansible/provision_dependencies.yml index bcfe8fccafb..144adf8ca36 100644 --- a/test/ansible/provision_dependencies.yml +++ b/test/ansible/provision_dependencies.yml @@ -1,6 +1,40 @@ # vim: set ft=yaml.ansible: --- +- name: "Fix EOL'd centos Stream 8" + hosts: all + tasks: + - name: "update repositories file" + ansible.builtin.find: + paths: /etc/yum.repos.d + 
patterns: "*.repo" + register: "repo_files" + when: + - ansible_facts.distribution == "CentOS" + - ansible_facts.distribution_major_version == '8' + - name: Replace old text with new text + become: true + ansible.builtin.replace: + path: "{{ item.path }}" + regexp: 'mirrorlist' + replace: '#mirrorlist' + loop: "{{ repo_files.files }}" + when: + - ansible_facts.distribution == "CentOS" + - ansible_facts.distribution_major_version == '8' + - repo_files.matched > 0 + - name: Replace old text with new text + become: true + ansible.builtin.replace: + path: "{{ item.path }}" + regexp: '#baseurl=http://mirror.centos.org' + replace: 'baseurl=https://vault.centos.org' + loop: "{{ repo_files.files }}" + when: + - ansible_facts.distribution == "CentOS" + - ansible_facts.distribution_major_version == '8' + - repo_files.matched > 0 + - name: "Install required packages" hosts: all vars_files: @@ -17,6 +51,19 @@ - crowdsecurity.testing.re2 - crowdsecurity.testing.bats_requirements +- name: "Install recent python" + hosts: all + vars_files: + - vars/python.yml + tasks: + - name: role "crowdsecurity.testing.python3" + ansible.builtin.include_role: + name: crowdsecurity.testing.python3 + when: + - ansible_facts.distribution in ['CentOS', 'OracleLinux'] + - ansible_facts.distribution_major_version == '8' or ansible_facts.distribution_major_version == '7' + + - name: "Install Postgres" hosts: all become: true diff --git a/test/ansible/requirements.yml b/test/ansible/requirements.yml index a780e827f85..d5a9b80f659 100644 --- a/test/ansible/requirements.yml +++ b/test/ansible/requirements.yml @@ -14,7 +14,7 @@ collections: - name: ansible.posix - name: https://github.com/crowdsecurity/ansible-collection-crowdsecurity.testing.git type: git - version: v0.0.5 + version: v0.0.7 # - name: crowdsecurity.testing # source: ../../../crowdsecurity.testing diff --git a/test/ansible/vars/python.yml b/test/ansible/vars/python.yml new file mode 100644 index 00000000000..0cafdcc3d4c --- /dev/null +++ 
b/test/ansible/vars/python.yml @@ -0,0 +1 @@ +python_version: "3.12.3" From 2865b69855bab38f45912c84abf87babfa621095 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Wed, 5 Jun 2024 08:26:32 +0200 Subject: [PATCH 161/581] fix tests in 01_crowdsec.bats for packages testing (#3054) --- test/bats/01_crowdsec.bats | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 7a449ebd047..83072b0f159 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -152,7 +152,7 @@ teardown() { config_set '.crowdsec_service.acquisition_path=""' ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') - rm -f "$ACQUIS_DIR" + rm -rf "$ACQUIS_DIR" config_set '.common.log_media="stdout"' rune -1 wait-for "$CROWDSEC" @@ -167,7 +167,7 @@ teardown() { config_set '.crowdsec_service.acquisition_path=""' ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') - rm -f "$ACQUIS_DIR" + rm -rf "$ACQUIS_DIR" config_set '.crowdsec_service.acquisition_dir=""' config_set '.common.log_media="stdout"' From 3dd17b908153192a0c3cc4d825a6cf06cce7670f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Jun 2024 11:04:54 +0200 Subject: [PATCH 162/581] tests: log.Fatal -> return err (#3056) * tests: log.Fatal -> return err * lint --- pkg/acquisition/modules/kafka/kafka_test.go | 22 +++++++---- pkg/cwhub/cwhub_test.go | 3 +- pkg/exprhelpers/exprlib_test.go | 41 ++++++++++----------- pkg/exprhelpers/jsonextract_test.go | 18 ++++----- pkg/exprhelpers/xml_test.go | 11 ++++-- pkg/leakybucket/buckets_test.go | 32 +++++++++++----- pkg/parser/parsing_test.go | 13 +++---- 7 files changed, 80 insertions(+), 60 deletions(-) diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 6eda37a377e..54715a81251 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -71,9 +71,8 @@ 
group_id: crowdsec`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "kafka", - }) + subLogger := log.WithField("type", "kafka") + for _, test := range tests { k := KafkaSource{} err := k.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -82,7 +81,6 @@ group_id: crowdsec`, } func writeToKafka(w *kafka.Writer, logs []string) { - for idx, log := range logs { err := w.WriteMessages(context.Background(), kafka.Message{ Key: []byte(strconv.Itoa(idx)), @@ -106,7 +104,9 @@ func createTopic(topic string, broker string) { if err != nil { panic(err) } + var controllerConn *kafka.Conn + controllerConn, err = kafka.Dial("tcp", net.JoinHostPort(controller.Host, strconv.Itoa(controller.Port))) if err != nil { panic(err) @@ -131,6 +131,7 @@ func TestStreamingAcquisition(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { name string logs []string @@ -159,13 +160,14 @@ func TestStreamingAcquisition(t *testing.T) { Topic: "crowdsecplaintext", }) if w == nil { - log.Fatalf("Unable to setup a kafka producer") + t.Fatal("Unable to setup a kafka producer") } for _, ts := range tests { ts := ts t.Run(ts.name, func(t *testing.T) { k := KafkaSource{} + err := k.Configure([]byte(` source: kafka brokers: @@ -174,12 +176,14 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) if err != nil { t.Fatalf("could not configure kafka source : %s", err) } + tomb := tomb.Tomb{} out := make(chan types.Event) err = k.StreamingAcquisition(out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 + go writeToKafka(w, ts.logs) READLOOP: for { @@ -195,13 +199,13 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) tomb.Wait() }) } - } func TestStreamingAcquisitionWithSSL(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { name string logs []string @@ -229,13 +233,14 @@ func TestStreamingAcquisitionWithSSL(t 
*testing.T) { Topic: "crowdsecssl", }) if w2 == nil { - log.Fatalf("Unable to setup a kafka producer") + t.Fatal("Unable to setup a kafka producer") } for _, ts := range tests { ts := ts t.Run(ts.name, func(t *testing.T) { k := KafkaSource{} + err := k.Configure([]byte(` source: kafka brokers: @@ -250,12 +255,14 @@ tls: if err != nil { t.Fatalf("could not configure kafka source : %s", err) } + tomb := tomb.Tomb{} out := make(chan types.Event) err = k.StreamingAcquisition(out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 + go writeToKafka(w2, ts.logs) READLOOP: for { @@ -271,5 +278,4 @@ tls: tomb.Wait() }) } - } diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 0a1363ebe09..09455fd65a9 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "fmt" "io" "net/http" "os" @@ -107,7 +108,7 @@ func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) { // FAKE PARSER resp, ok := responseByPath[req.URL.Path] if !ok { - log.Fatalf("unexpected url :/ %s", req.URL.Path) + return nil, fmt.Errorf("unexpected url: %s", req.URL.Path) } response.Body = io.NopCloser(strings.NewReader(resp)) diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 38528083272..687465d9493 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -2,7 +2,6 @@ package exprhelpers import ( "context" - "fmt" "os" "testing" "time" @@ -22,9 +21,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - TestFolder = "tests" -) +const TestFolder = "tests" func getDBClient(t *testing.T) *database.Client { t.Helper() @@ -78,21 +75,21 @@ func TestVisitor(t *testing.T) { name: "debug : can't compile", filter: "static_one.foo.toto == 'lol'", result: false, - err: fmt.Errorf("bad syntax"), + err: errors.New("bad syntax"), env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, }, { name: "debug : can't compile 
#2", filter: "static_one.f!oo.to/to == 'lol'", result: false, - err: fmt.Errorf("bad syntax"), + err: errors.New("bad syntax"), env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, }, { name: "debug : can't compile #3", filter: "", result: false, - err: fmt.Errorf("bad syntax"), + err: errors.New("bad syntax"), env: map[string]interface{}{"static_one": map[string]string{"foo": "bar"}}, }, } @@ -102,13 +99,13 @@ func TestVisitor(t *testing.T) { for _, test := range tests { compiledFilter, err := expr.Compile(test.filter, GetExprOptions(test.env)...) if err != nil && test.err == nil { - log.Fatalf("compile: %s", err) + t.Fatalf("compile: %s", err) } if compiledFilter != nil { result, err := expr.Run(compiledFilter, test.env) if err != nil && test.err == nil { - log.Fatalf("run : %s", err) + t.Fatalf("run: %s", err) } if isOk := assert.Equal(t, test.result, result); !isOk { @@ -193,10 +190,12 @@ func TestDistanceHelper(t *testing.T) { "lat2": test.lat2, "lon2": test.lon2, } + vm, err := expr.Compile(test.expr, GetExprOptions(env)...) if err != nil { t.Fatalf("pattern:%s val:%s NOK %s", test.lat1, test.lon1, err) } + ret, err := expr.Run(vm, env) if test.valid { require.NoError(t, err) @@ -243,12 +242,12 @@ func TestRegexpCacheBehavior(t *testing.T) { func TestRegexpInFile(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -286,23 +285,23 @@ func TestRegexpInFile(t *testing.T) { for _, test := range tests { compiledFilter, err := expr.Compile(test.filter, GetExprOptions(map[string]interface{}{})...) 
if err != nil { - log.Fatal(err) + t.Fatal(err) } result, err := expr.Run(compiledFilter, map[string]interface{}{}) if err != nil { - log.Fatal(err) + t.Fatal(err) } if isOk := assert.Equal(t, test.result, result); !isOk { - t.Fatalf("test '%s' : NOK", test.name) + t.Fatalf("test '%s': NOK", test.name) } } } func TestFileInit(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -340,7 +339,7 @@ func TestFileInit(t *testing.T) { for _, test := range tests { err := FileInit(TestFolder, test.filename, test.types) if err != nil { - log.Fatal(err) + t.Fatal(err) } switch test.types { @@ -376,12 +375,12 @@ func TestFileInit(t *testing.T) { func TestFile(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data.txt", "string") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -419,12 +418,12 @@ func TestFile(t *testing.T) { for _, test := range tests { compiledFilter, err := expr.Compile(test.filter, GetExprOptions(map[string]interface{}{})...) 
if err != nil { - log.Fatal(err) + t.Fatal(err) } result, err := expr.Run(compiledFilter, map[string]interface{}{}) if err != nil { - log.Fatal(err) + t.Fatal(err) } if isOk := assert.Equal(t, test.result, result); !isOk { @@ -1426,7 +1425,7 @@ func TestParseUnixTime(t *testing.T) { func TestIsIp(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { diff --git a/pkg/exprhelpers/jsonextract_test.go b/pkg/exprhelpers/jsonextract_test.go index 1bd45aa2d6a..2ee3ada5ea7 100644 --- a/pkg/exprhelpers/jsonextract_test.go +++ b/pkg/exprhelpers/jsonextract_test.go @@ -3,8 +3,6 @@ package exprhelpers import ( "testing" - log "github.com/sirupsen/logrus" - "github.com/antonmedv/expr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -12,12 +10,12 @@ import ( func TestJsonExtract(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -67,12 +65,12 @@ func TestJsonExtract(t *testing.T) { func TestJsonExtractUnescape(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -115,12 +113,12 @@ func TestJsonExtractUnescape(t *testing.T) { func TestJsonExtractSlice(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -178,12 +176,12 @@ func TestJsonExtractSlice(t *testing.T) { func TestJsonExtractObject(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } err := FileInit(TestFolder, "test_data_re.txt", "regex") if err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { diff --git 
a/pkg/exprhelpers/xml_test.go b/pkg/exprhelpers/xml_test.go index 516387f764b..42823884025 100644 --- a/pkg/exprhelpers/xml_test.go +++ b/pkg/exprhelpers/xml_test.go @@ -9,7 +9,7 @@ import ( func TestXMLGetAttributeValue(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -58,17 +58,19 @@ func TestXMLGetAttributeValue(t *testing.T) { for _, test := range tests { result, _ := XMLGetAttributeValue(test.xmlString, test.path, test.attribute) + isOk := assert.Equal(t, test.expectResult, result) if !isOk { t.Fatalf("test '%s' failed", test.name) } + log.Printf("test '%s' : OK", test.name) } - } + func TestXMLGetNodeValue(t *testing.T) { if err := Init(nil); err != nil { - log.Fatal(err) + t.Fatal(err) } tests := []struct { @@ -105,11 +107,12 @@ func TestXMLGetNodeValue(t *testing.T) { for _, test := range tests { result, _ := XMLGetNodeValue(test.xmlString, test.path) + isOk := assert.Equal(t, test.expectResult, result) if !isOk { t.Fatalf("test '%s' failed", test.name) } + log.Printf("test '%s' : OK", test.name) } - } diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index ad0d1a79879..4bb3c96759e 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -52,7 +52,7 @@ func TestBucket(t *testing.T) { err = exprhelpers.Init(nil) if err != nil { - log.Fatalf("exprhelpers init failed: %s", err) + t.Fatalf("exprhelpers init failed: %s", err) } if envSetting != "" { @@ -61,25 +61,31 @@ func TestBucket(t *testing.T) { } } else { wg := new(sync.WaitGroup) + fds, err := os.ReadDir(testdata) if err != nil { t.Fatalf("Unable to read test directory : %s", err) } + for _, fd := range fds { if fd.Name() == "hub" { continue } + fname := filepath.Join(testdata, fd.Name()) log.Infof("Running test on %s", fname) tomb.Go(func() error { wg.Add(1) defer wg.Done() + if err := testOneBucket(t, hub, fname, tomb); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } + return 
nil }) } + wg.Wait() } } @@ -92,12 +98,12 @@ func watchTomb(tomb *tomb.Tomb) { log.Warning("Tomb is dead") break } + time.Sleep(100 * time.Millisecond) } } func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) error { - var ( holders []BucketFactory @@ -105,9 +111,9 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er stagecfg string stages []parser.Stagefile err error - buckets *Buckets ) - buckets = NewBuckets() + + buckets := NewBuckets() /*load the scenarios*/ stagecfg = dir + "/scenarios.yaml" @@ -117,51 +123,59 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er tmpl, err := template.New("test").Parse(string(stagefiles)) if err != nil { - return fmt.Errorf("failed to parse template %s : %s", stagefiles, err) + return fmt.Errorf("failed to parse template %s: %w", stagefiles, err) } + var out bytes.Buffer + err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) if err != nil { panic(err) } + if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil { - log.Fatalf("failed unmarshaling %s : %s", stagecfg, err) + t.Fatalf("failed unmarshaling %s : %s", stagecfg, err) } + files := []string{} for _, x := range stages { files = append(files, x.Filename) } cscfg := &csconfig.CrowdsecServiceCfg{} + holders, response, err := LoadBuckets(cscfg, hub, files, tomb, buckets, false) if err != nil { t.Fatalf("failed loading bucket : %s", err) } + tomb.Go(func() error { watchTomb(tomb) return nil }) + if !testFile(t, filepath.Join(dir, "test.json"), filepath.Join(dir, "in-buckets_state.json"), holders, response, buckets) { return fmt.Errorf("tests from %s failed", dir) } + return nil } func testFile(t *testing.T, file string, bs string, holders []BucketFactory, response chan types.Event, buckets *Buckets) bool { - var results []types.Event var dump bool - //should we restore + // should we restore if _, err := os.Stat(bs); err == nil { dump = true + if err := 
LoadBucketsState(bs, buckets, holders); err != nil { t.Fatalf("Failed to load bucket state : %s", err) } } /* now we can load the test files */ - //process the yaml + // process the yaml yamlFile, err := os.Open(file) if err != nil { t.Errorf("yamlFile.Get err #%v ", err) diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 902a3cccbc7..ed7c0d3df9a 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -100,11 +100,11 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing parser_cfg_file := fmt.Sprintf("%s/parsers.yaml", dir) cfg, err := os.ReadFile(parser_cfg_file) if err != nil { - return fmt.Errorf("failed opening %s : %s", parser_cfg_file, err) + return fmt.Errorf("failed opening %s: %w", parser_cfg_file, err) } tmpl, err := template.New("test").Parse(string(cfg)) if err != nil { - return fmt.Errorf("failed to parse template %s : %s", cfg, err) + return fmt.Errorf("failed to parse template %s: %w", cfg, err) } var out bytes.Buffer err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) @@ -112,12 +112,12 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing panic(err) } if err = yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { - return fmt.Errorf("failed unmarshaling %s : %s", parser_cfg_file, err) + return fmt.Errorf("failed unmarshaling %s: %w", parser_cfg_file, err) } pnodes, err = LoadStages(parser_configs, pctx, ectx) if err != nil { - return fmt.Errorf("unable to load parser config : %s", err) + return fmt.Errorf("unable to load parser config: %w", err) } //TBD: Load post overflows @@ -147,7 +147,7 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { err = exprhelpers.Init(nil) if err != nil { - log.Fatalf("exprhelpers init failed: %s", err) + return nil, ectx, fmt.Errorf("exprhelpers init failed: %w", err) } //Load enrichment @@ -158,7 +158,7 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { } ectx, err = 
Loadplugin() if err != nil { - log.Fatalf("failed to load plugin geoip : %v", err) + return nil, ectx, fmt.Errorf("failed to load plugin geoip: %v", err) } log.Printf("Loaded -> %+v", ectx) @@ -299,7 +299,6 @@ func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error only the keys of the expected part are checked against result */ if len(testSet.Results) == 0 && len(results) == 0 { - log.Fatal("No results, no tests, abort.") return false, errors.New("no tests, no results") } From 73e03ef556e15139d4ab30309703c688e7806326 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Jun 2024 11:37:57 +0200 Subject: [PATCH 163/581] lint: enable revive/if-return, revive/error-strings (#3057) * lint: enable revive/if-return, revive/error-strings * lint --- .golangci.yml | 4 --- cmd/crowdsec-cli/console.go | 5 +-- cmd/crowdsec-cli/itemcli.go | 6 +--- cmd/crowdsec-cli/lapi.go | 11 ++---- cmd/crowdsec-cli/metrics.go | 6 +--- cmd/crowdsec-cli/notifications.go | 5 +-- cmd/crowdsec-cli/papi.go | 5 +-- cmd/crowdsec-cli/setup.go | 6 +--- cmd/crowdsec/api.go | 5 +-- pkg/csconfig/api.go | 6 +--- pkg/csconfig/fflag.go | 5 +-- pkg/exprhelpers/helpers.go | 42 +++++++++++++++-------- pkg/parser/node.go | 14 ++++---- pkg/parser/parsing_test.go | 57 ++++++++++++++++++++++++------- 14 files changed, 89 insertions(+), 88 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 61278185633..fd237c95bcc 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -140,8 +140,6 @@ linters-settings: disabled: true - name: error-naming disabled: true - - name: error-strings - disabled: true - name: flag-parameter disabled: true - name: function-result-limit @@ -150,8 +148,6 @@ linters-settings: disabled: true - name: get-return disabled: true - - name: if-return - disabled: true - name: increment-decrement disabled: true - name: indent-error-flow diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index eab24a5d709..972d43122cc 
100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -51,11 +51,8 @@ func (cli *cliConsole) NewCommand() *cobra.Command { if err := require.CAPI(cfg); err != nil { return err } - if err := require.CAPIRegistered(cfg); err != nil { - return err - } - return nil + return require.CAPIRegistered(cfg) }, } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index e1a908f3492..66a3f4fc051 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -442,11 +442,7 @@ func (cli cliItem) list(args []string, all bool) error { return err } - if err = listItems(color.Output, []string{cli.name}, items, false, cfg.Cscli.Output); err != nil { - return err - } - - return nil + return listItems(color.Output, []string{cli.name}, items, false, cfg.Cscli.Output) } func (cli cliItem) newListCmd() *cobra.Command { diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 0d7ebc64431..fcef137e424 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -275,11 +275,7 @@ func (cli *cliLapi) addContext(key string, values []string) error { cfg.Crowdsec.ContextToSend[key] = data } - if err := cfg.Crowdsec.DumpContextConfigFile(); err != nil { - return err - } - - return nil + return cfg.Crowdsec.DumpContextConfigFile() } func (cli *cliLapi) newContextAddCmd() *cobra.Command { @@ -307,10 +303,7 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user } if keyToAdd != "" { - if err := cli.addContext(keyToAdd, valuesToAdd); err != nil { - return err - } - return nil + return cli.addContext(keyToAdd, valuesToAdd) } for _, v := range valuesToAdd { diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index ad2b9ee79d8..7858a7a7b2b 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -331,11 +331,7 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { } } - if err := ms.Format(color.Output, sections, 
cfg.Cscli.Output, noUnit); err != nil { - return err - } - - return nil + return ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit) } func (cli *cliMetrics) NewCommand() *cobra.Command { diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index d678bf0e306..768d6a1c47e 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -66,11 +66,8 @@ func (cli *cliNotifications) NewCommand() *cobra.Command { if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - if err := require.Notifications(cfg); err != nil { - return err - } - return nil + return require.Notifications(cfg) }, } diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index bea03c12848..b8ed0fd7356 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -39,11 +39,8 @@ func (cli *cliPapi) NewCommand() *cobra.Command { if err := require.CAPI(cfg); err != nil { return err } - if err := require.PAPI(cfg); err != nil { - return err - } - return nil + return require.PAPI(cfg) }, } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 8a8c5a6c665..3e12b2465dd 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -320,11 +320,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error { return err } - if err = setup.InstallHubItems(hub, input, dryRun); err != nil { - return err - } - - return nil + return setup.InstallHubItems(hub, input, dryRun) } func runSetupValidate(cmd *cobra.Command, args []string) error { diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index 995345a25e4..c57b8d87cff 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -76,10 +76,7 @@ func serveAPIServer(apiServer *apiserver.APIServer) { <-apiTomb.Dying() // lock until go routine is dying pluginTomb.Kill(nil) log.Infof("serve: shutting down api server") - if err := apiServer.Shutdown(); err != nil { - return err - } - return nil + return 
apiServer.Shutdown() }) <-apiReady } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 4d10690735d..267e101cea8 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -451,9 +451,5 @@ func (c *Config) LoadAPIClient() error { return errors.New("no API client section in configuration") } - if err := c.API.Client.Load(); err != nil { - return err - } - - return nil + return c.API.Client.Load() } diff --git a/pkg/csconfig/fflag.go b/pkg/csconfig/fflag.go index 7311f9e751a..c86686889eb 100644 --- a/pkg/csconfig/fflag.go +++ b/pkg/csconfig/fflag.go @@ -12,10 +12,7 @@ import ( // LoadFeatureFlagsEnv parses the environment variables to enable feature flags. func LoadFeatureFlagsEnv(logger *log.Logger) error { - if err := fflag.Crowdsec.SetFromEnv(logger); err != nil { - return err - } - return nil + return fflag.Crowdsec.SetFromEnv(logger) } // FeatureFlagsFileLocation returns the path to the feature.yaml file. diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 575425ef83e..e0a3a523c3e 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -35,9 +35,11 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var dataFile map[string][]string -var dataFileRegex map[string][]*regexp.Regexp -var dataFileRe2 map[string][]*re2.Regexp +var ( + dataFile map[string][]string + dataFileRegex map[string][]*regexp.Regexp + dataFileRe2 map[string][]*re2.Regexp +) // This is used to (optionally) cache regexp results for RegexpInFile operations var dataFileRegexCache map[string]gcache.Cache = make(map[string]gcache.Cache) @@ -57,9 +59,11 @@ var exprFunctionOptions []expr.Option var keyValuePattern = regexp.MustCompile(`(?P[^=\s]+)=(?:"(?P[^"\\]*(?:\\.[^"\\]*)*)"|(?P[^=\s]+)|\s*)`) -var geoIPCityReader *geoip2.Reader -var geoIPASNReader *geoip2.Reader -var geoIPRangeReader *maxminddb.Reader +var ( + geoIPCityReader *geoip2.Reader + geoIPASNReader *geoip2.Reader + geoIPRangeReader *maxminddb.Reader +) func GetExprOptions(ctx 
map[string]interface{}) []expr.Option { if len(exprFunctionOptions) == 0 { @@ -72,9 +76,11 @@ func GetExprOptions(ctx map[string]interface{}) []expr.Option { )) } } + ret := []expr.Option{} ret = append(ret, exprFunctionOptions...) ret = append(ret, expr.Env(ctx)) + return ret } @@ -106,9 +112,11 @@ func GeoIPClose() { if geoIPCityReader != nil { geoIPCityReader.Close() } + if geoIPASNReader != nil { geoIPASNReader.Close() } + if geoIPRangeReader != nil { geoIPRangeReader.Close() } @@ -124,16 +132,15 @@ func Init(databaseClient *database.Client) error { } func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { - - //cache is explicitly disabled + // cache is explicitly disabled if CacheCfg.Cache != nil && !*CacheCfg.Cache { return nil } - //cache is implicitly disabled if no cache config is provided + // cache is implicitly disabled if no cache config is provided if CacheCfg.Strategy == nil && CacheCfg.TTL == nil && CacheCfg.Size == nil { return nil } - //cache is enabled + // cache is enabled if CacheCfg.Size == nil { CacheCfg.Size = ptr.Of(50) @@ -144,6 +151,7 @@ func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { if CacheCfg.Strategy == nil { CacheCfg.Strategy = ptr.Of("LRU") } + switch *CacheCfg.Strategy { case "LRU": gc = gc.LRU() @@ -158,14 +166,17 @@ func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { if CacheCfg.TTL != nil { gc.Expiration(*CacheCfg.TTL) } + cache := gc.Build() dataFileRegexCache[filename] = cache + return nil } // UpdateCacheMetrics is called directly by the prom handler func UpdateRegexpCacheMetrics() { RegexpCacheMetrics.Reset() + for name := range dataFileRegexCache { RegexpCacheMetrics.With(prometheus.Labels{"name": name}).Set(float64(dataFileRegexCache[name].Len(true))) } @@ -173,10 +184,12 @@ func UpdateRegexpCacheMetrics() { func FileInit(fileFolder string, filename string, fileType string) error { log.Debugf("init (folder:%s) (file:%s) (type:%s)", fileFolder, filename, 
fileType) + if fileType == "" { log.Debugf("ignored file %s%s because no type specified", fileFolder, filename) return nil } + ok, err := existsInFileMaps(filename, fileType) if ok { log.Debugf("ignored file %s%s because already loaded", fileFolder, filename) @@ -187,6 +200,7 @@ func FileInit(fileFolder string, filename string, fileType string) error { } filepath := filepath.Join(fileFolder, filename) + file, err := os.Open(filepath) if err != nil { return err @@ -201,28 +215,26 @@ func FileInit(fileFolder string, filename string, fileType string) error { if len(scanner.Text()) == 0 { //skip empty lines continue } + switch fileType { case "regex", "regexp": if fflag.Re2RegexpInfileSupport.IsEnabled() { dataFileRe2[filename] = append(dataFileRe2[filename], re2.MustCompile(scanner.Text())) continue } + dataFileRegex[filename] = append(dataFileRegex[filename], regexp.MustCompile(scanner.Text())) case "string": dataFile[filename] = append(dataFile[filename], scanner.Text()) } } - if err := scanner.Err(); err != nil { - return err - } - return nil + return scanner.Err() } // Expr helpers func Distinct(params ...any) (any, error) { - if rt := reflect.TypeOf(params[0]).Kind(); rt != reflect.Slice && rt != reflect.Array { return nil, nil } diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 11ffb8aa7fa..74c9e94a331 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -65,8 +65,7 @@ type Node struct { } func (n *Node) validate(ectx EnricherCtx) error { - - //stage is being set automagically + // stage is being set automagically if n.Stage == "" { return errors.New("stage needs to be an existing stage") } @@ -244,12 +243,14 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri gstr = val } else { clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed) + NodeState = false } } else if n.Grok.RunTimeValue != nil { output, err := exprhelpers.Run(n.Grok.RunTimeValue, cachedExprEnv, clog, n.Debug) if 
err != nil { clog.Warningf("failed to run RunTimeValue : %v", err) + NodeState = false } @@ -352,6 +353,7 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri if err != nil { clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) clog.Debugf("Event leaving node : ko") + return false, err } @@ -498,7 +500,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { n.Grok.RunTimeRegexp, err = pctx.Grok.Get(n.Grok.RegexpName) if err != nil { - return fmt.Errorf("unable to find grok '%s' : %v", n.Grok.RegexpName, err) + return fmt.Errorf("unable to find grok '%s': %v", n.Grok.RegexpName, err) } if n.Grok.RunTimeRegexp == nil { @@ -636,9 +638,5 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { return errors.New("Node is empty") } - if err := n.validate(ectx); err != nil { - return err - } - - return nil + return n.validate(ectx) } diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index ed7c0d3df9a..3193631f4dd 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -28,13 +28,16 @@ var debug bool = false func TestParser(t *testing.T) { debug = true + log.SetLevel(log.InfoLevel) - var envSetting = os.Getenv("TEST_ONLY") + envSetting := os.Getenv("TEST_ONLY") + pctx, ectx, err := prepTests() if err != nil { t.Fatalf("failed to load env : %s", err) } - //Init the enricher + + // Init the enricher if envSetting != "" { if err := testOneParser(pctx, ectx, envSetting, nil); err != nil { t.Fatalf("Test '%s' failed : %s", envSetting, err) @@ -44,12 +47,15 @@ func TestParser(t *testing.T) { if err != nil { t.Fatalf("Unable to read test directory : %s", err) } + for _, fd := range fds { if !fd.IsDir() { continue } + fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) + if err := testOneParser(pctx, ectx, fname, nil); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } @@ -59,13 +65,17 @@ func TestParser(t *testing.T) { func BenchmarkParser(t 
*testing.B) { log.Printf("start bench !!!!") + debug = false + log.SetLevel(log.ErrorLevel) + pctx, ectx, err := prepTests() if err != nil { t.Fatalf("failed to load env : %s", err) } - var envSetting = os.Getenv("TEST_ONLY") + + envSetting := os.Getenv("TEST_ONLY") if envSetting != "" { if err := testOneParser(pctx, ectx, envSetting, t); err != nil { @@ -76,12 +86,15 @@ func BenchmarkParser(t *testing.B) { if err != nil { t.Fatalf("Unable to read test directory : %s", err) } + for _, fd := range fds { if !fd.IsDir() { continue } + fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) + if err := testOneParser(pctx, ectx, fname, t); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } @@ -91,26 +104,32 @@ func BenchmarkParser(t *testing.B) { func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error { var ( - err error - pnodes []Node - + err error + pnodes []Node parser_configs []Stagefile ) + log.Warningf("testing %s", dir) + parser_cfg_file := fmt.Sprintf("%s/parsers.yaml", dir) + cfg, err := os.ReadFile(parser_cfg_file) if err != nil { return fmt.Errorf("failed opening %s: %w", parser_cfg_file, err) } + tmpl, err := template.New("test").Parse(string(cfg)) if err != nil { return fmt.Errorf("failed to parse template %s: %w", cfg, err) } + var out bytes.Buffer + err = tmpl.Execute(&out, map[string]string{"TestDirectory": dir}) if err != nil { panic(err) } + if err = yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { return fmt.Errorf("failed unmarshaling %s: %w", parser_cfg_file, err) } @@ -120,20 +139,23 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing return fmt.Errorf("unable to load parser config: %w", err) } - //TBD: Load post overflows - //func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { + // TBD: Load post overflows + // func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { parser_test_file := 
fmt.Sprintf("%s/test.yaml", dir) tests := loadTestFile(parser_test_file) count := 1 + if b != nil { count = b.N b.ResetTimer() } + for n := 0; n < count; n++ { if !testFile(tests, *pctx, pnodes) { - return errors.New("test failed !") + return errors.New("test failed") } } + return nil } @@ -150,27 +172,31 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { return nil, ectx, fmt.Errorf("exprhelpers init failed: %w", err) } - //Load enrichment + // Load enrichment datadir := "./test_data/" + err = exprhelpers.GeoIPInit(datadir) if err != nil { log.Fatalf("unable to initialize GeoIP: %s", err) } + ectx, err = Loadplugin() if err != nil { return nil, ectx, fmt.Errorf("failed to load plugin geoip: %v", err) } + log.Printf("Loaded -> %+v", ectx) - //Load the parser patterns + // Load the parser patterns cfgdir := "../../config/" /* this should be refactored to 2 lines :p */ // Init the parser pctx, err = Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) if err != nil { - return nil, ectx, fmt.Errorf("failed to initialize parser : %v", err) + return nil, ectx, fmt.Errorf("failed to initialize parser: %v", err) } + return pctx, ectx, nil } @@ -179,21 +205,28 @@ func loadTestFile(file string) []TestFile { if err != nil { log.Fatalf("yamlFile.Get err #%v ", err) } + dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) var testSet []TestFile + for { tf := TestFile{} + err := dec.Decode(&tf) if err != nil { if errors.Is(err, io.EOF) { break } + log.Fatalf("Failed to load testfile '%s' yaml error : %v", file, err) + return nil } + testSet = append(testSet, tf) } + return testSet } From 9e859c0c8c97c3bec6564170ab796847ddf98e24 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Jun 2024 12:01:13 +0200 Subject: [PATCH 164/581] refactor pkg/database, pkg/models (#3022) * pkg/models: Source.String() * pkg/models: Alert.FormatAsStrings() * cscli alerts list: sort remediation keys avoid printing 
"ban: ... captcha: ..." in one line, and "captcha: ... ban: ..." in another * remove unused methods; drop else branch * lint --- cmd/crowdsec-cli/alerts.go | 6 +- pkg/database/alerts.go | 100 +------------------------------- pkg/models/helpers.go | 113 +++++++++++++++++++++++++++++++++---- 3 files changed, 109 insertions(+), 110 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 9f4c5093afc..ca99b54f59f 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -19,6 +19,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwversion" @@ -41,12 +43,12 @@ func DecisionsFromAlert(alert *models.Alert) string { decMap[k] = v + 1 } - for k, v := range decMap { + for _, key := range maptools.SortedKeys(decMap) { if len(ret) > 0 { ret += " " } - ret += fmt.Sprintf("%s:%d", k, v) + ret += fmt.Sprintf("%s:%d", key, decMap[key]) } return ret diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 37e9397709e..d9efe4c254b 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -9,7 +9,6 @@ import ( "strings" "time" - "github.com/davecgh/go-spew/spew" "github.com/mattn/go-sqlite3" "github.com/pkg/errors" log "github.com/sirupsen/logrus" @@ -33,101 +32,6 @@ const ( maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered ) -func formatAlertCN(source models.Source) string { - cn := source.Cn - - if source.AsNumber != "" { - cn += "/" + source.AsNumber - } - - return cn -} - -func formatAlertSource(alert *models.Alert) string { - if alert.Source == nil || alert.Source.Scope == nil || *alert.Source.Scope == "" { - return "empty source" - } - - if *alert.Source.Scope == types.Ip { - ret := "ip " + *alert.Source.Value - - cn := formatAlertCN(*alert.Source) - if cn 
!= "" { - ret += " (" + cn + ")" - } - - return ret - } - - if *alert.Source.Scope == types.Range { - ret := "range " + *alert.Source.Value - - cn := formatAlertCN(*alert.Source) - if cn != "" { - ret += " (" + cn + ")" - } - - return ret - } - - return *alert.Source.Scope + " " + *alert.Source.Value -} - -func formatAlertAsString(machineID string, alert *models.Alert) []string { - src := formatAlertSource(alert) - - msg := "empty scenario" - if alert.Scenario != nil && *alert.Scenario != "" { - msg = *alert.Scenario - } else if alert.Message != nil && *alert.Message != "" { - msg = *alert.Message - } - - reason := fmt.Sprintf("%s by %s", msg, src) - - if len(alert.Decisions) == 0 { - return []string{fmt.Sprintf("(%s) alert : %s", machineID, reason)} - } - - var retStr []string - - if alert.Decisions[0].Origin != nil && *alert.Decisions[0].Origin == types.CscliImportOrigin { - return []string{fmt.Sprintf("(%s) alert : %s", machineID, reason)} - } - - for i, decisionItem := range alert.Decisions { - decision := "" - if alert.Simulated != nil && *alert.Simulated { - decision = "(simulated alert)" - } else if decisionItem.Simulated != nil && *decisionItem.Simulated { - decision = "(simulated decision)" - } - - if log.GetLevel() >= log.DebugLevel { - /*spew is expensive*/ - log.Debugf("%s", spew.Sdump(decisionItem)) - } - - if len(alert.Decisions) > 1 { - reason = fmt.Sprintf("%s for %d/%d decisions", msg, i+1, len(alert.Decisions)) - } - - var machineIDOrigin string - if machineID == "" { - machineIDOrigin = *decisionItem.Origin - } else { - machineIDOrigin = fmt.Sprintf("%s/%s", machineID, *decisionItem.Origin) - } - - decision += fmt.Sprintf("%s %s on %s %s", *decisionItem.Duration, - *decisionItem.Type, *decisionItem.Scope, *decisionItem.Value) - retStr = append(retStr, - fmt.Sprintf("(%s) %s : %s", machineIDOrigin, reason, decision)) - } - - return retStr -} - // CreateOrUpdateAlert is specific to PAPI : It checks if alert already exists, otherwise inserts it // 
if alert already exists, it checks it associated decisions already exists // if some associated decisions are missing (ie. previous insert ended up in error) it inserts them @@ -562,8 +466,9 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ stopAtTime = time.Now().UTC() } + /*display proper alert in logs*/ - for _, disp := range formatAlertAsString(machineID, alertItem) { + for _, disp := range alertItem.FormatAsStrings(machineID, log.StandardLogger()) { c.Log.Info(disp) } @@ -649,6 +554,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ if len(metaItem.Value) > 4095 { c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) + value = value[:4095] } diff --git a/pkg/models/helpers.go b/pkg/models/helpers.go index 8c082550d48..5bc3f2a28b3 100644 --- a/pkg/models/helpers.go +++ b/pkg/models/helpers.go @@ -1,27 +1,33 @@ package models -func (a *Alert) HasRemediation() bool { - return true -} +import ( + "fmt" + + "github.com/davecgh/go-spew/spew" + log "github.com/sirupsen/logrus" +) + +const ( + // these are duplicated from pkg/types + // TODO XXX: de-duplicate + Ip = "Ip" + Range = "Range" + CscliImportOrigin = "cscli-import" +) func (a *Alert) GetScope() string { - if a.Source.Scope == nil { - return "" - } - return *a.Source.Scope + return a.Source.GetScope() } func (a *Alert) GetValue() string { - if a.Source.Value == nil { - return "" - } - return *a.Source.Value + return a.Source.GetValue() } func (a *Alert) GetScenario() string { if a.Scenario == nil { return "" } + return *a.Scenario } @@ -29,6 +35,7 @@ func (a *Alert) GetEventsCount() int32 { if a.EventsCount == nil { return 0 } + return *a.EventsCount } @@ -38,6 +45,7 @@ func (e *Event) GetMeta(key string) string { return meta.Value } } + return "" } @@ -47,6 +55,7 @@ func (a *Alert) GetMeta(key string) string { return meta.Value } } + return "" } @@ -54,6 +63,7 @@ func (s Source) GetValue() string { if s.Value == nil { return "" 
} + return *s.Value } @@ -61,6 +71,7 @@ func (s Source) GetScope() string { if s.Scope == nil { return "" } + return *s.Scope } @@ -69,8 +80,88 @@ func (s Source) GetAsNumberName() string { if s.AsNumber != "0" { ret += s.AsNumber } + if s.AsName != "" { ret += " " + s.AsName } + return ret } + +func (s *Source) String() string { + if s == nil || s.Scope == nil || *s.Scope == "" { + return "empty source" + } + + cn := s.Cn + + if s.AsNumber != "" { + cn += "/" + s.AsNumber + } + + if cn != "" { + cn = " (" + cn + ")" + } + + switch *s.Scope { + case Ip: + return "ip " + *s.Value + cn + case Range: + return "range " + *s.Value + cn + default: + return *s.Scope + " " + *s.Value + } +} + +func (a *Alert) FormatAsStrings(machineID string, logger *log.Logger) []string { + src := a.Source.String() + + msg := "empty scenario" + if a.Scenario != nil && *a.Scenario != "" { + msg = *a.Scenario + } else if a.Message != nil && *a.Message != "" { + msg = *a.Message + } + + reason := fmt.Sprintf("%s by %s", msg, src) + + if len(a.Decisions) == 0 { + return []string{fmt.Sprintf("(%s) alert : %s", machineID, reason)} + } + + var retStr []string + + if a.Decisions[0].Origin != nil && *a.Decisions[0].Origin == CscliImportOrigin { + return []string{fmt.Sprintf("(%s) alert : %s", machineID, reason)} + } + + for i, decisionItem := range a.Decisions { + decision := "" + if a.Simulated != nil && *a.Simulated { + decision = "(simulated alert)" + } else if decisionItem.Simulated != nil && *decisionItem.Simulated { + decision = "(simulated decision)" + } + + if logger.GetLevel() >= log.DebugLevel { + /*spew is expensive*/ + logger.Debug(spew.Sdump(decisionItem)) + } + + if len(a.Decisions) > 1 { + reason = fmt.Sprintf("%s for %d/%d decisions", msg, i+1, len(a.Decisions)) + } + + origin := *decisionItem.Origin + if machineID != "" { + origin = machineID + "/" + origin + } + + decision += fmt.Sprintf("%s %s on %s %s", *decisionItem.Duration, + *decisionItem.Type, *decisionItem.Scope, 
*decisionItem.Value) + retStr = append(retStr, + fmt.Sprintf("(%s) %s : %s", origin, reason, decision)) + } + + return retStr +} From 114a966129caa2962ada88f3368a27859f382a86 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Jun 2024 10:21:03 +0200 Subject: [PATCH 165/581] refactor "cscli decisions" (#3061) * cscli decisions: extract method add() * cscli decisions: extract method delete() * cscli lapi: avoid apiclient global * cscli capi: avoid apiclient global * cscli decisions: error cleanup, avoid global for api client * update test * cscli alerts: extract method delete() * lint --- cmd/crowdsec-cli/alerts.go | 111 ++++----- cmd/crowdsec-cli/capi.go | 4 +- cmd/crowdsec-cli/decisions.go | 351 ++++++++++++++------------- cmd/crowdsec-cli/decisions_import.go | 6 +- cmd/crowdsec-cli/lapi.go | 7 +- test/bats/90_decisions.bats | 1 - 6 files changed, 252 insertions(+), 228 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index ca99b54f59f..e1b070ab0fc 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -204,7 +204,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { } apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { - return fmt.Errorf("parsing api url %s: %w", apiURL, err) + return fmt.Errorf("parsing api url: %w", err) } cli.client, err = apiclient.NewClient(&apiclient.Config{ @@ -215,7 +215,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { VersionPrefix: "v1", }) if err != nil { - return fmt.Errorf("new api client: %w", err) + return fmt.Errorf("creating api client: %w", err) } return nil @@ -370,6 +370,60 @@ cscli alerts list --type ban`, return cmd } +func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, ActiveDecision *bool, AlertDeleteAll bool, delAlertByID string, contained *bool) error { + var err error + + if !AlertDeleteAll { + if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, 
alertDeleteFilter.RangeEquals, + alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { + return err + } + if ActiveDecision != nil { + alertDeleteFilter.ActiveDecisionEquals = ActiveDecision + } + + if *alertDeleteFilter.ScopeEquals == "" { + alertDeleteFilter.ScopeEquals = nil + } + if *alertDeleteFilter.ValueEquals == "" { + alertDeleteFilter.ValueEquals = nil + } + if *alertDeleteFilter.ScenarioEquals == "" { + alertDeleteFilter.ScenarioEquals = nil + } + if *alertDeleteFilter.IPEquals == "" { + alertDeleteFilter.IPEquals = nil + } + if *alertDeleteFilter.RangeEquals == "" { + alertDeleteFilter.RangeEquals = nil + } + if contained != nil && *contained { + alertDeleteFilter.Contains = new(bool) + } + limit := 0 + alertDeleteFilter.Limit = &limit + } else { + limit := 0 + alertDeleteFilter = apiclient.AlertsDeleteOpts{Limit: &limit} + } + + var alerts *models.DeleteAlertsResponse + if delAlertByID == "" { + alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) + if err != nil { + return fmt.Errorf("unable to delete alerts: %w", err) + } + } else { + alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) + if err != nil { + return fmt.Errorf("unable to delete alert: %w", err) + } + } + log.Infof("%s alert(s) deleted", alerts.NbDeleted) + + return nil +} + func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { var ( ActiveDecision *bool @@ -411,58 +465,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - var err error - - if !AlertDeleteAll { - if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, - alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { - printHelp(cmd) - return err - } - if ActiveDecision != nil { - alertDeleteFilter.ActiveDecisionEquals = ActiveDecision - } - - if *alertDeleteFilter.ScopeEquals == "" { - alertDeleteFilter.ScopeEquals = nil - } - if 
*alertDeleteFilter.ValueEquals == "" { - alertDeleteFilter.ValueEquals = nil - } - if *alertDeleteFilter.ScenarioEquals == "" { - alertDeleteFilter.ScenarioEquals = nil - } - if *alertDeleteFilter.IPEquals == "" { - alertDeleteFilter.IPEquals = nil - } - if *alertDeleteFilter.RangeEquals == "" { - alertDeleteFilter.RangeEquals = nil - } - if contained != nil && *contained { - alertDeleteFilter.Contains = new(bool) - } - limit := 0 - alertDeleteFilter.Limit = &limit - } else { - limit := 0 - alertDeleteFilter = apiclient.AlertsDeleteOpts{Limit: &limit} - } - - var alerts *models.DeleteAlertsResponse - if delAlertByID == "" { - alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) - if err != nil { - return fmt.Errorf("unable to delete alerts: %w", err) - } - } else { - alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) - if err != nil { - return fmt.Errorf("unable to delete alert: %w", err) - } - } - log.Infof("%s alert(s) deleted", alerts.NbDeleted) - - return nil + return cli.delete(alertDeleteFilter, ActiveDecision, AlertDeleteAll, delAlertByID, contained) }, } diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 7b1613c5463..6933d355071 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -164,7 +164,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return errors.New("no scenarios installed, abort") } - Client, err = apiclient.NewDefaultClient(apiURL, + client, err := apiclient.NewDefaultClient(apiURL, CAPIURLPrefix, cwversion.UserAgent(), nil) @@ -180,7 +180,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Scenarios: scenarios, } - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { return err } diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 
de3bf73dca2..df35fb9bb18 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -23,8 +23,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var Client *apiclient.ApiClient - func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { /*here we cheat a bit : to make it more readable for the user, we dedup some entries*/ spamLimit := make(map[string]bool) @@ -117,7 +115,8 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin } type cliDecisions struct { - cfg configGetter + client *apiclient.ApiClient + cfg configGetter } func NewCLIDecisions(cfg configGetter) *cliDecisions { @@ -141,16 +140,16 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { if err := cfg.LoadAPIClient(); err != nil { return fmt.Errorf("loading api client: %w", err) } - password := strfmt.Password(cfg.API.Client.Credentials.Password) - apiurl, err := url.Parse(cfg.API.Client.Credentials.URL) + apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) if err != nil { - return fmt.Errorf("parsing api url %s: %w", cfg.API.Client.Credentials.URL, err) + return fmt.Errorf("parsing api url: %w", err) } - Client, err = apiclient.NewClient(&apiclient.Config{ + + cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, - Password: password, + Password: strfmt.Password(cfg.API.Client.Credentials.Password), UserAgent: cwversion.UserAgent(), - URL: apiurl, + URL: apiURL, VersionPrefix: "v1", }) if err != nil { @@ -247,7 +246,7 @@ func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, con filter.Contains = new(bool) } - alerts, _, err := Client.Alerts.List(context.Background(), filter) + alerts, _, err := cli.client.Alerts.List(context.Background(), filter) if err != nil { return fmt.Errorf("unable to retrieve decisions: %w", err) } @@ -294,25 +293,103 @@ cscli decisions list --origin lists --scenario list_name return 
cli.list(filter, NoSimu, contained, printMachine) }, } - cmd.Flags().SortFlags = false - cmd.Flags().BoolVarP(filter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") - cmd.Flags().StringVar(filter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") - cmd.Flags().StringVar(filter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") - cmd.Flags().StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") - cmd.Flags().StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)") - cmd.Flags().StringVar(filter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) - cmd.Flags().StringVarP(filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)") - cmd.Flags().StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)") - cmd.Flags().StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value )") - cmd.Flags().IntVarP(filter.Limit, "limit", "l", 100, "number of alerts to get (use 0 to remove the limit)") - cmd.Flags().BoolVar(NoSimu, "no-simu", false, "exclude decisions in simulation mode") - cmd.Flags().BoolVarP(&printMachine, "machine", "m", false, "print machines that triggered decisions") - cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + + flags := cmd.Flags() + flags.SortFlags = false + flags.BoolVarP(filter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") + flags.StringVar(filter.Since, "since", "", "restrict to alerts newer than since (ie. 
4h, 30d)") + flags.StringVar(filter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") + flags.StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") + flags.StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)") + flags.StringVar(filter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + flags.StringVarP(filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)") + flags.StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)") + flags.StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") + flags.StringVarP(filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value )") + flags.IntVarP(filter.Limit, "limit", "l", 100, "number of alerts to get (use 0 to remove the limit)") + flags.BoolVar(NoSimu, "no-simu", false, "exclude decisions in simulation mode") + flags.BoolVarP(&printMachine, "machine", "m", false, "print machines that triggered decisions") + flags.BoolVar(contained, "contained", false, "query decisions contained by range") return cmd } +func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { + alerts := models.AddAlertsRequest{} + origin := types.CscliOrigin + capacity := int32(0) + leakSpeed := "0" + eventsCount := int32(1) + empty := "" + simulated := false + startAt := time.Now().UTC().Format(time.RFC3339) + stopAt := time.Now().UTC().Format(time.RFC3339) + createdAt := time.Now().UTC().Format(time.RFC3339) + + /*take care of shorthand options*/ + if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + return err + } + + if addIP != "" { + addValue = addIP + addScope = 
types.Ip + } else if addRange != "" { + addValue = addRange + addScope = types.Range + } else if addValue == "" { + return errors.New("missing arguments, a value is required (--ip, --range or --scope and --value)") + } + + if addReason == "" { + addReason = fmt.Sprintf("manual '%s' from '%s'", addType, cli.cfg().API.Client.Credentials.Login) + } + + decision := models.Decision{ + Duration: &addDuration, + Scope: &addScope, + Value: &addValue, + Type: &addType, + Scenario: &addReason, + Origin: &origin, + } + alert := models.Alert{ + Capacity: &capacity, + Decisions: []*models.Decision{&decision}, + Events: []*models.Event{}, + EventsCount: &eventsCount, + Leakspeed: &leakSpeed, + Message: &addReason, + ScenarioHash: &empty, + Scenario: &addReason, + ScenarioVersion: &empty, + Simulated: &simulated, + // setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes + Source: &models.Source{ + AsName: "", + AsNumber: "", + Cn: "", + IP: addValue, + Range: "", + Scope: &addScope, + Value: &addValue, + }, + StartAt: &startAt, + StopAt: &stopAt, + CreatedAt: createdAt, + } + alerts = append(alerts, &alert) + + _, _, err := cli.client.Alerts.Add(context.Background(), alerts) + if err != nil { + return err + } + + log.Info("Decision successfully added") + + return nil +} + func (cli *cliDecisions) newAddCmd() *cobra.Command { var ( addIP string @@ -336,93 +413,84 @@ cscli decisions add --scope username --value foobar Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - var err error - alerts := models.AddAlertsRequest{} - origin := types.CscliOrigin - capacity := int32(0) - leakSpeed := "0" - eventsCount := int32(1) - empty := "" - simulated := false - startAt := time.Now().UTC().Format(time.RFC3339) - stopAt := time.Now().UTC().Format(time.RFC3339) - createdAt := time.Now().UTC().Format(time.RFC3339) - - /*take care of shorthand options*/ - if err = 
manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { - return err - } + return cli.add(addIP, addRange, addDuration, addValue, addScope, addReason, addType) + }, + } - if addIP != "" { - addValue = addIP - addScope = types.Ip - } else if addRange != "" { - addValue = addRange - addScope = types.Range - } else if addValue == "" { - printHelp(cmd) - return errors.New("missing arguments, a value is required (--ip, --range or --scope and --value)") - } + flags := cmd.Flags() + flags.SortFlags = false + flags.StringVarP(&addIP, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(&addRange, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVarP(&addDuration, "duration", "d", "4h", "Decision duration (ie. 1h,4h,30m)") + flags.StringVarP(&addValue, "value", "v", "", "The value (ie. --scope username --value foobar)") + flags.StringVar(&addScope, "scope", types.Ip, "Decision scope (ie. ip,range,username)") + flags.StringVarP(&addReason, "reason", "R", "", "Decision reason (ie. scenario-name)") + flags.StringVarP(&addType, "type", "t", "ban", "Decision type (ie. 
ban,captcha,throttle)") - if addReason == "" { - addReason = fmt.Sprintf("manual '%s' from '%s'", addType, cli.cfg().API.Client.Credentials.Login) - } - decision := models.Decision{ - Duration: &addDuration, - Scope: &addScope, - Value: &addValue, - Type: &addType, - Scenario: &addReason, - Origin: &origin, - } - alert := models.Alert{ - Capacity: &capacity, - Decisions: []*models.Decision{&decision}, - Events: []*models.Event{}, - EventsCount: &eventsCount, - Leakspeed: &leakSpeed, - Message: &addReason, - ScenarioHash: &empty, - Scenario: &addReason, - ScenarioVersion: &empty, - Simulated: &simulated, - // setting empty scope/value broke plugins, and it didn't seem to be needed anymore w/ latest papi changes - Source: &models.Source{ - AsName: empty, - AsNumber: empty, - Cn: empty, - IP: addValue, - Range: "", - Scope: &addScope, - Value: &addValue, - }, - StartAt: &startAt, - StopAt: &stopAt, - CreatedAt: createdAt, - } - alerts = append(alerts, &alert) + return cmd +} - _, _, err = Client.Alerts.Add(context.Background(), alerts) - if err != nil { - return err - } +func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDecisionID string, contained *bool) error { + var err error - log.Info("Decision successfully added") + /*take care of shorthand options*/ + if err = manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil { + return err + } - return nil - }, + if *delFilter.ScopeEquals == "" { + delFilter.ScopeEquals = nil } - cmd.Flags().SortFlags = false - cmd.Flags().StringVarP(&addIP, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(&addRange, "range", "r", "", "Range source ip (shorthand for --scope range --value )") - cmd.Flags().StringVarP(&addDuration, "duration", "d", "4h", "Decision duration (ie. 1h,4h,30m)") - cmd.Flags().StringVarP(&addValue, "value", "v", "", "The value (ie. 
--scope username --value foobar)") - cmd.Flags().StringVar(&addScope, "scope", types.Ip, "Decision scope (ie. ip,range,username)") - cmd.Flags().StringVarP(&addReason, "reason", "R", "", "Decision reason (ie. scenario-name)") - cmd.Flags().StringVarP(&addType, "type", "t", "ban", "Decision type (ie. ban,captcha,throttle)") + if *delFilter.OriginEquals == "" { + delFilter.OriginEquals = nil + } - return cmd + if *delFilter.ValueEquals == "" { + delFilter.ValueEquals = nil + } + + if *delFilter.ScenarioEquals == "" { + delFilter.ScenarioEquals = nil + } + + if *delFilter.TypeEquals == "" { + delFilter.TypeEquals = nil + } + + if *delFilter.IPEquals == "" { + delFilter.IPEquals = nil + } + + if *delFilter.RangeEquals == "" { + delFilter.RangeEquals = nil + } + + if contained != nil && *contained { + delFilter.Contains = new(bool) + } + + var decisions *models.DeleteDecisionResponse + + if delDecisionID == "" { + decisions, _, err = cli.client.Decisions.Delete(context.Background(), delFilter) + if err != nil { + return fmt.Errorf("unable to delete decisions: %w", err) + } + } else { + if _, err = strconv.Atoi(delDecisionID); err != nil { + return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) + } + + decisions, _, err = cli.client.Decisions.DeleteOne(context.Background(), delDecisionID) + if err != nil { + return fmt.Errorf("unable to delete decision: %w", err) + } + } + + log.Infof("%s decision(s) deleted", decisions.NbDeleted) + + return nil } func (cli *cliDecisions) newDeleteCmd() *cobra.Command { @@ -462,76 +530,29 @@ cscli decisions delete --origin lists --scenario list_name *delFilter.TypeEquals == "" && *delFilter.IPEquals == "" && *delFilter.RangeEquals == "" && *delFilter.ScenarioEquals == "" && *delFilter.OriginEquals == "" && delDecisionID == "" { - cmd.Usage() + _ = cmd.Usage() return errors.New("at least one filter or --all must be specified") } return nil }, RunE: func(_ *cobra.Command, _ []string) error { - var err error - var decisions 
*models.DeleteDecisionResponse - - /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil { - return err - } - if *delFilter.ScopeEquals == "" { - delFilter.ScopeEquals = nil - } - if *delFilter.OriginEquals == "" { - delFilter.OriginEquals = nil - } - if *delFilter.ValueEquals == "" { - delFilter.ValueEquals = nil - } - if *delFilter.ScenarioEquals == "" { - delFilter.ScenarioEquals = nil - } - if *delFilter.TypeEquals == "" { - delFilter.TypeEquals = nil - } - if *delFilter.IPEquals == "" { - delFilter.IPEquals = nil - } - if *delFilter.RangeEquals == "" { - delFilter.RangeEquals = nil - } - if contained != nil && *contained { - delFilter.Contains = new(bool) - } - - if delDecisionID == "" { - decisions, _, err = Client.Decisions.Delete(context.Background(), delFilter) - if err != nil { - return fmt.Errorf("unable to delete decisions: %w", err) - } - } else { - if _, err = strconv.Atoi(delDecisionID); err != nil { - return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) - } - decisions, _, err = Client.Decisions.DeleteOne(context.Background(), delDecisionID) - if err != nil { - return fmt.Errorf("unable to delete decision: %w", err) - } - } - log.Infof("%s decision(s) deleted", decisions.NbDeleted) - - return nil + return cli.delete(delFilter, delDecisionID, contained) }, } - cmd.Flags().SortFlags = false - cmd.Flags().StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - cmd.Flags().StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") - cmd.Flags().StringVarP(delFilter.TypeEquals, "type", "t", "", "the decision type (ie. 
ban,captcha)") - cmd.Flags().StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - cmd.Flags().StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario name (ie. crowdsecurity/ssh-bf)") - cmd.Flags().StringVar(delFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) - - cmd.Flags().StringVar(&delDecisionID, "id", "", "decision id") - cmd.Flags().BoolVar(&delDecisionAll, "all", false, "delete all decisions") - cmd.Flags().BoolVar(contained, "contained", false, "query decisions contained by range") + flags := cmd.Flags() + flags.SortFlags = false + flags.StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVarP(delFilter.TypeEquals, "type", "t", "", "the decision type (ie. ban,captcha)") + flags.StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario name (ie. 
crowdsecurity/ssh-bf)") + flags.StringVar(delFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + + flags.StringVar(&delDecisionID, "id", "", "decision id") + flags.BoolVar(&delDecisionAll, "all", false, "delete all decisions") + flags.BoolVar(contained, "contained", false, "query decisions contained by range") return cmd } diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/decisions_import.go index 1df9d1ae4a1..338c1b7fb3e 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/decisions_import.go @@ -122,7 +122,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } var ( - content []byte + content []byte fin *os.File ) @@ -224,7 +224,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { Decisions: chunk, } - _, _, err = Client.Alerts.Add(context.Background(), models.AddAlertsRequest{&importAlert}) + _, _, err = cli.client.Alerts.Add(context.Background(), models.AddAlertsRequest{&importAlert}) if err != nil { return err } @@ -273,7 +273,7 @@ $ echo "1.2.3.4" | cscli decisions import -i - --format values flags.Int("batch", 0, "Split import in batches of N decisions") flags.String("format", "", "Input format: 'json', 'csv' or 'values' (each line is a value, no headers)") - cmd.MarkFlagRequired("input") + _ = cmd.MarkFlagRequired("input") return cmd } diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index fcef137e424..c2c92140fd6 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -50,7 +50,7 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return fmt.Errorf("failed to get scenarios: %w", err) } - Client, err = apiclient.NewDefaultClient(apiURL, + client, err := apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, cwversion.UserAgent(), nil) @@ -66,7 +66,7 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login 
string, password stri Scenarios: scenarios, } - _, _, err = Client.Auth.AuthenticateWatcher(context.Background(), t) + _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { return err } @@ -322,7 +322,8 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user flags := cmd.Flags() flags.StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send") flags.StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key") - cmd.MarkFlagRequired("value") + + _ = cmd.MarkFlagRequired("value") return cmd } diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index f0213a8a236..be6e905356e 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -31,7 +31,6 @@ teardown() { @test "'decisions add' requires parameters" { rune -1 cscli decisions add - assert_line "Usage:" assert_stderr --partial "missing arguments, a value is required (--ip, --range or --scope and --value)" rune -1 cscli decisions add -o json From 1378e16578dfae6b900823e6c89a62e01066ec47 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Thu, 6 Jun 2024 11:19:05 +0200 Subject: [PATCH 166/581] fix test 01_cscli by avoiding discrepancies in cscli explain (#3059) * fix hub installed items in pacakge tests --- test/bin/remove-all-hub-items | 20 ++++++++++++++++++++ test/lib/config/config-global | 1 + 2 files changed, 21 insertions(+) create mode 100755 test/bin/remove-all-hub-items diff --git a/test/bin/remove-all-hub-items b/test/bin/remove-all-hub-items new file mode 100755 index 00000000000..981602b775a --- /dev/null +++ b/test/bin/remove-all-hub-items @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -eu + +# shellcheck disable=SC1007 +THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +# shellcheck disable=SC1091 +. "${THIS_DIR}/../.environment.sh" + +# pre-download everything but don't install anything + +echo "Pre-downloading Hub content..." 
+ +types=$("$CSCLI" hub types -o raw) + +for itemtype in $types; do + "$CSCLI" "$itemtype" remove --all --force +done + +echo " done." diff --git a/test/lib/config/config-global b/test/lib/config/config-global index 0caf0591f7d..f77fb3e27bc 100755 --- a/test/lib/config/config-global +++ b/test/lib/config/config-global @@ -71,6 +71,7 @@ make_init_data() { ./instance-db setup ./bin/preload-hub-items + ./bin/remove-all-hub-items # when installed packages are always using sqlite, so no need to regenerate # local credz for sqlite From b38b959f71daea46bd6e8037e4c73a6f4df7919d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Jun 2024 14:51:49 +0200 Subject: [PATCH 167/581] enable linter "stylecheck" (#3060) * enable linter "stylecheck" * stylecheck: omit redundant types --- .golangci.yml | 11 ++++++++++- pkg/exprhelpers/debugger.go | 9 ++++----- pkg/exprhelpers/helpers.go | 4 ++-- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index fd237c95bcc..5565ee7466e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -111,6 +111,15 @@ linters-settings: - pkg: "gopkg.in/yaml.v2" desc: "yaml.v2 is deprecated for new code in favor of yaml.v3" + stylecheck: + checks: + - all + - -ST1003 # should not use underscores in Go names; ... + - -ST1005 # error strings should not be capitalized + - -ST1012 # error var ... should have name of the form ErrFoo + - -ST1016 # methods on the same type should have the same receiver name + - -ST1022 # comment on exported var ... should be of the form ... + revive: ignore-generated-header: true severity: error @@ -289,6 +298,7 @@ linters: # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. # - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed. # - staticcheck # (megacheck): It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. 
The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint. + # - stylecheck # Stylecheck is a replacement for golint # - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 # - testableexamples # linter checks if examples are testable (have an expected output) # - testifylint # Checks usage of github.com/stretchr/testify. @@ -358,7 +368,6 @@ linters: - forbidigo # Forbids identifiers - gochecknoglobals # Check that no global variables exist. - goconst # Finds repeated strings that could be replaced by a constant - - stylecheck # Stylecheck is a replacement for golint - tagliatelle # Checks the struct tags. - varnamelen # checks that the length of a variable's name matches its scope diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 432bb737eae..5ab2fc48c70 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -379,13 +379,12 @@ func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry // TBD: Based on the level of the logger (ie. trace vs debug) we could decide to add more low level instructions (pop, push, etc.) 
func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]OpOutput, any, error) { - - var outputs []OpOutput = []OpOutput{} - var buf strings.Builder - var erp ExprRuntimeDebug = ExprRuntimeDebug{ + outputs := []OpOutput{} + erp := ExprRuntimeDebug{ Logger: logger, } - var debugErr chan error = make(chan error) + debugErr := make(chan error) + var buf strings.Builder vm := vm.Debug() done := false program.Opcodes(&buf) diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index e0a3a523c3e..5c041aa2886 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -243,8 +243,8 @@ func Distinct(params ...any) (any, error) { return []interface{}{}, nil } - var exists map[any]bool = make(map[any]bool) - var ret []interface{} = make([]interface{}, 0) + exists := make(map[any]bool) + ret := make([]interface{}, 0) for _, val := range array { if _, ok := exists[val]; !ok { From 7d75290a0f7deac6eca11c93a87631b2265a2ce4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Jun 2024 14:53:30 +0200 Subject: [PATCH 168/581] Use cmp.Or from go 1.22 (#3058) --- cmd/crowdsec-cli/itemcli.go | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 66a3f4fc051..367e22f2962 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -1,6 +1,7 @@ package main import ( + "cmp" "errors" "fmt" "os" @@ -13,8 +14,6 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/crowdsecurity/go-cs-lib/coalesce" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -44,8 +43,8 @@ type cliItem struct { func (cli cliItem) NewCommand() *cobra.Command { cmd := &cobra.Command{ - Use: coalesce.String(cli.help.use, cli.name+" [item]..."), - Short: coalesce.String(cli.help.short, "Manage hub "+cli.name), + 
Use: cmp.Or(cli.help.use, cli.name+" [item]..."), + Short: cmp.Or(cli.help.short, "Manage hub "+cli.name), Long: cli.help.long, Example: cli.help.example, Args: cobra.MinimumNArgs(1), @@ -105,9 +104,9 @@ func (cli cliItem) newInstallCmd() *cobra.Command { ) cmd := &cobra.Command{ - Use: coalesce.String(cli.installHelp.use, "install [item]..."), - Short: coalesce.String(cli.installHelp.short, "Install given "+cli.oneOrMore), - Long: coalesce.String(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), + Use: cmp.Or(cli.installHelp.use, "install [item]..."), + Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), + Long: cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), Example: cli.installHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, @@ -231,9 +230,9 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { ) cmd := &cobra.Command{ - Use: coalesce.String(cli.removeHelp.use, "remove [item]..."), - Short: coalesce.String(cli.removeHelp.short, "Remove given "+cli.oneOrMore), - Long: coalesce.String(cli.removeHelp.long, "Remove one or more "+cli.name), + Use: cmp.Or(cli.removeHelp.use, "remove [item]..."), + Short: cmp.Or(cli.removeHelp.short, "Remove given "+cli.oneOrMore), + Long: cmp.Or(cli.removeHelp.long, "Remove one or more "+cli.name), Example: cli.removeHelp.example, Aliases: []string{"delete"}, DisableAutoGenTag: true, @@ -327,9 +326,9 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { ) cmd := &cobra.Command{ - Use: coalesce.String(cli.upgradeHelp.use, "upgrade [item]..."), - Short: coalesce.String(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), - Long: coalesce.String(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), + Use: cmp.Or(cli.upgradeHelp.use, "upgrade [item]..."), + Short: cmp.Or(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), + Long: 
cmp.Or(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), Example: cli.upgradeHelp.example, DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { @@ -404,9 +403,9 @@ func (cli cliItem) newInspectCmd() *cobra.Command { ) cmd := &cobra.Command{ - Use: coalesce.String(cli.inspectHelp.use, "inspect [item]..."), - Short: coalesce.String(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), - Long: coalesce.String(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), + Use: cmp.Or(cli.inspectHelp.use, "inspect [item]..."), + Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), + Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), Example: cli.inspectHelp.example, Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, @@ -449,9 +448,9 @@ func (cli cliItem) newListCmd() *cobra.Command { var all bool cmd := &cobra.Command{ - Use: coalesce.String(cli.listHelp.use, "list [item... | -a]"), - Short: coalesce.String(cli.listHelp.short, "List "+cli.oneOrMore), - Long: coalesce.String(cli.listHelp.long, "List of installed/available/specified "+cli.name), + Use: cmp.Or(cli.listHelp.use, "list [item... 
| -a]"), + Short: cmp.Or(cli.listHelp.short, "List "+cli.oneOrMore), + Long: cmp.Or(cli.listHelp.long, "List of installed/available/specified "+cli.name), Example: cli.listHelp.example, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { From 6ef2396c91110bf52e7e2a1900993ea189eaf188 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Jun 2024 16:03:32 +0200 Subject: [PATCH 169/581] enable linter: revive (blank-imports) (#3062) * lint: revive (blank-imports) require a comment to justify blank imports * typo --- .golangci.yml | 2 -- pkg/acquisition/modules/appsec/appsec_runner.go | 1 + pkg/database/database.go | 1 + pkg/parser/stage.go | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 5565ee7466e..ba54bfbbd89 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -127,8 +127,6 @@ linters-settings: rules: - name: add-constant disabled: true - - name: blank-imports - disabled: true - name: cognitive-complexity disabled: true - name: comment-spacings diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index cc7264aa2c8..c015db74d82 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -14,6 +14,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + // load body processors via init() _ "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec/bodyprocessors" ) diff --git a/pkg/database/database.go b/pkg/database/database.go index 357077e7d6f..fc89aa5cd4d 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -8,6 +8,7 @@ import ( "os" entsql "entgo.io/ent/dialect/sql" + // load database backends _ "github.com/go-sql-driver/mysql" _ "github.com/jackc/pgx/v4/stdlib" _ "github.com/mattn/go-sqlite3" diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index 1eac2b83ede..fe538023b61 100644 --- 
a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + // enable profiling _ "net/http/pprof" "os" "sort" From 550d422d2399a81615e7073127ae813ca9a3b7fd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 12:01:08 +0200 Subject: [PATCH 170/581] lint revive(deep-exit): refactor cmd/crowdsec (#3063) * lint revive(deep-exit): refactor cmd/crowdsec * lint --- .golangci.yml | 4 ++ cmd/crowdsec/crowdsec.go | 103 ++++----------------------------------- cmd/crowdsec/dump.go | 56 +++++++++++++++++++++ cmd/crowdsec/serve.go | 3 +- 4 files changed, 72 insertions(+), 94 deletions(-) create mode 100644 cmd/crowdsec/dump.go diff --git a/.golangci.yml b/.golangci.yml index ba54bfbbd89..c096ef5f88a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -497,6 +497,10 @@ issues: - revive path: pkg/metabase/metabase.go + - linters: + - revive + path: pkg/metabase/container.go + - linters: - revive path: cmd/crowdsec-cli/copyfile.go diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index d226e3a5796..0fd269b3537 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -4,12 +4,10 @@ import ( "context" "fmt" "os" - "path/filepath" "sync" "time" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/trace" @@ -34,9 +32,8 @@ func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, [] } err = exprhelpers.GeoIPInit(hub.GetDataDir()) - if err != nil { - //GeoIP databases are not mandatory, do not make crowdsec fail if they are not present + // GeoIP databases are not mandatory, do not make crowdsec fail if they are not present log.Warnf("unable to initialize GeoIP: %s", err) } @@ -79,7 +76,6 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H if err := runParse(inputLineChan, inputEventChan, *parsers.Ctx, parsers.Nodes); err != nil { // this error will never happen as parser.Parse is not 
able to return errors - log.Fatalf("starting parse error : %s", err) return err } @@ -96,7 +92,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H bucketsTomb.Go(func() error { bucketWg.Add(1) - /*restore previous state as well if present*/ + // restore previous state as well if present if cConfig.Crowdsec.BucketStateFile != "" { log.Warningf("Restoring buckets state from %s", cConfig.Crowdsec.BucketStateFile) @@ -109,12 +105,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H bucketsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runPour") - if err := runPour(inputEventChan, holders, buckets, cConfig); err != nil { - log.Fatalf("starting pour error : %s", err) - return err - } - - return nil + return runPour(inputEventChan, holders, buckets, cConfig) }) } bucketWg.Done() @@ -140,12 +131,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H outputsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runOutput") - if err := runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient); err != nil { - log.Fatalf("starting outputs error : %s", err) - return err - } - - return nil + return runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient) }) } outputWg.Done() @@ -190,7 +176,7 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub } }() - /*we should stop in two cases : + /* we should stop in two cases : - crowdsecTomb has been Killed() : it might be shutdown or reload, so stop - acquisTomb is dead, it means that we were in "cat" mode and files are done reading, quit */ @@ -198,15 +184,15 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub log.Debugf("Shutting down crowdsec routines") if err := ShutdownCrowdsecRoutines(); err != nil { - log.Fatalf("unable to shutdown crowdsec routines: %s", err) + return 
fmt.Errorf("unable to shutdown crowdsec routines: %w", err) } log.Debugf("everything is dead, return crowdsecTomb") if dumpStates { - dumpParserState() - dumpOverflowState() - dumpBucketsPour() + if err := dumpAllStates(); err != nil { + log.Fatal(err) + } os.Exit(0) } @@ -214,80 +200,11 @@ func serveCrowdsec(parsers *parser.Parsers, cConfig *csconfig.Config, hub *cwhub }) } -func dumpBucketsPour() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) - if err != nil { - log.Fatalf("open: %s", err) - } - - out, err := yaml.Marshal(leaky.BucketPourCache) - if err != nil { - log.Fatalf("marshal: %s", err) - } - - b, err := fd.Write(out) - if err != nil { - log.Fatalf("write: %s", err) - } - - log.Tracef("wrote %d bytes", b) - - if err := fd.Close(); err != nil { - log.Fatalf(" close: %s", err) - } -} - -func dumpParserState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "parser-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) - if err != nil { - log.Fatalf("open: %s", err) - } - - out, err := yaml.Marshal(parser.StageParseCache) - if err != nil { - log.Fatalf("marshal: %s", err) - } - - b, err := fd.Write(out) - if err != nil { - log.Fatalf("write: %s", err) - } - - log.Tracef("wrote %d bytes", b) - - if err := fd.Close(); err != nil { - log.Fatalf(" close: %s", err) - } -} - -func dumpOverflowState() { - fd, err := os.OpenFile(filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o666) - if err != nil { - log.Fatalf("open: %s", err) - } - - out, err := yaml.Marshal(bucketOverflows) - if err != nil { - log.Fatalf("marshal: %s", err) - } - - b, err := fd.Write(out) - if err != nil { - log.Fatalf("write: %s", err) - } - - log.Tracef("wrote %d bytes", b) - - if err := fd.Close(); err != nil { - log.Fatalf(" close: %s", err) - } -} - func waitOnTomb() { for { select { case <-acquisTomb.Dead(): - /*if it's acquisition dying it means that we 
were in "cat" mode. + /* if it's acquisition dying it means that we were in "cat" mode. while shutting down, we need to give time for all buckets to process in flight data*/ log.Info("Acquisition is finished, shutting down") /* diff --git a/cmd/crowdsec/dump.go b/cmd/crowdsec/dump.go new file mode 100644 index 00000000000..33c65878b11 --- /dev/null +++ b/cmd/crowdsec/dump.go @@ -0,0 +1,56 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + leaky "github.com/crowdsecurity/crowdsec/pkg/leakybucket" + "github.com/crowdsecurity/crowdsec/pkg/parser" +) + +func dumpAllStates() error { + log.Debugf("Dumping parser+bucket states to %s", parser.DumpFolder) + + if err := dumpState( + filepath.Join(parser.DumpFolder, "parser-dump.yaml"), + parser.StageParseCache, + ); err != nil { + return fmt.Errorf("while dumping parser state: %w", err) + } + + if err := dumpState( + filepath.Join(parser.DumpFolder, "bucket-dump.yaml"), + bucketOverflows, + ); err != nil { + return fmt.Errorf("while dumping bucket overflow state: %w", err) + } + + if err := dumpState( + filepath.Join(parser.DumpFolder, "bucketpour-dump.yaml"), + leaky.BucketPourCache, + ); err != nil { + return fmt.Errorf("while dumping bucket pour state: %w", err) + } + + return nil +} + +func dumpState(destPath string, obj any) error { + dir := filepath.Dir(destPath) + + err := os.MkdirAll(dir, 0o755) + if err != nil { + return err + } + + out, err := yaml.Marshal(obj) + if err != nil { + return err + } + + return os.WriteFile(destPath, out, 0o666) +} diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 497215d74a1..da79e50c427 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -390,7 +390,8 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { if flags.TestMode { log.Infof("Configuration test done") pluginBroker.Kill() - os.Exit(0) + + return nil } if cConfig.Common != nil && cConfig.Common.Daemonize { From 
d3974894fc8976f385ae4768420e739a3ebf82ee Mon Sep 17 00:00:00 2001 From: MazzMa Date: Fri, 7 Jun 2024 15:58:16 +0200 Subject: [PATCH 171/581] add: go reference badge (#3066) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 6428c3a8053..a900f0ee514 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,7 @@ +Go Reference From 72b6da99258afb781430642d0c72c294c6c9888c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 16:53:23 +0200 Subject: [PATCH 172/581] enable linter: revive (early-return) (#3051) * enable linter: revive (early-return) * lint --- .golangci.yml | 4 +--- pkg/acquisition/modules/s3/s3.go | 22 +++++++++---------- pkg/apiserver/papi.go | 17 ++++++++------- pkg/appsec/appsec_rule/modsecurity.go | 31 +++++++++++++-------------- pkg/appsec/coraza_logger.go | 11 +++++----- pkg/csplugin/hclog_adapter.go | 5 ++--- pkg/exprhelpers/crowdsec_cti.go | 7 +++--- pkg/parser/parsing_test.go | 27 +++++++++++------------ 8 files changed, 58 insertions(+), 66 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c096ef5f88a..1ec386183e1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -102,7 +102,6 @@ linters-settings: - "!**/pkg/csplugin/broker.go" - "!**/pkg/leakybucket/buckets_test.go" - "!**/pkg/leakybucket/manager_load.go" - - "!**/pkg/metabase/metabase.go" - "!**/pkg/parser/node.go" - "!**/pkg/parser/node_test.go" - "!**/pkg/parser/parsing_test.go" @@ -139,8 +138,6 @@ linters-settings: disabled: true - name: defer disabled: true - - name: early-return - disabled: true - name: empty-block disabled: true - name: empty-lines @@ -382,6 +379,7 @@ issues: exclude-dirs: - pkg/time/rate + - pkg/metabase exclude-files: - pkg/yamlpatch/merge.go diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 98b2e685cd0..d1bf881079a 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -131,7 +131,6 @@ func (s *S3Source) 
newS3Client() error { } sess, err := session.NewSessionWithOptions(options) - if err != nil { return fmt.Errorf("failed to create aws session: %w", err) } @@ -146,7 +145,7 @@ func (s *S3Source) newS3Client() error { s.s3Client = s3.New(sess, config) if s.s3Client == nil { - return fmt.Errorf("failed to create S3 client") + return errors.New("failed to create S3 client") } return nil @@ -167,7 +166,7 @@ func (s *S3Source) newSQSClient() error { } if sess == nil { - return fmt.Errorf("failed to create aws session") + return errors.New("failed to create aws session") } config := aws.NewConfig() if s.Config.AwsRegion != "" { @@ -178,7 +177,7 @@ func (s *S3Source) newSQSClient() error { } s.sqsClient = sqs.New(sess, config) if s.sqsClient == nil { - return fmt.Errorf("failed to create SQS client") + return errors.New("failed to create SQS client") } return nil } @@ -251,16 +250,15 @@ func (s *S3Source) listPoll() error { continue } for i := len(bucketObjects) - 1; i >= 0; i-- { - if bucketObjects[i].LastModified.After(lastObjectDate) { - newObject = true - logger.Debugf("Found new object %s", *bucketObjects[i].Key) - s.readerChan <- S3Object{ - Bucket: s.Config.BucketName, - Key: *bucketObjects[i].Key, - } - } else { + if !bucketObjects[i].LastModified.After(lastObjectDate) { break } + newObject = true + logger.Debugf("Found new object %s", *bucketObjects[i].Key) + s.readerChan <- S3Object{ + Bucket: s.Config.BucketName, + Key: *bucketObjects[i].Key, + } } if newObject { lastObjectDate = *bucketObjects[len(bucketObjects)-1].LastModified diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 169f1441df4..0d0fd0ecd42 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -141,17 +141,18 @@ func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { return errors.New("no source user in header message, skipping") } - if operationFunc, ok := operationMap[message.Header.OperationType]; ok { - logger.Debugf("Calling operation '%s'", 
message.Header.OperationType) - - err := operationFunc(message, p, sync) - if err != nil { - return fmt.Errorf("'%s %s failed: %w", message.Header.OperationType, message.Header.OperationCmd, err) - } - } else { + operationFunc, ok := operationMap[message.Header.OperationType] + if !ok { return fmt.Errorf("operation '%s' unknown, continue", message.Header.OperationType) } + logger.Debugf("Calling operation '%s'", message.Header.OperationType) + + err := operationFunc(message, p, sync) + if err != nil { + return fmt.Errorf("'%s %s failed: %w", message.Header.OperationType, message.Header.OperationCmd, err) + } + return nil } diff --git a/pkg/appsec/appsec_rule/modsecurity.go b/pkg/appsec/appsec_rule/modsecurity.go index 03a840cf436..8f58a9589ca 100644 --- a/pkg/appsec/appsec_rule/modsecurity.go +++ b/pkg/appsec/appsec_rule/modsecurity.go @@ -1,6 +1,7 @@ package appsec_rule import ( + "errors" "fmt" "hash/fnv" "strings" @@ -67,9 +68,7 @@ var bodyTypeMatch map[string]string = map[string]string{ } func (m *ModsecurityRule) Build(rule *CustomRule, appsecRuleName string) (string, []uint32, error) { - rules, err := m.buildRules(rule, appsecRuleName, false, 0, 0) - if err != nil { return "", nil, err } @@ -99,7 +98,7 @@ func (m *ModsecurityRule) buildRules(rule *CustomRule, appsecRuleName string, an ret := make([]string, 0) if len(rule.And) != 0 && len(rule.Or) != 0 { - return nil, fmt.Errorf("cannot have both 'and' and 'or' in the same rule") + return nil, errors.New("cannot have both 'and' and 'or' in the same rule") } if rule.And != nil { @@ -166,15 +165,15 @@ func (m *ModsecurityRule) buildRules(rule *CustomRule, appsecRuleName string, an r.WriteByte(' ') if rule.Match.Type != "" { - if match, ok := matchMap[rule.Match.Type]; ok { - prefix := "" - if rule.Match.Not { - prefix = "!" 
- } - r.WriteString(fmt.Sprintf(`"%s%s %s"`, prefix, match, rule.Match.Value)) - } else { + match, ok := matchMap[rule.Match.Type] + if !ok { return nil, fmt.Errorf("unknown match type '%s'", rule.Match.Type) } + prefix := "" + if rule.Match.Not { + prefix = "!" + } + r.WriteString(fmt.Sprintf(`"%s%s %s"`, prefix, match, rule.Match.Value)) } //Should phase:2 be configurable? @@ -186,20 +185,20 @@ func (m *ModsecurityRule) buildRules(rule *CustomRule, appsecRuleName string, an continue } r.WriteByte(',') - if mappedTransform, ok := transformMap[transform]; ok { - r.WriteString(mappedTransform) - } else { + mappedTransform, ok := transformMap[transform] + if !ok { return nil, fmt.Errorf("unknown transform '%s'", transform) } + r.WriteString(mappedTransform) } } if rule.BodyType != "" { - if mappedBodyType, ok := bodyTypeMatch[rule.BodyType]; ok { - r.WriteString(fmt.Sprintf(",ctl:requestBodyProcessor=%s", mappedBodyType)) - } else { + mappedBodyType, ok := bodyTypeMatch[rule.BodyType] + if !ok { return nil, fmt.Errorf("unknown body type '%s'", rule.BodyType) } + r.WriteString(fmt.Sprintf(",ctl:requestBodyProcessor=%s", mappedBodyType)) } if and { diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index 7229f038b92..2b7f85d4e46 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -90,14 +90,13 @@ func (e *crzLogEvent) Bool(key string, b bool) dbg.Event { func (e *crzLogEvent) Int(key string, i int) dbg.Event { if e.muted { - // this allows us to have per-rule debug logging - if key == "rule_id" && GetRuleDebug(i) { - e.muted = false - e.fields = map[string]interface{}{} - e.level = log.DebugLevel - } else { + if key != "rule_id" || !GetRuleDebug(i) { return e } + // this allows us to have per-rule debug logging + e.muted = false + e.fields = map[string]interface{}{} + e.level = log.DebugLevel } e.fields[key] = i diff --git a/pkg/csplugin/hclog_adapter.go b/pkg/csplugin/hclog_adapter.go index 58190684ebc..44a22463709 100644 
--- a/pkg/csplugin/hclog_adapter.go +++ b/pkg/csplugin/hclog_adapter.go @@ -221,11 +221,10 @@ func merge(dst map[string]interface{}, k, v interface{}) { func safeString(str fmt.Stringer) (s string) { defer func() { if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { + if v := reflect.ValueOf(str); v.Kind() != reflect.Ptr || !v.IsNil() { panic(panicVal) } + s = "NULL" } }() diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 59a239722e3..268979ee243 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -86,12 +86,11 @@ func CrowdsecCTI(params ...any) (any, error) { if val, err := CTICache.Get(ip); err == nil && val != nil { ctiClient.Logger.Debugf("cti cache fetch for %s", ip) ret, ok := val.(*cticlient.SmokeItem) - if !ok { - ctiClient.Logger.Warningf("CrowdsecCTI: invalid type in cache, removing") - CTICache.Remove(ip) - } else { + if ok { return ret, nil } + ctiClient.Logger.Warningf("CrowdsecCTI: invalid type in cache, removing") + CTICache.Remove(ip) } if !CTIBackOffUntil.IsZero() && time.Now().Before(CTIBackOffUntil) { diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 3193631f4dd..f142e1bc29a 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -278,26 +278,25 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ { for expKey, expVal := range expectMaps[mapIdx] { - if outVal, ok := outMaps[mapIdx][expKey]; ok { - if outVal == expVal { //ok entry - if debug { - retInfo = append(retInfo, fmt.Sprintf("ok %s[%s] %s == %s", outLabels[mapIdx], expKey, expVal, outVal)) - } - valid = true - } else { //mismatch entry - if debug { - retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal)) - } - valid = false - goto checkFinished - } - } else 
{ //missing entry + outVal, ok := outMaps[mapIdx][expKey] + if !ok { if debug { retInfo = append(retInfo, fmt.Sprintf("missing entry %s[%s]", outLabels[mapIdx], expKey)) } valid = false goto checkFinished } + if outVal != expVal { //ok entry + if debug { + retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal)) + } + valid = false + goto checkFinished + } + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok %s[%s] %s == %s", outLabels[mapIdx], expKey, expVal, outVal)) + } + valid = true } } checkFinished: From cad760e605b4f6140b4cdee8665a0951c5b212cd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 16:56:02 +0200 Subject: [PATCH 173/581] pkg/cwhub: use explicit context for version check, index update (#3064) * cscli refactor: explicit context for hub (version lookup) * change order of version checks * pkg/cwhub: split NewHub() + Update() + Load() * cscli refactor: explicit context for hub (index update) * updated pkg/cwhub docs * lint --- cmd/crowdsec-cli/config_restore.go | 13 +++--- cmd/crowdsec-cli/hub.go | 27 ++++++++---- cmd/crowdsec-cli/itemcli.go | 25 +++++------ cmd/crowdsec-cli/require/branch.go | 66 ++++++++++++++++++++++++----- cmd/crowdsec-cli/require/require.go | 13 ++++-- cmd/crowdsec-cli/setup.go | 2 +- cmd/crowdsec/serve.go | 16 +++++-- pkg/cwhub/cwhub_test.go | 11 ++++- pkg/cwhub/doc.go | 29 ++++++++++--- pkg/cwhub/hub.go | 41 ++++++++++-------- pkg/cwhub/hub_test.go | 15 +++++-- pkg/cwhub/itemupgrade_test.go | 38 +++++++++++++---- pkg/cwhub/remote.go | 4 +- pkg/cwversion/version.go | 28 +----------- pkg/hubtest/hubtest.go | 16 +++++-- pkg/hubtest/hubtest_item.go | 8 +++- pkg/leakybucket/buckets_test.go | 14 ++++-- test/bats/20_hub_items.bats | 2 +- 18 files changed, 244 insertions(+), 124 deletions(-) diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index ee7179b73c5..6147a7518c2 100644 --- 
a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "fmt" "os" @@ -13,10 +14,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (cli *cliConfig) restoreHub(dirPath string) error { +func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(cfg), nil) + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), nil) if err != nil { return err } @@ -126,7 +127,7 @@ func (cli *cliConfig) restoreHub(dirPath string) error { - Tainted/local/out-of-date scenarios, parsers, postoverflows and collections - Acquisition files (acquis.yaml, acquis.d/*.yaml) */ -func (cli *cliConfig) restore(dirPath string) error { +func (cli *cliConfig) restore(ctx context.Context, dirPath string) error { var err error cfg := cli.cfg() @@ -237,7 +238,7 @@ func (cli *cliConfig) restore(dirPath string) error { } } - if err = cli.restoreHub(dirPath); err != nil { + if err = cli.restoreHub(ctx, dirPath); err != nil { return fmt.Errorf("failed to restore hub config: %w", err) } @@ -258,10 +259,10 @@ func (cli *cliConfig) newRestoreCmd() *cobra.Command { - Backup of API credentials (local API and online API)`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { dirPath := args[0] - if err := cli.restore(dirPath); err != nil { + if err := cli.restore(cmd.Context(), dirPath); err != nil { return fmt.Errorf("failed to restore config from %s: %w", dirPath, err) } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 318dd018e06..cf9f7f282a4 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -1,6 +1,7 @@ package main import ( + "context" "encoding/json" "fmt" @@ -98,16 +99,24 @@ func (cli *cliHub) newListCmd() *cobra.Command { return cmd } -func (cli *cliHub) update() error 
{ +func (cli *cliHub) update(ctx context.Context) error { local := cli.cfg().Hub - remote := require.RemoteHub(cli.cfg()) + remote := require.RemoteHub(ctx, cli.cfg()) // don't use require.Hub because if there is no index file, it would fail - hub, err := cwhub.NewHub(local, remote, true, log.StandardLogger()) + hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) if err != nil { + return err + } + + if err := hub.Update(ctx); err != nil { return fmt.Errorf("failed to update hub: %w", err) } + if err := hub.Load(); err != nil { + return fmt.Errorf("failed to load hub: %w", err) + } + for _, v := range hub.Warnings { log.Info(v) } @@ -124,16 +133,16 @@ Fetches the .index.json file from the hub, containing the list of available conf `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.update() + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.update(cmd.Context()) }, } return cmd } -func (cli *cliHub) upgrade(force bool) error { - hub, err := require.Hub(cli.cfg(), require.RemoteHub(cli.cfg()), log.StandardLogger()) +func (cli *cliHub) upgrade(ctx context.Context, force bool) error { + hub, err := require.Hub(cli.cfg(), require.RemoteHub(ctx, cli.cfg()), log.StandardLogger()) if err != nil { return err } @@ -176,8 +185,8 @@ Upgrade all configs installed from Crowdsec Hub. 
Run 'sudo cscli hub update' if `, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.upgrade(force) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.upgrade(cmd.Context(), force) }, } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 367e22f2962..dedbab3b914 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -2,6 +2,7 @@ package main import ( "cmp" + "context" "errors" "fmt" "os" @@ -61,10 +62,10 @@ func (cli cliItem) NewCommand() *cobra.Command { return cmd } -func (cli cliItem) install(args []string, downloadOnly bool, force bool, ignoreError bool) error { +func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool, force bool, ignoreError bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) if err != nil { return err } @@ -113,8 +114,8 @@ func (cli cliItem) newInstallCmd() *cobra.Command { ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compAllItems(cli.name, args, toComplete, cli.cfg) }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.install(args, downloadOnly, force, ignoreError) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(cmd.Context(), args, downloadOnly, force, ignoreError) }, } @@ -252,10 +253,10 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { return cmd } -func (cli cliItem) upgrade(args []string, force bool, all bool) error { +func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) if err != nil { return err } @@ -334,8 +335,8 
@@ func (cli cliItem) newUpgradeCmd() *cobra.Command { ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.upgrade(args, force, all) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.upgrade(cmd.Context(), args, force, all) }, } @@ -346,7 +347,7 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { return cmd } -func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMetrics bool) error { +func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { cfg := cli.cfg() if rev && !diff { @@ -360,7 +361,7 @@ func (cli cliItem) inspect(args []string, url string, diff bool, rev bool, noMet remote := (*cwhub.RemoteHubCfg)(nil) if diff { - remote = require.RemoteHub(cfg) + remote = require.RemoteHub(ctx, cfg) } hub, err := require.Hub(cfg, remote, log.StandardLogger()) @@ -412,8 +413,8 @@ func (cli cliItem) newInspectCmd() *cobra.Command { ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.inspect(args, url, diff, rev, noMetrics) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.inspect(cmd.Context(), args, url, diff, rev, noMetrics) }, } diff --git a/cmd/crowdsec-cli/require/branch.go b/cmd/crowdsec-cli/require/branch.go index 6fcaaacea2d..503cb6d2326 100644 --- a/cmd/crowdsec-cli/require/branch.go +++ b/cmd/crowdsec-cli/require/branch.go @@ -3,27 +3,76 @@ package require // Set the appropriate hub branch according to config settings and crowdsec version import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + log "github.com/sirupsen/logrus" 
"golang.org/x/mod/semver" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) -func chooseBranch(cfg *csconfig.Config) string { +// lookupLatest returns the latest crowdsec version based on github +func lookupLatest(ctx context.Context) (string, error) { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + url := "https://version.crowdsec.net/latest" + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return "", fmt.Errorf("unable to create request for %s: %w", url, err) + } + + client := &http.Client{} + + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("unable to send request to %s: %w", url, err) + } + defer resp.Body.Close() + + latest := make(map[string]any) + + if err := json.NewDecoder(resp.Body).Decode(&latest); err != nil { + return "", fmt.Errorf("unable to decode response from %s: %w", url, err) + } + + if _, ok := latest["name"]; !ok { + return "", fmt.Errorf("unable to find 'name' key in response from %s", url) + } + + name, ok := latest["name"].(string) + if !ok { + return "", fmt.Errorf("unable to convert 'name' key to string in response from %s", url) + } + + return name, nil +} + +func chooseBranch(ctx context.Context, cfg *csconfig.Config) string { // this was set from config.yaml or flag if cfg.Cscli.HubBranch != "" { log.Debugf("Hub override from config: branch '%s'", cfg.Cscli.HubBranch) return cfg.Cscli.HubBranch } - latest, err := cwversion.Latest() + latest, err := lookupLatest(ctx) if err != nil { log.Warningf("Unable to retrieve latest crowdsec version: %s, using hub branch 'master'", err) return "master" } csVersion := cwversion.VersionStrip() + if csVersion == "" { + log.Warning("Crowdsec version is not set, using hub branch 'master'") + return "master" + } + if csVersion == latest { log.Debugf("Latest crowdsec version (%s), using hub branch 
'master'", csVersion) return "master" @@ -35,22 +84,17 @@ func chooseBranch(cfg *csconfig.Config) string { return "master" } - if csVersion == "" { - log.Warning("Crowdsec version is not set, using hub branch 'master'") - return "master" - } - log.Warnf("A new CrowdSec release is available (%s). "+ "Your version is '%s'. Please update it to use new parsers/scenarios/collections.", latest, csVersion) + return csVersion } - // HubBranch sets the branch (in cscli config) and returns its value // It can be "master", or the branch corresponding to the current crowdsec version, or the value overridden in config/flag -func HubBranch(cfg *csconfig.Config) string { - branch := chooseBranch(cfg) +func HubBranch(ctx context.Context, cfg *csconfig.Config) string { + branch := chooseBranch(ctx, cfg) cfg.Cscli.HubBranch = branch diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 708b2d1c7a2..3ff66254466 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -1,6 +1,7 @@ package require import ( + "context" "errors" "fmt" "io" @@ -64,14 +65,14 @@ func Notifications(c *csconfig.Config) error { } // RemoteHub returns the configuration required to download hub index and items: url, branch, etc. 
-func RemoteHub(c *csconfig.Config) *cwhub.RemoteHubCfg { +func RemoteHub(ctx context.Context, c *csconfig.Config) *cwhub.RemoteHubCfg { // set branch in config, and log if necessary - branch := HubBranch(c) + branch := HubBranch(ctx, c) urlTemplate := HubURLTemplate(c) remote := &cwhub.RemoteHubCfg{ Branch: branch, URLTemplate: urlTemplate, - IndexPath: ".index.json", + IndexPath: ".index.json", } return remote @@ -91,8 +92,12 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) logger.SetOutput(io.Discard) } - hub, err := cwhub.NewHub(local, remote, false, logger) + hub, err := cwhub.NewHub(local, remote, logger) if err != nil { + return nil, err + } + + if err := hub.Load(); err != nil { return nil, fmt.Errorf("failed to read Hub index: %w. Run 'sudo cscli hub update' to download the index again", err) } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 3e12b2465dd..ad02b357b70 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -315,7 +315,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error { return fmt.Errorf("while reading file %s: %w", fromFile, err) } - hub, err := require.Hub(csConfig, require.RemoteHub(csConfig), log.StandardLogger()) + hub, err := require.Hub(csConfig, require.RemoteHub(cmd.Context(), csConfig), log.StandardLogger()) if err != nil { return err } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index da79e50c427..6c15b2d347e 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -81,9 +81,13 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, false, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) if err != nil { - return nil, fmt.Errorf("while loading hub index: %w", err) + return nil, err + } + + if err := hub.Load(); err != nil { + return nil, err } csParsers, datasources, err := 
initCrowdsec(cConfig, hub) @@ -367,9 +371,13 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, false, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) if err != nil { - return fmt.Errorf("while loading hub index: %w", err) + return err + } + + if err := hub.Load(); err != nil { + return err } csParsers, datasources, err := initCrowdsec(cConfig, hub) diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 09455fd65a9..17cf258745f 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "fmt" "io" "net/http" @@ -62,7 +63,15 @@ func testHub(t *testing.T, update bool) *Hub { IndexPath: ".index.json", } - hub, err := NewHub(local, remote, update, log.StandardLogger()) + hub, err := NewHub(local, remote, log.StandardLogger()) + require.NoError(t, err) + + if update { + err := hub.Update(context.TODO()) + require.NoError(t, err) + } + + err = hub.Load() require.NoError(t, err) return hub diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index 85767265048..8cbf77ba00f 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -58,12 +58,21 @@ // InstallDir: "/etc/crowdsec", // InstallDataDir: "/var/lib/crowdsec/data", // } -// hub, err := cwhub.NewHub(localHub, nil, false) +// +// hub, err := cwhub.NewHub(localHub, nil, logger) // if err != nil { // return fmt.Errorf("unable to initialize hub: %w", err) // } // -// Now you can use the hub to access the existing items: +// If the logger is nil, the item-by-item messages will be discarded, including warnings. +// After configuring the hub, you must sync its state with items on disk. 
+// +// err := hub.Load() +// if err != nil { +// return fmt.Errorf("unable to load hub: %w", err) +// } +// +// Now you can use the hub object to access the existing items: // // // list all the parsers // for _, parser := range hub.GetItemMap(cwhub.PARSERS) { @@ -97,8 +106,8 @@ // Branch: "master", // IndexPath: ".index.json", // } -// updateIndex := false -// hub, err := cwhub.NewHub(localHub, remoteHub, updateIndex) +// +// hub, err := cwhub.NewHub(localHub, remoteHub, logger) // if err != nil { // return fmt.Errorf("unable to initialize hub: %w", err) // } @@ -106,8 +115,14 @@ // The URLTemplate is a string that will be used to build the URL of the remote hub. It must contain two // placeholders: the branch and the file path (it will be an index or an item). // -// Setting the third parameter to true will download the latest version of the index, if available on the -// specified branch. -// There is no exported method to update the index once the hub struct is created. +// Before calling hub.Load(), you can update the index file by calling the Update() method: +// +// err := hub.Update(context.Background()) +// if err != nil { +// return fmt.Errorf("unable to update hub index: %w", err) +// } +// +// Note that the command will fail if the hub has already been synced. If you want to do it (ex. after a configuration +// change the application is notified with SIGHUP) you have to instantiate a new hub object and dispose of the old one. // package cwhub diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index e7d927f54b1..1293d6fa235 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "encoding/json" "errors" "fmt" @@ -30,10 +31,11 @@ func (h *Hub) GetDataDir() string { return h.local.InstallDataDir } -// NewHub returns a new Hub instance with local and (optionally) remote configuration, and syncs the local state. 
-// If updateIndex is true, the local index file is updated from the remote before reading the state of the items. +// NewHub returns a new Hub instance with local and (optionally) remote configuration. +// The hub is not synced automatically. Load() must be called to read the index, sync the local state, +// and check for unmanaged items. // All download operations (including updateIndex) return ErrNilRemoteHub if the remote configuration is not set. -func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, updateIndex bool, logger *logrus.Logger) (*Hub, error) { +func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, logger *logrus.Logger) (*Hub, error) { if local == nil { return nil, errors.New("no hub configuration found") } @@ -50,23 +52,22 @@ func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, updateIndex bool, pathIndex: make(map[string]*Item, 0), } - if updateIndex { - if err := hub.updateIndex(); err != nil { - return nil, err - } - } + return hub, nil +} - logger.Debugf("loading hub idx %s", local.HubIndexFile) +// Load reads the state of the items on disk. +func (h *Hub) Load() error { + h.logger.Debugf("loading hub idx %s", h.local.HubIndexFile) - if err := hub.parseIndex(); err != nil { - return nil, fmt.Errorf("failed to load index: %w", err) + if err := h.parseIndex(); err != nil { + return fmt.Errorf("failed to load hub index: %w", err) } - if err := hub.localSync(); err != nil { - return nil, fmt.Errorf("failed to sync items: %w", err) + if err := h.localSync(); err != nil { + return fmt.Errorf("failed to sync hub items: %w", err) } - return hub, nil + return nil } // parseIndex takes the content of an index file and fills the map of associated parsers/scenarios/collections. @@ -149,9 +150,15 @@ func (h *Hub) ItemStats() []string { return ret } -// updateIndex downloads the latest version of the index and writes it to disk if it changed. 
-func (h *Hub) updateIndex() error { - downloaded, err := h.remote.fetchIndex(h.local.HubIndexFile) +// Update downloads the latest version of the index and writes it to disk if it changed. It cannot be called after Load() +// unless the hub is completely empty. +func (h *Hub) Update(ctx context.Context) error { + if h.pathIndex != nil && len(h.pathIndex) > 0 { + // if this happens, it's a bug. + return errors.New("cannot update hub after items have been loaded") + } + + downloaded, err := h.remote.fetchIndex(ctx, h.local.HubIndexFile) if err != nil { return err } diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index d5592a16c39..13c495e2fcc 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "fmt" "os" "testing" @@ -18,7 +19,13 @@ func TestInitHubUpdate(t *testing.T) { IndexPath: ".index.json", } - _, err := NewHub(hub.local, remote, true, nil) + _, err := NewHub(hub.local, remote, nil) + require.NoError(t, err) + + err = hub.Update(context.TODO()) + require.NoError(t, err) + + err = hub.Load() require.NoError(t, err) } @@ -47,7 +54,7 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = tmpIndex.Name() - err = hub.updateIndex() + err = hub.Update(context.TODO()) cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") // bad domain @@ -59,7 +66,7 @@ func TestUpdateIndex(t *testing.T) { IndexPath: ".index.json", } - err = hub.updateIndex() + err = hub.Update(context.TODO()) require.NoError(t, err) // XXX: this is not failing // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") @@ -75,6 +82,6 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = "/does/not/exist/index.json" - err = hub.updateIndex() + err = hub.Update(context.TODO()) cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") } diff --git a/pkg/cwhub/itemupgrade_test.go 
b/pkg/cwhub/itemupgrade_test.go index 1bd62ad63e8..d86d2094955 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -39,8 +40,14 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { IndexPath: ".index.json", } - hub, err := NewHub(hub.local, remote, true, nil) - require.NoError(t, err, "failed to download index: %s", err) + hub, err := NewHub(hub.local, remote, nil) + require.NoError(t, err) + + err = hub.Update(context.TODO()) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) hub = getHubOrFail(t, hub.local, remote) @@ -100,8 +107,14 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { require.True(t, hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection").State.Installed) require.True(t, hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection").State.UpToDate) - hub, err = NewHub(hub.local, remote, true, nil) - require.NoError(t, err, "failed to download index: %s", err) + hub, err = NewHub(hub.local, remote, nil) + require.NoError(t, err) + + err = hub.Update(context.TODO()) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) item = hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection") didUpdate, err := item.Upgrade(false) @@ -114,8 +127,11 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { // getHubOrFail refreshes the hub state (load index, sync) and returns the singleton, or fails the test. func getHubOrFail(t *testing.T, local *csconfig.LocalHubCfg, remote *RemoteHubCfg) *Hub { - hub, err := NewHub(local, remote, false, nil) - require.NoError(t, err, "failed to load hub index") + hub, err := NewHub(local, remote, nil) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) return hub } @@ -166,8 +182,14 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te // we just removed. 
Nor should it install the newly added scenario pushUpdateToCollectionInHub() - hub, err = NewHub(hub.local, remote, true, nil) - require.NoError(t, err, "failed to download index: %s", err) + hub, err = NewHub(hub.local, remote, nil) + require.NoError(t, err) + + err = hub.Update(context.TODO()) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) require.False(t, hub.GetItem(SCENARIOS, "crowdsecurity/foobar_scenario").State.Installed) hub = getHubOrFail(t, hub.local, remote) diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 5e42555fa61..abb2ddae2ad 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -31,7 +31,7 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { } // fetchIndex downloads the index from the hub and returns the content. -func (r *RemoteHubCfg) fetchIndex(destPath string) (bool, error) { +func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, error) { if r == nil { return false, ErrNilRemoteHub } @@ -41,8 +41,6 @@ func (r *RemoteHubCfg) fetchIndex(destPath string) (bool, error) { return false, fmt.Errorf("failed to build hub index request: %w", err) } - ctx := context.TODO() - downloaded, err := downloader. New(). WithHTTPClient(hubClient). 
diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index d13767e665b..01509833c1c 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -1,9 +1,7 @@ package cwversion import ( - "encoding/json" "fmt" - "net/http" "strings" goversion "github.com/hashicorp/go-version" @@ -27,7 +25,7 @@ func versionWithTag() string { // if the version number already contains the tag, don't duplicate it ret := version.Version - if !strings.HasSuffix(ret, version.Tag) && !strings.HasSuffix(ret, "g" + version.Tag + "-dirty") { + if !strings.HasSuffix(ret, version.Tag) && !strings.HasSuffix(ret, "g"+version.Tag+"-dirty") { ret += "-" + version.Tag } @@ -65,7 +63,7 @@ func VersionStrip() string { func Satisfies(strvers string, constraint string) (bool, error) { vers, err := goversion.NewVersion(strvers) if err != nil { - return false, fmt.Errorf("failed to parse '%s' : %v", strvers, err) + return false, fmt.Errorf("failed to parse '%s': %w", strvers, err) } constraints, err := goversion.NewConstraint(constraint) @@ -79,25 +77,3 @@ func Satisfies(strvers string, constraint string) (bool, error) { return true, nil } - -// Latest return latest crowdsec version based on github -func Latest() (string, error) { - latest := make(map[string]any) - - resp, err := http.Get("https://version.crowdsec.net/latest") - if err != nil { - return "", err - } - defer resp.Body.Close() - - err = json.NewDecoder(resp.Body).Decode(&latest) - if err != nil { - return "", err - } - - if _, ok := latest["name"]; !ok { - return "", fmt.Errorf("unable to find latest release name from github api: %+v", latest) - } - - return latest["name"].(string), nil -} diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index 6610652f78a..a4ca275c310 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -93,9 +93,13 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, false, 
nil) + hub, err := cwhub.NewHub(local, nil, nil) if err != nil { - return HubTest{}, fmt.Errorf("unable to load hub: %s", err) + return HubTest{}, err + } + + if err := hub.Load(); err != nil { + return HubTest{}, err } return HubTest{ @@ -126,9 +130,13 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, false, nil) + hub, err := cwhub.NewHub(local, nil, nil) if err != nil { - return HubTest{}, fmt.Errorf("unable to load hub: %s", err) + return HubTest{}, err + } + + if err := hub.Load(); err != nil { + return HubTest{}, err } return HubTest{ diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 918a10f62e1..096bdcd0a02 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -110,7 +110,7 @@ func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { - return nil, fmt.Errorf("unmarshal: %v", err) + return nil, fmt.Errorf("unmarshal: %w", err) } parserAssertFilePath := filepath.Join(testPath, ParserAssertFileName) @@ -210,11 +210,15 @@ func (t *HubTestItem) InstallHub() error { } // load installed hub - hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, false, nil) + hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, nil) if err != nil { return err } + if err := hub.Load(); err != nil { + return err + } + // install data for parsers if needed ret := hub.GetItemMap(cwhub.PARSERS) for parserName, item := range ret { diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 4bb3c96759e..989e03944c3 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -16,6 +16,7 @@ import ( "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" yaml "gopkg.in/yaml.v2" @@ -45,10 +46,11 @@ func TestBucket(t *testing.T) { InstallDataDir: testdata, 
} - hub, err := cwhub.NewHub(hubCfg, nil, false, nil) - if err != nil { - t.Fatalf("failed to init hub: %s", err) - } + hub, err := cwhub.NewHub(hubCfg, nil, nil) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) err = exprhelpers.Init(nil) if err != nil { @@ -197,9 +199,11 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res //just to avoid any race during ingestion of funny scenarios time.Sleep(50 * time.Millisecond) var ts time.Time + if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil { t.Fatalf("Failed to unmarshal time from input event : %s", err) } + if latest_ts.IsZero() { latest_ts = ts } else if ts.After(latest_ts) { @@ -208,10 +212,12 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res in.ExpectMode = types.TIMEMACHINE log.Infof("Buckets input : %s", spew.Sdump(in)) + ok, err := PourItemToHolders(in, holders, buckets) if err != nil { t.Fatalf("Failed to pour : %s", err) } + if !ok { log.Warning("Event wasn't poured") } diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 72e09dfa268..c6dbafc0911 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -83,7 +83,7 @@ teardown() { rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics -o json # XXX: we are on the verbose side here... rune -0 jq -r ".msg" <(stderr) - assert_output --regexp "failed to read Hub index: failed to sync items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. Run 'sudo cscli hub update' to download the index again" + assert_output --regexp "failed to read Hub index: failed to sync hub items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. 
Run 'sudo cscli hub update' to download the index again" } @test "removing or purging an item already removed by hand" { From dd6cf2b844f801fe045dc102471064ae1ef66bdc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 17:32:52 +0200 Subject: [PATCH 174/581] pkg/cwhub: use explicit context for item install, upgrade (#3067) --- cmd/crowdsec-cli/config_restore.go | 2 +- cmd/crowdsec-cli/hub.go | 2 +- cmd/crowdsec-cli/itemcli.go | 16 ++++++++-------- cmd/crowdsec-cli/setup.go | 2 +- pkg/cwhub/cwhub_test.go | 3 ++- pkg/cwhub/dataset.go | 4 +--- pkg/cwhub/hub_test.go | 12 ++++++++---- pkg/cwhub/iteminstall.go | 5 +++-- pkg/cwhub/iteminstall_test.go | 9 +++++++-- pkg/cwhub/itemupgrade.go | 28 +++++++++++++--------------- pkg/cwhub/itemupgrade_test.go | 24 +++++++++++++++--------- pkg/hubtest/hubtest_item.go | 9 ++++++--- pkg/setup/install.go | 11 ++++++----- 13 files changed, 72 insertions(+), 55 deletions(-) diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index 6147a7518c2..fc3670165f8 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -50,7 +50,7 @@ func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { continue } - if err = item.Install(false, false); err != nil { + if err = item.Install(ctx, false, false); err != nil { log.Errorf("Error while installing %s : %s", toinstall, err) } } diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index cf9f7f282a4..737b93d8da8 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -158,7 +158,7 @@ func (cli *cliHub) upgrade(ctx context.Context, force bool) error { log.Infof("Upgrading %s", itemType) for _, item := range items { - didUpdate, err := item.Upgrade(force) + didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index dedbab3b914..55396a10995 100644 --- 
a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -83,7 +83,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool continue } - if err := item.Install(force, downloadOnly); err != nil { + if err := item.Install(ctx, force, downloadOnly); err != nil { if !ignoreError { return fmt.Errorf("error while installing '%s': %w", item.Name, err) } @@ -270,7 +270,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b updated := 0 for _, item := range items { - didUpdate, err := item.Upgrade(force) + didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err } @@ -301,7 +301,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b return fmt.Errorf("can't find '%s' in %s", itemName, cli.name) } - didUpdate, err := item.Upgrade(force) + didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err } @@ -376,7 +376,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff } if diff { - fmt.Println(cli.whyTainted(hub, item, rev)) + fmt.Println(cli.whyTainted(ctx, hub, item, rev)) continue } @@ -466,7 +466,7 @@ func (cli cliItem) newListCmd() *cobra.Command { } // return the diff between the installed version and the latest version -func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) { +func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, reverse bool) (string, error) { if !item.State.Installed { return "", fmt.Errorf("'%s' is not installed", item.FQName()) } @@ -477,7 +477,7 @@ func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) { } defer os.Remove(dest.Name()) - _, remoteURL, err := item.FetchContentTo(dest.Name()) + _, remoteURL, err := item.FetchContentTo(ctx, dest.Name()) if err != nil { return "", err } @@ -508,7 +508,7 @@ func (cli cliItem) itemDiff(item *cwhub.Item, reverse bool) (string, error) { return fmt.Sprintf("%s", diff), nil } -func (cli cliItem) 
whyTainted(hub *cwhub.Hub, item *cwhub.Item, reverse bool) string { +func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, reverse bool) string { if !item.State.Installed { return fmt.Sprintf("# %s is not installed", item.FQName()) } @@ -533,7 +533,7 @@ func (cli cliItem) whyTainted(hub *cwhub.Hub, item *cwhub.Item, reverse bool) st ret = append(ret, err.Error()) } - diff, err := cli.itemDiff(sub, reverse) + diff, err := cli.itemDiff(ctx, sub, reverse) if err != nil { ret = append(ret, err.Error()) } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index ad02b357b70..9f685d0fac1 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -320,7 +320,7 @@ func runSetupInstallHub(cmd *cobra.Command, args []string) error { return err } - return setup.InstallHubItems(hub, input, dryRun) + return setup.InstallHubItems(cmd.Context(), hub, input, dryRun) } func runSetupValidate(cmd *cobra.Command, args []string) error { diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 17cf258745f..d11ed2b9a95 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -67,7 +67,8 @@ func testHub(t *testing.T, update bool) *Hub { require.NoError(t, err) if update { - err := hub.Update(context.TODO()) + ctx := context.Background() + err := hub.Update(ctx) require.NoError(t, err) } diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index eb56d8e32a8..6d4f35c285c 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -21,7 +21,7 @@ type DataSet struct { } // downloadDataSet downloads all the data files for an item. 
-func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { +func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { dec := yaml.NewDecoder(reader) for { @@ -53,8 +53,6 @@ func downloadDataSet(dataFolder string, force bool, reader io.Reader, logger *lo WithShelfLife(7 * 24 * time.Hour) } - ctx := context.TODO() - downloaded, err := d.Download(ctx, dataS.SourceURL) if err != nil { return fmt.Errorf("while getting data: %w", err) diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 13c495e2fcc..3d4ae5793b3 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -22,7 +22,9 @@ func TestInitHubUpdate(t *testing.T) { _, err := NewHub(hub.local, remote, nil) require.NoError(t, err) - err = hub.Update(context.TODO()) + ctx := context.Background() + + err = hub.Update(ctx) require.NoError(t, err) err = hub.Load() @@ -54,7 +56,9 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = tmpIndex.Name() - err = hub.Update(context.TODO()) + ctx := context.Background() + + err = hub.Update(ctx) cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") // bad domain @@ -66,7 +70,7 @@ func TestUpdateIndex(t *testing.T) { IndexPath: ".index.json", } - err = hub.Update(context.TODO()) + err = hub.Update(ctx) require.NoError(t, err) // XXX: this is not failing // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") @@ -82,6 +86,6 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = "/does/not/exist/index.json" - err = hub.Update(context.TODO()) + err = hub.Update(ctx) cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") } diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go index 274e7128a04..6a16ad0a65f 100644 --- a/pkg/cwhub/iteminstall.go +++ b/pkg/cwhub/iteminstall.go @@ -1,6 +1,7 @@ package cwhub 
import ( + "context" "fmt" ) @@ -39,7 +40,7 @@ func (i *Item) enable() error { } // Install installs the item from the hub, downloading it if needed. -func (i *Item) Install(force bool, downloadOnly bool) error { +func (i *Item) Install(ctx context.Context, force bool, downloadOnly bool) error { if downloadOnly && i.State.Downloaded && i.State.UpToDate { i.hub.logger.Infof("%s is already downloaded and up-to-date", i.Name) @@ -48,7 +49,7 @@ func (i *Item) Install(force bool, downloadOnly bool) error { } } - downloaded, err := i.downloadLatest(force, true) + downloaded, err := i.downloadLatest(ctx, force, true) if err != nil { return err } diff --git a/pkg/cwhub/iteminstall_test.go b/pkg/cwhub/iteminstall_test.go index 337f66f95fa..5bfc7e8148e 100644 --- a/pkg/cwhub/iteminstall_test.go +++ b/pkg/cwhub/iteminstall_test.go @@ -1,6 +1,7 @@ package cwhub import ( + "context" "os" "testing" @@ -9,8 +10,10 @@ import ( ) func testInstall(hub *Hub, t *testing.T, item *Item) { + ctx := context.Background() + // Install the parser - _, err := item.downloadLatest(false, false) + _, err := item.downloadLatest(ctx, false, false) require.NoError(t, err, "failed to download %s", item.Name) err = hub.localSync() @@ -48,8 +51,10 @@ func testTaint(hub *Hub, t *testing.T, item *Item) { func testUpdate(hub *Hub, t *testing.T, item *Item) { assert.False(t, item.State.UpToDate, "%s should not be up-to-date", item.Name) + ctx := context.Background() + // Update it + check status - _, err := item.downloadLatest(true, true) + _, err := item.downloadLatest(ctx, true, true) require.NoError(t, err, "failed to update %s", item.Name) // Local sync and check status diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 8b3ec7481ef..4dad226fd78 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -16,7 +16,7 @@ import ( ) // Upgrade downloads and applies the last version of the item from the hub. 
-func (i *Item) Upgrade(force bool) (bool, error) { +func (i *Item) Upgrade(ctx context.Context, force bool) (bool, error) { if i.State.IsLocal() { i.hub.logger.Infof("not upgrading %s: local item", i.Name) return false, nil @@ -33,7 +33,7 @@ func (i *Item) Upgrade(force bool) (bool, error) { if i.State.UpToDate { i.hub.logger.Infof("%s: up-to-date", i.Name) - if err := i.DownloadDataIfNeeded(force); err != nil { + if err := i.DownloadDataIfNeeded(ctx, force); err != nil { return false, fmt.Errorf("%s: download failed: %w", i.Name, err) } @@ -43,7 +43,7 @@ func (i *Item) Upgrade(force bool) (bool, error) { } } - if _, err := i.downloadLatest(force, true); err != nil { + if _, err := i.downloadLatest(ctx, force, true); err != nil { return false, fmt.Errorf("%s: download failed: %w", i.Name, err) } @@ -65,7 +65,7 @@ func (i *Item) Upgrade(force bool) (bool, error) { } // downloadLatest downloads the latest version of the item to the hub directory. -func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) { +func (i *Item) downloadLatest(ctx context.Context, overwrite bool, updateOnly bool) (bool, error) { i.hub.logger.Debugf("Downloading %s %s", i.Type, i.Name) for _, sub := range i.SubItems() { @@ -80,14 +80,14 @@ func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) { if sub.HasSubItems() { i.hub.logger.Tracef("collection, recurse") - if _, err := sub.downloadLatest(overwrite, updateOnly); err != nil { + if _, err := sub.downloadLatest(ctx, overwrite, updateOnly); err != nil { return false, err } } downloaded := sub.State.Downloaded - if _, err := sub.download(overwrite); err != nil { + if _, err := sub.download(ctx, overwrite); err != nil { return false, err } @@ -105,11 +105,11 @@ func (i *Item) downloadLatest(overwrite bool, updateOnly bool) (bool, error) { return false, nil } - return i.download(overwrite) + return i.download(ctx, overwrite) } // FetchContentTo downloads the last version of the item's YAML file to the 
specified path. -func (i *Item) FetchContentTo(destPath string) (bool, string, error) { +func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { url, err := i.hub.remote.urlTo(i.RemotePath) if err != nil { return false, "", fmt.Errorf("failed to build request: %w", err) @@ -131,8 +131,6 @@ func (i *Item) FetchContentTo(destPath string) (bool, string, error) { // TODO: recommend hub update if hash does not match - ctx := context.TODO() - downloaded, err := d.Download(ctx, url) if err != nil { return false, "", fmt.Errorf("while downloading %s to %s: %w", i.Name, url, err) @@ -142,7 +140,7 @@ func (i *Item) FetchContentTo(destPath string) (bool, string, error) { } // download downloads the item from the hub and writes it to the hub directory. -func (i *Item) download(overwrite bool) (bool, error) { +func (i *Item) download(ctx context.Context, overwrite bool) (bool, error) { // ensure that target file is within target dir finalPath, err := i.downloadPath() if err != nil { @@ -167,7 +165,7 @@ func (i *Item) download(overwrite bool) (bool, error) { } } - downloaded, _, err := i.FetchContentTo(finalPath) + downloaded, _, err := i.FetchContentTo(ctx, finalPath) if err != nil { return false, fmt.Errorf("while downloading %s: %w", i.Name, err) } @@ -188,7 +186,7 @@ func (i *Item) download(overwrite bool) (bool, error) { defer reader.Close() - if err = downloadDataSet(i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { + if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { return false, fmt.Errorf("while downloading data for %s: %w", i.FileName, err) } @@ -196,7 +194,7 @@ func (i *Item) download(overwrite bool) (bool, error) { } // DownloadDataIfNeeded downloads the data set for the item. 
-func (i *Item) DownloadDataIfNeeded(force bool) error { +func (i *Item) DownloadDataIfNeeded(ctx context.Context, force bool) error { itemFilePath, err := i.installPath() if err != nil { return err @@ -209,7 +207,7 @@ func (i *Item) DownloadDataIfNeeded(force bool) error { defer itemFile.Close() - if err = downloadDataSet(i.hub.local.InstallDataDir, force, itemFile, i.hub.logger); err != nil { + if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, force, itemFile, i.hub.logger); err != nil { return fmt.Errorf("while downloading data for %s: %w", itemFilePath, err) } diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index d86d2094955..5d302db3345 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -19,7 +19,9 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { require.False(t, item.State.Downloaded) require.False(t, item.State.Installed) - require.NoError(t, item.Install(false, false)) + ctx := context.Background() + + require.NoError(t, item.Install(ctx, false, false)) require.True(t, item.State.Downloaded) require.True(t, item.State.Installed) @@ -43,7 +45,7 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { hub, err := NewHub(hub.local, remote, nil) require.NoError(t, err) - err = hub.Update(context.TODO()) + err = hub.Update(ctx) require.NoError(t, err) err = hub.Load() @@ -58,7 +60,7 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { require.False(t, item.State.UpToDate) require.False(t, item.State.Tainted) - didUpdate, err := item.Upgrade(false) + didUpdate, err := item.Upgrade(ctx, false) require.NoError(t, err) require.True(t, didUpdate) assertCollectionDepsInstalled(t, hub, "crowdsecurity/test_collection") @@ -78,7 +80,9 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { require.False(t, item.State.Installed) require.False(t, hub.GetItem(SCENARIOS, "crowdsecurity/foobar_scenario").State.Installed) - require.NoError(t, 
item.Install(false, false)) + ctx := context.Background() + + require.NoError(t, item.Install(ctx, false, false)) require.True(t, item.State.Downloaded) require.True(t, item.State.Installed) @@ -110,14 +114,14 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { hub, err = NewHub(hub.local, remote, nil) require.NoError(t, err) - err = hub.Update(context.TODO()) + err = hub.Update(ctx) require.NoError(t, err) err = hub.Load() require.NoError(t, err) item = hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection") - didUpdate, err := item.Upgrade(false) + didUpdate, err := item.Upgrade(ctx, false) require.NoError(t, err) require.False(t, didUpdate) @@ -148,7 +152,9 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te require.False(t, item.State.Installed) require.False(t, hub.GetItem(SCENARIOS, "crowdsecurity/foobar_scenario").State.Installed) - require.NoError(t, item.Install(false, false)) + ctx := context.Background() + + require.NoError(t, item.Install(ctx, false, false)) require.True(t, item.State.Downloaded) require.True(t, item.State.Installed) @@ -185,7 +191,7 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te hub, err = NewHub(hub.local, remote, nil) require.NoError(t, err) - err = hub.Update(context.TODO()) + err = hub.Update(ctx) require.NoError(t, err) err = hub.Load() @@ -195,7 +201,7 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te hub = getHubOrFail(t, hub.local, remote) item = hub.GetItem(COLLECTIONS, "crowdsecurity/test_collection") - didUpdate, err := item.Upgrade(false) + didUpdate, err := item.Upgrade(ctx, false) require.NoError(t, err) require.True(t, didUpdate) diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 096bdcd0a02..4b105777952 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -1,6 +1,7 @@ package hubtest import ( + "context" "errors" "fmt" "net/url" @@ -219,11 
+220,13 @@ func (t *HubTestItem) InstallHub() error { return err } + ctx := context.Background() + // install data for parsers if needed ret := hub.GetItemMap(cwhub.PARSERS) for parserName, item := range ret { if item.State.Installed { - if err := item.DownloadDataIfNeeded(true); err != nil { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", parserName, err) } @@ -235,7 +238,7 @@ func (t *HubTestItem) InstallHub() error { ret = hub.GetItemMap(cwhub.SCENARIOS) for scenarioName, item := range ret { if item.State.Installed { - if err := item.DownloadDataIfNeeded(true); err != nil { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", scenarioName, err) } @@ -247,7 +250,7 @@ func (t *HubTestItem) InstallHub() error { ret = hub.GetItemMap(cwhub.POSTOVERFLOWS) for postoverflowName, item := range ret { if item.State.Installed { - if err := item.DownloadDataIfNeeded(true); err != nil { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", postoverflowName, err) } diff --git a/pkg/setup/install.go b/pkg/setup/install.go index dc85706a15c..09ac15971f8 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -2,6 +2,7 @@ package setup import ( "bytes" + "context" "errors" "fmt" "os" @@ -46,7 +47,7 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { } // InstallHubItems installs the objects recommended in a setup file. 
-func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { +func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, dryRun bool) error { setupEnvelope, err := decodeSetup(input, false) if err != nil { return err @@ -74,7 +75,7 @@ func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { continue } - if err := item.Install(forceAction, downloadOnly); err != nil { + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { return fmt.Errorf("while installing collection %s: %w", item.Name, err) } } @@ -93,7 +94,7 @@ func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { return fmt.Errorf("parser %s not found", parser) } - if err := item.Install(forceAction, downloadOnly); err != nil { + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { return fmt.Errorf("while installing parser %s: %w", item.Name, err) } } @@ -112,7 +113,7 @@ func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { return fmt.Errorf("scenario %s not found", scenario) } - if err := item.Install(forceAction, downloadOnly); err != nil { + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { return fmt.Errorf("while installing scenario %s: %w", item.Name, err) } } @@ -131,7 +132,7 @@ func InstallHubItems(hub *cwhub.Hub, input []byte, dryRun bool) error { return fmt.Errorf("postoverflow %s not found", postoverflow) } - if err := item.Install(forceAction, downloadOnly); err != nil { + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) } } From 13fb25257134090d14ec717f70c7d871d9ad730c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Jun 2024 19:03:23 +0200 Subject: [PATCH 175/581] lint: replace cyclop, gocyclo with revive; basic pkg/hubtest helper (#3065) --- .golangci.yml | 24 +++++++++-------- pkg/hubtest/appsecrule.go | 8 ++---- 
pkg/hubtest/hubtest_item.go | 52 ++++++++++++------------------------- pkg/hubtest/parser.go | 8 ++---- pkg/hubtest/postoverflow.go | 8 ++---- pkg/hubtest/scenario.go | 8 ++---- 6 files changed, 37 insertions(+), 71 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 1ec386183e1..8feb9921175 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,10 +1,6 @@ # https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml linters-settings: - cyclop: - # lower this after refactoring - max-complexity: 45 - gci: sections: - standard @@ -20,10 +16,6 @@ linters-settings: # lower this after refactoring min-complexity: 128 - gocyclo: - # lower this after refactoring - min-complexity: 45 - funlen: # Checks the number of lines in a function. # If lower than 0, disable the check. @@ -133,7 +125,8 @@ linters-settings: - name: confusing-results disabled: true - name: cyclomatic - disabled: true + # lower this after refactoring + arguments: [45] - name: deep-exit disabled: true - name: defer @@ -228,6 +221,13 @@ linters: - structcheck - varcheck + # + # Redundant + # + + - gocyclo # revive + - cyclop # revive + # # Disabled until fixed for go 1.22 # @@ -243,7 +243,6 @@ linters: # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name # - bidichk # Checks for dangerous unicode character sequences # - bodyclose # checks whether HTTP response body is closed successfully - # - cyclop # checks function and package cyclomatic complexity # - decorder # check declaration order and count of types, constants, variables and functions # - depguard # Go linter that checks if package imports are in a list of acceptable packages # - dupword # checks for duplicate words in the source code @@ -259,7 +258,6 @@ linters: # - gochecksumtype # Run exhaustiveness checks on Go "sum types" # - gocognit # Computes and checks the cognitive complexity of functions # - gocritic # Provides diagnostics that check for bugs, performance and style 
issues. - # - gocyclo # Computes and checks the cyclomatic complexity of functions # - goheader # Checks is file header matches to pattern # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. @@ -503,3 +501,7 @@ issues: - revive path: cmd/crowdsec-cli/copyfile.go + - linters: + - revive + path: pkg/hubtest/hubtest_item.go + text: "cyclomatic: .*RunWithLogFile" diff --git a/pkg/hubtest/appsecrule.go b/pkg/hubtest/appsecrule.go index fb4ad78cc18..1c4416c2e9b 100644 --- a/pkg/hubtest/appsecrule.go +++ b/pkg/hubtest/appsecrule.go @@ -25,12 +25,8 @@ func (t *HubTestItem) installAppsecRuleItem(item *cwhub.Item) error { // runtime/appsec-rules/ itemTypeDirDest := fmt.Sprintf("%s/appsec-rules/", t.RuntimePath) - if err := os.MkdirAll(hubDirAppsecRuleDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", hubDirAppsecRuleDest, err) - } - - if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + if err := createDirs([]string{hubDirAppsecRuleDest, itemTypeDirDest}); err != nil { + return err } // runtime/hub/appsec-rules/crowdsecurity/rule.yaml diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 4b105777952..5346fb0be50 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -380,6 +380,16 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { return nil } +func createDirs(dirs []string) error { + for _, dir := range dirs { + if err := os.MkdirAll(dir, os.ModePerm); err != nil { + return fmt.Errorf("unable to create directory '%s': %w", dir, err) + } + } + + return nil +} + func (t *HubTestItem) RunWithLogFile() error { testPath := 
filepath.Join(t.HubTestPath, t.Name) if _, err := os.Stat(testPath); os.IsNotExist(err) { @@ -391,30 +401,15 @@ func (t *HubTestItem) RunWithLogFile() error { return fmt.Errorf("can't get current directory: %+v", err) } - // create runtime folder - if err = os.MkdirAll(t.RuntimePath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimePath, err) - } - - // create runtime data folder - if err = os.MkdirAll(t.RuntimeDataPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimeDataPath, err) - } - - // create runtime hub folder - if err = os.MkdirAll(t.RuntimeHubPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimeHubPath, err) + // create runtime, data, hub folders + if err = createDirs([]string{t.RuntimePath, t.RuntimeDataPath, t.RuntimeHubPath, t.ResultsPath}); err != nil { + return err } if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) } - // create results folder - if err = os.MkdirAll(t.ResultsPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.ResultsPath, err) - } - // copy template config file to runtime folder if err = Copy(t.TemplateConfigPath, t.RuntimeConfigFilePath); err != nil { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateConfigPath, t.RuntimeConfigFilePath, err) @@ -585,30 +580,15 @@ func (t *HubTestItem) Run() error { t.Success = false t.ErrorsList = make([]string, 0) - // create runtime folder - if err = os.MkdirAll(t.RuntimePath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimePath, err) - } - - // create runtime data folder - if err = os.MkdirAll(t.RuntimeDataPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", 
t.RuntimeDataPath, err) - } - - // create runtime hub folder - if err = os.MkdirAll(t.RuntimeHubPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimeHubPath, err) + // create runtime, data, hub, result folders + if err = createDirs([]string{t.RuntimePath, t.RuntimeDataPath, t.RuntimeHubPath, t.ResultsPath}); err != nil { + return err } if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) } - // create results folder - if err = os.MkdirAll(t.ResultsPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.ResultsPath, err) - } - // copy template config file to runtime folder if err = Copy(t.TemplateConfigPath, t.RuntimeConfigFilePath); err != nil { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateConfigPath, t.RuntimeConfigFilePath, err) diff --git a/pkg/hubtest/parser.go b/pkg/hubtest/parser.go index d40301e3015..31ff459ca77 100644 --- a/pkg/hubtest/parser.go +++ b/pkg/hubtest/parser.go @@ -23,12 +23,8 @@ func (t *HubTestItem) installParserItem(item *cwhub.Item) error { // runtime/parsers/s00-raw/ itemTypeDirDest := fmt.Sprintf("%s/parsers/%s/", t.RuntimePath, item.Stage) - if err := os.MkdirAll(hubDirParserDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", hubDirParserDest, err) - } - - if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + if err := createDirs([]string{hubDirParserDest, itemTypeDirDest}); err != nil { + return err } // runtime/hub/parsers/s00-raw/crowdsecurity/syslog-logs.yaml diff --git a/pkg/hubtest/postoverflow.go b/pkg/hubtest/postoverflow.go index 76a67b58b76..65fd0bfbc5d 100644 --- a/pkg/hubtest/postoverflow.go +++ b/pkg/hubtest/postoverflow.go @@ -23,12 +23,8 @@ func 
(t *HubTestItem) installPostoverflowItem(item *cwhub.Item) error { // runtime/postoverflows/s00-enrich itemTypeDirDest := fmt.Sprintf("%s/postoverflows/%s/", t.RuntimePath, item.Stage) - if err := os.MkdirAll(hubDirPostoverflowDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", hubDirPostoverflowDest, err) - } - - if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + if err := createDirs([]string{hubDirPostoverflowDest, itemTypeDirDest}); err != nil { + return err } // runtime/hub/postoverflows/s00-enrich/crowdsecurity/rdns.yaml diff --git a/pkg/hubtest/scenario.go b/pkg/hubtest/scenario.go index 35ea465b7c0..7f61e48accf 100644 --- a/pkg/hubtest/scenario.go +++ b/pkg/hubtest/scenario.go @@ -22,12 +22,8 @@ func (t *HubTestItem) installScenarioItem(item *cwhub.Item) error { // runtime/parsers/scenarios/ itemTypeDirDest := fmt.Sprintf("%s/scenarios/", t.RuntimePath) - if err := os.MkdirAll(hubDirScenarioDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", hubDirScenarioDest, err) - } - - if err := os.MkdirAll(itemTypeDirDest, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %w", itemTypeDirDest, err) + if err := createDirs([]string{hubDirScenarioDest, itemTypeDirDest}); err != nil { + return err } // runtime/hub/scenarios/crowdsecurity/ssh-bf.yaml From 819fa0ee2271cd665620c45ae4e886ec275f7c8e Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 10 Jun 2024 10:09:10 +0100 Subject: [PATCH 176/581] enhancement: add other log levels to docker start script and document them (#3072) --- docker/README.md | 3 +++ docker/docker_start.sh | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/docker/README.md b/docker/README.md index 5e39838a175..ad31d10aed6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -333,6 +333,9 @@ config.yaml) each time the container is run. 
| `DISABLE_APPSEC_RULES` | | Appsec rules files to remove, separated by space | | | | | | __Log verbosity__ | | | +| `LEVEL_FATAL` | false | Force FATAL level for the container log | +| `LEVEL_ERROR` | false | Force ERROR level for the container log | +| `LEVEL_WARN` | false | Force WARN level for the container log | | `LEVEL_INFO` | false | Force INFO level for the container log | | `LEVEL_DEBUG` | false | Force DEBUG level for the container log | | `LEVEL_TRACE` | false | Force TRACE level (VERY verbose) for the container log | diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 954dbd9fc79..a3e9226bced 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -504,5 +504,17 @@ if istrue "$LEVEL_INFO"; then ARGS="$ARGS -info" fi +if istrue "$LEVEL_WARN"; then + ARGS="$ARGS -warning" +fi + +if istrue "$LEVEL_ERROR"; then + ARGS="$ARGS -error" +fi + +if istrue "$LEVEL_FATAL"; then + ARGS="$ARGS -fatal" +fi + # shellcheck disable=SC2086 exec crowdsec $ARGS From 7fd01ae3fced9b50f31734c1d59a72f0553be05a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 10 Jun 2024 16:13:49 +0200 Subject: [PATCH 177/581] CI: update golangci-lint to 1.59; enforce canonical HTTP headers (#3074) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 21 ++++++++------------- cmd/crowdsec-cli/console.go | 3 ++- cmd/notification-sentinel/main.go | 4 ++-- pkg/acquisition/modules/loki/loki_test.go | 2 +- pkg/cticlient/client.go | 2 +- pkg/cticlient/client_test.go | 8 ++++---- pkg/exprhelpers/crowdsec_cti_test.go | 2 +- 9 files changed, 21 insertions(+), 25 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index e707062b5ad..daf6312447b 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -57,7 +57,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - 
version: v1.57 + version: v1.59 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3496674e7b9..41e84189383 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -158,7 +158,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: v1.57 + version: v1.59 args: --issues-exit-code=1 --timeout 10m only-new-issues: false # the cache is already managed above, enabling it here diff --git a/.golangci.yml b/.golangci.yml index 8feb9921175..bcd1d2113f6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -210,16 +210,7 @@ linters: # # DEPRECATED by golangi-lint # - - deadcode - - exhaustivestruct - - golint - - ifshort - - interfacer - - maligned - - nosnakecase - - scopelint - - structcheck - - varcheck + - execinquery # # Redundant @@ -249,7 +240,6 @@ linters: # - durationcheck # check for two durations multiplied together # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - # - execinquery # execinquery is a linter about query string checker in Query function which reads your Go src files and warning it finds # - exportloopref # checks for pointers to enclosing loop variables # - funlen # Tool for detection of long functions # - ginkgolinter # enforces standards of using ginkgo and gomega @@ -331,6 +321,7 @@ linters: - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - gomnd # An analyzer to detect magic numbers. - ireturn # Accept Interfaces, Return Concrete Types + - mnd # An analyzer to detect magic numbers. 
- nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. - noctx # Finds sending http request without context.Context - unparam # Reports unused function parameters @@ -350,7 +341,7 @@ linters: - dupl # Tool for code clone detection - forcetypeassert # finds forced type assertions - godox # Tool for detection of FIXME, TODO and other comment keywords - - goerr113 # Go linter to check the errors handling expressions + - err113 # Go linter to check the errors handling expressions - paralleltest # Detects missing usage of t.Parallel() method in your Go test - testpackage # linter that makes you use a separate _test package @@ -383,7 +374,7 @@ issues: - pkg/yamlpatch/merge.go - pkg/yamlpatch/merge_test.go - exclude-generated-strict: true + exclude-generated: strict max-issues-per-linter: 0 max-same-issues: 0 @@ -505,3 +496,7 @@ issues: - revive path: pkg/hubtest/hubtest_item.go text: "cyclomatic: .*RunWithLogFile" + + - linters: + - canonicalheader + path: pkg/apiserver/middlewares/v1/tls_auth.go diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 972d43122cc..3c7df395b30 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "net/http" "net/url" "os" "strconv" @@ -138,7 +139,7 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st return fmt.Errorf("could not enroll instance: %w", err) } - if resp.Response.StatusCode == 200 && !overwrite { + if resp.Response.StatusCode == http.StatusOK && !overwrite { log.Warning("Instance already enrolled. 
You can use '--overwrite' to force enroll") return nil } diff --git a/cmd/notification-sentinel/main.go b/cmd/notification-sentinel/main.go index c627f9271e2..9c34e63a289 100644 --- a/cmd/notification-sentinel/main.go +++ b/cmd/notification-sentinel/main.go @@ -37,7 +37,7 @@ var logger hclog.Logger = hclog.New(&hclog.LoggerOptions{ }) func (s *SentinelPlugin) getAuthorizationHeader(now string, length int, pluginName string) (string, error) { - xHeaders := "x-ms-date:" + now + xHeaders := "X-Ms-Date:" + now stringToHash := fmt.Sprintf("POST\n%d\napplication/json\n%s\n/api/logs", length, xHeaders) decodedKey, _ := base64.StdEncoding.DecodeString(s.PluginConfigByName[pluginName].SharedKey) @@ -87,7 +87,7 @@ func (s *SentinelPlugin) Notify(ctx context.Context, notification *protobufs.Not req.Header.Set("Content-Type", "application/json") req.Header.Set("Log-Type", s.PluginConfigByName[notification.Name].LogType) req.Header.Set("Authorization", authorization) - req.Header.Set("x-ms-date", now) + req.Header.Set("X-Ms-Date", now) client := &http.Client{} resp, err := client.Do(req.WithContext(ctx)) diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 9ac3ccbd321..83742546959 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -294,7 +294,7 @@ func feedLoki(logger *log.Entry, n int, title string) error { } req.Header.Set("Content-Type", "application/json") - req.Header.Set("X-Scope-OrgID", "1234") + req.Header.Set("X-Scope-Orgid", "1234") resp, err := http.DefaultClient.Do(req) if err != nil { diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index 4df4d65a63c..b95d1237619 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -43,7 +43,7 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map if err != nil { return nil, err } - req.Header.Set("x-api-key", c.apiKey) + req.Header.Set("X-Api-Key", c.apiKey) resp, 
err := c.httpClient.Do(req) if err != nil { return nil, err diff --git a/pkg/cticlient/client_test.go b/pkg/cticlient/client_test.go index 79406a6c2a9..cdbbd0c9732 100644 --- a/pkg/cticlient/client_test.go +++ b/pkg/cticlient/client_test.go @@ -38,7 +38,7 @@ func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { func fireHandler(req *http.Request) *http.Response { var err error - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { log.Warningf("invalid api key: %s", apiKey) @@ -105,7 +105,7 @@ func fireHandler(req *http.Request) *http.Response { } func smokeHandler(req *http.Request) *http.Response { - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { return &http.Response{ StatusCode: http.StatusForbidden, @@ -137,7 +137,7 @@ func smokeHandler(req *http.Request) *http.Response { } func rateLimitedHandler(req *http.Request) *http.Response { - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { return &http.Response{ StatusCode: http.StatusForbidden, @@ -154,7 +154,7 @@ func rateLimitedHandler(req *http.Request) *http.Response { } func searchHandler(req *http.Request) *http.Response { - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { return &http.Response{ StatusCode: http.StatusForbidden, diff --git a/pkg/exprhelpers/crowdsec_cti_test.go b/pkg/exprhelpers/crowdsec_cti_test.go index 84a4b245720..4c0346e447c 100644 --- a/pkg/exprhelpers/crowdsec_cti_test.go +++ b/pkg/exprhelpers/crowdsec_cti_test.go @@ -69,7 +69,7 @@ func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { } func smokeHandler(req *http.Request) *http.Response { - apiKey := req.Header.Get("x-api-key") + apiKey := req.Header.Get("X-Api-Key") if apiKey != validApiKey { return &http.Response{ StatusCode: http.StatusForbidden, From 
31ed9fb5eed446250b824831493075c509088636 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 10 Jun 2024 17:36:22 +0200 Subject: [PATCH 178/581] enable linter: revive (indent-error-flow) (#3068) * enable linter: revive (indent-error-flow) * lint --- .golangci.yml | 3 -- pkg/acquisition/modules/appsec/rx_operator.go | 4 +-- pkg/acquisition/modules/kinesis/kinesis.go | 22 ++++++------- .../loki/internal/lokiclient/loki_client.go | 25 +++++++-------- pkg/acquisition/modules/s3/s3.go | 21 ++++++------ pkg/cache/cache.go | 7 ++-- pkg/hubtest/hubtest_item.go | 3 +- pkg/leakybucket/reset_filter.go | 32 +++++++++---------- pkg/leakybucket/uniq.go | 6 ++-- pkg/types/event.go | 7 ++-- 10 files changed, 62 insertions(+), 68 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index bcd1d2113f6..14abfa3e1ae 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -70,7 +70,6 @@ linters-settings: - "!**/pkg/database/*.go" - "!**/pkg/exprhelpers/*.go" - "!**/pkg/acquisition/modules/appsec/appsec.go" - - "!**/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go" - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: @@ -147,8 +146,6 @@ linters-settings: disabled: true - name: increment-decrement disabled: true - - name: indent-error-flow - disabled: true - name: import-alias-naming disabled: true - name: import-shadowing diff --git a/pkg/acquisition/modules/appsec/rx_operator.go b/pkg/acquisition/modules/appsec/rx_operator.go index 43aaf9e94be..73060037657 100644 --- a/pkg/acquisition/modules/appsec/rx_operator.go +++ b/pkg/acquisition/modules/appsec/rx_operator.go @@ -50,9 +50,9 @@ func (o *rx) Evaluate(tx plugintypes.TransactionState, value string) bool { tx.CaptureField(i, c) } return true - } else { - return o.re.MatchString(value) } + + return o.re.MatchString(value) } // RegisterRX registers the rx operator using a WASI implementation instead of Go. 
diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index a86816244f6..5d3cf8f80a0 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -4,6 +4,7 @@ import ( "bytes" "compress/gzip" "encoding/json" + "errors" "fmt" "io" "strings" @@ -95,7 +96,7 @@ func (k *KinesisSource) newClient() error { } if sess == nil { - return fmt.Errorf("failed to create aws session") + return errors.New("failed to create aws session") } config := aws.NewConfig() if k.Config.AwsRegion != "" { @@ -106,7 +107,7 @@ func (k *KinesisSource) newClient() error { } k.kClient = kinesis.New(sess, config) if k.kClient == nil { - return fmt.Errorf("failed to create kinesis client") + return errors.New("failed to create kinesis client") } return nil } @@ -124,7 +125,7 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { err := yaml.UnmarshalStrict(yamlConfig, &k.Config) if err != nil { - return fmt.Errorf("Cannot parse kinesis datasource configuration: %w", err) + return fmt.Errorf("cannot parse kinesis datasource configuration: %w", err) } if k.Config.Mode == "" { @@ -132,16 +133,16 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { } if k.Config.StreamName == "" && !k.Config.UseEnhancedFanOut { - return fmt.Errorf("stream_name is mandatory when use_enhanced_fanout is false") + return errors.New("stream_name is mandatory when use_enhanced_fanout is false") } if k.Config.StreamARN == "" && k.Config.UseEnhancedFanOut { - return fmt.Errorf("stream_arn is mandatory when use_enhanced_fanout is true") + return errors.New("stream_arn is mandatory when use_enhanced_fanout is true") } if k.Config.ConsumerName == "" && k.Config.UseEnhancedFanOut { - return fmt.Errorf("consumer_name is mandatory when use_enhanced_fanout is true") + return errors.New("consumer_name is mandatory when use_enhanced_fanout is true") } if k.Config.StreamARN != "" && k.Config.StreamName != "" { - 
return fmt.Errorf("stream_arn and stream_name are mutually exclusive") + return errors.New("stream_arn and stream_name are mutually exclusive") } if k.Config.MaxRetries <= 0 { k.Config.MaxRetries = 10 @@ -169,7 +170,7 @@ func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsL } func (k *KinesisSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { - return fmt.Errorf("kinesis datasource does not support command-line acquisition") + return errors.New("kinesis datasource does not support command-line acquisition") } func (k *KinesisSource) GetMode() string { @@ -181,7 +182,7 @@ func (k *KinesisSource) GetName() string { } func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - return fmt.Errorf("kinesis datasource does not support one-shot acquisition") + return errors.New("kinesis datasource does not support one-shot acquisition") } func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { @@ -524,9 +525,8 @@ func (k *KinesisSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming") if k.Config.UseEnhancedFanOut { return k.EnhancedRead(out, t) - } else { - return k.ReadFromStream(out, t) } + return k.ReadFromStream(out, t) }) return nil } diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 6437a951504..359fef5bb96 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -4,6 +4,7 @@ import ( "context" "encoding/base64" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -13,7 +14,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/gorilla/websocket" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" ) @@ -120,11 +120,10 @@ func (lc 
*LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu resp, err := lc.Get(uri) if err != nil { if ok := lc.shouldRetry(); !ok { - return errors.Wrapf(err, "error querying range") - } else { - lc.increaseTicker(ticker) - continue + return fmt.Errorf("error querying range: %w", err) } + lc.increaseTicker(ticker) + continue } if resp.StatusCode != http.StatusOK { @@ -132,22 +131,20 @@ func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu body, _ := io.ReadAll(resp.Body) resp.Body.Close() if ok := lc.shouldRetry(); !ok { - return errors.Wrapf(err, "bad HTTP response code: %d: %s", resp.StatusCode, string(body)) - } else { - lc.increaseTicker(ticker) - continue + return fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err) } + lc.increaseTicker(ticker) + continue } var lq LokiQueryRangeResponse if err := json.NewDecoder(resp.Body).Decode(&lq); err != nil { resp.Body.Close() if ok := lc.shouldRetry(); !ok { - return errors.Wrapf(err, "error decoding Loki response") - } else { - lc.increaseTicker(ticker) - continue + return fmt.Errorf("error decoding Loki response: %w", err) } + lc.increaseTicker(ticker) + continue } resp.Body.Close() lc.Logger.Tracef("Got response: %+v", lq) @@ -261,7 +258,7 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) { if err != nil { lc.Logger.Errorf("Error connecting to websocket, err: %s", err) - return responseChan, fmt.Errorf("error connecting to websocket") + return responseChan, errors.New("error connecting to websocket") } lc.t.Go(func() error { diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index d1bf881079a..71fcb23cbc4 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -276,7 +276,7 @@ func extractBucketAndPrefixFromEventBridge(message *string) (string, string, err if eventBody.Detail.Bucket.Name != "" { return eventBody.Detail.Bucket.Name, 
eventBody.Detail.Object.Key, nil } - return "", "", fmt.Errorf("invalid event body for event bridge format") + return "", "", errors.New("invalid event body for event bridge format") } func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error) { @@ -286,7 +286,7 @@ func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error) return "", "", err } if len(s3notifBody.Records) == 0 { - return "", "", fmt.Errorf("no records found in S3 notification") + return "", "", errors.New("no records found in S3 notification") } if !strings.HasPrefix(s3notifBody.Records[0].EventName, "ObjectCreated:") { return "", "", fmt.Errorf("event %s is not supported", s3notifBody.Records[0].EventName) @@ -295,19 +295,20 @@ func extractBucketAndPrefixFromS3Notif(message *string) (string, string, error) } func (s *S3Source) extractBucketAndPrefix(message *string) (string, string, error) { - if s.Config.SQSFormat == SQSFormatEventBridge { + switch s.Config.SQSFormat { + case SQSFormatEventBridge: bucket, key, err := extractBucketAndPrefixFromEventBridge(message) if err != nil { return "", "", err } return bucket, key, nil - } else if s.Config.SQSFormat == SQSFormatS3Notification { + case SQSFormatS3Notification: bucket, key, err := extractBucketAndPrefixFromS3Notif(message) if err != nil { return "", "", err } return bucket, key, nil - } else { + default: bucket, key, err := extractBucketAndPrefixFromEventBridge(message) if err == nil { s.Config.SQSFormat = SQSFormatEventBridge @@ -318,7 +319,7 @@ func (s *S3Source) extractBucketAndPrefix(message *string) (string, string, erro s.Config.SQSFormat = SQSFormatS3Notification return bucket, key, nil } - return "", "", fmt.Errorf("SQS message format not supported") + return "", "", errors.New("SQS message format not supported") } } @@ -496,15 +497,15 @@ func (s *S3Source) UnmarshalConfig(yamlConfig []byte) error { } if s.Config.BucketName != "" && s.Config.SQSName != "" { - return fmt.Errorf("bucket_name and 
sqs_name are mutually exclusive") + return errors.New("bucket_name and sqs_name are mutually exclusive") } if s.Config.PollingMethod == PollMethodSQS && s.Config.SQSName == "" { - return fmt.Errorf("sqs_name is required when using sqs polling method") + return errors.New("sqs_name is required when using sqs polling method") } if s.Config.BucketName == "" && s.Config.PollingMethod == PollMethodList { - return fmt.Errorf("bucket_name is required") + return errors.New("bucket_name is required") } if s.Config.SQSFormat != "" && s.Config.SQSFormat != SQSFormatEventBridge && s.Config.SQSFormat != SQSFormatS3Notification { @@ -567,7 +568,7 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger * dsn = strings.TrimPrefix(dsn, "s3://") args := strings.Split(dsn, "?") if len(args[0]) == 0 { - return fmt.Errorf("empty s3:// DSN") + return errors.New("empty s3:// DSN") } if len(args) == 2 && len(args[1]) != 0 { diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 5b0dcbdc251..c2153ee2342 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -111,7 +111,8 @@ func SetKey(cacheName string, key string, value string, expiration *time.Duratio func GetKey(cacheName string, key string) (string, error) { for i, name := range CacheNames { if name == cacheName { - if value, err := Caches[i].Get(key); err != nil { + value, err := Caches[i].Get(key) + if err != nil { // do not warn or log if key not found if errors.Is(err, gcache.KeyNotFoundError) { return "", nil @@ -119,9 +120,9 @@ func GetKey(cacheName string, key string) (string, error) { CacheConfig[i].Logger.Warningf("While getting key %s in cache %s: %s", key, cacheName, err) return "", err - } else { - return value.(string), nil } + + return value.(string), nil } } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 5346fb0be50..da4969ee8dd 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -645,7 +645,6 @@ func (t *HubTestItem) Run() error { 
return t.RunWithLogFile() } else if t.Config.NucleiTemplate != "" { return t.RunWithNucleiTemplate() - } else { - return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name) } + return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name) } diff --git a/pkg/leakybucket/reset_filter.go b/pkg/leakybucket/reset_filter.go index 5884bf4a10c..6e61510fcc7 100644 --- a/pkg/leakybucket/reset_filter.go +++ b/pkg/leakybucket/reset_filter.go @@ -82,22 +82,22 @@ func (u *CancelOnFilter) OnBucketInit(bucketFactory *BucketFactory) error { cancelExprCacheLock.Unlock() u.CancelOnFilter = compiled.CancelOnFilter return nil - } else { - cancelExprCacheLock.Unlock() - //release the lock during compile + } - compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) - if err != nil { - bucketFactory.logger.Errorf("reset_filter compile error : %s", err) - return err - } - u.CancelOnFilter = compiledExpr.CancelOnFilter - if bucketFactory.Debug { - u.Debug = true - } - cancelExprCacheLock.Lock() - cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr - cancelExprCacheLock.Unlock() + cancelExprCacheLock.Unlock() + //release the lock during compile + + compiledExpr.CancelOnFilter, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
+ if err != nil { + bucketFactory.logger.Errorf("reset_filter compile error : %s", err) + return err } - return err + u.CancelOnFilter = compiledExpr.CancelOnFilter + if bucketFactory.Debug { + u.Debug = true + } + cancelExprCacheLock.Lock() + cancelExprCache[bucketFactory.CancelOnFilter] = compiledExpr + cancelExprCacheLock.Unlock() + return nil } diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index 06d1e154a6f..197246c91a0 100644 --- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -39,11 +39,9 @@ func (u *Uniq) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Lea leaky.logger.Debugf("Uniq(%s) : ok", element) u.KeyCache[element] = true return &msg - - } else { - leaky.logger.Debugf("Uniq(%s) : ko, discard event", element) - return nil } + leaky.logger.Debugf("Uniq(%s) : ko, discard event", element) + return nil } } diff --git a/pkg/types/event.go b/pkg/types/event.go index c7b19fe3ca4..90464aadf2c 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -63,11 +63,12 @@ func (e *Event) SetParsed(key string, value string) bool { } func (e *Event) GetType() string { - if e.Type == OVFLW { + switch e.Type { + case OVFLW: return "overflow" - } else if e.Type == LOG { + case LOG: return "log" - } else { + default: log.Warningf("unknown event type for %+v", e) return "unknown" } From ae58b158a5306760f18de337564e65f7f80686ba Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 11 Jun 2024 09:26:50 +0200 Subject: [PATCH 179/581] enable linter: revive (var-declaration) (#3069) --- .golangci.yml | 2 -- pkg/acquisition/modules/s3/s3.go | 2 +- pkg/appsec/appsec_rule/modsecurity.go | 8 ++++---- pkg/appsec/coraza_logger.go | 2 +- pkg/appsec/loader.go | 2 +- pkg/exprhelpers/crowdsec_cti.go | 2 +- pkg/parser/parsing_test.go | 2 +- pkg/types/getfstype.go | 2 +- 8 files changed, 10 insertions(+), 12 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 14abfa3e1ae..c72cb70bf42 100644 --- 
a/.golangci.yml +++ b/.golangci.yml @@ -170,8 +170,6 @@ linters-settings: disabled: true - name: var-naming disabled: true - - name: var-declaration - disabled: true - name: unchecked-type-assertion disabled: true - name: exported diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 71fcb23cbc4..9ef4d2ba757 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -204,7 +204,7 @@ func (s *S3Source) getBucketContent() ([]*s3.Object, error) { logger := s.logger.WithField("method", "getBucketContent") logger.Debugf("Getting bucket content for %s", s.Config.BucketName) bucketObjects := make([]*s3.Object, 0) - var continuationToken *string = nil + var continuationToken *string for { out, err := s.s3Client.ListObjectsV2WithContext(s.ctx, &s3.ListObjectsV2Input{ Bucket: aws.String(s.Config.BucketName), diff --git a/pkg/appsec/appsec_rule/modsecurity.go b/pkg/appsec/appsec_rule/modsecurity.go index 8f58a9589ca..135ba525e8e 100644 --- a/pkg/appsec/appsec_rule/modsecurity.go +++ b/pkg/appsec/appsec_rule/modsecurity.go @@ -11,7 +11,7 @@ type ModsecurityRule struct { ids []uint32 } -var zonesMap map[string]string = map[string]string{ +var zonesMap = map[string]string{ "ARGS": "ARGS_GET", "ARGS_NAMES": "ARGS_GET_NAMES", "BODY_ARGS": "ARGS_POST", @@ -31,7 +31,7 @@ var zonesMap map[string]string = map[string]string{ "FILENAMES": "FILES", } -var transformMap map[string]string = map[string]string{ +var transformMap = map[string]string{ "lowercase": "t:lowercase", "uppercase": "t:uppercase", "b64decode": "t:base64Decode", @@ -45,7 +45,7 @@ var transformMap map[string]string = map[string]string{ "html_entity_decode": "t:htmlEntityDecode", } -var matchMap map[string]string = map[string]string{ +var matchMap = map[string]string{ "regex": "@rx", "equals": "@streq", "startsWith": "@beginsWith", @@ -60,7 +60,7 @@ var matchMap map[string]string = map[string]string{ "eq": "@eq", } -var bodyTypeMatch map[string]string = 
map[string]string{ +var bodyTypeMatch = map[string]string{ "json": "JSON", "xml": "XML", "multipart": "MULTIPART", diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index 2b7f85d4e46..d2c1612cbd7 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -9,7 +9,7 @@ import ( dbg "github.com/crowdsecurity/coraza/v3/debuglog" ) -var DebugRules map[int]bool = map[int]bool{} +var DebugRules = map[int]bool{} func SetRuleDebug(id int, debug bool) { DebugRules[id] = debug diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 86c1dc0a80e..82b8d440771 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -8,7 +8,7 @@ import ( "gopkg.in/yaml.v2" ) -var appsecRules map[string]AppsecCollectionConfig = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this +var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this var hub *cwhub.Hub //FIXME: this is a temporary hack to make the hub available in the package diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 268979ee243..a640ea79f48 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -20,7 +20,7 @@ var CTIApiEnabled = false // when hitting quotas or auth errors, we temporarily disable the API var CTIBackOffUntil time.Time -var CTIBackOffDuration time.Duration = 5 * time.Minute +var CTIBackOffDuration = 5 * time.Minute var ctiClient *cticlient.CrowdsecCTIClient diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index f142e1bc29a..534b433a1b9 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -24,7 +24,7 @@ type TestFile struct { Results []types.Event `yaml:"results,omitempty"` } -var debug bool = false +var debug = false func TestParser(t *testing.T) { debug = true diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index aac12c7fc94..c16eea5cf98 
100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -11,7 +11,7 @@ import ( // Generated with `man statfs | grep _MAGIC | awk '{split(tolower($1),a,"_"); print $2 ": \"" a[1] "\","}'` // ext2/3/4 duplicates removed to just have ext4 // XIAFS removed as well -var fsTypeMapping map[int64]string = map[int64]string{ +var fsTypeMapping = map[int64]string{ 0xadf5: "adfs", 0xadff: "affs", 0x5346414f: "afs", From c39c9cf8b76d5e34548f2ae25ba51844f6ef9aa1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 11 Jun 2024 09:28:10 +0200 Subject: [PATCH 180/581] cscli: fix deprecation message for "context delete" (#3078) missing newline, proper return code, same msg as dashboard, test --- cmd/crowdsec-cli/lapi.go | 3 +-- test/bats/09_context.bats | 5 +++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index c2c92140fd6..d2955230de9 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -464,9 +464,8 @@ func (cli *cliLapi) newContextDeleteCmd() *cobra.Command { if filePath == "" { filePath = "the context file" } - fmt.Printf("Command 'delete' is deprecated, please manually edit %s.", filePath) - return nil + return fmt.Errorf("command 'delete' has been removed, please manually edit %s", filePath) }, } diff --git a/test/bats/09_context.bats b/test/bats/09_context.bats index ba295451070..71aabc68d29 100644 --- a/test/bats/09_context.bats +++ b/test/bats/09_context.bats @@ -65,6 +65,11 @@ teardown() { assert_stderr --partial "while checking console_context_path: stat $CONTEXT_YAML: no such file or directory" } +@test "csli lapi context delete" { + rune -1 cscli lapi context delete + assert_stderr --partial "command 'delete' has been removed, please manually edit the context file" +} + @test "context file is bad" { echo "bad yaml" > "$CONTEXT_YAML" rune -1 "$CROWDSEC" -t From 1d08edc200c388a1741fad51f1b567e0ecfa7cad Mon Sep 17 00:00:00 2001 From: 
mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 11 Jun 2024 09:32:20 +0200 Subject: [PATCH 181/581] enable linter: revive (deep-exit) (#3077) --- .golangci.yml | 54 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c72cb70bf42..925005a5b05 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -126,8 +126,6 @@ linters-settings: - name: cyclomatic # lower this after refactoring arguments: [45] - - name: deep-exit - disabled: true - name: defer disabled: true - name: empty-block @@ -495,3 +493,55 @@ issues: - linters: - canonicalheader path: pkg/apiserver/middlewares/v1/tls_auth.go + + # tolerate deep exit in tests, for now + - linters: + - revive + path: "pkg/(.+)_test.go" + text: "deep-exit: .*" + + # tolerate deep exit in cobra's OnInitialize, for now + - linters: + - revive + path: "cmd/crowdsec-cli/main.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec-cli/item_metrics.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec-cli/machines.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec-cli/utils.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec-cli/hubtest.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "pkg/leakybucket/overflows.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec/crowdsec.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec/api.go" + text: "deep-exit: .*" + + - linters: + - revive + path: "cmd/crowdsec/win_service.go" + text: "deep-exit: .*" From 24687e982a5c38dd91688057c09d4a0e1be6b0af Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 11 Jun 2024 09:34:37 +0100 Subject: [PATCH 182/581] enhancement: add deprecation notice to cscli dashboard prerun (#3079) --- cmd/crowdsec-cli/dashboard.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/crowdsec-cli/dashboard.go 
b/cmd/crowdsec-cli/dashboard.go index 59b9e67cd94..beff06d478a 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -99,6 +99,8 @@ cscli dashboard remove } } + log.Warn("cscli dashboard will be deprecated in version 1.7.0, read more at https://docs.crowdsec.net/blog/cscli_dashboard_deprecation/") + return nil }, } From bd4540b1bf5bfc086fb69ec765605ad066300bc4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 11 Jun 2024 12:13:18 +0200 Subject: [PATCH 183/581] refactor context (cscli, pkg/database) (#3071) * cscli: helper require.DBClient() * refactor pkg/database: explicit context to dbclient constructor * lint --- cmd/crowdsec-cli/alerts.go | 15 +++++++++++---- cmd/crowdsec-cli/bouncers.go | 6 +++--- cmd/crowdsec-cli/machines.go | 6 +++--- cmd/crowdsec-cli/papi.go | 13 ++++++------- cmd/crowdsec-cli/require/require.go | 10 ++++++++++ cmd/crowdsec-cli/support.go | 4 ++-- cmd/crowdsec/run_in_svc.go | 6 ++++-- cmd/crowdsec/run_in_svc_windows.go | 5 ++++- cmd/crowdsec/serve.go | 5 ++++- pkg/apiserver/apic_test.go | 22 ++++++++++++++-------- pkg/apiserver/apiserver.go | 6 ++++-- pkg/apiserver/apiserver_test.go | 21 ++++++++++++++------- pkg/database/alerts.go | 2 +- pkg/database/database.go | 8 ++++---- pkg/exprhelpers/exprlib_test.go | 17 +++++++++++++---- 15 files changed, 97 insertions(+), 49 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index e1b070ab0fc..7c9c5f23032 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -24,7 +24,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwversion" - "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -378,6 +377,7 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, 
Activ alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { return err } + if ActiveDecision != nil { alertDeleteFilter.ActiveDecisionEquals = ActiveDecision } @@ -385,21 +385,27 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ if *alertDeleteFilter.ScopeEquals == "" { alertDeleteFilter.ScopeEquals = nil } + if *alertDeleteFilter.ValueEquals == "" { alertDeleteFilter.ValueEquals = nil } + if *alertDeleteFilter.ScenarioEquals == "" { alertDeleteFilter.ScenarioEquals = nil } + if *alertDeleteFilter.IPEquals == "" { alertDeleteFilter.IPEquals = nil } + if *alertDeleteFilter.RangeEquals == "" { alertDeleteFilter.RangeEquals = nil } + if contained != nil && *contained { alertDeleteFilter.Contains = new(bool) } + limit := 0 alertDeleteFilter.Limit = &limit } else { @@ -419,6 +425,7 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ return fmt.Errorf("unable to delete alert: %w", err) } } + log.Infof("%s alert(s) deleted", alerts.NbDeleted) return nil @@ -558,14 +565,14 @@ func (cli *cliAlerts) NewFlushCmd() *cobra.Command { /!\ This command can be used only on the same machine than the local API`, Example: `cscli alerts flush --max-items 1000 --max-age 7d`, DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() if err := require.LAPI(cfg); err != nil { return err } - db, err := database.NewClient(cfg.DbConfig) + db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to create new database client: %w", err) + return err } log.Info("Flushing alerts. !! 
This may take a long time !!") err = db.FlushAlerts(maxAge, maxItems) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 2e0adb9b95f..f8628538378 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -57,7 +57,7 @@ Note: This command requires database direct access, so is intended to be run on Args: cobra.MinimumNArgs(1), Aliases: []string{"bouncer"}, DisableAutoGenTag: true, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { var err error cfg := cli.cfg() @@ -66,9 +66,9 @@ Note: This command requires database direct access, so is intended to be run on return err } - cli.db, err = database.NewClient(cfg.DbConfig) + cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { - return fmt.Errorf("can't connect to the database: %w", err) + return err } return nil diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 1457fb5a0cc..7beaa5c7fdd 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -128,14 +128,14 @@ Note: This command requires database direct access, so is intended to be run on Example: `cscli machines [action]`, DisableAutoGenTag: true, Aliases: []string{"machine"}, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { var err error if err = require.LAPI(cli.cfg()); err != nil { return err } - cli.db, err = database.NewClient(cli.cfg().DbConfig) + cli.db, err = require.DBClient(cmd.Context(), cli.cfg().DbConfig) if err != nil { - return fmt.Errorf("unable to create new database client: %w", err) + return err } return nil diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go index b8ed0fd7356..a2fa0a90871 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/papi.go @@ -12,7 +12,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" 
"github.com/crowdsecurity/crowdsec/pkg/apiserver" - "github.com/crowdsecurity/crowdsec/pkg/database" ) type cliPapi struct { @@ -56,12 +55,12 @@ func (cli *cliPapi) NewStatusCmd() *cobra.Command { Short: "Get status of the Polling API", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var err error cfg := cli.cfg() - db, err := database.NewClient(cfg.DbConfig) + db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to initialize database client: %w", err) + return err } apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) @@ -105,14 +104,14 @@ func (cli *cliPapi) NewSyncCmd() *cobra.Command { Short: "Sync with the Polling API, pulling all non-expired orders for the instance", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var err error cfg := cli.cfg() t := tomb.Tomb{} - db, err := database.NewClient(cfg.DbConfig) + db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { - return fmt.Errorf("unable to initialize database client: %w", err) + return err } apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 3ff66254466..15d8bce682d 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -10,6 +10,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/database" ) func LAPI(c *csconfig.Config) error { @@ -48,6 +49,15 @@ func CAPIRegistered(c *csconfig.Config) error { return nil } +func DBClient(ctx context.Context, dbcfg 
*csconfig.DatabaseCfg) (*database.Client, error) { + db, err := database.NewClient(ctx, dbcfg) + if err != nil { + return nil, fmt.Errorf("failed to connect to database: %w", err) + } + + return db, nil +} + func DB(c *csconfig.Config) error { if err := c.LoadDBConfig(true); err != nil { return fmt.Errorf("this command requires direct database access (must be run on the local API machine): %w", err) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 41e23dc9654..3b0f53cd6e1 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -463,9 +463,9 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { w := bytes.NewBuffer(nil) zipWriter := zip.NewWriter(w) - db, err := database.NewClient(cfg.DbConfig) + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { - log.Warnf("Could not connect to database: %s", err) + log.Warn(err) } if err = cfg.LoadAPIServer(true); err != nil { diff --git a/cmd/crowdsec/run_in_svc.go b/cmd/crowdsec/run_in_svc.go index 58f4cdf005d..288b565e890 100644 --- a/cmd/crowdsec/run_in_svc.go +++ b/cmd/crowdsec/run_in_svc.go @@ -3,6 +3,7 @@ package main import ( + "context" "fmt" "runtime/pprof" @@ -41,9 +42,10 @@ func StartRunSvc() error { var err error - if cConfig.DbConfig != nil { - dbClient, err = database.NewClient(cConfig.DbConfig) + ctx := context.TODO() + if cConfig.DbConfig != nil { + dbClient, err = database.NewClient(ctx, cConfig.DbConfig) if err != nil { return fmt.Errorf("unable to create database client: %w", err) } diff --git a/cmd/crowdsec/run_in_svc_windows.go b/cmd/crowdsec/run_in_svc_windows.go index c0aa18d7fc6..a2a2dd8c47a 100644 --- a/cmd/crowdsec/run_in_svc_windows.go +++ b/cmd/crowdsec/run_in_svc_windows.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "runtime/pprof" @@ -80,8 +81,10 @@ func WindowsRun() error { var dbClient *database.Client var err error + ctx := context.TODO() + if cConfig.DbConfig != nil { - dbClient, err = 
database.NewClient(cConfig.DbConfig) + dbClient, err = database.NewClient(ctx, cConfig.DbConfig) if err != nil { return fmt.Errorf("unable to create database client: %w", err) diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 6c15b2d347e..5fb7b86f181 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "os" "os/signal" @@ -322,8 +323,10 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { crowdsecTomb = tomb.Tomb{} pluginTomb = tomb.Tomb{} + ctx := context.TODO() + if cConfig.API.Server != nil && cConfig.API.Server.DbConfig != nil { - dbClient, err := database.NewClient(cConfig.API.Server.DbConfig) + dbClient, err := database.NewClient(ctx, cConfig.API.Server.DbConfig) if err != nil { return fmt.Errorf("failed to get database client: %w", err) } diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index c5a39455ff9..10f4cf9444b 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -38,9 +38,11 @@ import ( func getDBClient(t *testing.T) *database.Client { t.Helper() + ctx := context.Background() + dbPath, err := os.CreateTemp("", "*sqlite") require.NoError(t, err) - dbClient, err := database.NewClient(&csconfig.DatabaseCfg{ + dbClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ Type: "sqlite", DbName: "crowdsec", DbPath: dbPath.Name(), @@ -56,7 +58,7 @@ func getAPIC(t *testing.T) *apic { return &apic{ AlertsAddChan: make(chan []*models.Alert), - //DecisionDeleteChan: make(chan []*models.Decision), + // DecisionDeleteChan: make(chan []*models.Decision), dbClient: dbClient, mu: sync.Mutex{}, startup: true, @@ -176,10 +178,11 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { } scenarios, err := api.FetchScenariosListFromDB() + require.NoError(t, err) + for machineID := range tc.machineIDsWithScenarios { api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(context.Background()) } - 
require.NoError(t, err) assert.ElementsMatch(t, tc.expectedScenarios, scenarios) }) @@ -234,6 +237,7 @@ func TestNewAPIC(t *testing.T) { t.Run(tc.name, func(t *testing.T) { setConfig() httpmock.Activate() + defer httpmock.DeactivateAndReset() httpmock.RegisterResponder("POST", "http://foobar/v3/watchers/login", httpmock.NewBytesResponder( 200, jsonMarshalX( @@ -353,6 +357,7 @@ func TestAPICGetMetrics(t *testing.T) { t.Run(tc.name, func(t *testing.T) { apiClient := getAPIC(t) cleanUp(apiClient) + for i, machineID := range tc.machineIDs { apiClient.dbClient.Ent.Machine.Create(). SetMachineId(machineID). @@ -548,7 +553,7 @@ func TestFillAlertsWithDecisions(t *testing.T) { func TestAPICWhitelists(t *testing.T) { api := getAPIC(t) - //one whitelist on IP, one on CIDR + // one whitelist on IP, one on CIDR api.whitelists = &csconfig.CapiWhitelist{} api.whitelists.Ips = append(api.whitelists.Ips, net.ParseIP("9.2.3.4"), net.ParseIP("7.2.3.4")) @@ -593,7 +598,7 @@ func TestAPICWhitelists(t *testing.T) { Scope: ptr.Of("Ip"), Decisions: []*modelscapi.GetDecisionsStreamResponseNewItemDecisionsItems0{ { - Value: ptr.Of("13.2.3.4"), //wl by cidr + Value: ptr.Of("13.2.3.4"), // wl by cidr Duration: ptr.Of("24h"), }, }, @@ -614,7 +619,7 @@ func TestAPICWhitelists(t *testing.T) { Scope: ptr.Of("Ip"), Decisions: []*modelscapi.GetDecisionsStreamResponseNewItemDecisionsItems0{ { - Value: ptr.Of("13.2.3.5"), //wl by cidr + Value: ptr.Of("13.2.3.5"), // wl by cidr Duration: ptr.Of("24h"), }, }, @@ -634,7 +639,7 @@ func TestAPICWhitelists(t *testing.T) { Scope: ptr.Of("Ip"), Decisions: []*modelscapi.GetDecisionsStreamResponseNewItemDecisionsItems0{ { - Value: ptr.Of("9.2.3.4"), //wl by ip + Value: ptr.Of("9.2.3.4"), // wl by ip Duration: ptr.Of("24h"), }, }, @@ -685,7 +690,7 @@ func TestAPICWhitelists(t *testing.T) { err = api.PullTop(false) require.NoError(t, err) - assertTotalDecisionCount(t, api.dbClient, 5) //2 from FIRE + 2 from bl + 1 existing + assertTotalDecisionCount(t, 
api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) @@ -1103,6 +1108,7 @@ func TestAPICPush(t *testing.T) { httpmock.Activate() defer httpmock.DeactivateAndReset() + apic, err := apiclient.NewDefaultClient( url, "/api", diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 056f74a7b31..c6074801d7e 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -162,7 +162,9 @@ func newGinLogger(config *csconfig.LocalApiServerCfg) (*log.Logger, string, erro func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { var flushScheduler *gocron.Scheduler - dbClient, err := database.NewClient(config.DbConfig) + ctx := context.TODO() + + dbClient, err := database.NewClient(ctx, config.DbConfig) if err != nil { return nil, fmt.Errorf("unable to init database client: %w", err) } @@ -227,7 +229,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { controller := &controllers.Controller{ DBClient: dbClient, - Ectx: context.Background(), + Ectx: ctx, Router: router, Profiles: config.Profiles, Log: clog, diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index b7f6be5fe36..20c48337833 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "fmt" "net/http" @@ -161,7 +162,9 @@ func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { } func ValidateMachine(t *testing.T, machineID string, config *csconfig.DatabaseCfg) { - dbClient, err := database.NewClient(config) + ctx := context.Background() + + dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) err = dbClient.ValidateMachine(machineID) @@ -169,7 +172,9 @@ func ValidateMachine(t *testing.T, 
machineID string, config *csconfig.DatabaseCf } func GetMachineIP(t *testing.T, machineID string, config *csconfig.DatabaseCfg) string { - dbClient, err := database.NewClient(config) + ctx := context.Background() + + dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) machines, err := dbClient.ListMachines() @@ -260,7 +265,9 @@ func CreateTestMachine(t *testing.T, router *gin.Engine) string { } func CreateTestBouncer(t *testing.T, config *csconfig.DatabaseCfg) string { - dbClient, err := database.NewClient(config) + ctx := context.Background() + + dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) apiKey, err := middlewares.GenerateAPIKey(keyLength) @@ -356,10 +363,10 @@ func TestLoggingDebugToFileConfig(t *testing.T) { req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, 404, w.Code) - //wait for the request to happen + // wait for the request to happen time.Sleep(500 * time.Millisecond) - //check file content + // check file content data, err := os.ReadFile(expectedFile) require.NoError(t, err) @@ -406,10 +413,10 @@ func TestLoggingErrorToFileConfig(t *testing.T) { req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) - //wait for the request to happen + // wait for the request to happen time.Sleep(500 * time.Millisecond) - //check file content + // check file content x, err := os.ReadFile(expectedFile) if err == nil { require.Empty(t, x) diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index d9efe4c254b..3563adba68c 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -947,7 +947,7 @@ func (c *Client) AlertsCountPerScenario(filters map[string][]string) (map[string Count int } - ctx := context.Background() + ctx := context.TODO() query := c.Ent.Alert.Query() diff --git a/pkg/database/database.go b/pkg/database/database.go index fc89aa5cd4d..6f392c46d21 100644 --- a/pkg/database/database.go +++ 
b/pkg/database/database.go @@ -48,7 +48,7 @@ func getEntDriver(dbtype string, dbdialect string, dsn string, config *csconfig. return drv, nil } -func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { +func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, error) { var client *ent.Client if config == nil { @@ -69,7 +69,7 @@ func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { typ, dia, err := config.ConnectionDialect() if err != nil { - return nil, err //unsupported database caught here + return nil, err // unsupported database caught here } if config.Type == "sqlite" { @@ -103,13 +103,13 @@ func NewClient(config *csconfig.DatabaseCfg) (*Client, error) { client = client.Debug() } - if err = client.Schema.Create(context.Background()); err != nil { + if err = client.Schema.Create(ctx); err != nil { return nil, fmt.Errorf("failed creating schema resources: %v", err) } return &Client{ Ent: client, - CTX: context.Background(), + CTX: ctx, Log: clog, CanFlush: true, Type: config.Type, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 687465d9493..0f6e1a21e2f 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -29,7 +29,9 @@ func getDBClient(t *testing.T) *database.Client { dbPath, err := os.CreateTemp("", "*sqlite") require.NoError(t, err) - testDBClient, err := database.NewClient(&csconfig.DatabaseCfg{ + ctx := context.Background() + + testDBClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ Type: "sqlite", DbName: "crowdsec", DbPath: dbPath.Name(), @@ -215,7 +217,7 @@ func TestRegexpCacheBehavior(t *testing.T) { err = FileInit(TestFolder, filename, "regex") require.NoError(t, err) - //cache with no TTL + // cache with no TTL err = RegexpCacheInit(filename, types.DataSource{Type: "regex", Size: ptr.Of(1)}) require.NoError(t, err) @@ -227,7 +229,7 @@ func TestRegexpCacheBehavior(t *testing.T) { assert.True(t, ret.(bool)) assert.Equal(t, 1, 
dataFileRegexCache[filename].Len(false)) - //cache with TTL + // cache with TTL ttl := 500 * time.Millisecond err = RegexpCacheInit(filename, types.DataSource{Type: "regex", Size: ptr.Of(2), TTL: &ttl}) require.NoError(t, err) @@ -994,6 +996,7 @@ func TestGetDecisionsCount(t *testing.T) { log.Printf("test '%s' : OK", test.name) } } + func TestGetDecisionsSinceCount(t *testing.T) { existingIP := "1.2.3.4" unknownIP := "1.2.3.5" @@ -1365,6 +1368,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { require.NoError(t, err) output, err := expr.Run(program, test.env) require.NoError(t, err) + switch o := output.(type) { case time.Duration: require.LessOrEqual(t, int(o.Seconds()), int(test.max)) @@ -1376,7 +1380,6 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { t.Fatalf("GetActiveDecisionsTimeLeft() should return a time.Duration or a float64") } } - } func TestParseUnixTime(t *testing.T) { @@ -1415,9 +1418,11 @@ func TestParseUnixTime(t *testing.T) { t.Run(tc.name, func(t *testing.T) { output, err := ParseUnixTime(tc.value) cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } + require.WithinDuration(t, tc.expected, output.(time.Time), time.Second) }) } @@ -1520,6 +1525,7 @@ func TestIsIp(t *testing.T) { require.Error(t, err) return } + require.NoError(t, err) output, err := expr.Run(vm, map[string]interface{}{"value": tc.value}) require.NoError(t, err) @@ -1619,12 +1625,15 @@ func TestB64Decode(t *testing.T) { require.Error(t, err) return } + require.NoError(t, err) + output, err := expr.Run(vm, map[string]interface{}{"value": tc.value}) if tc.expectedRuntimeErr { require.Error(t, err) return } + require.NoError(t, err) require.Equal(t, tc.expected, output) }) From fd433a7e738dd7da0d0102bc957ed8cd86a0ab98 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Jun 2024 11:07:10 +0200 Subject: [PATCH 184/581] cscli: refactor "cscli setup" to avoid global variable (#3075) * cscli refactor: 
extract method cliSetup.detect() * cscli refactor: extract method cliSetup.install() * cscli refactor: extract method cliSetup.dataSources() * cscli refactor: method ccliSetup.validate() * skip redundant pre-loop checks --- cmd/crowdsec-cli/setup.go | 194 ++++++++++++++++---------------------- pkg/setup/install.go | 102 +++++++++----------- 2 files changed, 126 insertions(+), 170 deletions(-) diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 9f685d0fac1..d747af9225f 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -2,6 +2,7 @@ package main import ( "bytes" + "context" "errors" "fmt" "os" @@ -44,57 +45,85 @@ func (cli *cliSetup) NewCommand() *cobra.Command { return cmd } +type detectFlags struct { + detectConfigFile string + listSupportedServices bool + forcedUnits []string + forcedProcesses []string + forcedOSFamily string + forcedOSID string + forcedOSVersion string + skipServices []string + snubSystemd bool + outYaml bool +} + +func (f *detectFlags) bind(cmd *cobra.Command) { + defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") + + flags := cmd.Flags() + flags.StringVar(&f.detectConfigFile, "detect-config", defaultServiceDetect, "path to service detection configuration") + flags.BoolVar(&f.listSupportedServices, "list-supported-services", false, "do not detect; only print supported services") + flags.StringSliceVar(&f.forcedUnits, "force-unit", nil, "force detection of a systemd unit (can be repeated)") + flags.StringSliceVar(&f.forcedProcesses, "force-process", nil, "force detection of a running process (can be repeated)") + flags.StringSliceVar(&f.skipServices, "skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") + flags.StringVar(&f.forcedOSFamily, "force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") + flags.StringVar(&f.forcedOSID, "force-os-id", "", "override OS.ID=[debian | ubuntu | , redhat...]") + 
flags.StringVar(&f.forcedOSVersion, "force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") + flags.BoolVar(&f.snubSystemd, "snub-systemd", false, "don't use systemd, even if available") + flags.BoolVar(&f.outYaml, "yaml", false, "output yaml, not json") +} + func (cli *cliSetup) NewDetectCmd() *cobra.Command { + f := detectFlags{} + cmd := &cobra.Command{ Use: "detect", Short: "detect running services, generate a setup file", DisableAutoGenTag: true, - RunE: runSetupDetect, + RunE: func(_ *cobra.Command, args []string) error { + return cli.detect(f) + }, } - defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") - - flags := cmd.Flags() - flags.String("detect-config", defaultServiceDetect, "path to service detection configuration") - flags.Bool("list-supported-services", false, "do not detect; only print supported services") - flags.StringSlice("force-unit", nil, "force detection of a systemd unit (can be repeated)") - flags.StringSlice("force-process", nil, "force detection of a running process (can be repeated)") - flags.StringSlice("skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") - flags.String("force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") - flags.String("force-os-id", "", "override OS.ID=[debian | ubuntu | , redhat...]") - flags.String("force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") - flags.Bool("snub-systemd", false, "don't use systemd, even if available") - flags.Bool("yaml", false, "output yaml, not json") - + f.bind(cmd) return cmd } func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { + var dryRun bool + cmd := &cobra.Command{ Use: "install-hub [setup_file] [flags]", Short: "install items from a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runSetupInstallHub, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(cmd.Context(), dryRun, args[0]) + 
}, } flags := cmd.Flags() - flags.Bool("dry-run", false, "don't install anything; print out what would have been") + flags.BoolVar(&dryRun, "dry-run", false, "don't install anything; print out what would have been") return cmd } func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { + var toDir string + cmd := &cobra.Command{ Use: "datasources [setup_file] [flags]", Short: "generate datasource (acquisition) configuration from a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runSetupDataSources, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.dataSources(args[0], toDir) + }, } flags := cmd.Flags() - flags.String("to-dir", "", "write the configuration to a directory, in multiple files") + flags.StringVar(&toDir, "to-dir", "", "write the configuration to a directory, in multiple files") return cmd } @@ -105,97 +134,50 @@ func (cli *cliSetup) NewValidateCmd() *cobra.Command { Short: "validate a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: runSetupValidate, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(args[0]) + }, } return cmd } -func runSetupDetect(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - detectConfigFile, err := flags.GetString("detect-config") - if err != nil { - return err - } - - var detectReader *os.File +func (cli *cliSetup) detect(f detectFlags) error { + var ( + detectReader *os.File + err error + ) - switch detectConfigFile { + switch f.detectConfigFile { case "-": log.Tracef("Reading detection rules from stdin") detectReader = os.Stdin default: - log.Tracef("Reading detection rules: %s", detectConfigFile) + log.Tracef("Reading detection rules: %s", f.detectConfigFile) - detectReader, err = os.Open(detectConfigFile) + detectReader, err = os.Open(f.detectConfigFile) if err != nil { return err } } - listSupportedServices, err := flags.GetBool("list-supported-services") - if err != nil { - return err - } - - forcedUnits, err 
:= flags.GetStringSlice("force-unit") - if err != nil { - return err - } - - forcedProcesses, err := flags.GetStringSlice("force-process") - if err != nil { - return err - } - - forcedOSFamily, err := flags.GetString("force-os-family") - if err != nil { - return err - } - - forcedOSID, err := flags.GetString("force-os-id") - if err != nil { - return err - } - - forcedOSVersion, err := flags.GetString("force-os-version") - if err != nil { - return err - } - - skipServices, err := flags.GetStringSlice("skip-service") - if err != nil { - return err - } - - snubSystemd, err := flags.GetBool("snub-systemd") - if err != nil { - return err - } - - if !snubSystemd { + if !f.snubSystemd { _, err := exec.LookPath("systemctl") if err != nil { log.Debug("systemctl not available: snubbing systemd") - snubSystemd = true + f.snubSystemd = true } } - outYaml, err := flags.GetBool("yaml") - if err != nil { - return err - } - - if forcedOSFamily == "" && forcedOSID != "" { + if f.forcedOSFamily == "" && f.forcedOSID != "" { log.Debug("force-os-id is set: force-os-family defaults to 'linux'") - forcedOSFamily = "linux" + f.forcedOSFamily = "linux" } - if listSupportedServices { + if f.listSupportedServices { supported, err := setup.ListSupported(detectReader) if err != nil { return err @@ -209,15 +191,15 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { } opts := setup.DetectOptions{ - ForcedUnits: forcedUnits, - ForcedProcesses: forcedProcesses, + ForcedUnits: f.forcedUnits, + ForcedProcesses: f.forcedProcesses, ForcedOS: setup.ExprOS{ - Family: forcedOSFamily, - ID: forcedOSID, - RawVersion: forcedOSVersion, + Family: f.forcedOSFamily, + ID: f.forcedOSID, + RawVersion: f.forcedOSVersion, }, - SkipServices: skipServices, - SnubSystemd: snubSystemd, + SkipServices: f.skipServices, + SnubSystemd: f.snubSystemd, } hubSetup, err := setup.Detect(detectReader, opts) @@ -225,7 +207,7 @@ func runSetupDetect(cmd *cobra.Command, args []string) error { return 
fmt.Errorf("detecting services: %w", err) } - setup, err := setupAsString(hubSetup, outYaml) + setup, err := setupAsString(hubSetup, f.outYaml) if err != nil { return err } @@ -273,16 +255,7 @@ func setupAsString(cs setup.Setup, outYaml bool) (string, error) { return string(ret), nil } -func runSetupDataSources(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - fromFile := args[0] - - toDir, err := flags.GetString("to-dir") - if err != nil { - return err - } - +func (cli *cliSetup) dataSources(fromFile string, toDir string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading setup file: %w", err) @@ -300,32 +273,23 @@ func runSetupDataSources(cmd *cobra.Command, args []string) error { return nil } -func runSetupInstallHub(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - fromFile := args[0] - - dryRun, err := flags.GetBool("dry-run") - if err != nil { - return err - } - +func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading file %s: %w", fromFile, err) } - hub, err := require.Hub(csConfig, require.RemoteHub(cmd.Context(), csConfig), log.StandardLogger()) + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) if err != nil { return err } - return setup.InstallHubItems(cmd.Context(), hub, input, dryRun) + return setup.InstallHubItems(ctx, hub, input, dryRun) } -func runSetupValidate(cmd *cobra.Command, args []string) error { - fromFile := args[0] - +func (cli *cliSetup) validate(fromFile string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading stdin: %w", err) diff --git a/pkg/setup/install.go b/pkg/setup/install.go index 09ac15971f8..fc5bd380fd9 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -62,79 +62,71 @@ func InstallHubItems(ctx context.Context, hub 
*cwhub.Hub, input []byte, dryRun b continue } - if len(install.Collections) > 0 { - for _, collection := range setupItem.Install.Collections { - item := hub.GetItem(cwhub.COLLECTIONS, collection) - if item == nil { - return fmt.Errorf("collection %s not found", collection) - } - - if dryRun { - fmt.Println("dry-run: would install collection", collection) - - continue - } - - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing collection %s: %w", item.Name, err) - } + for _, collection := range setupItem.Install.Collections { + item := hub.GetItem(cwhub.COLLECTIONS, collection) + if item == nil { + return fmt.Errorf("collection %s not found", collection) + } + + if dryRun { + fmt.Println("dry-run: would install collection", collection) + + continue + } + + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { + return fmt.Errorf("while installing collection %s: %w", item.Name, err) } } - if len(install.Parsers) > 0 { - for _, parser := range setupItem.Install.Parsers { - if dryRun { - fmt.Println("dry-run: would install parser", parser) + for _, parser := range setupItem.Install.Parsers { + if dryRun { + fmt.Println("dry-run: would install parser", parser) - continue - } + continue + } - item := hub.GetItem(cwhub.PARSERS, parser) - if item == nil { - return fmt.Errorf("parser %s not found", parser) - } + item := hub.GetItem(cwhub.PARSERS, parser) + if item == nil { + return fmt.Errorf("parser %s not found", parser) + } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing parser %s: %w", item.Name, err) - } + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { + return fmt.Errorf("while installing parser %s: %w", item.Name, err) } } - if len(install.Scenarios) > 0 { - for _, scenario := range setupItem.Install.Scenarios { - if dryRun { - fmt.Println("dry-run: would install scenario", scenario) + for _, scenario := range 
setupItem.Install.Scenarios { + if dryRun { + fmt.Println("dry-run: would install scenario", scenario) - continue - } + continue + } - item := hub.GetItem(cwhub.SCENARIOS, scenario) - if item == nil { - return fmt.Errorf("scenario %s not found", scenario) - } + item := hub.GetItem(cwhub.SCENARIOS, scenario) + if item == nil { + return fmt.Errorf("scenario %s not found", scenario) + } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing scenario %s: %w", item.Name, err) - } + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { + return fmt.Errorf("while installing scenario %s: %w", item.Name, err) } } - if len(install.PostOverflows) > 0 { - for _, postoverflow := range setupItem.Install.PostOverflows { - if dryRun { - fmt.Println("dry-run: would install postoverflow", postoverflow) + for _, postoverflow := range setupItem.Install.PostOverflows { + if dryRun { + fmt.Println("dry-run: would install postoverflow", postoverflow) - continue - } + continue + } - item := hub.GetItem(cwhub.POSTOVERFLOWS, postoverflow) - if item == nil { - return fmt.Errorf("postoverflow %s not found", postoverflow) - } + item := hub.GetItem(cwhub.POSTOVERFLOWS, postoverflow) + if item == nil { + return fmt.Errorf("postoverflow %s not found", postoverflow) + } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) - } + if err := item.Install(ctx, forceAction, downloadOnly); err != nil { + return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) } } } From 73792eacb60ae30919f3dd741301fd9690632f90 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Jun 2024 11:49:19 +0200 Subject: [PATCH 185/581] refactor pkg/parser: extract processGrok (#3080) * pkg/parser: extract method processGrok() * early return * early return/2 --- pkg/parser/node.go | 148 
++++++++++++++++++++++++--------------------- 1 file changed, 79 insertions(+), 69 deletions(-) diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 74c9e94a331..4bb2c4c5eaf 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -202,9 +202,84 @@ func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.E return isWhitelisted, nil } -func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { + +func (n *Node) processGrok(p *types.Event, cachedExprEnv map[string]any) (bool, bool, error) { + // Process grok if present, should be exclusive with nodes :) + clog := n.Logger var NodeHasOKGrok bool + gstr := "" + + if n.Grok.RunTimeRegexp == nil { + clog.Tracef("! No grok pattern : %p", n.Grok.RunTimeRegexp) + return true, false, nil + } + + clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp) + // for unparsed, parsed etc. set sensible defaults to reduce user hassle + if n.Grok.TargetField != "" { + // it's a hack to avoid using real reflect + if n.Grok.TargetField == "Line.Raw" { + gstr = p.Line.Raw + } else if val, ok := p.Parsed[n.Grok.TargetField]; ok { + gstr = val + } else { + clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed) + return false, false, nil + } + } else if n.Grok.RunTimeValue != nil { + output, err := exprhelpers.Run(n.Grok.RunTimeValue, cachedExprEnv, clog, n.Debug) + if err != nil { + clog.Warningf("failed to run RunTimeValue : %v", err) + return false, false, nil + } + + switch out := output.(type) { + case string: + gstr = out + case int: + gstr = fmt.Sprintf("%d", out) + case float64, float32: + gstr = fmt.Sprintf("%f", out) + default: + clog.Errorf("unexpected return type for RunTimeValue : %T", output) + } + } + + var groklabel string + if n.Grok.RegexpName == "" { + groklabel = fmt.Sprintf("%5.5s...", n.Grok.RegexpValue) + } else { + groklabel = n.Grok.RegexpName + } + + grok := 
n.Grok.RunTimeRegexp.Parse(gstr) + + if len(grok) == 0 { + // grok failed, node failed + clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr) + return false, false, nil + } + + /*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/ + NodeHasOKGrok = true + clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok)) + // We managed to grok stuff, merged into parse + for k, v := range grok { + clog.Debugf("\t.Parsed['%s'] = '%s'", k, v) + p.Parsed[k] = v + } + // if the grok succeed, process associated statics + err := n.ProcessStatics(n.Grok.Statics, p) + if err != nil { + clog.Errorf("(%s) Failed to process statics : %v", n.rn, err) + return false, false, err + } + + return true, NodeHasOKGrok, nil +} + +func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[string]interface{}) (bool, error) { clog := n.Logger cachedExprEnv := expressionEnv @@ -229,74 +304,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri return false, err } - // Process grok if present, should be exclusive with nodes :) - gstr := "" - - if n.Grok.RunTimeRegexp != nil { - clog.Tracef("Processing grok pattern : %s : %p", n.Grok.RegexpName, n.Grok.RunTimeRegexp) - // for unparsed, parsed etc. 
set sensible defaults to reduce user hassle - if n.Grok.TargetField != "" { - // it's a hack to avoid using real reflect - if n.Grok.TargetField == "Line.Raw" { - gstr = p.Line.Raw - } else if val, ok := p.Parsed[n.Grok.TargetField]; ok { - gstr = val - } else { - clog.Debugf("(%s) target field '%s' doesn't exist in %v", n.rn, n.Grok.TargetField, p.Parsed) - - NodeState = false - } - } else if n.Grok.RunTimeValue != nil { - output, err := exprhelpers.Run(n.Grok.RunTimeValue, cachedExprEnv, clog, n.Debug) - if err != nil { - clog.Warningf("failed to run RunTimeValue : %v", err) - - NodeState = false - } - - switch out := output.(type) { - case string: - gstr = out - case int: - gstr = fmt.Sprintf("%d", out) - case float64, float32: - gstr = fmt.Sprintf("%f", out) - default: - clog.Errorf("unexpected return type for RunTimeValue : %T", output) - } - } - - var groklabel string - if n.Grok.RegexpName == "" { - groklabel = fmt.Sprintf("%5.5s...", n.Grok.RegexpValue) - } else { - groklabel = n.Grok.RegexpName - } - - grok := n.Grok.RunTimeRegexp.Parse(gstr) - if len(grok) > 0 { - /*tag explicitly that the *current* node had a successful grok pattern. it's important to know success state*/ - NodeHasOKGrok = true - - clog.Debugf("+ Grok '%s' returned %d entries to merge in Parsed", groklabel, len(grok)) - // We managed to grok stuff, merged into parse - for k, v := range grok { - clog.Debugf("\t.Parsed['%s'] = '%s'", k, v) - p.Parsed[k] = v - } - // if the grok succeed, process associated statics - err := n.ProcessStatics(n.Grok.Statics, p) - if err != nil { - clog.Errorf("(%s) Failed to process statics : %v", n.rn, err) - return false, err - } - } else { - // grok failed, node failed - clog.Debugf("+ Grok '%s' didn't return data on '%s'", groklabel, gstr) - NodeState = false - } - } else { - clog.Tracef("! 
No grok pattern : %p", n.Grok.RunTimeRegexp) + NodeState, NodeHasOKGrok, err := n.processGrok(p, cachedExprEnv) + if err != nil { + return false, err } // Process the stash (data collection) if : a grok was present and succeeded, or if there is no grok From e859a751e9e431845d4f309b5c858f5cb7ca8256 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Jun 2024 13:04:47 +0200 Subject: [PATCH 186/581] lint: disable redundant linters, update revive configuration and code metrics (#3081) --- .golangci.yml | 53 +++++++++++++++++----------------------- pkg/csconfig/database.go | 5 +++- 2 files changed, 26 insertions(+), 32 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 925005a5b05..62fca6eb1d1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -16,28 +16,12 @@ linters-settings: # lower this after refactoring min-complexity: 128 - funlen: - # Checks the number of lines in a function. - # If lower than 0, disable the check. - # Default: 60 - # lower this after refactoring - lines: 437 - # Checks the number of statements in a function. - # If lower than 0, disable the check. 
- # Default: 40 - # lower this after refactoring - statements: 122 - govet: enable-all: true disable: - reflectvaluecompare - fieldalignment - lll: - # lower this after refactoring - line-length: 2607 - maintidx: # raise this after refactoring under: 11 @@ -125,7 +109,7 @@ linters-settings: disabled: true - name: cyclomatic # lower this after refactoring - arguments: [45] + arguments: [42] - name: defer disabled: true - name: empty-block @@ -137,9 +121,10 @@ linters-settings: - name: flag-parameter disabled: true - name: function-result-limit - disabled: true + arguments: [6] - name: function-length - disabled: true + # lower this after refactoring + arguments: [110, 235] - name: get-return disabled: true - name: increment-decrement @@ -149,9 +134,11 @@ linters-settings: - name: import-shadowing disabled: true - name: line-length-limit - disabled: true + # lower this after refactoring + arguments: [221] - name: max-control-nesting - disabled: true + # lower this after refactoring + arguments: [7] - name: max-public-structs disabled: true - name: optimize-operands-order @@ -211,6 +198,8 @@ linters: - gocyclo # revive - cyclop # revive + - lll # revive + - funlen # revive # # Disabled until fixed for go 1.22 @@ -234,7 +223,6 @@ linters: # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. # - exportloopref # checks for pointers to enclosing loop variables - # - funlen # Tool for detection of long functions # - ginkgolinter # enforces standards of using ginkgo and gomega # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. 
# - gochecknoinits # Checks that no init functions are present in Go code @@ -252,7 +240,6 @@ linters: # - importas # Enforces consistent import aliases # - ineffassign # Detects when assignments to existing variables are not used # - interfacebloat # A linter that checks the number of methods inside an interface. - # - lll # Reports long lines # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). # - logrlint # Check logr arguments. # - maintidx # maintidx measures the maintainability index of each function. @@ -473,14 +460,6 @@ issues: path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" - - linters: - - revive - path: pkg/metabase/metabase.go - - - linters: - - revive - path: pkg/metabase/container.go - - linters: - revive path: cmd/crowdsec-cli/copyfile.go @@ -494,6 +473,18 @@ issues: - canonicalheader path: pkg/apiserver/middlewares/v1/tls_auth.go + # tolerate long functions in tests + - linters: + - revive + path: "pkg/(.+)_test.go" + text: "function-length: .*" + + # tolerate long lines in tests + - linters: + - revive + path: "pkg/(.+)_test.go" + text: "line-length-limit: .*" + # tolerate deep exit in tests, for now - linters: - revive diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 2fe610eba68..d30cc20a938 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -79,7 +79,10 @@ func (c *Config) LoadDBConfig(inCli bool) error { switch { case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) - log.Warning("You are using sqlite without WAL, this can have a performance impact. If you do not store the database in a network share, set db_config.use_wal to true. 
Set explicitly to false to disable this warning.") + log.Warning( + "You are using sqlite without WAL, this can have a performance impact. " + + "If you do not store the database in a network share, set db_config.use_wal to true. " + + "Set explicitly to false to disable this warning.") case isNetwork: log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) c.DbConfig.UseWal = ptr.Of(false) From 4e09ae21828c3b8fb3c9c6a015c8162ebd53f4cc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Jun 2024 13:06:44 +0200 Subject: [PATCH 187/581] cscli: fixed some inconsistency in returning errors (#3076) --- cmd/crowdsec-cli/alerts.go | 2 +- cmd/crowdsec-cli/hubtest.go | 9 ++++----- cmd/crowdsec-cli/lapi.go | 7 ++----- cmd/crowdsec-cli/simulation.go | 4 ++-- cmd/crowdsec-cli/utils.go | 9 --------- 5 files changed, 9 insertions(+), 22 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 7c9c5f23032..0fe9852519c 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -540,7 +540,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { - printHelp(cmd) + _ = cmd.Help() return errors.New("missing alert_id") } return cli.inspect(details, args...) 
diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 458de672349..d58631e26c9 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -251,7 +251,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { cfg := cli.cfg() if !runAll && len(args) == 0 { - printHelp(cmd) + _ = cmd.Help() return errors.New("please provide test to run or --all flag") } hubPtr.NucleiTargetHost = NucleiTargetHost @@ -305,8 +305,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) } } - fmt.Printf("\nPlease fill your assert file(s) for test '%s', exiting\n", test.Name) - os.Exit(1) + return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) } testResult[test.Name] = test.Success if test.Success { @@ -389,7 +388,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { } if !success { - os.Exit(1) + return errors.New("some tests failed") } return nil @@ -580,7 +579,7 @@ func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { case showAppsecCov: fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) } - os.Exit(0) + return nil } switch cfg.Cscli.Output { diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index d2955230de9..0b8bc59dad5 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -376,8 +376,8 @@ cscli lapi context detect crowdsecurity/sshd-logs RunE: func(cmd *cobra.Command, args []string) error { cfg := cli.cfg() if !detectAll && len(args) == 0 { - log.Infof("Please provide parsers to detect or --all flag.") - printHelp(cmd) + _ = cmd.Help() + return errors.New("please provide parsers to detect or --all flag") } // to avoid all the log.Info from the loaders functions @@ -491,9 +491,6 @@ func (cli *cliLapi) newContextCmd() *cobra.Command { return nil }, - Run: func(cmd *cobra.Command, _ []string) { - printHelp(cmd) - }, } cmd.AddCommand(cli.newContextAddCmd()) diff --git 
a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 3301c4b797e..f8d8a660b8c 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -107,7 +107,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { return fmt.Errorf("unable to enable global simulation mode: %w", err) } } else { - printHelp(cmd) + _ = cmd.Help() } return nil @@ -154,7 +154,7 @@ func (cli *cliSimulation) NewDisableCmd() *cobra.Command { return fmt.Errorf("unable to disable global simulation mode: %w", err) } } else { - printHelp(cmd) + _ = cmd.Help() } return nil diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go index 1ae8f9da2ed..f6c32094958 100644 --- a/cmd/crowdsec-cli/utils.go +++ b/cmd/crowdsec-cli/utils.go @@ -5,18 +5,9 @@ import ( "net" "strings" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "github.com/crowdsecurity/crowdsec/pkg/types" ) -func printHelp(cmd *cobra.Command) { - if err := cmd.Help(); err != nil { - log.Fatalf("unable to print help(): %s", err) - } -} - func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *string) error { /*if a range is provided, change the scope*/ if *ipRange != "" { From a529e66cd893018f93ddebf0c968a772e8a6ae47 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:07:44 +0200 Subject: [PATCH 188/581] Typos (#3084) * comment fix * redundancy * typo nill -> nil * remove extra newline from log --- cmd/crowdsec-cli/support.go | 2 +- cmd/crowdsec/parse.go | 2 +- pkg/apiserver/apiserver_test.go | 2 +- pkg/database/flush.go | 2 +- pkg/exprhelpers/crowdsec_cti_test.go | 2 +- pkg/exprhelpers/debugger_test.go | 10 +++++----- pkg/parser/runtime.go | 4 ++-- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 3b0f53cd6e1..061733ef8d3 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -427,7 +427,7 @@ 
func (cli *cliSupport) writeToZip(zipWriter *zip.Writer, filename string, mtime } } -// writeToZip adds a file to the zip archive, from a file, and retains the mtime +// writeFileToZip adds a file to the zip archive, from a file, and retains the mtime func (cli *cliSupport) writeFileToZip(zw *zip.Writer, filename string, fromFile string) { mtime := time.Now() diff --git a/cmd/crowdsec/parse.go b/cmd/crowdsec/parse.go index 39eedfc858e..26eae66be2b 100644 --- a/cmd/crowdsec/parse.go +++ b/cmd/crowdsec/parse.go @@ -37,7 +37,7 @@ func runParse(input chan types.Event, output chan types.Event, parserCTX parser. /* parse the log using magic */ parsed, err := parser.Parse(parserCTX, event, nodes) if err != nil { - log.Errorf("failed parsing : %v\n", err) + log.Errorf("failed parsing: %v", err) } elapsed := time.Since(startParsing) globalParsingHistogram.With(prometheus.Labels{"source": event.Line.Src, "type": event.Line.Module}).Observe(elapsed.Seconds()) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 20c48337833..26531a592da 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -294,7 +294,7 @@ func TestWithWrongFlushConfig(t *testing.T) { config.API.Server.DbConfig.Flush.MaxItems = &maxItems apiServer, err := NewServer(config.API.Server) - cstest.RequireErrorContains(t, err, "max_items can't be zero or negative number") + cstest.RequireErrorContains(t, err, "max_items can't be zero or negative") assert.Nil(t, apiServer) } diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 56e42715b2c..5a1f0bea5bf 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -22,7 +22,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched maxAge := "" if config.MaxItems != nil && *config.MaxItems <= 0 { - return nil, errors.New("max_items can't be zero or negative number") + return nil, errors.New("max_items can't be zero or negative") } if config.MaxItems != nil { diff 
--git a/pkg/exprhelpers/crowdsec_cti_test.go b/pkg/exprhelpers/crowdsec_cti_test.go index 4c0346e447c..9f78b932d6d 100644 --- a/pkg/exprhelpers/crowdsec_cti_test.go +++ b/pkg/exprhelpers/crowdsec_cti_test.go @@ -109,7 +109,7 @@ func smokeHandler(req *http.Request) *http.Response { } } -func TestNillClient(t *testing.T) { +func TestNilClient(t *testing.T) { defer ShutdownCrowdsecCTI() if err := InitCrowdsecCTI(ptr.Of(""), nil, nil, nil); !errors.Is(err, cticlient.ErrDisabled) { diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 9c713a8d4f5..6832b4efaa8 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -59,7 +59,7 @@ func TestBaseDbg(t *testing.T) { "base_string": "hello world", "base_int": 42, "base_float": 42.42, - "nillvar": &teststruct{}, + "nilvar": &teststruct{}, "base_struct": struct { Foo string Bar int @@ -79,8 +79,8 @@ func TestBaseDbg(t *testing.T) { //Missing multi parametes function tests := []ExprDbgTest{ { - Name: "nill deref", - Expr: "Upper('1') == '1' && nillvar.Foo == '42'", + Name: "nil deref", + Expr: "Upper('1') == '1' && nilvar.Foo == '42'", Env: defaultEnv, ExpectedFailRuntime: true, ExpectedOutputs: []OpOutput{ @@ -307,8 +307,8 @@ func TestBaseDbg(t *testing.T) { t.Fatalf("test %s : unexpected compile error : %s", test.Name, err) } } - if test.Name == "nill deref" { - test.Env["nillvar"] = nil + if test.Name == "nil deref" { + test.Env["nilvar"] = nil } outdbg, ret, err := RunWithDebug(prog, test.Env, logger) if test.ExpectedFailRuntime { diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 1596ef5ffd9..8068690b68f 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -42,8 +42,8 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { iter := reflect.ValueOf(evt).Elem() if (iter == reflect.Value{}) || iter.IsZero() { - log.Tracef("event is nill") - //event is nill + log.Tracef("event is nil") + //event is nil return false } 
for _, f := range strings.Split(target, ".") { From 2f6d4cccd7729ff809c920c3dea128c51e0fbe9b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:17:27 +0200 Subject: [PATCH 189/581] cscli: refactor hubtests / extract methods run(), coverage() (#3086) --- .golangci.yml | 14 +- cmd/crowdsec-cli/hubtest.go | 319 ++++++++++++++++++------------------ 2 files changed, 171 insertions(+), 162 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 62fca6eb1d1..3e402529180 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -14,7 +14,7 @@ linters-settings: gocognit: # lower this after refactoring - min-complexity: 128 + min-complexity: 118 govet: enable-all: true @@ -24,7 +24,7 @@ linters-settings: maintidx: # raise this after refactoring - under: 11 + under: 16 misspell: locale: US @@ -473,6 +473,11 @@ issues: - canonicalheader path: pkg/apiserver/middlewares/v1/tls_auth.go + # tolerate complex functions in tests for now + - linters: + - maintidx + path: "(.+)_test.go" + # tolerate long functions in tests - linters: - revive @@ -512,11 +517,6 @@ issues: path: "cmd/crowdsec-cli/utils.go" text: "deep-exit: .*" - - linters: - - revive - path: "cmd/crowdsec-cli/hubtest.go" - text: "deep-exit: .*" - - linters: - revive path: "pkg/leakybucket/overflows.go" diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index d58631e26c9..8796fa48a17 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -234,6 +234,44 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios return cmd } + +func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { + cfg := cli.cfg() + + if !runAll && len(args) == 0 { + return errors.New("please provide test to run or --all flag") + } + hubPtr.NucleiTargetHost = NucleiTargetHost + hubPtr.AppSecHost = AppSecHost + if runAll { + if err := hubPtr.LoadAllTests(); err != nil { + return 
fmt.Errorf("unable to load all tests: %+v", err) + } + } else { + for _, testName := range args { + _, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + } + } + + // set timezone to avoid DST issues + os.Setenv("TZ", "UTC") + for _, test := range hubPtr.Tests { + if cfg.Cscli.Output == "human" { + log.Infof("Running test '%s'", test.Name) + } + err := test.Run() + if err != nil { + log.Errorf("running test '%s' failed: %+v", test.Name, err) + } + } + + return nil +} + + func (cli *cliHubTest) NewRunCmd() *cobra.Command { var ( noClean bool @@ -247,41 +285,8 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { Use: "run", Short: "run [test_name]", DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - cfg := cli.cfg() - - if !runAll && len(args) == 0 { - _ = cmd.Help() - return errors.New("please provide test to run or --all flag") - } - hubPtr.NucleiTargetHost = NucleiTargetHost - hubPtr.AppSecHost = AppSecHost - if runAll { - if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - } else { - for _, testName := range args { - _, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - } - } - - // set timezone to avoid DST issues - os.Setenv("TZ", "UTC") - for _, test := range hubPtr.Tests { - if cfg.Cscli.Output == "human" { - log.Infof("Running test '%s'", test.Name) - } - err := test.Run() - if err != nil { - log.Errorf("running test '%s' failed: %+v", test.Name, err) - } - } - - return nil + RunE: func(_ *cobra.Command, args []string) error { + return cli.run(runAll, NucleiTargetHost, AppSecHost, args) }, PersistentPostRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -493,140 +498,144 @@ func (cli *cliHubTest) NewListCmd() *cobra.Command { return cmd } -func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { - 
var ( - showParserCov bool - showScenarioCov bool - showOnlyPercent bool - showAppsecCov bool - ) +func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAppsecCov bool, showOnlyPercent bool) error { + cfg := cli.cfg() - cmd := &cobra.Command{ - Use: "coverage", - Short: "coverage", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - - // for this one we explicitly don't do for appsec - if err := HubTest.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - var err error - scenarioCoverage := []hubtest.Coverage{} - parserCoverage := []hubtest.Coverage{} - appsecRuleCoverage := []hubtest.Coverage{} - scenarioCoveragePercent := 0 - parserCoveragePercent := 0 - appsecRuleCoveragePercent := 0 - - // if both are false (flag by default), show both - showAll := !showScenarioCov && !showParserCov && !showAppsecCov - - if showParserCov || showAll { - parserCoverage, err = HubTest.GetParsersCoverage() - if err != nil { - return fmt.Errorf("while getting parser coverage: %w", err) - } - parserTested := 0 - for _, test := range parserCoverage { - if test.TestsCount > 0 { - parserTested++ - } - } - parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) + // for this one we explicitly don't do for appsec + if err := HubTest.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %+v", err) + } + var err error + scenarioCoverage := []hubtest.Coverage{} + parserCoverage := []hubtest.Coverage{} + appsecRuleCoverage := []hubtest.Coverage{} + scenarioCoveragePercent := 0 + parserCoveragePercent := 0 + appsecRuleCoveragePercent := 0 + + // if both are false (flag by default), show both + showAll := !showScenarioCov && !showParserCov && !showAppsecCov + + if showParserCov || showAll { + parserCoverage, err = HubTest.GetParsersCoverage() + if err != nil { + return fmt.Errorf("while getting parser coverage: %w", 
err) + } + parserTested := 0 + for _, test := range parserCoverage { + if test.TestsCount > 0 { + parserTested++ } + } + parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) + } - if showScenarioCov || showAll { - scenarioCoverage, err = HubTest.GetScenariosCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } - - scenarioTested := 0 - for _, test := range scenarioCoverage { - if test.TestsCount > 0 { - scenarioTested++ - } - } + if showScenarioCov || showAll { + scenarioCoverage, err = HubTest.GetScenariosCoverage() + if err != nil { + return fmt.Errorf("while getting scenario coverage: %w", err) + } - scenarioCoveragePercent = int(math.Round((float64(scenarioTested) / float64(len(scenarioCoverage)) * 100))) + scenarioTested := 0 + for _, test := range scenarioCoverage { + if test.TestsCount > 0 { + scenarioTested++ } + } - if showAppsecCov || showAll { - appsecRuleCoverage, err = HubTest.GetAppsecCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } + scenarioCoveragePercent = int(math.Round((float64(scenarioTested) / float64(len(scenarioCoverage)) * 100))) + } - appsecRuleTested := 0 - for _, test := range appsecRuleCoverage { - if test.TestsCount > 0 { - appsecRuleTested++ - } - } - appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) - } + if showAppsecCov || showAll { + appsecRuleCoverage, err = HubTest.GetAppsecCoverage() + if err != nil { + return fmt.Errorf("while getting scenario coverage: %w", err) + } - if showOnlyPercent { - switch { - case showAll: - fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) - case showParserCov: - fmt.Printf("parsers=%d%%", parserCoveragePercent) - case showScenarioCov: - fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) - case showAppsecCov: - 
fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) - } - return nil + appsecRuleTested := 0 + for _, test := range appsecRuleCoverage { + if test.TestsCount > 0 { + appsecRuleTested++ } + } + appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) + } - switch cfg.Cscli.Output { - case "human": - if showParserCov || showAll { - hubTestParserCoverageTable(color.Output, parserCoverage) - } + if showOnlyPercent { + switch { + case showAll: + fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) + case showParserCov: + fmt.Printf("parsers=%d%%", parserCoveragePercent) + case showScenarioCov: + fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) + case showAppsecCov: + fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) + } + return nil + } - if showScenarioCov || showAll { - hubTestScenarioCoverageTable(color.Output, scenarioCoverage) - } + switch cfg.Cscli.Output { + case "human": + if showParserCov || showAll { + hubTestParserCoverageTable(color.Output, parserCoverage) + } + + if showScenarioCov || showAll { + hubTestScenarioCoverageTable(color.Output, scenarioCoverage) + } + + if showAppsecCov || showAll { + hubTestAppsecRuleCoverageTable(color.Output, appsecRuleCoverage) + } + + fmt.Println() + if showParserCov || showAll { + fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) + } + if showScenarioCov || showAll { + fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) + } + if showAppsecCov || showAll { + fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) + } + case "json": + dump, err := json.MarshalIndent(parserCoverage, "", " ") + if err != nil { + return err + } + fmt.Printf("%s", dump) + dump, err = json.MarshalIndent(scenarioCoverage, "", " ") + if err != nil { + return err + } + fmt.Printf("%s", dump) + dump, err = 
json.MarshalIndent(appsecRuleCoverage, "", " ") + if err != nil { + return err + } + fmt.Printf("%s", dump) + default: + return errors.New("only human/json output modes are supported") + } - if showAppsecCov || showAll { - hubTestAppsecRuleCoverageTable(color.Output, appsecRuleCoverage) - } + return nil +} - fmt.Println() - if showParserCov || showAll { - fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) - } - if showScenarioCov || showAll { - fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) - } - if showAppsecCov || showAll { - fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) - } - case "json": - dump, err := json.MarshalIndent(parserCoverage, "", " ") - if err != nil { - return err - } - fmt.Printf("%s", dump) - dump, err = json.MarshalIndent(scenarioCoverage, "", " ") - if err != nil { - return err - } - fmt.Printf("%s", dump) - dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") - if err != nil { - return err - } - fmt.Printf("%s", dump) - default: - return errors.New("only human/json output modes are supported") - } +func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { + var ( + showParserCov bool + showScenarioCov bool + showOnlyPercent bool + showAppsecCov bool + ) - return nil + cmd := &cobra.Command{ + Use: "coverage", + Short: "coverage", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.coverage(showScenarioCov, showParserCov, showAppsecCov, showOnlyPercent) }, } From e6ebf7af223e36f47d06dc009ecd4a308da34464 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 13 Jun 2024 11:33:01 +0200 Subject: [PATCH 190/581] enable linter: revive (superfluous-else) (#3082) * enable linter: revive (superfluous-else) * lint (whitespace) --- .golangci.yml | 2 - pkg/parser/parsing_test.go | 87 +++++++++++++++++++++++++++----------- 2 files changed, 63 insertions(+), 26 deletions(-) diff --git a/.golangci.yml 
b/.golangci.yml index 3e402529180..bb20d42699f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -149,8 +149,6 @@ linters-settings: disabled: true - name: struct-tag disabled: true - - name: superfluous-else - disabled: true - name: time-equal disabled: true - name: var-naming diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 534b433a1b9..c5b1c353e9f 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -30,6 +30,7 @@ func TestParser(t *testing.T) { debug = true log.SetLevel(log.InfoLevel) + envSetting := os.Getenv("TEST_ONLY") pctx, ectx, err := prepTests() @@ -208,6 +209,7 @@ func loadTestFile(file string) []TestFile { dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) + var testSet []TestFile for { @@ -232,23 +234,26 @@ func loadTestFile(file string) []TestFile { func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bool) { var retInfo []string - var valid = false + + valid := false expectMaps := []map[string]string{expected.Parsed, expected.Meta, expected.Enriched} outMaps := []map[string]string{out.Parsed, out.Meta, out.Enriched} outLabels := []string{"Parsed", "Meta", "Enriched"} - //allow to check as well for stage and processed flags + // allow to check as well for stage and processed flags if expected.Stage != "" { if expected.Stage != out.Stage { if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch stage %s != %s", expected.Stage, out.Stage)) } + goto checkFinished - } else { - valid = true - if debug { - retInfo = append(retInfo, fmt.Sprintf("ok stage %s == %s", expected.Stage, out.Stage)) - } + } + + valid = true + + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok stage %s == %s", expected.Stage, out.Stage)) } } @@ -256,26 +261,30 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch process %t != %t", expected.Process, out.Process)) } + goto checkFinished - } else { - valid = true 
- if debug { - retInfo = append(retInfo, fmt.Sprintf("ok process %t == %t", expected.Process, out.Process)) - } + } + + valid = true + + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok process %t == %t", expected.Process, out.Process)) } if expected.Whitelisted != out.Whitelisted { if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch whitelist %t != %t", expected.Whitelisted, out.Whitelisted)) } + goto checkFinished - } else { - if debug { - retInfo = append(retInfo, fmt.Sprintf("ok whitelist %t == %t", expected.Whitelisted, out.Whitelisted)) - } - valid = true } + if debug { + retInfo = append(retInfo, fmt.Sprintf("ok whitelist %t == %t", expected.Whitelisted, out.Whitelisted)) + } + + valid = true + for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ { for expKey, expVal := range expectMaps[mapIdx] { outVal, ok := outMaps[mapIdx][expKey] @@ -283,19 +292,26 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo if debug { retInfo = append(retInfo, fmt.Sprintf("missing entry %s[%s]", outLabels[mapIdx], expKey)) } + valid = false + goto checkFinished } - if outVal != expVal { //ok entry + + if outVal != expVal { // ok entry if debug { retInfo = append(retInfo, fmt.Sprintf("mismatch %s[%s] %s != %s", outLabels[mapIdx], expKey, expVal, outVal)) } + valid = false + goto checkFinished } + if debug { retInfo = append(retInfo, fmt.Sprintf("ok %s[%s] %s == %s", outLabels[mapIdx], expKey, expVal, outVal)) } + valid = true } } @@ -309,6 +325,7 @@ checkFinished: retInfo = append(retInfo, fmt.Sprintf("KO ! 
\n\t%s", strings.Join(retInfo, "\n\t"))) } } + return retInfo, valid } @@ -320,9 +337,10 @@ func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error if err != nil { log.Errorf("Failed to process %s : %v", spew.Sdump(in), err) } - //log.Infof("Parser output : %s", spew.Sdump(out)) + // log.Infof("Parser output : %s", spew.Sdump(out)) results = append(results, out) } + log.Infof("parsed %d lines", len(testSet.Lines)) log.Infof("got %d results", len(results)) @@ -336,15 +354,17 @@ func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error reCheck: failinfo := []string{} + for ridx, result := range results { for eidx, expected := range testSet.Results { explain, match := matchEvent(expected, result, debug) if match { log.Infof("expected %d/%d matches result %d/%d", eidx, len(testSet.Results), ridx, len(results)) + if len(explain) > 0 { log.Printf("-> %s", explain[len(explain)-1]) } - //don't do this at home : delete current element from list and redo + // don't do this at home : delete current element from list and redo results[len(results)-1], results[ridx] = results[ridx], results[len(results)-1] results = results[:len(results)-1] @@ -352,34 +372,40 @@ reCheck: testSet.Results = testSet.Results[:len(testSet.Results)-1] goto reCheck - } else { - failinfo = append(failinfo, explain...) } + + failinfo = append(failinfo, explain...) 
} } + if len(results) > 0 { log.Printf("Errors : %s", strings.Join(failinfo, " / ")) return false, fmt.Errorf("leftover results : %+v", results) } + if len(testSet.Results) > 0 { log.Printf("Errors : %s", strings.Join(failinfo, " / ")) return false, fmt.Errorf("leftover expected results : %+v", testSet.Results) } + return true, nil } func testFile(testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { log.Warning("Going to process one test set") + for _, tf := range testSet { - //func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { + // func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { testOk, err := testSubSet(tf, pctx, nodes) if err != nil { log.Fatalf("test failed : %s", err) } + if !testOk { log.Fatalf("failed test : %+v", tf) } } + return true } @@ -404,48 +430,61 @@ func TestGeneratePatternsDoc(t *testing.T) { if err != nil { t.Fatalf("unable to load patterns : %s", err) } + log.Infof("-> %s", spew.Sdump(pctx)) /*don't judge me, we do it for the users*/ p := make(PairList, len(pctx.Grok.Patterns)) i := 0 + for key, val := range pctx.Grok.Patterns { p[i] = Pair{key, val} p[i].Value = strings.ReplaceAll(p[i].Value, "{%{", "\\{\\%\\{") i++ } + sort.Sort(p) - f, err := os.OpenFile("./patterns-documentation.md", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + f, err := os.OpenFile("./patterns-documentation.md", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { t.Fatalf("failed to open : %s", err) } + if _, err := f.WriteString("# Patterns documentation\n\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := f.WriteString("You will find here a generated documentation of all the patterns loaded by crowdsec.\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := f.WriteString("They are sorted by pattern length, and are meant to be used in parsers, in the form %{PATTERN_NAME}.\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := 
f.WriteString("\n\n"); err != nil { t.Fatal("failed to write to file") } + for _, k := range p { if _, err := fmt.Fprintf(f, "## %s\n\nPattern :\n```\n%s\n```\n\n", k.Key, k.Value); err != nil { t.Fatal("failed to write to file") } + fmt.Printf("%v\t%v\n", k.Key, k.Value) } + if _, err := f.WriteString("\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := f.WriteString("# Documentation generation\n"); err != nil { t.Fatal("failed to write to file") } + if _, err := f.WriteString("This documentation is generated by `pkg/parser` : `GO_WANT_TEST_DOC=1 go test -run TestGeneratePatternsDoc`\n"); err != nil { t.Fatal("failed to write to file") } + f.Close() } From 44a2014f6266c076b87335b8fbb0e469511e44ed Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 17 Jun 2024 10:16:46 +0200 Subject: [PATCH 191/581] db: don't set bouncer last_pull until first connection (#3020) * db: don't set bouncer last_pull until first connection * cscli bouncers prune: query creation date if they never connected --- cmd/crowdsec-cli/bouncers.go | 9 ++++++-- cmd/crowdsec-cli/bouncers_table.go | 7 +++++- pkg/apiserver/controllers/v1/decisions.go | 11 +++++++--- pkg/database/bouncers.go | 13 ++++++++++-- pkg/database/decisions.go | 15 +++++++++---- pkg/database/ent/bouncer.go | 11 ++++++---- pkg/database/ent/bouncer/bouncer.go | 2 -- pkg/database/ent/bouncer/where.go | 10 +++++++++ pkg/database/ent/bouncer_create.go | 9 +------- pkg/database/ent/bouncer_update.go | 18 ++++++++++++++++ pkg/database/ent/migrate/schema.go | 2 +- pkg/database/ent/mutation.go | 21 +++++++++++++++++- pkg/database/ent/runtime.go | 4 ---- pkg/database/ent/schema/bouncer.go | 3 +-- test/bats/10_bouncers.bats | 26 +++++++++++++++++++++-- 15 files changed, 125 insertions(+), 36 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index f8628538378..0c96cc5eee4 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go 
@@ -116,7 +116,12 @@ func (cli *cliBouncers) list() error { valid = "pending" } - if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType}); err != nil { + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { return fmt.Errorf("failed to write raw: %w", err) } } @@ -259,7 +264,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { } } - bouncers, err := cli.db.QueryBouncersLastPulltimeLT(time.Now().UTC().Add(-duration)) + bouncers, err := cli.db.QueryBouncersInactiveSince(time.Now().UTC().Add(-duration)) if err != nil { return fmt.Errorf("unable to query bouncers: %w", err) } diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go index 417eb9e8e0b..c32762ba266 100644 --- a/cmd/crowdsec-cli/bouncers_table.go +++ b/cmd/crowdsec-cli/bouncers_table.go @@ -21,7 +21,12 @@ func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { revoked = emoji.Prohibited } - t.AddRow(b.Name, b.IPAddress, revoked, b.LastPull.Format(time.RFC3339), b.Type, b.Version, b.AuthType) + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + t.AddRow(b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType) } t.Render() diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 543c832095a..3d8e0232224 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -72,7 +72,7 @@ func (c *Controller) GetDecision(gctx *gin.Context) { return } - if time.Now().UTC().Sub(bouncerInfo.LastPull) >= time.Minute { + if bouncerInfo.LastPull == nil || time.Now().UTC().Sub(*bouncerInfo.LastPull) >= time.Minute { if err := c.DBClient.UpdateBouncerLastPull(time.Now().UTC(), bouncerInfo.ID); 
err != nil { log.Errorf("failed to update bouncer last pull: %v", err) } @@ -186,7 +186,7 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun return nil } -func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull time.Time, dbFunc func(time.Time, map[string][]string) ([]*ent.Decision, error)) error { +func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull *time.Time, dbFunc func(*time.Time, map[string][]string) ([]*ent.Decision, error)) error { //respBuffer := bytes.NewBuffer([]byte{}) limit := 30000 //FIXME : make it configurable needComma := false @@ -348,8 +348,13 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en //data = KeepLongestDecision(data) ret["new"] = FormatDecisions(data) + since := time.Time{} + if bouncerInfo.LastPull != nil { + since = bouncerInfo.LastPull.Add(-2 * time.Second) + } + // getting expired decisions - data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(bouncerInfo.LastPull.Add((-2 * time.Second)), filters) // do we want to give exactly lastPull time ? + data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(&since, filters) // do we want to give exactly lastPull time ? 
if err != nil { log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 2cc6b9dcb47..03a3227301d 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -115,6 +115,15 @@ func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id in return nil } -func (c *Client) QueryBouncersLastPulltimeLT(t time.Time) ([]*ent.Bouncer, error) { - return c.Ent.Bouncer.Query().Where(bouncer.LastPullLT(t)).All(c.CTX) +func (c *Client) QueryBouncersInactiveSince(t time.Time) ([]*ent.Bouncer, error) { + return c.Ent.Bouncer.Query().Where( + // poor man's coalesce + bouncer.Or( + bouncer.LastPullLT(t), + bouncer.And( + bouncer.LastPullIsNil(), + bouncer.CreatedAtLT(t), + ), + ), + ).All(c.CTX) } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 294515d603e..fc582247e59 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -254,11 +254,15 @@ func longestDecisionForScopeTypeValue(s *sql.Selector) { ) } -func (c *Client) QueryExpiredDecisionsSinceWithFilters(since time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), - decision.UntilGT(since), ) + + if since != nil { + query = query.Where(decision.UntilGT(*since)) + } + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) @@ -281,12 +285,15 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since time.Time, filters return data, nil } -func (c *Client) QueryNewDecisionsSinceWithFilters(since time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c 
*Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( - decision.CreatedAtGT(since), decision.UntilGT(time.Now().UTC()), ) + if since != nil { + query = query.Where(decision.CreatedAtGT(*since)) + } + // Allow a bouncer to ask for non-deduplicated results if v, ok := filters["dedup"]; !ok || v[0] != "false" { query = query.Where(longestDecisionForScopeTypeValue) diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 973442bfa66..d7597d2a449 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -34,7 +34,7 @@ type Bouncer struct { // Version holds the value of the "version" field. Version string `json:"version"` // LastPull holds the value of the "last_pull" field. - LastPull time.Time `json:"last_pull"` + LastPull *time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. AuthType string `json:"auth_type"` selectValues sql.SelectValues @@ -126,7 +126,8 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field last_pull", values[i]) } else if value.Valid { - b.LastPull = value.Time + b.LastPull = new(time.Time) + *b.LastPull = value.Time } case bouncer.FieldAuthType: if value, ok := values[i].(*sql.NullString); !ok { @@ -193,8 +194,10 @@ func (b *Bouncer) String() string { builder.WriteString("version=") builder.WriteString(b.Version) builder.WriteString(", ") - builder.WriteString("last_pull=") - builder.WriteString(b.LastPull.Format(time.ANSIC)) + if v := b.LastPull; v != nil { + builder.WriteString("last_pull=") + builder.WriteString(v.Format(time.ANSIC)) + } builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(b.AuthType) diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 3f201347e40..59afb199cb5 100644 --- 
a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -71,8 +71,6 @@ var ( UpdateDefaultUpdatedAt func() time.Time // DefaultIPAddress holds the default value on creation for the "ip_address" field. DefaultIPAddress string - // DefaultLastPull holds the default value on creation for the "last_pull" field. - DefaultLastPull func() time.Time // DefaultAuthType holds the default value on creation for the "auth_type" field. DefaultAuthType string ) diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index 86079794fee..e3c5752331e 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -589,6 +589,16 @@ func LastPullLTE(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldLTE(FieldLastPull, v)) } +// LastPullIsNil applies the IsNil predicate on the "last_pull" field. +func LastPullIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldLastPull)) +} + +// LastPullNotNil applies the NotNil predicate on the "last_pull" field. +func LastPullNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldLastPull)) +} + // AuthTypeEQ applies the EQ predicate on the "auth_type" field. 
func AuthTypeEQ(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 7a4b3d9b013..f2dfc767872 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -183,10 +183,6 @@ func (bc *BouncerCreate) defaults() { v := bouncer.DefaultIPAddress bc.mutation.SetIPAddress(v) } - if _, ok := bc.mutation.LastPull(); !ok { - v := bouncer.DefaultLastPull() - bc.mutation.SetLastPull(v) - } if _, ok := bc.mutation.AuthType(); !ok { v := bouncer.DefaultAuthType bc.mutation.SetAuthType(v) @@ -210,9 +206,6 @@ func (bc *BouncerCreate) check() error { if _, ok := bc.mutation.Revoked(); !ok { return &ValidationError{Name: "revoked", err: errors.New(`ent: missing required field "Bouncer.revoked"`)} } - if _, ok := bc.mutation.LastPull(); !ok { - return &ValidationError{Name: "last_pull", err: errors.New(`ent: missing required field "Bouncer.last_pull"`)} - } if _, ok := bc.mutation.AuthType(); !ok { return &ValidationError{Name: "auth_type", err: errors.New(`ent: missing required field "Bouncer.auth_type"`)} } @@ -276,7 +269,7 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { } if value, ok := bc.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) - _node.LastPull = value + _node.LastPull = &value } if value, ok := bc.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index 1dc5aa080c3..31dd0bd708e 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -136,6 +136,12 @@ func (bu *BouncerUpdate) SetNillableLastPull(t *time.Time) *BouncerUpdate { return bu } +// ClearLastPull clears the value of the "last_pull" field. 
+func (bu *BouncerUpdate) ClearLastPull() *BouncerUpdate { + bu.mutation.ClearLastPull() + return bu +} + // SetAuthType sets the "auth_type" field. func (bu *BouncerUpdate) SetAuthType(s string) *BouncerUpdate { bu.mutation.SetAuthType(s) @@ -230,6 +236,9 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := bu.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } + if bu.mutation.LastPullCleared() { + _spec.ClearField(bouncer.FieldLastPull, field.TypeTime) + } if value, ok := bu.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } @@ -361,6 +370,12 @@ func (buo *BouncerUpdateOne) SetNillableLastPull(t *time.Time) *BouncerUpdateOne return buo } +// ClearLastPull clears the value of the "last_pull" field. +func (buo *BouncerUpdateOne) ClearLastPull() *BouncerUpdateOne { + buo.mutation.ClearLastPull() + return buo +} + // SetAuthType sets the "auth_type" field. func (buo *BouncerUpdateOne) SetAuthType(s string) *BouncerUpdateOne { buo.mutation.SetAuthType(s) @@ -485,6 +500,9 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if value, ok := buo.mutation.LastPull(); ok { _spec.SetField(bouncer.FieldLastPull, field.TypeTime, value) } + if buo.mutation.LastPullCleared() { + _spec.ClearField(bouncer.FieldLastPull, field.TypeTime) + } if value, ok := buo.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index b0e7f990f6e..584e848f09e 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -68,7 +68,7 @@ var ( {Name: "ip_address", Type: field.TypeString, Nullable: true, Default: ""}, {Name: "type", Type: field.TypeString, Nullable: true}, {Name: "version", Type: field.TypeString, Nullable: true}, - {Name: "last_pull", Type: field.TypeTime}, + {Name: "last_pull", Type: 
field.TypeTime, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "api-key"}, } // BouncersTable holds the schema information for the "bouncers" table. diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index b88154324bb..c012e870c8f 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -2840,7 +2840,7 @@ func (m *BouncerMutation) LastPull() (r time.Time, exists bool) { // OldLastPull returns the old "last_pull" field's value of the Bouncer entity. // If the Bouncer object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BouncerMutation) OldLastPull(ctx context.Context) (v time.Time, err error) { +func (m *BouncerMutation) OldLastPull(ctx context.Context) (v *time.Time, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldLastPull is only allowed on UpdateOne operations") } @@ -2854,9 +2854,22 @@ func (m *BouncerMutation) OldLastPull(ctx context.Context) (v time.Time, err err return oldValue.LastPull, nil } +// ClearLastPull clears the value of the "last_pull" field. +func (m *BouncerMutation) ClearLastPull() { + m.last_pull = nil + m.clearedFields[bouncer.FieldLastPull] = struct{}{} +} + +// LastPullCleared returns if the "last_pull" field was cleared in this mutation. +func (m *BouncerMutation) LastPullCleared() bool { + _, ok := m.clearedFields[bouncer.FieldLastPull] + return ok +} + // ResetLastPull resets all changes to the "last_pull" field. func (m *BouncerMutation) ResetLastPull() { m.last_pull = nil + delete(m.clearedFields, bouncer.FieldLastPull) } // SetAuthType sets the "auth_type" field. 
@@ -3135,6 +3148,9 @@ func (m *BouncerMutation) ClearedFields() []string { if m.FieldCleared(bouncer.FieldVersion) { fields = append(fields, bouncer.FieldVersion) } + if m.FieldCleared(bouncer.FieldLastPull) { + fields = append(fields, bouncer.FieldLastPull) + } return fields } @@ -3158,6 +3174,9 @@ func (m *BouncerMutation) ClearField(name string) error { case bouncer.FieldVersion: m.ClearVersion() return nil + case bouncer.FieldLastPull: + m.ClearLastPull() + return nil } return fmt.Errorf("unknown Bouncer nullable field %s", name) } diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index c593cd89fcb..b4da6dfb9db 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -72,10 +72,6 @@ func init() { bouncerDescIPAddress := bouncerFields[5].Descriptor() // bouncer.DefaultIPAddress holds the default value on creation for the ip_address field. bouncer.DefaultIPAddress = bouncerDescIPAddress.Default.(string) - // bouncerDescLastPull is the schema descriptor for last_pull field. - bouncerDescLastPull := bouncerFields[8].Descriptor() - // bouncer.DefaultLastPull holds the default value on creation for the last_pull field. - bouncer.DefaultLastPull = bouncerDescLastPull.Default.(func() time.Time) // bouncerDescAuthType is the schema descriptor for auth_type field. bouncerDescAuthType := bouncerFields[9].Descriptor() // bouncer.DefaultAuthType holds the default value on creation for the auth_type field. diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index acaa86008f5..242b5f5fe4a 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -28,8 +28,7 @@ func (Bouncer) Fields() []ent.Field { field.String("ip_address").Default("").Optional().StructTag(`json:"ip_address"`), field.String("type").Optional().StructTag(`json:"type"`), field.String("version").Optional().StructTag(`json:"version"`), - field.Time("last_pull"). 
- Default(types.UtcNow).StructTag(`json:"last_pull"`), + field.Time("last_pull").Nillable().Optional().StructTag(`json:"last_pull"`), field.String("auth_type").StructTag(`json:"auth_type"`).Default(types.ApiKeyAuthType), } } diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 1ef39ceb05e..5bf4b5358db 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -39,7 +39,30 @@ teardown() { assert_output --partial "API key for 'ciTestBouncer':" rune -0 cscli bouncers delete ciTestBouncer rune -0 cscli bouncers list -o json - assert_output '[]' + assert_json '[]' +} + +@test "cscli bouncers list" { + export API_KEY=bouncerkey + rune -0 cscli bouncers add ciTestBouncer --key "$API_KEY" + + rune -0 cscli bouncers list -o json + rune -0 jq -c '.[] | [.ip_address,.last_pull,.name]' <(output) + assert_json '["",null,"ciTestBouncer"]' + rune -0 cscli bouncers list -o raw + assert_line 'name,ip,revoked,last_pull,type,version,auth_type' + assert_line 'ciTestBouncer,,validated,,,,api-key' + rune -0 cscli bouncers list -o human + assert_output --regexp 'ciTestBouncer.*api-key.*' + + # the first connection sets last_pull and ip address + rune -0 lapi-get '/v1/decisions' + rune -0 cscli bouncers list -o json + rune -0 jq -r '.[] | .ip_address' <(output) + assert_output 127.0.0.1 + rune -0 cscli bouncers list -o json + rune -0 jq -r '.[] | .last_pull' <(output) + refute_output null } @test "we can create a bouncer with a known key" { @@ -83,4 +106,3 @@ teardown() { rune -0 cscli bouncers prune assert_output 'No bouncers to prune.' 
} - From 4521a98ecc34cf4beeee653f1eb5914f1879f42d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 17 Jun 2024 10:39:50 +0200 Subject: [PATCH 192/581] db: don't set machine heartbeat until first connection (#3019) * db: don't set machine heartbeat until first connection * cscli machines prune: if hearbeat is not set, look at creation date * lint --- cmd/crowdsec-cli/machines.go | 2 +- pkg/database/ent/machine/machine.go | 2 -- pkg/database/ent/machine_create.go | 4 --- pkg/database/ent/runtime.go | 4 --- pkg/database/ent/schema/machine.go | 2 +- pkg/database/machines.go | 45 ++++++++++++++++++++--------- test/bats/30_machines.bats | 7 +++++ 7 files changed, 41 insertions(+), 25 deletions(-) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 7beaa5c7fdd..20933dc28e5 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -414,7 +414,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b } if !notValidOnly { - if pending, err := cli.db.QueryLastValidatedHeartbeatLT(time.Now().UTC().Add(-duration)); err == nil { + if pending, err := cli.db.QueryMachinesInactiveSince(time.Now().UTC().Add(-duration)); err == nil { machines = append(machines, pending...) } } diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 46ea6deb03d..d7dece9f8ef 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -87,8 +87,6 @@ var ( UpdateDefaultUpdatedAt func() time.Time // DefaultLastPush holds the default value on creation for the "last_push" field. DefaultLastPush func() time.Time - // DefaultLastHeartbeat holds the default value on creation for the "last_heartbeat" field. - DefaultLastHeartbeat func() time.Time // ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. 
ScenariosValidator func(string) error // DefaultIsValidated holds the default value on creation for the "isValidated" field. diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index 8d4bfb74b2a..2e4cf9f1500 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -227,10 +227,6 @@ func (mc *MachineCreate) defaults() { v := machine.DefaultLastPush() mc.mutation.SetLastPush(v) } - if _, ok := mc.mutation.LastHeartbeat(); !ok { - v := machine.DefaultLastHeartbeat() - mc.mutation.SetLastHeartbeat(v) - } if _, ok := mc.mutation.IsValidated(); !ok { v := machine.DefaultIsValidated mc.mutation.SetIsValidated(v) diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index b4da6dfb9db..8d50d916029 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -142,10 +142,6 @@ func init() { machineDescLastPush := machineFields[2].Descriptor() // machine.DefaultLastPush holds the default value on creation for the last_push field. machine.DefaultLastPush = machineDescLastPush.Default.(func() time.Time) - // machineDescLastHeartbeat is the schema descriptor for last_heartbeat field. - machineDescLastHeartbeat := machineFields[3].Descriptor() - // machine.DefaultLastHeartbeat holds the default value on creation for the last_heartbeat field. - machine.DefaultLastHeartbeat = machineDescLastHeartbeat.Default.(func() time.Time) // machineDescScenarios is the schema descriptor for scenarios field. machineDescScenarios := machineFields[7].Descriptor() // machine.ScenariosValidator is a validator for the "scenarios" field. It is called by the builders before save. 
diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 997a2041453..7b4d97ed35c 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -4,6 +4,7 @@ import ( "entgo.io/ent" "entgo.io/ent/schema/edge" "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -25,7 +26,6 @@ func (Machine) Fields() []ent.Field { Default(types.UtcNow). Nillable().Optional(), field.Time("last_heartbeat"). - Default(types.UtcNow). Nillable().Optional(), field.String("machineId"). Unique(). diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 7a64c1d4d6e..18fd32fdd84 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -13,8 +13,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const CapiMachineID = types.CAPIOrigin -const CapiListsMachineID = types.ListOrigin +const ( + CapiMachineID = types.CAPIOrigin + CapiListsMachineID = types.ListOrigin +) func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost) @@ -30,6 +32,7 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } + if len(machineExist) > 0 { if force { _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(c.CTX) @@ -37,12 +40,15 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(UpdateFail, "machine '%s'", *machineID) } + machine, err := c.QueryMachineByID(*machineID) if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } + return machine, nil } + return nil, errors.Wrapf(UserExists, 
"user '%s'", *machineID) } @@ -54,7 +60,6 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA SetIsValidated(isValidated). SetAuthType(authType). Save(c.CTX) - if err != nil { c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(InsertFail, "creating machine '%s'", *machineID) @@ -72,6 +77,7 @@ func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) { c.Log.Warningf("QueryMachineByID : %s", err) return &ent.Machine{}, errors.Wrapf(UserNotExists, "user '%s'", machineID) } + return machine, nil } @@ -80,6 +86,7 @@ func (c *Client) ListMachines() ([]*ent.Machine, error) { if err != nil { return nil, errors.Wrapf(QueryFail, "listing machines: %s", err) } + return machines, nil } @@ -88,21 +95,21 @@ func (c *Client) ValidateMachine(machineID string) error { if err != nil { return errors.Wrapf(UpdateFail, "validating machine: %s", err) } + if rets == 0 { - return fmt.Errorf("machine not found") + return errors.New("machine not found") } + return nil } func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { - var machines []*ent.Machine - var err error - - machines, err = c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX) + machines, err := c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX) if err != nil { c.Log.Warningf("QueryPendingMachine : %s", err) return nil, errors.Wrapf(QueryFail, "querying pending machines: %s", err) } + return machines, nil } @@ -116,7 +123,7 @@ func (c *Client) DeleteWatcher(name string) error { } if nbDeleted == 0 { - return fmt.Errorf("machine doesn't exist") + return errors.New("machine doesn't exist") } return nil @@ -127,10 +134,12 @@ func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { for i, b := range machines { ids[i] = b.ID } + nbDeleted, err := c.Ent.Machine.Delete().Where(machine.IDIn(ids...)).Exec(c.CTX) if err != nil { return nbDeleted, err } + return nbDeleted, nil } @@ -139,6 +148,7 @@ func (c 
*Client) UpdateMachineLastHeartBeat(machineID string) error { if err != nil { return errors.Wrapf(UpdateFail, "updating machine last_heartbeat: %s", err) } + return nil } @@ -150,6 +160,7 @@ func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { if err != nil { return fmt.Errorf("unable to update machine in database: %s", err) } + return nil } @@ -160,6 +171,7 @@ func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { if err != nil { return fmt.Errorf("unable to update machine IP in database: %s", err) } + return nil } @@ -170,6 +182,7 @@ func (c *Client) UpdateMachineVersion(ipAddr string, ID int) error { if err != nil { return fmt.Errorf("unable to update machine version in database: %s", err) } + return nil } @@ -178,17 +191,23 @@ func (c *Client) IsMachineRegistered(machineID string) (bool, error) { if err != nil { return false, err } + if len(exist) == 1 { return true, nil } + if len(exist) > 1 { - return false, fmt.Errorf("more than one item with the same machineID in database") + return false, errors.New("more than one item with the same machineID in database") } return false, nil - } -func (c *Client) QueryLastValidatedHeartbeatLT(t time.Time) ([]*ent.Machine, error) { - return c.Ent.Machine.Query().Where(machine.LastHeartbeatLT(t), machine.IsValidatedEQ(true)).All(c.CTX) +func (c *Client) QueryMachinesInactiveSince(t time.Time) ([]*ent.Machine, error) { + return c.Ent.Machine.Query().Where( + machine.Or( + machine.And(machine.LastHeartbeatLT(t), machine.IsValidatedEQ(true)), + machine.And(machine.LastHeartbeatIsNil(), machine.CreatedAtLT(t)), + ), + ).All(c.CTX) } diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 415e5f8693f..1d65151b6c8 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,6 +62,13 @@ teardown() { assert_output 1 } +@test "heartbeat is initially null" { + rune -0 cscli machines add foo --auto --file /dev/null + rune -0 cscli machines list -o json + rune -0 yq 
'.[] | select(.machineId == "foo") | .last_heartbeat' <(output) + assert_output null +} + @test "register, validate and then remove a machine" { rune -0 cscli lapi register --machine CiTestMachineRegister -f /dev/null -o human assert_stderr --partial "Successfully registered to Local API (LAPI)" From 659774fd3d17eea9a872aafbe553477da036b547 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Jun 2024 10:38:23 +0200 Subject: [PATCH 193/581] refactor: prefer logrus.WithField over WithFields with a single param (#3087) --- cmd/crowdsec-cli/config_backup.go | 8 ++----- pkg/acquisition/acquisition.go | 4 +--- pkg/acquisition/modules/appsec/appsec.go | 4 +--- pkg/acquisition/modules/appsec/appsec_test.go | 2 +- .../modules/cloudwatch/cloudwatch.go | 2 +- pkg/acquisition/modules/docker/docker.go | 2 +- pkg/acquisition/modules/docker/docker_test.go | 24 +++++-------------- pkg/acquisition/modules/file/file_test.go | 20 ++++------------ .../modules/journalctl/journalctl_test.go | 24 +++++-------------- pkg/acquisition/modules/kafka/kafka_test.go | 8 ++----- pkg/acquisition/modules/kinesis/kinesis.go | 8 +++---- .../modules/kinesis/kinesis_test.go | 20 ++++------------ .../modules/kubernetesaudit/k8s_audit_test.go | 8 ++----- pkg/acquisition/modules/loki/loki_test.go | 12 +++------- pkg/acquisition/modules/syslog/syslog_test.go | 8 ++----- .../modules/wineventlog/wineventlog_test.go | 12 +++------- pkg/apiserver/middlewares/v1/api_key.go | 12 +++------- pkg/cache/cache.go | 4 +--- pkg/cwhub/dataset.go | 2 +- pkg/cwhub/itemupgrade.go | 2 +- pkg/cwhub/remote.go | 2 +- pkg/exprhelpers/crowdsec_cti.go | 5 +--- pkg/parser/enrich_date_test.go | 4 +--- pkg/parser/node.go | 8 ++----- pkg/types/queue.go | 2 +- 25 files changed, 57 insertions(+), 150 deletions(-) diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index d1e4a393555..e8ac6213530 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ 
b/cmd/crowdsec-cli/config_backup.go @@ -21,9 +21,7 @@ func (cli *cliConfig) backupHub(dirPath string) error { } for _, itemType := range cwhub.ItemTypes { - clog := log.WithFields(log.Fields{ - "type": itemType, - }) + clog := log.WithField("type", itemType) itemMap := hub.GetItemMap(itemType) if itemMap == nil { @@ -39,9 +37,7 @@ func (cli *cliConfig) backupHub(dirPath string) error { upstreamParsers := []string{} for k, v := range itemMap { - clog = clog.WithFields(log.Fields{ - "file": v.Name, - }) + clog = clog.WithField("file", v.Name) if !v.State.Installed { // only backup installed ones clog.Debugf("[%s]: not installed", k) continue diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index ab7d954cac1..069bf67d852 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -161,9 +161,7 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr if err := types.ConfigureLogger(clog); err != nil { return nil, fmt.Errorf("while configuring datasource logger: %w", err) } - subLogger := clog.WithFields(log.Fields{ - "type": dsn, - }) + subLogger := clog.WithField("type", dsn) uniqueId := uuid.NewString() if transformExpr != "" { vm, err := expr.Compile(transformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index f97905406ce..07ca56dfb70 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -210,9 +210,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe runner := AppsecRunner{ inChan: w.InChan, UUID: appsecRunnerUUID, - logger: w.logger.WithFields(log.Fields{ - "runner_uuid": appsecRunnerUUID, - }), + logger: w.logger.WithField("runner_uuid", appsecRunnerUUID), AppsecRuntime: &wrt, Labels: w.config.Labels, } diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 5fe4cfe236c..c769ea3d0fd 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -41,7 +41,7 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { InChan := make(chan appsec.ParsedRequest) OutChan := make(chan types.Event) - logger := log.WithFields(log.Fields{"test": test.name}) + logger := log.WithField("test", test.name) //build rules for ridx, rule := range test.inband_rules { diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 1ac1465d390..1859bbf0f84 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -403,7 +403,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() } newStream.t = tomb.Tomb{} - newStream.logger = cw.logger.WithFields(log.Fields{"stream": newStream.StreamName}) + newStream.logger = cw.logger.WithField("stream", newStream.StreamName) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) newStream.t.Go(func() error { return cw.TailLogStream(&newStream, outChan) diff --git a/pkg/acquisition/modules/docker/docker.go 
b/pkg/acquisition/modules/docker/docker.go index 3a694b99d76..857d7e7af78 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -609,7 +609,7 @@ func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan * case newContainer := <-in: if _, ok := d.runningContainerState[newContainer.ID]; !ok { newContainer.t = &tomb.Tomb{} - newContainer.logger = d.logger.WithFields(log.Fields{"container_name": newContainer.Name}) + newContainer.logger = d.logger.WithField("container_name", newContainer.Name) newContainer.t.Go(func() error { return d.TailDocker(newContainer, outChan, deleteChan) }) diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index bcf0966a2d1..e1cc4db96ad 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -55,9 +55,7 @@ container_name: }, } - subLogger := log.WithFields(log.Fields{ - "type": "docker", - }) + subLogger := log.WithField("type", "docker") for _, test := range tests { f := DockerSource{} @@ -108,9 +106,7 @@ func TestConfigureDSN(t *testing.T) { expectedErr: "", }, } - subLogger := log.WithFields(log.Fields{ - "type": "docker", - }) + subLogger := log.WithField("type", "docker") for _, test := range tests { f := DockerSource{} @@ -169,13 +165,9 @@ container_name_regexp: if ts.expectedOutput != "" { logger.SetLevel(ts.logLevel) - subLogger = logger.WithFields(log.Fields{ - "type": "docker", - }) + subLogger = logger.WithField("type", "docker") } else { - subLogger = log.WithFields(log.Fields{ - "type": "docker", - }) + subLogger = log.WithField("type", "docker") } readLogs = false @@ -310,14 +302,10 @@ func TestOneShot(t *testing.T) { if ts.expectedOutput != "" { logger.SetLevel(ts.logLevel) - subLogger = logger.WithFields(log.Fields{ - "type": "docker", - }) + subLogger = logger.WithField("type", "docker") } else { log.SetLevel(ts.logLevel) - subLogger = 
log.WithFields(log.Fields{ - "type": "docker", - }) + subLogger = log.WithField("type", "docker") } readLogs = false diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 0873b837a3f..688812f2fd3 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -49,9 +49,7 @@ exclude_regexps: ["as[a-$d"]`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "file", - }) + subLogger := log.WithField("type", "file") for _, tc := range tests { tc := tc @@ -91,9 +89,7 @@ func TestConfigureDSN(t *testing.T) { }, } - subLogger := log.WithFields(log.Fields{ - "type": "file", - }) + subLogger := log.WithField("type", "file") for _, tc := range tests { tc := tc @@ -211,9 +207,7 @@ filename: test_files/test_delete.log`, logger, hook := test.NewNullLogger() logger.SetLevel(tc.logLevel) - subLogger := logger.WithFields(log.Fields{ - "type": "file", - }) + subLogger := logger.WithField("type", "file") tomb := tomb.Tomb{} out := make(chan types.Event, 100) @@ -372,9 +366,7 @@ force_inotify: true`, testPattern), logger, hook := test.NewNullLogger() logger.SetLevel(tc.logLevel) - subLogger := logger.WithFields(log.Fields{ - "type": "file", - }) + subLogger := logger.WithField("type", "file") tomb := tomb.Tomb{} out := make(chan types.Event) @@ -451,9 +443,7 @@ func TestExclusion(t *testing.T) { exclude_regexps: ["\\.gz$"]` logger, hook := test.NewNullLogger() // logger.SetLevel(ts.logLevel) - subLogger := logger.WithFields(log.Fields{ - "type": "file", - }) + subLogger := logger.WithField("type", "file") f := fileacquisition.FileSource{} if err := f.Configure([]byte(config), subLogger, configuration.METRICS_NONE); err != nil { diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 9d1f1bb7e0e..f381a227534 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ 
b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -47,9 +47,7 @@ journalctl_filter: }, } - subLogger := log.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger := log.WithField("type", "journalctl") for _, test := range tests { f := JournalCtlSource{} @@ -97,9 +95,7 @@ func TestConfigureDSN(t *testing.T) { }, } - subLogger := log.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger := log.WithField("type", "journalctl") for _, test := range tests { f := JournalCtlSource{} @@ -153,13 +149,9 @@ journalctl_filter: if ts.expectedOutput != "" { logger, hook = test.NewNullLogger() logger.SetLevel(ts.logLevel) - subLogger = logger.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger = logger.WithField("type", "journalctl") } else { - subLogger = log.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger = log.WithField("type", "journalctl") } tomb := tomb.Tomb{} @@ -227,13 +219,9 @@ journalctl_filter: if ts.expectedOutput != "" { logger, hook = test.NewNullLogger() logger.SetLevel(ts.logLevel) - subLogger = logger.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger = logger.WithField("type", "journalctl") } else { - subLogger = log.WithFields(log.Fields{ - "type": "journalctl", - }) + subLogger = log.WithField("type", "journalctl") } tomb := tomb.Tomb{} diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 54715a81251..245d3ed58c8 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -149,9 +149,7 @@ func TestStreamingAcquisition(t *testing.T) { }, } - subLogger := log.WithFields(log.Fields{ - "type": "kafka", - }) + subLogger := log.WithField("type", "kafka") createTopic("crowdsecplaintext", "localhost:9092") @@ -222,9 +220,7 @@ func TestStreamingAcquisitionWithSSL(t *testing.T) { }, } - subLogger := log.WithFields(log.Fields{ - "type": "kafka", - }) + subLogger := log.WithField("type", "kafka") 
createTopic("crowdsecssl", "localhost:9092") diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 5d3cf8f80a0..485cefcf01d 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -334,7 +334,7 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan } func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEventStreamReader, out chan types.Event, shardId string, streamName string) error { - logger := k.logger.WithFields(log.Fields{"shard_id": shardId}) + logger := k.logger.WithField("shard_id", shardId) //ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately //and we won't be able to start a new one if this is the first one started by the tomb //TODO: look into parent shards to see if a shard is closed before starting to read it ? @@ -397,7 +397,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { return fmt.Errorf("resource part of stream ARN %s does not start with stream/", k.Config.StreamARN) } - k.logger = k.logger.WithFields(log.Fields{"stream": parsedARN.Resource[7:]}) + k.logger = k.logger.WithField("stream", parsedARN.Resource[7:]) k.logger.Info("starting kinesis acquisition with enhanced fan-out") err = k.DeregisterConsumer() if err != nil { @@ -439,7 +439,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { } func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { - logger := k.logger.WithFields(log.Fields{"shard": shardId}) + logger := k.logger.WithField("shard", shardId) logger.Debugf("Starting to read shard") sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ShardId: aws.String(shardId), StreamName: &k.Config.StreamName, @@ -485,7 +485,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro } func (k 
*KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error { - k.logger = k.logger.WithFields(log.Fields{"stream": k.Config.StreamName}) + k.logger = k.logger.WithField("stream", k.Config.StreamName) k.logger.Info("starting kinesis acquisition from shards") for { shards, err := k.kClient.ListShards(&kinesis.ListShardsInput{ diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index a4e4f2f7378..d1d398c129e 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -139,9 +139,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "kinesis", - }) + subLogger := log.WithField("type", "kinesis") for _, test := range tests { f := KinesisSource{} err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -171,9 +169,7 @@ stream_name: stream-1-shard`, for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) - err := f.Configure([]byte(config), log.WithFields(log.Fields{ - "type": "kinesis", - }), configuration.METRICS_NONE) + err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } @@ -217,9 +213,7 @@ stream_name: stream-2-shards`, for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) - err := f.Configure([]byte(config), log.WithFields(log.Fields{ - "type": "kinesis", - }), configuration.METRICS_NONE) + err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } @@ -266,9 +260,7 @@ from_subscription: true`, for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) - err := f.Configure([]byte(config), log.WithFields(log.Fields{ - 
"type": "kinesis", - }), configuration.METRICS_NONE) + err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) if err != nil { t.Fatalf("Error configuring source: %s", err) } @@ -312,9 +304,7 @@ use_enhanced_fanout: true`, for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) - err := f.Configure([]byte(config), log.WithFields(log.Fields{ - "type": "kinesis", - })) + err := f.Configure([]byte(config), log.WithField("type", "kinesis")) if err != nil { t.Fatalf("Error configuring source: %s", err) } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index 331822ecf5b..b6e6f6b03e9 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -67,9 +67,7 @@ webhook_path: /k8s-audit`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "k8s-audit", - }) + subLogger := log.WithField("type", "k8s-audit") for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -230,9 +228,7 @@ webhook_path: /k8s-audit`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "k8s-audit", - }) + subLogger := log.WithField("type", "k8s-audit") for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 83742546959..051a9b93ed5 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -124,9 +124,7 @@ query: > testName: "Invalid DelayFor", }, } - subLogger := log.WithFields(log.Fields{ - "type": "loki", - }) + subLogger := log.WithField("type", "loki") for _, test := range tests { t.Run(test.testName, func(t *testing.T) { @@ -343,9 +341,7 @@ since: 1h for _, ts := range tests { logger := log.New() - subLogger := logger.WithFields(log.Fields{ - "type": "loki", - }) + subLogger := 
logger.WithField("type", "loki") lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) @@ -509,9 +505,7 @@ query: > {server="demo"} ` logger := log.New() - subLogger := logger.WithFields(log.Fields{ - "type": "loki", - }) + subLogger := logger.WithField("type", "loki") title := time.Now().String() lokiSource := loki.LokiSource{} diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index ba14c7db053..8096740f5e4 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -52,9 +52,7 @@ listen_addr: 10.0.0`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "syslog", - }) + subLogger := log.WithField("type", "syslog") for _, test := range tests { s := SyslogSource{} err := s.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -134,9 +132,7 @@ listen_addr: 127.0.0.1`, for _, ts := range tests { ts := ts t.Run(ts.name, func(t *testing.T) { - subLogger := log.WithFields(log.Fields{ - "type": "syslog", - }) + subLogger := log.WithField("type", "syslog") s := SyslogSource{} err := s.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) if err != nil { diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_test.go index c937ceba825..2ea0e365be5 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_test.go @@ -54,9 +54,7 @@ xpath_query: test`, }, } - subLogger := log.WithFields(log.Fields{ - "type": "windowseventlog", - }) + subLogger := log.WithField("type", "windowseventlog") for _, test := range tests { f := WinEventLogSource{} err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -113,9 +111,7 @@ event_level: bla`, expectedErr: "invalid log level", }, } - subLogger := log.WithFields(log.Fields{ - 
"type": "windowseventlog", - }) + subLogger := log.WithField("type", "windowseventlog") for _, test := range tests { f := WinEventLogSource{} f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -181,9 +177,7 @@ event_ids: expectedLines: nil, }, } - subLogger := log.WithFields(log.Fields{ - "type": "windowseventlog", - }) + subLogger := log.WithField("type", "windowseventlog") evthandler, err := eventlog.Open("Application") diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 4561b8f7789..314a4da1046 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -75,9 +75,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } - logger = logger.WithFields(log.Fields{ - "cn": extractedCN, - }) + logger = logger.WithField("cn", extractedCN) bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) bouncer, err := a.DbClient.SelectBouncerByName(bouncerName) @@ -141,9 +139,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { clientIP := c.ClientIP() - logger := log.WithFields(log.Fields{ - "ip": clientIP, - }) + logger := log.WithField("ip", clientIP) if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { bouncer = a.authTLS(c, logger) @@ -158,9 +154,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return } - logger = logger.WithFields(log.Fields{ - "name": bouncer.Name, - }) + logger = logger.WithField("name", bouncer.Name) if bouncer.IPAddress == "" { if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index c2153ee2342..8a696caf1f4 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -64,9 +64,7 @@ func CacheInit(cfg CacheCfg) error { } clog.SetLevel(*cfg.LogLevel) - cfg.Logger = clog.WithFields(log.Fields{ - "cache": cfg.Name, - }) + cfg.Logger = clog.WithField("cache", cfg.Name) tmpCache := gcache.New(cfg.Size) 
diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go index 6d4f35c285c..90bc9e057f9 100644 --- a/pkg/cwhub/dataset.go +++ b/pkg/cwhub/dataset.go @@ -46,7 +46,7 @@ func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader WithHTTPClient(hubClient). ToFile(destPath). CompareContent(). - WithLogger(logrus.WithFields(logrus.Fields{"url": dataS.SourceURL})) + WithLogger(logrus.WithField("url", dataS.SourceURL)) if !force { d = d.WithLastModified(). diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 4dad226fd78..d74544ddaed 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -125,7 +125,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin WithHTTPClient(hubClient). ToFile(destPath). WithMakeDirs(true). - WithLogger(logrus.WithFields(logrus.Fields{"url": url})). + WithLogger(logrus.WithField("url", url)). CompareContent(). VerifyHash("sha256", wantHash) diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index abb2ddae2ad..0678a7488f8 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -46,7 +46,7 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e WithHTTPClient(hubClient). ToFile(destPath). CompareContent(). - WithLogger(logrus.WithFields(logrus.Fields{"url": url})). + WithLogger(logrus.WithField("url", url)). 
Download(ctx, url) if err != nil { return false, err diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index a640ea79f48..4e54b07bfc2 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -45,10 +45,7 @@ func InitCrowdsecCTI(Key *string, TTL *time.Duration, Size *int, LogLevel *log.L if LogLevel != nil { clog.SetLevel(*LogLevel) } - customLog := log.Fields{ - "type": "crowdsec-cti", - } - subLogger := clog.WithFields(customLog) + subLogger := clog.WithField("type", "crowdsec-cti") CrowdsecCTIInitCache(*Size, *TTL) ctiClient = cticlient.NewCrowdsecCTIClient(cticlient.WithAPIKey(CTIApiKey), cticlient.WithLogger(subLogger)) CTIApiEnabled = true diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 085ef5ca342..434667cbeaf 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -42,9 +42,7 @@ func TestDateParse(t *testing.T) { }, } - logger := log.WithFields(log.Fields{ - "test": "test", - }) + logger := log.WithField("test", "test") for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 4bb2c4c5eaf..bb57995a129 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -464,15 +464,11 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { } clog.SetLevel(log.DebugLevel) - n.Logger = clog.WithFields(log.Fields{ - "id": n.rn, - }) + n.Logger = clog.WithField("id", n.rn) n.Logger.Infof("%s has debug enabled", n.Name) } else { /* else bind it to the default one (might find something more elegant here)*/ - n.Logger = log.WithFields(log.Fields{ - "id": n.rn, - }) + n.Logger = log.WithField("id", n.rn) } /* display info about top-level nodes, they should be the only one with explicit stage name ?*/ diff --git a/pkg/types/queue.go b/pkg/types/queue.go index d9b737d548f..12a3ab37074 100644 --- a/pkg/types/queue.go +++ b/pkg/types/queue.go @@ -22,7 +22,7 @@ func 
NewQueue(l int) *Queue { Queue: make([]Event, 0, l), L: l, } - log.WithFields(log.Fields{"Capacity": q.L}).Debugf("Creating queue") + log.WithField("Capacity", q.L).Debugf("Creating queue") return q } From a1ebe1822186c298a03de3ae2668038f27174033 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:18:41 +0200 Subject: [PATCH 194/581] config: expand env variables in local_api_credentials.yaml and .yaml.local (#3093) * config: expand env variables in local_api_credentials.yaml and .yaml.local * fix func test --- pkg/csconfig/api.go | 5 ++++- test/bats/01_cscli.bats | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 267e101cea8..fb29111ecd5 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/yamlpatch" @@ -126,7 +127,9 @@ func (l *LocalApiClientCfg) Load() error { return err } - dec := yaml.NewDecoder(bytes.NewReader(fcontent)) + configData := csstring.StrictExpand(string(fcontent), os.LookupEnv) + + dec := yaml.NewDecoder(strings.NewReader(configData)) dec.KnownFields(true) err = dec.Decode(&l.Credentials) diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 8dfdf701a95..a0878ad0e12 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -245,6 +245,43 @@ teardown() { assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" } +@test "cscli - LAPI credentials file can reference env variables" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + URL=$(config_get "$LOCAL_API_CREDENTIALS" '.url') + export URL + LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') + export LOGIN + PASSWORD=$(config_get 
"$LOCAL_API_CREDENTIALS" '.password') + export PASSWORD + + # shellcheck disable=SC2016 + echo '{"url":"$URL","login":"$LOGIN","password":"$PASSWORD"}' > "$LOCAL_API_CREDENTIALS".local + + config_set '.crowdsec_service.enable=false' + rune -0 ./instance-crowdsec start + + rune -0 cscli lapi status + assert_stderr --partial "You can successfully interact with Local API (LAPI)" + + rm "$LOCAL_API_CREDENTIALS".local + + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.url="$URL"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.login="$LOGIN"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' + + rune -0 cscli lapi status + assert_stderr --partial "You can successfully interact with Local API (LAPI)" + + # but if a variable is not defined, there is no specific error message + unset URL + rune -1 cscli lapi status + # shellcheck disable=SC2016 + assert_stderr --partial 'BaseURL must have a trailing slash' +} + @test "cscli - missing LAPI client settings" { config_set 'del(.api.client)' rune -1 cscli lapi status From 8a259fd25b1c6abe3c7ee446c955a3144b295248 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Jun 2024 22:13:26 +0200 Subject: [PATCH 195/581] lint (copyloopvar) (#2971) --- pkg/acquisition/acquisition_test.go | 3 --- pkg/acquisition/modules/appsec/appsec.go | 1 - pkg/acquisition/modules/cloudwatch/cloudwatch_test.go | 4 ---- pkg/acquisition/modules/file/file_test.go | 4 ---- pkg/acquisition/modules/kafka/kafka_test.go | 2 -- .../modules/syslog/internal/parser/rfc3164/parse_test.go | 6 ------ .../modules/syslog/internal/parser/rfc3164/perf_test.go | 1 - .../modules/syslog/internal/parser/rfc5424/parse_test.go | 3 --- .../modules/syslog/internal/parser/rfc5424/perf_test.go | 1 - pkg/acquisition/modules/syslog/syslog_test.go | 1 - pkg/apiclient/decisions_service_test.go | 1 - pkg/apiserver/apic_metrics_test.go | 1 - 
pkg/apiserver/apic_test.go | 7 ------- pkg/csconfig/api_test.go | 4 ---- pkg/csconfig/config_test.go | 1 - pkg/csconfig/crowdsec_service_test.go | 1 - pkg/csconfig/cscli_test.go | 1 - pkg/csconfig/database_test.go | 1 - pkg/csconfig/hub_test.go | 1 - pkg/csconfig/simulation_test.go | 2 -- pkg/csplugin/broker_test.go | 1 - pkg/csplugin/listfiles_test.go | 1 - pkg/csplugin/utils_test.go | 1 - pkg/csprofiles/csprofiles_test.go | 2 -- pkg/exprhelpers/exprlib_test.go | 5 ----- pkg/exprhelpers/jsonextract_test.go | 2 -- pkg/fflag/features_test.go | 7 ------- pkg/parser/enrich_date_test.go | 1 - pkg/parser/whitelist_test.go | 2 -- pkg/setup/detect_test.go | 8 -------- pkg/types/event_test.go | 3 --- 31 files changed, 79 deletions(-) diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index 1fbac2cdc00..cc57aecb732 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -179,7 +179,6 @@ wowo: ajsajasjas } for _, tc := range tests { - tc := tc t.Run(tc.TestName, func(t *testing.T) { common := configuration.DataSourceCommonCfg{} yaml.Unmarshal([]byte(tc.String), &common) @@ -282,7 +281,6 @@ func TestLoadAcquisitionFromFile(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.TestName, func(t *testing.T) { dss, err := LoadAcquisitionFromFile(&tc.Config, nil) cstest.RequireErrorContains(t, err, tc.ExpectedError) @@ -549,7 +547,6 @@ func TestConfigureByDSN(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.dsn, func(t *testing.T) { srcs, err := LoadAcquisitionFromDSN(tc.dsn, map[string]string{"type": "test_label"}, "") cstest.RequireErrorContains(t, err, tc.ExpectedError) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 07ca56dfb70..1d9a9bfec3c 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -251,7 +251,6 @@ func (w *AppsecSource) StreamingAcquisition(out chan 
types.Event, t *tomb.Tomb) w.logger.Infof("%d appsec runner to start", len(w.AppsecRunners)) for _, runner := range w.AppsecRunners { - runner := runner runner.outChan = out t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live/runner") diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 12e01ddf609..89a2b56bc00 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -423,7 +423,6 @@ stream_name: test_stream`), } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) @@ -556,7 +555,6 @@ stream_name: test_stream`), } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) @@ -621,7 +619,6 @@ func TestConfigureByDSN(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) @@ -743,7 +740,6 @@ func TestOneShotAcquisition(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 688812f2fd3..2f84c6de324 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -52,7 +52,6 @@ exclude_regexps: ["as[a-$d"]`, subLogger := log.WithField("type", "file") for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := fileacquisition.FileSource{} err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) @@ -92,7 +91,6 @@ func TestConfigureDSN(t *testing.T) { 
subLogger := log.WithField("type", "file") for _, tc := range tests { - tc := tc t.Run(tc.dsn, func(t *testing.T) { f := fileacquisition.FileSource{} err := f.ConfigureByDSN(tc.dsn, map[string]string{"type": "testtype"}, subLogger, "") @@ -202,7 +200,6 @@ filename: test_files/test_delete.log`, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { logger, hook := test.NewNullLogger() logger.SetLevel(tc.logLevel) @@ -361,7 +358,6 @@ force_inotify: true`, testPattern), } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { logger, hook := test.NewNullLogger() logger.SetLevel(tc.logLevel) diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 245d3ed58c8..7b467142cc9 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -162,7 +162,6 @@ func TestStreamingAcquisition(t *testing.T) { } for _, ts := range tests { - ts := ts t.Run(ts.name, func(t *testing.T) { k := KafkaSource{} @@ -233,7 +232,6 @@ func TestStreamingAcquisitionWithSSL(t *testing.T) { } for _, ts := range tests { - ts := ts t.Run(ts.name, func(t *testing.T) { k := KafkaSource{} diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go index 48772d596f4..8fb5089a61f 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go @@ -22,7 +22,6 @@ func TestPri(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) @@ -64,7 +63,6 @@ func TestTimestamp(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { opts := []RFC3164Option{} if test.currentYear { @@ -118,7 +116,6 @@ func TestHostname(t *testing.T) { } for _, test := range tests { - test 
:= test t.Run(test.input, func(t *testing.T) { opts := []RFC3164Option{} if test.strictHostname { @@ -163,7 +160,6 @@ func TestTag(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) @@ -207,7 +203,6 @@ func TestMessage(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) @@ -329,7 +324,6 @@ func TestParse(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := NewRFC3164Parser(test.opts...) err := r.Parse([]byte(test.input)) diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go index 42073cafbae..3805090f57f 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/perf_test.go @@ -51,7 +51,6 @@ func BenchmarkParse(b *testing.B) { } var err error for _, test := range tests { - test := test b.Run(string(test.input), func(b *testing.B) { for i := 0; i < b.N; i++ { r := NewRFC3164Parser(test.opts...) 
diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go index 66a20d594e4..eed72244867 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go @@ -25,7 +25,6 @@ func TestPri(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { r := &RFC5424{} r.buf = []byte(test.input) @@ -61,7 +60,6 @@ func TestHostname(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.input, func(t *testing.T) { opts := []RFC5424Option{} if test.strictHostname { @@ -200,7 +198,6 @@ func TestParse(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { r := NewRFC5424Parser(test.opts...) err := r.Parse([]byte(test.input)) diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go index 318571e91ee..a86c17e8ddf 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/perf_test.go @@ -92,7 +92,6 @@ func BenchmarkParse(b *testing.B) { } var err error for _, test := range tests { - test := test b.Run(test.label, func(b *testing.B) { for i := 0; i < b.N; i++ { r := NewRFC5424Parser() diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 8096740f5e4..0e823ecd32a 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -130,7 +130,6 @@ listen_addr: 127.0.0.1`, } for _, ts := range tests { - ts := ts t.Run(ts.name, func(t *testing.T) { subLogger := log.WithField("type", "syslog") s := SyslogSource{} diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index 
97b5aa26482..6942cfc9d85 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -458,7 +458,6 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { o := &DecisionsStreamOpts{ Startup: tt.fields.Startup, diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index f3b9b352316..15ad63b391b 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -63,7 +63,6 @@ func TestAPICSendMetrics(t *testing.T) { defer httpmock.Deactivate() for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { url, err := url.ParseRequestURI("http://api.crowdsec.net/") require.NoError(t, err) diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 10f4cf9444b..dc6ed98f946 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -165,7 +165,6 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { api := getAPIC(t) for machineID, scenarios := range tc.machineIDsWithScenarios { @@ -233,7 +232,6 @@ func TestNewAPIC(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { setConfig() httpmock.Activate() @@ -353,7 +351,6 @@ func TestAPICGetMetrics(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { apiClient := getAPIC(t) cleanUp(apiClient) @@ -461,7 +458,6 @@ func TestCreateAlertsForDecision(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { if got := createAlertsForDecisions(tc.args.decisions); !reflect.DeepEqual(got, tc.want) { t.Errorf("createAlertsForDecisions() = %v, want %v", got, tc.want) @@ -541,7 +537,6 @@ func TestFillAlertsWithDecisions(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { addCounters, _ := 
makeAddAndDeleteCounters() if got := fillAlertsWithDecisions(tc.args.alerts, tc.args.decisions, addCounters); !reflect.DeepEqual(got, tc.want) { @@ -1159,7 +1154,6 @@ func TestAPICPull(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { api = getAPIC(t) api.pullInterval = time.Millisecond @@ -1286,7 +1280,6 @@ func TestShouldShareAlert(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { ret := shouldShareAlert(tc.alert, tc.consoleConfig) assert.Equal(t, tc.expectedRet, ret) diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 463b7c1b2ec..079936253a1 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -64,7 +64,6 @@ func TestLoadLocalApiClientCfg(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.Load() cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -122,7 +121,6 @@ func TestLoadOnlineApiClientCfg(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.Load() cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -245,7 +243,6 @@ func TestLoadAPIServer(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadAPIServer(false) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -309,7 +306,6 @@ func TestParseCapiWhitelists(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { wl, err := parseCapiWhitelists(strings.NewReader(tc.input)) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go index 56ecc202373..11f1f0cf68d 100644 --- a/pkg/csconfig/config_test.go +++ b/pkg/csconfig/config_test.go @@ -32,7 +32,6 @@ func TestNewCrowdSecConfig(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { result := &Config{} assert.Equal(t, 
tc.expected, result) diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go index 8d332271b03..2f41beaf55e 100644 --- a/pkg/csconfig/crowdsec_service_test.go +++ b/pkg/csconfig/crowdsec_service_test.go @@ -181,7 +181,6 @@ func TestLoadCrowdsec(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadCrowdsec() cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/cscli_test.go b/pkg/csconfig/cscli_test.go index 807f02d216c..a58fdd6f857 100644 --- a/pkg/csconfig/cscli_test.go +++ b/pkg/csconfig/cscli_test.go @@ -39,7 +39,6 @@ func TestLoadCSCLI(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.loadCSCLI() cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/database_test.go b/pkg/csconfig/database_test.go index c7741baf038..954b1c47fd7 100644 --- a/pkg/csconfig/database_test.go +++ b/pkg/csconfig/database_test.go @@ -46,7 +46,6 @@ func TestLoadDBConfig(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadDBConfig(false) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/hub_test.go b/pkg/csconfig/hub_test.go index 2f9528c6043..49d010a04f4 100644 --- a/pkg/csconfig/hub_test.go +++ b/pkg/csconfig/hub_test.go @@ -35,7 +35,6 @@ func TestLoadHub(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.loadHub() cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csconfig/simulation_test.go b/pkg/csconfig/simulation_test.go index 71b09ee397c..a678d7edd49 100644 --- a/pkg/csconfig/simulation_test.go +++ b/pkg/csconfig/simulation_test.go @@ -76,7 +76,6 @@ func TestSimulationLoading(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadSimulation() 
cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -124,7 +123,6 @@ func TestIsSimulated(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { isSimulated := tc.SimulationConfig.IsSimulated(tc.Input) require.Equal(t, tc.expected, isSimulated) diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index 34c9ce7d684..f2179acb2c1 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -129,7 +129,6 @@ func (s *PluginSuite) TestBrokerInit() { } for _, tc := range tests { - tc := tc s.Run(tc.name, func() { t := s.T() if tc.action != nil { diff --git a/pkg/csplugin/listfiles_test.go b/pkg/csplugin/listfiles_test.go index a7b41c51d07..a4188804149 100644 --- a/pkg/csplugin/listfiles_test.go +++ b/pkg/csplugin/listfiles_test.go @@ -47,7 +47,6 @@ func TestListFilesAtPath(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { got, err := listFilesAtPath(tc.path) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csplugin/utils_test.go b/pkg/csplugin/utils_test.go index f02e7f491b2..7fa9a77acd5 100644 --- a/pkg/csplugin/utils_test.go +++ b/pkg/csplugin/utils_test.go @@ -37,7 +37,6 @@ func TestGetPluginNameAndTypeFromPath(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { got, got1, err := getPluginTypeAndSubtypeFromPath(tc.path) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csprofiles/csprofiles_test.go b/pkg/csprofiles/csprofiles_test.go index be1d0178e72..0247243ddd3 100644 --- a/pkg/csprofiles/csprofiles_test.go +++ b/pkg/csprofiles/csprofiles_test.go @@ -102,7 +102,6 @@ func TestNewProfile(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { profilesCfg := []*csconfig.ProfileCfg{ test.profileCfg, @@ -196,7 +195,6 @@ func TestEvaluateProfile(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t 
*testing.T) { profilesCfg := []*csconfig.ProfileCfg{ tt.args.profileCfg, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 0f6e1a21e2f..e65d568f65b 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -1414,7 +1414,6 @@ func TestParseUnixTime(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { output, err := ParseUnixTime(tc.value) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -1518,7 +1517,6 @@ func TestIsIp(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { vm, err := expr.Compile(tc.expr, GetExprOptions(map[string]interface{}{"value": tc.value})...) if tc.expectedBuildErr { @@ -1571,7 +1569,6 @@ func TestToString(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { vm, err := expr.Compile(tc.expr, GetExprOptions(map[string]interface{}{"value": tc.value})...) require.NoError(t, err) @@ -1618,7 +1615,6 @@ func TestB64Decode(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { vm, err := expr.Compile(tc.expr, GetExprOptions(map[string]interface{}{"value": tc.value})...) 
if tc.expectedBuildErr { @@ -1691,7 +1687,6 @@ func TestParseKv(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { outMap := make(map[string]interface{}) env := map[string]interface{}{ diff --git a/pkg/exprhelpers/jsonextract_test.go b/pkg/exprhelpers/jsonextract_test.go index 2ee3ada5ea7..80de1619580 100644 --- a/pkg/exprhelpers/jsonextract_test.go +++ b/pkg/exprhelpers/jsonextract_test.go @@ -159,7 +159,6 @@ func TestJsonExtractSlice(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { env := map[string]interface{}{ "blob": test.jsonBlob, @@ -215,7 +214,6 @@ func TestJsonExtractObject(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { env := map[string]interface{}{ "blob": test.jsonBlob, diff --git a/pkg/fflag/features_test.go b/pkg/fflag/features_test.go index 57745b3c38c..481e86573e8 100644 --- a/pkg/fflag/features_test.go +++ b/pkg/fflag/features_test.go @@ -50,8 +50,6 @@ func TestRegisterFeature(t *testing.T) { } for _, tc := range tests { - tc := tc - t.Run("", func(t *testing.T) { fr := fflag.FeatureRegister{EnvPrefix: "FFLAG_TEST_"} err := fr.RegisterFeature(&tc.feature) @@ -112,7 +110,6 @@ func TestGetFeature(t *testing.T) { fr := setUp(t) for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { _, err := fr.GetFeature(tc.feature) cstest.RequireErrorMessage(t, err, tc.expectedErr) @@ -145,7 +142,6 @@ func TestIsEnabled(t *testing.T) { fr := setUp(t) for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { feat, err := fr.GetFeature(tc.feature) require.NoError(t, err) @@ -204,7 +200,6 @@ func TestFeatureSet(t *testing.T) { fr := setUp(t) for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { feat, err := fr.GetFeature(tc.feature) cstest.RequireErrorMessage(t, err, tc.expectedGetErr) @@ -284,7 +279,6 @@ func TestSetFromEnv(t *testing.T) { fr := setUp(t) for _, tc := 
range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { logger, hook := logtest.NewNullLogger() logger.SetLevel(logrus.DebugLevel) @@ -344,7 +338,6 @@ func TestSetFromYaml(t *testing.T) { fr := setUp(t) for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { logger, hook := logtest.NewNullLogger() logger.SetLevel(logrus.DebugLevel) diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 434667cbeaf..930633feb35 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -44,7 +44,6 @@ func TestDateParse(t *testing.T) { logger := log.WithField("test", "test") for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { strTime, err := ParseDate(tt.evt.StrTime, &tt.evt, logger) cstest.RequireErrorContains(t, err, tt.expectedErr) diff --git a/pkg/parser/whitelist_test.go b/pkg/parser/whitelist_test.go index 501c655243d..02846f17fc1 100644 --- a/pkg/parser/whitelist_test.go +++ b/pkg/parser/whitelist_test.go @@ -62,7 +62,6 @@ func TestWhitelistCompile(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { node.Whitelist = tt.whitelist _, err := node.CompileWLs() @@ -284,7 +283,6 @@ func TestWhitelistCheck(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { var err error node.Whitelist = tt.whitelist diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 0ff3438d974..c744e7d6796 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -94,7 +94,6 @@ func TestPathExists(t *testing.T) { } for _, tc := range tests { - tc := tc env := setup.NewExprEnvironment(setup.DetectOptions{}, setup.ExprOS{}) t.Run(tc.path, func(t *testing.T) { @@ -148,7 +147,6 @@ func TestVersionCheck(t *testing.T) { } for _, tc := range tests { - tc := tc e := setup.ExprOS{RawVersion: tc.version} t.Run(fmt.Sprintf("Check(%s,%s)", tc.version, tc.constraint), func(t *testing.T) { @@ -248,7 +246,6 @@ func 
TestListSupported(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -333,7 +330,6 @@ func TestApplyRules(t *testing.T) { env := setup.ExprEnvironment{} for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() @@ -424,7 +420,6 @@ detect: } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) defer os.Remove(f.Name()) @@ -518,7 +513,6 @@ detect: } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) defer os.Remove(f.Name()) @@ -830,7 +824,6 @@ func TestDetectForcedOS(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) defer os.Remove(f.Name()) @@ -1015,7 +1008,6 @@ func TestDetectDatasourceValidation(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) defer os.Remove(f.Name()) diff --git a/pkg/types/event_test.go b/pkg/types/event_test.go index a2fad9ebcc7..97b13f96d9a 100644 --- a/pkg/types/event_test.go +++ b/pkg/types/event_test.go @@ -41,7 +41,6 @@ func TestSetParsed(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { tt.evt.SetParsed(tt.key, tt.value) assert.Equal(t, tt.value, tt.evt.Parsed[tt.key]) @@ -82,7 +81,6 @@ func TestSetMeta(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { tt.evt.SetMeta(tt.key, tt.value) assert.Equal(t, tt.value, tt.evt.GetMeta(tt.key)) @@ -152,7 +150,6 @@ func TestParseIPSources(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { ips := tt.evt.ParseIPSources() assert.Equal(t, tt.expected, ips) From 4b988701ed4660165e3def1be7391f336a68e461 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Jun 2024 13:47:26 +0200 Subject: [PATCH 196/581] lint (intrange) 
(#2970) --- cmd/crowdsec-cli/decisions.go | 2 +- cmd/crowdsec-cli/machines.go | 2 +- cmd/crowdsec-cli/prettytable.go | 2 +- cmd/crowdsec/crowdsec.go | 6 +++--- pkg/acquisition/acquisition.go | 4 ++-- pkg/acquisition/acquisition_test.go | 6 +++--- pkg/acquisition/modules/appsec/appsec.go | 2 +- pkg/acquisition/modules/docker/utils.go | 2 +- pkg/acquisition/modules/file/file_test.go | 2 +- pkg/acquisition/modules/kinesis/kinesis.go | 4 ++-- pkg/acquisition/modules/kinesis/kinesis_test.go | 8 ++++---- pkg/acquisition/modules/loki/loki_test.go | 2 +- .../modules/syslog/internal/parser/utils/utils.go | 2 +- pkg/apiclient/auth_retry.go | 2 +- pkg/apiserver/apic_test.go | 2 +- pkg/apiserver/controllers/v1/alerts.go | 2 +- pkg/csplugin/watcher_test.go | 2 +- pkg/exprhelpers/debugger.go | 2 +- pkg/exprhelpers/helpers.go | 2 +- pkg/leakybucket/manager_run.go | 2 +- pkg/parser/parsing_test.go | 4 ++-- 21 files changed, 31 insertions(+), 31 deletions(-) diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index df35fb9bb18..92a0de72e58 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -28,7 +28,7 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin spamLimit := make(map[string]bool) skipped := 0 - for aIdx := 0; aIdx < len(*alerts); aIdx++ { + for aIdx := range len(*alerts) { alertItem := (*alerts)[aIdx] newDecisions := make([]*models.Decision, 0) diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 20933dc28e5..9014d3d803a 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -41,7 +41,7 @@ func generatePassword(length int) string { buf := make([]byte, length) - for i := 0; i < length; i++ { + for i := range length { rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) if err != nil { log.Fatalf("failed getting data from prng for password generation : %s", err) diff --git a/cmd/crowdsec-cli/prettytable.go 
b/cmd/crowdsec-cli/prettytable.go index 0ce7e9755f9..f17472722f1 100644 --- a/cmd/crowdsec-cli/prettytable.go +++ b/cmd/crowdsec-cli/prettytable.go @@ -91,7 +91,7 @@ func newLightTable(output io.Writer) *Table { func (t *Table) setColumnConfigs() { configs := []table.ColumnConfig{} // the go-pretty table does not expose the names or number of columns - for i := 0; i < len(t.align); i++ { + for i := range len(t.align) { configs = append(configs, table.ColumnConfig{ Number: i + 1, AlignHeader: t.alignHeader[i], diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 0fd269b3537..2be8a84fec0 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -70,7 +70,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H parsersTomb.Go(func() error { parserWg.Add(1) - for i := 0; i < cConfig.Crowdsec.ParserRoutinesCount; i++ { + for range cConfig.Crowdsec.ParserRoutinesCount { parsersTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runParse") @@ -101,7 +101,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H } } - for i := 0; i < cConfig.Crowdsec.BucketsRoutinesCount; i++ { + for range cConfig.Crowdsec.BucketsRoutinesCount { bucketsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runPour") @@ -127,7 +127,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H outputsTomb.Go(func() error { outputWg.Add(1) - for i := 0; i < cConfig.Crowdsec.OutputRoutinesCount; i++ { + for range cConfig.Crowdsec.OutputRoutinesCount { outputsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runOutput") diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 069bf67d852..6a91423c158 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -270,7 +270,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig func GetMetrics(sources []DataSource, aggregated bool) error { var 
metrics []prometheus.Collector - for i := 0; i < len(sources); i++ { + for i := range len(sources) { if aggregated { metrics = sources[i].GetMetrics() } else { @@ -344,7 +344,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb return nil } - for i := 0; i < len(sources); i++ { + for i := range len(sources) { subsrc := sources[i] //ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index cc57aecb732..a5eecbc20ed 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -321,7 +321,7 @@ func (f *MockCat) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockCat) GetName() string { return "mock_cat" } func (f *MockCat) GetMode() string { return "cat" } func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { - for i := 0; i < 10; i++ { + for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt @@ -368,7 +368,7 @@ func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) err return errors.New("can't run in cat mode") } func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { - for i := 0; i < 10; i++ { + for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt @@ -451,7 +451,7 @@ type MockTailError struct { } func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { - for i := 0; i < 10; i++ { + for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 1d9a9bfec3c..e1415faa25b 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -202,7 +202,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.AppsecRunners = make([]AppsecRunner, w.config.Routines) 
- for nbRoutine := 0; nbRoutine < w.config.Routines; nbRoutine++ { + for nbRoutine := range w.config.Routines { appsecRunnerUUID := uuid.New().String() //we copy AppsecRutime for each runner wrt := *w.AppsecRuntime diff --git a/pkg/acquisition/modules/docker/utils.go b/pkg/acquisition/modules/docker/utils.go index 5439163e5b9..c724f581194 100644 --- a/pkg/acquisition/modules/docker/utils.go +++ b/pkg/acquisition/modules/docker/utils.go @@ -22,7 +22,7 @@ func parseKeyToMap(m map[string]interface{}, key string, value string) { return } - for i := 0; i < len(parts); i++ { + for i := range len(parts) { if parts[i] == "" { return } diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 2f84c6de324..5d38552b3c5 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -401,7 +401,7 @@ force_inotify: true`, testPattern), fd, err := os.Create("test_files/stream.log") require.NoError(t, err, "could not create test file") - for i := 0; i < 5; i++ { + for i := range 5 { _, err = fmt.Fprintf(fd, "%d\n", i) if err != nil { os.Remove("test_files/stream.log") diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 485cefcf01d..0e6c1980fa9 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -209,7 +209,7 @@ func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubsc func (k *KinesisSource) WaitForConsumerDeregistration(consumerName string, streamARN string) error { maxTries := k.Config.MaxRetries - for i := 0; i < maxTries; i++ { + for i := range maxTries { _, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{ ConsumerName: aws.String(consumerName), StreamARN: aws.String(streamARN), @@ -250,7 +250,7 @@ func (k *KinesisSource) DeregisterConsumer() error { func (k *KinesisSource) WaitForConsumerRegistration(consumerARN string) error { 
maxTries := k.Config.MaxRetries - for i := 0; i < maxTries; i++ { + for i := range maxTries { describeOutput, err := k.kClient.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{ ConsumerARN: aws.String(consumerARN), }) diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index d1d398c129e..ae601020621 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -71,7 +71,7 @@ func WriteToStream(streamName string, count int, shards int, sub bool) { } sess := session.Must(session.NewSession()) kinesisClient := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1")) - for i := 0; i < count; i++ { + for i := range count { partition := "partition" if shards != 1 { partition = fmt.Sprintf("partition-%d", i%shards) @@ -182,7 +182,7 @@ stream_name: stream-1-shard`, //Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) - for i := 0; i < test.count; i++ { + for i := range test.count { e := <-out assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) } @@ -227,7 +227,7 @@ stream_name: stream-2-shards`, time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) c := 0 - for i := 0; i < test.count; i++ { + for range test.count { <-out c += 1 } @@ -273,7 +273,7 @@ from_subscription: true`, //Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, true) - for i := 0; i < test.count; i++ { + for i := range test.count { e := <-out assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) } diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 051a9b93ed5..5f41cd4c62e 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -274,7 
+274,7 @@ func feedLoki(logger *log.Entry, n int, title string) error { }, }, } - for i := 0; i < n; i++ { + for i := range n { streams.Streams[0].Values[i] = LogValue{ Time: time.Now(), Line: fmt.Sprintf("Log line #%d %v", i, title), diff --git a/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go b/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go index 8fe717a6ab2..5e0bf8fe771 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go +++ b/pkg/acquisition/modules/syslog/internal/parser/utils/utils.go @@ -34,7 +34,7 @@ func isValidHostname(s string) bool { last := byte('.') nonNumeric := false // true once we've seen a letter or hyphen partlen := 0 - for i := 0; i < len(s); i++ { + for i := range len(s) { c := s[i] switch { default: diff --git a/pkg/apiclient/auth_retry.go b/pkg/apiclient/auth_retry.go index 8ec8823f6e7..a17725439bc 100644 --- a/pkg/apiclient/auth_retry.go +++ b/pkg/apiclient/auth_retry.go @@ -41,7 +41,7 @@ func (r retryRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) maxAttempts = 1 } - for i := 0; i < maxAttempts; i++ { + for i := range maxAttempts { if i > 0 { if r.withBackOff { //nolint:gosec diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index dc6ed98f946..a3aa956ed98 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -1077,7 +1077,7 @@ func TestAPICPush(t *testing.T) { expectedCalls: 2, alerts: func() []*models.Alert { alerts := make([]*models.Alert, 100) - for i := 0; i < 100; i++ { + for i := range 100 { alerts[i] = &models.Alert{ Scenario: ptr.Of("crowdsec/test"), ScenarioHash: ptr.Of("certified"), diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 7483e8dcdf9..c8cd54203bc 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -109,7 +109,7 @@ func FormatAlerts(result []*ent.Alert) models.AddAlertsRequest { func (c *Controller) 
sendAlertToPluginChannel(alert *models.Alert, profileID uint) { if c.PluginChannel != nil { RETRY: - for try := 0; try < 3; try++ { + for try := range 3 { select { case c.PluginChannel <- csplugin.ProfileAlert{ProfileID: profileID, Alert: alert}: log.Debugf("alert sent to Plugin channel") diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index d0bb7b2f142..b76c3c4eadd 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -34,7 +34,7 @@ func resetWatcherAlertCounter(pw *PluginWatcher) { } func insertNAlertsToPlugin(pw *PluginWatcher, n int, pluginName string) { - for i := 0; i < n; i++ { + for range n { pw.Inserts <- pluginName } } diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 5ab2fc48c70..a2bd489acfa 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -346,7 +346,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } func (erp ExprRuntimeDebug) ipSeek(ip int) []string { - for i := 0; i < len(erp.Lines); i++ { + for i := range len(erp.Lines) { parts := strings.Split(erp.Lines[i], "\t") if parts[0] == strconv.Itoa(ip) { return parts diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 5c041aa2886..43be5d82de0 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -270,7 +270,7 @@ func flatten(args []interface{}, v reflect.Value) []interface{} { } if v.Kind() == reflect.Array || v.Kind() == reflect.Slice { - for i := 0; i < v.Len(); i++ { + for i := range v.Len() { args = flatten(args, v.Index(i)) } } else { diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 1d34c238ea5..673b372d81e 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -298,7 +298,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event)) } //find the relevant 
holders (scenarios) - for idx := 0; idx < len(holders); idx++ { + for idx := range len(holders) { //for idx, holder := range holders { //evaluate bucket's condition diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index c5b1c353e9f..0542c69c049 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -151,7 +151,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing b.ResetTimer() } - for n := 0; n < count; n++ { + for range(count) { if !testFile(tests, *pctx, pnodes) { return errors.New("test failed") } @@ -285,7 +285,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo valid = true - for mapIdx := 0; mapIdx < len(expectMaps); mapIdx++ { + for mapIdx := range(len(expectMaps)) { for expKey, expVal := range expectMaps[mapIdx] { outVal, ok := outMaps[mapIdx][expKey] if !ok { From 0e93f98cad58a88be3952d0a45564f532538a101 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Jun 2024 14:31:45 +0200 Subject: [PATCH 197/581] lint: github.com/pkg/errors -> errors (#3091) --- .golangci.yml | 2 -- pkg/acquisition/modules/appsec/appsec.go | 11 +++++------ pkg/exprhelpers/crowdsec_cti.go | 4 ++-- pkg/exprhelpers/exprlib_test.go | 16 ++++++++-------- 4 files changed, 15 insertions(+), 18 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index bb20d42699f..ffe634ca0ab 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -52,8 +52,6 @@ linters-settings: desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" files: - "!**/pkg/database/*.go" - - "!**/pkg/exprhelpers/*.go" - - "!**/pkg/acquisition/modules/appsec/appsec.go" - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index e1415faa25b..b141ee6d666 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -17,7 +17,6 @@ import ( 
"github.com/crowdsecurity/crowdsec/pkg/types" "github.com/crowdsecurity/go-cs-lib/trace" "github.com/google/uuid" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -98,7 +97,7 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { err := yaml.UnmarshalStrict(yamlConfig, &w.config) if err != nil { - return errors.Wrap(err, "Cannot parse appsec configuration") + return fmt.Errorf("Cannot parse appsec configuration: %w", err) } if w.config.ListenAddr == "" && w.config.ListenSocket == "" { @@ -153,7 +152,7 @@ func (w *AppsecSource) GetAggregMetrics() []prometheus.Collector { func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { err := w.UnmarshalConfig(yamlConfig) if err != nil { - return errors.Wrap(err, "unable to parse appsec configuration") + return fmt.Errorf("unable to parse appsec configuration: %w", err) } w.logger = logger w.metricsLevel = MetricsLevel @@ -263,7 +262,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) _ = os.RemoveAll(w.config.ListenSocket) listener, err := net.Listen("unix", w.config.ListenSocket) if err != nil { - return errors.Wrap(err, "Appsec server failed") + return fmt.Errorf("Appsec server failed: %w", err) } defer listener.Close() if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { @@ -272,7 +271,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) err = w.server.Serve(listener) } if err != nil && err != http.ErrServerClosed { - return errors.Wrap(err, "Appsec server failed") + return fmt.Errorf("Appsec server failed: %w", err) } } return nil @@ -288,7 +287,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } if err != nil && err != http.ErrServerClosed { - return errors.Wrap(err, "Appsec server failed") + return fmt.Errorf("Appsec server failed: %w", err) } } return nil diff --git 
a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 4e54b07bfc2..4103bad1d43 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -1,13 +1,13 @@ package exprhelpers import ( + "errors" "fmt" "time" "github.com/bluele/gcache" "github.com/crowdsecurity/crowdsec/pkg/cticlient" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -40,7 +40,7 @@ func InitCrowdsecCTI(Key *string, TTL *time.Duration, Size *int, LogLevel *log.L } clog := log.New() if err := types.ConfigureLogger(clog); err != nil { - return errors.Wrap(err, "while configuring datasource logger") + return fmt.Errorf("while configuring datasource logger: %w", err) } if LogLevel != nil { clog.SetLevel(*LogLevel) diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index e65d568f65b..33025d4a992 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -2,12 +2,12 @@ package exprhelpers import ( "context" + "errors" "os" "testing" "time" "github.com/antonmedv/expr" - "github.com/pkg/errors" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -936,7 +936,7 @@ func TestGetDecisionsCount(t *testing.T) { SaveX(context.Background()) if decision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } err = Init(dbClient) @@ -1022,7 +1022,7 @@ func TestGetDecisionsSinceCount(t *testing.T) { SetOrigin("CAPI"). SaveX(context.Background()) if decision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } decision2 := dbClient.Ent.Decision.Create(). 
@@ -1041,7 +1041,7 @@ func TestGetDecisionsSinceCount(t *testing.T) { SaveX(context.Background()) if decision2 == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } err = Init(dbClient) @@ -1147,7 +1147,7 @@ func TestGetActiveDecisionsCount(t *testing.T) { SaveX(context.Background()) if decision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } expiredDecision := dbClient.Ent.Decision.Create(). @@ -1165,7 +1165,7 @@ func TestGetActiveDecisionsCount(t *testing.T) { SaveX(context.Background()) if expiredDecision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } err = Init(dbClient) @@ -1253,7 +1253,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { SaveX(context.Background()) if decision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } longerDecision := dbClient.Ent.Decision.Create(). 
@@ -1271,7 +1271,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { SaveX(context.Background()) if longerDecision == nil { - require.Error(t, errors.Errorf("Failed to create sample decision")) + require.Error(t, errors.New("Failed to create sample decision")) } err = Init(dbClient) From 3097c0ce908cd103aca734a22af58fcd36022772 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 25 Jun 2024 11:28:27 +0200 Subject: [PATCH 198/581] lint: replace gocognit with revive (#3094) --- .golangci.yml | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index ffe634ca0ab..ea871205485 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,10 +12,6 @@ linters-settings: replace-allow-list: - golang.org/x/time/rate - gocognit: - # lower this after refactoring - min-complexity: 118 - govet: enable-all: true disable: @@ -100,7 +96,8 @@ linters-settings: - name: add-constant disabled: true - name: cognitive-complexity - disabled: true + # lower this after refactoring + arguments: [119] - name: comment-spacings disabled: true - name: confusing-results @@ -192,10 +189,11 @@ linters: # Redundant # - - gocyclo # revive - - cyclop # revive - - lll # revive - - funlen # revive + - gocyclo # revive + - cyclop # revive + - lll # revive + - funlen # revive + - gocognit # revive # # Disabled until fixed for go 1.22 @@ -223,7 +221,6 @@ linters: # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. # - gochecknoinits # Checks that no init functions are present in Go code # - gochecksumtype # Run exhaustiveness checks on Go "sum types" - # - gocognit # Computes and checks the cognitive complexity of functions # - gocritic # Provides diagnostics that check for bugs, performance and style issues. # - goheader # Checks is file header matches to pattern # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. 
From a3bd9baec1ff5faf1ae3e126d45bfa68601bd805 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 26 Jun 2024 10:41:30 +0200 Subject: [PATCH 199/581] improved tls middleware revocation checks (#3034) --- .golangci.yml | 4 - docker/test/tests/test_tls.py | 2 +- pkg/apiclient/alerts_service.go | 2 - pkg/apiclient/auth_service.go | 2 - pkg/apiserver/middlewares/v1/api_key.go | 12 +- pkg/apiserver/middlewares/v1/cache.go | 99 ++++++ pkg/apiserver/middlewares/v1/crl.go | 145 +++++++++ pkg/apiserver/middlewares/v1/jwt.go | 31 +- pkg/apiserver/middlewares/v1/ocsp.go | 100 ++++++ pkg/apiserver/middlewares/v1/tls_auth.go | 305 ++++++------------ test/bats/11_bouncers_tls.bats | 181 ++++++++--- test/bats/30_machines_tls.bats | 194 +++++++---- test/bats/testdata/cfssl/agent.json | 16 +- test/bats/testdata/cfssl/agent_invalid.json | 16 +- test/bats/testdata/cfssl/bouncer.json | 16 +- test/bats/testdata/cfssl/bouncer_invalid.json | 16 +- test/bats/testdata/cfssl/ca.json | 16 - ...intermediate.json => ca_intermediate.json} | 18 +- test/bats/testdata/cfssl/ca_root.json | 16 + test/bats/testdata/cfssl/profiles.json | 71 ++-- test/bats/testdata/cfssl/server.json | 24 +- test/lib/setup_file.sh | 5 + 22 files changed, 835 insertions(+), 456 deletions(-) create mode 100644 pkg/apiserver/middlewares/v1/cache.go create mode 100644 pkg/apiserver/middlewares/v1/crl.go create mode 100644 pkg/apiserver/middlewares/v1/ocsp.go delete mode 100644 test/bats/testdata/cfssl/ca.json rename test/bats/testdata/cfssl/{intermediate.json => ca_intermediate.json} (53%) create mode 100644 test/bats/testdata/cfssl/ca_root.json diff --git a/.golangci.yml b/.golangci.yml index ea871205485..d89c8e9ed01 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -462,10 +462,6 @@ issues: path: pkg/hubtest/hubtest_item.go text: "cyclomatic: .*RunWithLogFile" - - linters: - - canonicalheader - path: pkg/apiserver/middlewares/v1/tls_auth.go - # tolerate complex functions in tests for 
now - linters: - maintidx diff --git a/docker/test/tests/test_tls.py b/docker/test/tests/test_tls.py index fe899b000af..d2f512fcbc1 100644 --- a/docker/test/tests/test_tls.py +++ b/docker/test/tests/test_tls.py @@ -281,7 +281,7 @@ def test_tls_client_ou(crowdsec, flavor, certs_dir): lapi.wait_for_http(8080, '/health', want_status=None) with cs_agent as agent: lapi.wait_for_log([ - "*client certificate OU (?custom-client-ou?) doesn't match expected OU (?agent-ou?)*", + "*client certificate OU ?custom-client-ou? doesn't match expected OU ?agent-ou?*", ]) lapi_env['AGENTS_ALLOWED_OU'] = 'custom-client-ou' diff --git a/pkg/apiclient/alerts_service.go b/pkg/apiclient/alerts_service.go index ad75dd39342..a3da84d306e 100644 --- a/pkg/apiclient/alerts_service.go +++ b/pkg/apiclient/alerts_service.go @@ -10,8 +10,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -// type ApiAlerts service - type AlertsService service type AlertsListOpts struct { diff --git a/pkg/apiclient/auth_service.go b/pkg/apiclient/auth_service.go index e4350385237..e7a423cfd95 100644 --- a/pkg/apiclient/auth_service.go +++ b/pkg/apiclient/auth_service.go @@ -8,8 +8,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -// type ApiAlerts service - type AuthService service // Don't add it to the models, as they are used with LAPI, but the enroll endpoint is specific to CAPI diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 314a4da1046..e822666db0f 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -60,18 +60,13 @@ func HashSHA512(str string) string { func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { if a.TlsAuth == nil { - logger.Error("TLS Auth is not configured but client presented a certificate") - return nil - } - - validCert, extractedCN, err := a.TlsAuth.ValidateCert(c) - if !validCert { - logger.Error(err) + logger.Warn("TLS Auth is not configured 
but client presented a certificate") return nil } + extractedCN, err := a.TlsAuth.ValidateCert(c) if err != nil { - logger.Error(err) + logger.Warn(err) return nil } @@ -148,6 +143,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } if bouncer == nil { + // XXX: StatusUnauthorized? c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() diff --git a/pkg/apiserver/middlewares/v1/cache.go b/pkg/apiserver/middlewares/v1/cache.go new file mode 100644 index 00000000000..a058ec40393 --- /dev/null +++ b/pkg/apiserver/middlewares/v1/cache.go @@ -0,0 +1,99 @@ +package v1 + +import ( + "crypto/x509" + "sync" + "time" + + log "github.com/sirupsen/logrus" +) + +type cacheEntry struct { + err error // if nil, the certificate is not revocated + timestamp time.Time +} + +type RevocationCache struct { + mu sync.RWMutex + cache map[string]cacheEntry + expiration time.Duration + lastPurge time.Time + logger *log.Entry +} + +func NewRevocationCache(expiration time.Duration, logger *log.Entry) *RevocationCache { + return &RevocationCache{ + cache: make(map[string]cacheEntry), + expiration: expiration, + lastPurge: time.Now(), + logger: logger, + } +} + +func (*RevocationCache) generateKey(cert *x509.Certificate) string { + return cert.SerialNumber.String() + "-" + cert.Issuer.String() +} + +// purge removes expired entries from the cache +func (rc *RevocationCache) purgeExpired() { + // we don't keep a separate interval for the full sweep, we'll just double the expiration + if time.Since(rc.lastPurge) < rc.expiration { + return + } + + rc.mu.Lock() + defer rc.mu.Unlock() + + for key, entry := range rc.cache { + if time.Since(entry.timestamp) > rc.expiration { + rc.logger.Debugf("purging expired entry for cert %s", key) + delete(rc.cache, key) + } + } +} + +func (rc *RevocationCache) Get(cert *x509.Certificate) (error, bool) { //nolint:revive + rc.purgeExpired() + key := rc.generateKey(cert) + rc.mu.RLock() + entry, exists := rc.cache[key] + rc.mu.RUnlock() 
+ + if !exists { + rc.logger.Tracef("no cached value for cert %s", key) + return nil, false + } + + // Upgrade to write lock to potentially modify the cache + rc.mu.Lock() + defer rc.mu.Unlock() + + if entry.timestamp.Add(rc.expiration).Before(time.Now()) { + rc.logger.Debugf("cached value for %s expired, removing from cache", key) + delete(rc.cache, key) + + return nil, false + } + + rc.logger.Debugf("using cached value for cert %s: %v", key, entry.err) + + return entry.err, true +} + +func (rc *RevocationCache) Set(cert *x509.Certificate, err error) { + key := rc.generateKey(cert) + + rc.mu.Lock() + defer rc.mu.Unlock() + + rc.cache[key] = cacheEntry{ + err: err, + timestamp: time.Now(), + } +} + +func (rc *RevocationCache) Empty() { + rc.mu.Lock() + defer rc.mu.Unlock() + rc.cache = make(map[string]cacheEntry) +} diff --git a/pkg/apiserver/middlewares/v1/crl.go b/pkg/apiserver/middlewares/v1/crl.go new file mode 100644 index 00000000000..f85a410998e --- /dev/null +++ b/pkg/apiserver/middlewares/v1/crl.go @@ -0,0 +1,145 @@ +package v1 + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + "os" + "sync" + "time" + + log "github.com/sirupsen/logrus" +) + +type CRLChecker struct { + path string // path to the CRL file + fileInfo os.FileInfo // last stat of the CRL file + crls []*x509.RevocationList // parsed CRLs + logger *log.Entry + mu sync.RWMutex + lastLoad time.Time // time when the CRL file was last read successfully + onLoad func() // called when the CRL file changes (and is read successfully) +} + +func NewCRLChecker(crlPath string, onLoad func(), logger *log.Entry) (*CRLChecker, error) { + cc := &CRLChecker{ + path: crlPath, + logger: logger, + onLoad: onLoad, + } + + err := cc.refresh() + if err != nil { + return nil, err + } + + return cc, nil +} + +func (*CRLChecker) decodeCRLs(content []byte) ([]*x509.RevocationList, error) { + var crls []*x509.RevocationList + + for { + block, rest := pem.Decode(content) + if block == nil { + break // no more PEM 
blocks + } + + content = rest + + crl, err := x509.ParseRevocationList(block.Bytes) + if err != nil { + // invalidate the whole CRL file so we can still use the previous version + return nil, fmt.Errorf("could not parse file: %w", err) + } + + crls = append(crls, crl) + } + + return crls, nil +} + +// refresh() reads the CRL file if new or changed since the last time +func (cc *CRLChecker) refresh() error { + // noop if lastLoad is less than 5 seconds ago + if time.Since(cc.lastLoad) < 5*time.Second { + return nil + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.logger.Debugf("loading CRL file from %s", cc.path) + + fileInfo, err := os.Stat(cc.path) + if err != nil { + return fmt.Errorf("could not access CRL file: %w", err) + } + + // noop if the file didn't change + if cc.fileInfo != nil && fileInfo.ModTime().Equal(cc.fileInfo.ModTime()) && fileInfo.Size() == cc.fileInfo.Size() { + return nil + } + + // the encoding/pem package wants bytes, not io.Reader + crlContent, err := os.ReadFile(cc.path) + if err != nil { + return fmt.Errorf("could not read CRL file: %w", err) + } + + cc.crls, err = cc.decodeCRLs(crlContent) + if err != nil { + return err + } + + cc.fileInfo = fileInfo + cc.lastLoad = time.Now() + cc.onLoad() + + return nil +} + +// isRevoked checks if the client certificate is revoked by any of the CRL blocks +// It returns a boolean indicating if the certificate is revoked and a boolean indicating +// if the CRL check was successful and could be cached. 
+func (cc *CRLChecker) isRevokedBy(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { + if cc == nil { + return false, true + } + + err := cc.refresh() + if err != nil { + // we can't quit obviously, so we just log the error and continue + // but we can assume we have loaded a CRL, or it would have quit the first time + cc.logger.Errorf("while refreshing CRL: %s - will keep using CRL file read at %s", err, + cc.lastLoad.Format(time.RFC3339)) + } + + now := time.Now().UTC() + + cc.mu.RLock() + defer cc.mu.RUnlock() + + for _, crl := range cc.crls { + if err := crl.CheckSignatureFrom(issuer); err != nil { + continue + } + + if now.After(crl.NextUpdate) { + cc.logger.Warn("CRL has expired, will still validate the cert against it.") + } + + if now.Before(crl.ThisUpdate) { + cc.logger.Warn("CRL is not yet valid, will still validate the cert against it.") + } + + for _, revoked := range crl.RevokedCertificateEntries { + if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { + cc.logger.Warn("client certificate is revoked by CRL") + return true, true + } + } + } + + return false, true +} diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 735c5f058cb..64406deff3e 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -4,7 +4,6 @@ import ( "crypto/rand" "errors" "fmt" - "net/http" "os" "strings" "time" @@ -59,27 +58,19 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { ret := authInput{} if j.TlsAuth == nil { - c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) - c.Abort() + err := errors.New("tls authentication required") + log.Warn(err) - return nil, errors.New("TLS auth is not configured") + return nil, err } - validCert, extractedCN, err := j.TlsAuth.ValidateCert(c) + extractedCN, err := j.TlsAuth.ValidateCert(c) if err != nil { - log.Error(err) - c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) - c.Abort() - - return nil, 
fmt.Errorf("while trying to validate client cert: %w", err) + log.Warn(err) + return nil, err } - if !validCert { - c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) - c.Abort() - - return nil, errors.New("failed cert authentication") - } + logger := log.WithField("ip", c.ClientIP()) ret.machineID = fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) @@ -88,14 +79,12 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { First(j.DbClient.CTX) if ent.IsNotFound(err) { // Machine was not found, let's create it - log.Infof("machine %s not found, create it", ret.machineID) + logger.Infof("machine %s not found, create it", ret.machineID) // let's use an apikey as the password, doesn't matter in this case (generatePassword is only available in cscli) pwd, err := GenerateAPIKey(dummyAPIKeySize) if err != nil { - log.WithFields(log.Fields{ - "ip": c.ClientIP(), - "cn": extractedCN, - }).Errorf("error generating password: %s", err) + logger.WithField("cn", extractedCN). + Errorf("error generating password: %s", err) return nil, errors.New("error generating password") } diff --git a/pkg/apiserver/middlewares/v1/ocsp.go b/pkg/apiserver/middlewares/v1/ocsp.go new file mode 100644 index 00000000000..24557bfda7b --- /dev/null +++ b/pkg/apiserver/middlewares/v1/ocsp.go @@ -0,0 +1,100 @@ +package v1 + +import ( + "bytes" + "crypto" + "crypto/x509" + "io" + "net/http" + "net/url" + + log "github.com/sirupsen/logrus" + "golang.org/x/crypto/ocsp" +) + +type OCSPChecker struct { + logger *log.Entry +} + +func NewOCSPChecker(logger *log.Entry) *OCSPChecker { + return &OCSPChecker{ + logger: logger, + } +} + +func (oc *OCSPChecker) query(server string, cert *x509.Certificate, issuer *x509.Certificate) (*ocsp.Response, error) { + req, err := ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256}) + if err != nil { + oc.logger.Errorf("TLSAuth: error creating OCSP request: %s", err) + return nil, err + } + + httpRequest, err := 
http.NewRequest(http.MethodPost, server, bytes.NewBuffer(req)) + if err != nil { + oc.logger.Error("TLSAuth: cannot create HTTP request for OCSP") + return nil, err + } + + ocspURL, err := url.Parse(server) + if err != nil { + oc.logger.Error("TLSAuth: cannot parse OCSP URL") + return nil, err + } + + httpRequest.Header.Add("Content-Type", "application/ocsp-request") + httpRequest.Header.Add("Accept", "application/ocsp-response") + httpRequest.Header.Add("Host", ocspURL.Host) + + httpClient := &http.Client{} + + // XXX: timeout, context? + httpResponse, err := httpClient.Do(httpRequest) + if err != nil { + oc.logger.Error("TLSAuth: cannot send HTTP request to OCSP") + return nil, err + } + defer httpResponse.Body.Close() + + output, err := io.ReadAll(httpResponse.Body) + if err != nil { + oc.logger.Error("TLSAuth: cannot read HTTP response from OCSP") + return nil, err + } + + ocspResponse, err := ocsp.ParseResponseForCert(output, cert, issuer) + + return ocspResponse, err +} + +// isRevokedBy checks if the client certificate is revoked by the issuer via any of the OCSP servers present in the certificate. +// It returns a boolean indicating if the certificate is revoked and a boolean indicating +// if the OCSP check was successful and could be cached. 
+func (oc *OCSPChecker) isRevokedBy(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { + if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { + oc.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") + return false, true + } + + for _, server := range cert.OCSPServer { + ocspResponse, err := oc.query(server, cert, issuer) + if err != nil { + oc.logger.Errorf("TLSAuth: error querying OCSP server %s: %s", server, err) + continue + } + + switch ocspResponse.Status { + case ocsp.Good: + return false, true + case ocsp.Revoked: + oc.logger.Errorf("TLSAuth: client certificate is revoked by server %s", server) + return true, true + case ocsp.Unknown: + log.Debugf("unknown OCSP status for server %s", server) + continue + } + } + + log.Infof("Could not get any valid OCSP response, assuming the cert is revoked") + + return true, false +} diff --git a/pkg/apiserver/middlewares/v1/tls_auth.go b/pkg/apiserver/middlewares/v1/tls_auth.go index c2fcc9c7264..673c8d0cdce 100644 --- a/pkg/apiserver/middlewares/v1/tls_auth.go +++ b/pkg/apiserver/middlewares/v1/tls_auth.go @@ -1,79 +1,24 @@ package v1 import ( - "bytes" - "crypto" "crypto/x509" - "encoding/pem" "errors" "fmt" - "io" - "net/http" - "net/url" - "os" + "slices" "time" "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" - "golang.org/x/crypto/ocsp" ) type TLSAuth struct { AllowedOUs []string - CrlPath string - revocationCache map[string]cacheEntry - cacheExpiration time.Duration + crlChecker *CRLChecker + ocspChecker *OCSPChecker + revocationCache *RevocationCache logger *log.Entry } -type cacheEntry struct { - revoked bool - timestamp time.Time -} - -func (ta *TLSAuth) ocspQuery(server string, cert *x509.Certificate, issuer *x509.Certificate) (*ocsp.Response, error) { - req, err := ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256}) - if err != nil { - ta.logger.Errorf("TLSAuth: error creating OCSP request: %s", err) - return 
nil, err - } - - httpRequest, err := http.NewRequest(http.MethodPost, server, bytes.NewBuffer(req)) - if err != nil { - ta.logger.Error("TLSAuth: cannot create HTTP request for OCSP") - return nil, err - } - - ocspURL, err := url.Parse(server) - if err != nil { - ta.logger.Error("TLSAuth: cannot parse OCSP URL") - return nil, err - } - - httpRequest.Header.Add("Content-Type", "application/ocsp-request") - httpRequest.Header.Add("Accept", "application/ocsp-response") - httpRequest.Header.Add("host", ocspURL.Host) - - httpClient := &http.Client{} - - httpResponse, err := httpClient.Do(httpRequest) - if err != nil { - ta.logger.Error("TLSAuth: cannot send HTTP request to OCSP") - return nil, err - } - defer httpResponse.Body.Close() - - output, err := io.ReadAll(httpResponse.Body) - if err != nil { - ta.logger.Error("TLSAuth: cannot read HTTP response from OCSP") - return nil, err - } - - ocspResponse, err := ocsp.ParseResponseForCert(output, cert, issuer) - - return ocspResponse, err -} - func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool { now := time.Now().UTC() @@ -90,211 +35,147 @@ func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool { return false } -// isOCSPRevoked checks if the client certificate is revoked by any of the OCSP servers present in the certificate. -// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the OCSP check was successful and could be cached. 
-func (ta *TLSAuth) isOCSPRevoked(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { - if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { - ta.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") - return false, true - } +// checkRevocationPath checks a single chain against OCSP and CRL +func (ta *TLSAuth) checkRevocationPath(chain []*x509.Certificate) (error, bool) { //nolint:revive + // if we ever fail to check OCSP or CRL, we should not cache the result + couldCheck := true - for _, server := range cert.OCSPServer { - ocspResponse, err := ta.ocspQuery(server, cert, issuer) - if err != nil { - ta.logger.Errorf("TLSAuth: error querying OCSP server %s: %s", server, err) - continue - } + // starting from the root CA and moving towards the leaf certificate, + // check for revocation of intermediates too + for i := len(chain) - 1; i > 0; i-- { + cert := chain[i-1] + issuer := chain[i] - switch ocspResponse.Status { - case ocsp.Good: - return false, true - case ocsp.Revoked: - ta.logger.Errorf("TLSAuth: client certificate is revoked by server %s", server) - return true, true - case ocsp.Unknown: - log.Debugf("unknow OCSP status for server %s", server) - continue + revokedByOCSP, checkedByOCSP := ta.ocspChecker.isRevokedBy(cert, issuer) + couldCheck = couldCheck && checkedByOCSP + + if revokedByOCSP && checkedByOCSP { + return errors.New("certificate revoked by OCSP"), couldCheck } - } - log.Infof("Could not get any valid OCSP response, assuming the cert is revoked") + revokedByCRL, checkedByCRL := ta.crlChecker.isRevokedBy(cert, issuer) + couldCheck = couldCheck && checkedByCRL - return true, false -} - -// isCRLRevoked checks if the client certificate is revoked by the CRL present in the CrlPath. -// It returns a boolean indicating if the certificate is revoked and a boolean indicating if the CRL check was successful and could be cached. 
-func (ta *TLSAuth) isCRLRevoked(cert *x509.Certificate) (bool, bool) { - if ta.CrlPath == "" { - ta.logger.Info("no crl_path, skipping CRL check") - return false, true + if revokedByCRL && checkedByCRL { + return errors.New("certificate revoked by CRL"), couldCheck + } } - crlContent, err := os.ReadFile(ta.CrlPath) - if err != nil { - ta.logger.Errorf("could not read CRL file, skipping check: %s", err) - return false, false - } + return nil, couldCheck +} - var crlBlock *pem.Block +func (ta *TLSAuth) setAllowedOu(allowedOus []string) error { + uniqueOUs := make(map[string]struct{}) - for { - crlBlock, crlContent = pem.Decode(crlContent) - if crlBlock == nil { - break // no more PEM blocks + for _, ou := range allowedOus { + // disallow empty ou + if ou == "" { + return errors.New("allowed_ou configuration contains invalid empty string") } - crl, err := x509.ParseRevocationList(crlBlock.Bytes) - if err != nil { - ta.logger.Errorf("could not parse a PEM block in CRL file, skipping: %s", err) + if _, exists := uniqueOUs[ou]; exists { + ta.logger.Warningf("dropping duplicate ou %s", ou) continue } - now := time.Now().UTC() + uniqueOUs[ou] = struct{}{} - if now.After(crl.NextUpdate) { - ta.logger.Warn("CRL has expired, will still validate the cert against it.") - } + ta.AllowedOUs = append(ta.AllowedOUs, ou) + } - if now.Before(crl.ThisUpdate) { - ta.logger.Warn("CRL is not yet valid, will still validate the cert against it.") - } + return nil +} - for _, revoked := range crl.RevokedCertificateEntries { - if revoked.SerialNumber.Cmp(cert.SerialNumber) == 0 { - ta.logger.Warn("client certificate is revoked by CRL") - return true, true - } +func (ta *TLSAuth) checkAllowedOU(ous []string) error { + for _, ou := range ous { + if slices.Contains(ta.AllowedOUs, ou) { + return nil } } - return false, true + return fmt.Errorf("client certificate OU %v doesn't match expected OU %v", ous, ta.AllowedOUs) } -func (ta *TLSAuth) isRevoked(cert *x509.Certificate, issuer 
*x509.Certificate) (bool, error) { - sn := cert.SerialNumber.String() - if cacheValue, ok := ta.revocationCache[sn]; ok { - if time.Now().UTC().Sub(cacheValue.timestamp) < ta.cacheExpiration { - ta.logger.Debugf("TLSAuth: using cached value for cert %s: %t", sn, cacheValue.revoked) - return cacheValue.revoked, nil - } +func (ta *TLSAuth) ValidateCert(c *gin.Context) (string, error) { + // Checks cert validity, Returns true + CN if client cert matches requested OU + var leaf *x509.Certificate - ta.logger.Debugf("TLSAuth: cached value expired, removing from cache") - delete(ta.revocationCache, sn) - } else { - ta.logger.Tracef("TLSAuth: no cached value for cert %s", sn) + if c.Request.TLS == nil || len(c.Request.TLS.PeerCertificates) == 0 { + return "", errors.New("no certificate in request") } - revokedByOCSP, cacheOCSP := ta.isOCSPRevoked(cert, issuer) - revokedByCRL, cacheCRL := ta.isCRLRevoked(cert) - revoked := revokedByOCSP || revokedByCRL - - if cacheOCSP && cacheCRL { - ta.revocationCache[sn] = cacheEntry{ - revoked: revoked, - timestamp: time.Now().UTC(), - } + if len(c.Request.TLS.VerifiedChains) == 0 { + return "", errors.New("no verified cert in request") } - return revoked, nil -} + // although there can be multiple chains, the leaf certificate is the same + // we take the first one + leaf = c.Request.TLS.VerifiedChains[0][0] -func (ta *TLSAuth) isInvalid(cert *x509.Certificate, issuer *x509.Certificate) (bool, error) { - if ta.isExpired(cert) { - return true, nil + if err := ta.checkAllowedOU(leaf.Subject.OrganizationalUnit); err != nil { + return "", err } - revoked, err := ta.isRevoked(cert, issuer) - if err != nil { - // Fail securely, if we can't check the revocation status, let's consider the cert invalid - // We may change this in the future based on users feedback, but this seems the most sensible thing to do - return true, fmt.Errorf("could not check for client certification revocation status: %w", err) + if ta.isExpired(leaf) { + return "", 
errors.New("client certificate is expired") } - return revoked, nil -} - -func (ta *TLSAuth) SetAllowedOu(allowedOus []string) error { - for _, ou := range allowedOus { - // disallow empty ou - if ou == "" { - return errors.New("empty ou isn't allowed") + if validErr, cached := ta.revocationCache.Get(leaf); cached { + if validErr != nil { + return "", fmt.Errorf("(cache) %w", validErr) } - // drop & warn on duplicate ou - ok := true - - for _, validOu := range ta.AllowedOUs { - if validOu == ou { - ta.logger.Warningf("dropping duplicate ou %s", ou) - - ok = false - } - } - - if ok { - ta.AllowedOUs = append(ta.AllowedOUs, ou) - } + return leaf.Subject.CommonName, nil } - return nil -} + okToCache := true -func (ta *TLSAuth) ValidateCert(c *gin.Context) (bool, string, error) { - // Checks cert validity, Returns true + CN if client cert matches requested OU - var clientCert *x509.Certificate - - if c.Request.TLS == nil || len(c.Request.TLS.PeerCertificates) == 0 { - // do not error if it's not TLS or there are no peer certs - return false, "", nil - } + var validErr error - if len(c.Request.TLS.VerifiedChains) > 0 { - validOU := false - clientCert = c.Request.TLS.VerifiedChains[0][0] - - for _, ou := range clientCert.Subject.OrganizationalUnit { - for _, allowedOu := range ta.AllowedOUs { - if allowedOu == ou { - validOU = true - break - } - } - } + var couldCheck bool - if !validOU { - return false, "", fmt.Errorf("client certificate OU (%v) doesn't match expected OU (%v)", - clientCert.Subject.OrganizationalUnit, ta.AllowedOUs) - } + for _, chain := range c.Request.TLS.VerifiedChains { + validErr, couldCheck = ta.checkRevocationPath(chain) + okToCache = okToCache && couldCheck - revoked, err := ta.isInvalid(clientCert, c.Request.TLS.VerifiedChains[0][1]) - if err != nil { - ta.logger.Errorf("TLSAuth: error checking if client certificate is revoked: %s", err) - return false, "", fmt.Errorf("could not check for client certification revocation status: %w", err) - } - 
- if revoked { - return false, "", fmt.Errorf("client certificate for CN=%s OU=%s is revoked", clientCert.Subject.CommonName, clientCert.Subject.OrganizationalUnit) + if validErr != nil { + break } + } - ta.logger.Debugf("client OU %v is allowed vs required OU %v", clientCert.Subject.OrganizationalUnit, ta.AllowedOUs) + if okToCache { + ta.revocationCache.Set(leaf, validErr) + } - return true, clientCert.Subject.CommonName, nil + if validErr != nil { + return "", validErr } - return false, "", errors.New("no verified cert in request") + return leaf.Subject.CommonName, nil } func NewTLSAuth(allowedOus []string, crlPath string, cacheExpiration time.Duration, logger *log.Entry) (*TLSAuth, error) { + var err error + + cache := NewRevocationCache(cacheExpiration, logger) + ta := &TLSAuth{ - revocationCache: map[string]cacheEntry{}, - cacheExpiration: cacheExpiration, - CrlPath: crlPath, + revocationCache: cache, + ocspChecker: NewOCSPChecker(logger), logger: logger, } - err := ta.SetAllowedOu(allowedOus) - if err != nil { + switch crlPath { + case "": + logger.Info("no crl_path, skipping CRL checks") + default: + ta.crlChecker, err = NewCRLChecker(crlPath, cache.Empty, logger) + if err != nil { + return nil, err + } + } + + if err := ta.setAllowedOu(allowedOus); err != nil { return nil, err } diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 6b4986d45d7..765e93ebee2 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -3,6 +3,19 @@ set -u +# root: root CA +# inter: intermediate CA +# inter_rev: intermediate CA revoked by root (CRL3) +# leaf: valid client cert +# leaf_rev1: client cert revoked by inter (CRL1) +# leaf_rev2: client cert revoked by inter (CRL2) +# leaf_rev3: client cert (indirectly) revoked by root +# +# CRL1: inter revokes leaf_rev1 +# CRL2: inter revokes leaf_rev2 +# CRL3: root revokes inter_rev +# CRL4: root revokes leaf, but is ignored + setup_file() { load "../lib/setup_file.sh" 
./instance-data load @@ -10,43 +23,96 @@ setup_file() { tmpdir="$BATS_FILE_TMPDIR" export tmpdir - CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" + CFDIR="$BATS_TEST_DIRNAME/testdata/cfssl" export CFDIR - # Generate the CA - cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca" - - # Generate an intermediate - cfssl gencert --initca "${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - cfssl sign -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - - # Generate server cert for crowdsec with the intermediate - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server" - - # Generate client cert for the bouncer - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer" - - # Genearte client cert for the bouncer with an invalid OU - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_bad_ou" - - # Generate client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate - cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | cfssljson --bare "${tmpdir}/bouncer_invalid" - - # Generate revoked client certs - for cert_name in "revoked_1" "revoked_2"; do - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/bouncer.json" 2>/dev/null | 
cfssljson --bare "${tmpdir}/${cert_name}" - cfssl certinfo -cert "${tmpdir}/${cert_name}.pem" | jq -r '.serial_number' > "${tmpdir}/serials_${cert_name}.txt" + # Root CA + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_root.json" \ + | cfssljson --bare "$tmpdir/root" + + # Intermediate CAs (valid or revoked) + for cert in "inter" "inter_rev"; do + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_intermediate.json" \ + | cfssljson --bare "$tmpdir/$cert" + + cfssl sign -loglevel 2 \ + -ca "$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile intermediate_ca "$tmpdir/$cert.csr" \ + | cfssljson --bare "$tmpdir/$cert" done - # Generate separate CRL blocks and concatenate them - for cert_name in "revoked_1" "revoked_2"; do - echo '-----BEGIN X509 CRL-----' > "${tmpdir}/crl_${cert_name}.pem" - cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" >> "${tmpdir}/crl_${cert_name}.pem" - echo '-----END X509 CRL-----' >> "${tmpdir}/crl_${cert_name}.pem" + # Server cert for crowdsec with the intermediate + cfssl gencert -loglevel 2 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=server "$CFDIR/server.json" \ + | cfssljson --bare "$tmpdir/server" + + # Client certs (valid or revoked) + for cert in "leaf" "leaf_rev1" "leaf_rev2"; do + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/bouncer.json" \ + | cfssljson --bare "$tmpdir/$cert" done - cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" - cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" + # Client cert (by revoked inter) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter_rev.pem" -ca-key "$tmpdir/inter_rev-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/bouncer.json" \ + | cfssljson --bare "$tmpdir/leaf_rev3" + + 
# Bad client cert (invalid OU) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/bouncer_invalid.json" \ + | cfssljson --bare "$tmpdir/leaf_bad_ou" + + # Bad client cert (directly signed by the CA, it should be refused by crowdsec as it uses the intermediate) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/bouncer.json" \ + | cfssljson --bare "$tmpdir/leaf_invalid" + + truncate -s 0 "$tmpdir/crl.pem" + + # Revoke certs + { + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf_rev1.pem") \ + "$tmpdir/inter.pem" \ + "$tmpdir/inter-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf_rev2.pem") \ + "$tmpdir/inter.pem" \ + "$tmpdir/inter-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/inter_rev.pem") \ + "$tmpdir/root.pem" \ + "$tmpdir/root-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf.pem") \ + "$tmpdir/root.pem" \ + "$tmpdir/root-key.pem" + echo '-----END X509 CRL-----' + } >> "$tmpdir/crl.pem" + + cat "$tmpdir/root.pem" "$tmpdir/inter.pem" > "$tmpdir/bundle.pem" config_set ' .api.server.tls.cert_file=strenv(tmpdir) + "/server.pem" | @@ -79,8 +145,12 @@ teardown() { assert_output "[]" } -@test "simulate one bouncer request with a valid cert" { - rune -0 curl -s --cert "${tmpdir}/bouncer.pem" --key "${tmpdir}/bouncer-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 +@test "simulate a bouncer request with a valid cert" { + rune -0 curl -f -s \ + --cert "$tmpdir/leaf.pem" \ + --key "$tmpdir/leaf-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + 
https://localhost:8080/v1/decisions\?ip=42.42.42.42 assert_output "null" rune -0 cscli bouncers list -o json rune -0 jq '. | length' <(output) @@ -91,27 +161,54 @@ teardown() { rune cscli bouncers delete localhost@127.0.0.1 } -@test "simulate one bouncer request with an invalid cert" { - rune curl -s --cert "${tmpdir}/bouncer_invalid.pem" --key "${tmpdir}/bouncer_invalid-key.pem" --cacert "${tmpdir}/ca-key.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 +@test "simulate a bouncer request with an invalid cert" { + rune -77 curl -f -s \ + --cert "$tmpdir/leaf_invalid.pem" \ + --key "$tmpdir/leaf_invalid-key.pem" \ + --cacert "$tmpdir/root-key.pem" \ + https://localhost:8080/v1/decisions\?ip=42.42.42.42 rune -0 cscli bouncers list -o json assert_output "[]" } -@test "simulate one bouncer request with an invalid OU" { - rune curl -s --cert "${tmpdir}/bouncer_bad_ou.pem" --key "${tmpdir}/bouncer_bad_ou-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 +@test "simulate a bouncer request with an invalid OU" { + rune -0 curl -s \ + --cert "$tmpdir/leaf_bad_ou.pem" \ + --key "$tmpdir/leaf_bad_ou-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_json '{message:"access forbidden"}' rune -0 cscli bouncers list -o json assert_output "[]" } -@test "simulate one bouncer request with a revoked certificate" { +@test "simulate a bouncer request with a revoked certificate" { # we have two certificates revoked by different CRL blocks - for cert_name in "revoked_1" "revoked_2"; do + # we connect twice to test the cache too + for cert in "leaf_rev1" "leaf_rev2" "leaf_rev1" "leaf_rev2"; do truncate_log - rune -0 curl -i -s --cert "${tmpdir}/${cert_name}.pem" --key "${tmpdir}/${cert_name}-key.pem" --cacert "${tmpdir}/bundle.pem" https://localhost:8080/v1/decisions\?ip=42.42.42.42 - assert_log --partial "client certificate is revoked by CRL" - assert_log --partial "client 
certificate for CN=localhost OU=[bouncer-ou] is revoked" + rune -0 curl -s \ + --cert "$tmpdir/$cert.pem" \ + --key "$tmpdir/$cert-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_log --partial "certificate revoked by CRL" assert_output --partial "access forbidden" rune -0 cscli bouncers list -o json assert_output "[]" done } + +# vvv this test must be last, or it can break the ones that follow + +@test "allowed_ou can't contain an empty string" { + ./instance-crowdsec stop + config_set ' + .common.log_media="stdout" | + .api.server.tls.bouncers_allowed_ou=["bouncer-ou", ""] + ' + rune -1 wait-for "$CROWDSEC" + assert_stderr --partial "allowed_ou configuration contains invalid empty string" +} + +# ^^^ this test must be last, or it can break the ones that follow diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index 52231704558..ef2915e3880 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -3,6 +3,20 @@ set -u + +# root: root CA +# inter: intermediate CA +# inter_rev: intermediate CA revoked by root (CRL3) +# leaf: valid client cert +# leaf_rev1: client cert revoked by inter (CRL1) +# leaf_rev2: client cert revoked by inter (CRL2) +# leaf_rev3: client cert (indirectly) revoked by root +# +# CRL1: inter revokes leaf_rev1 +# CRL2: inter revokes leaf_rev2 +# CRL3: root revokes inter_rev +# CRL4: root revokes leaf, but is ignored + setup_file() { load "../lib/setup_file.sh" ./instance-data load @@ -13,43 +27,96 @@ setup_file() { tmpdir="$BATS_FILE_TMPDIR" export tmpdir - CFDIR="${BATS_TEST_DIRNAME}/testdata/cfssl" + CFDIR="$BATS_TEST_DIRNAME/testdata/cfssl" export CFDIR - # Generate the CA - cfssl gencert --initca "${CFDIR}/ca.json" 2>/dev/null | cfssljson --bare "${tmpdir}/ca" - - # Generate an intermediate - cfssl gencert --initca "${CFDIR}/intermediate.json" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - cfssl sign -ca "${tmpdir}/ca.pem" 
-ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile intermediate_ca "${tmpdir}/inter.csr" 2>/dev/null | cfssljson --bare "${tmpdir}/inter" - - # Generate server cert for crowdsec with the intermediate - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=server "${CFDIR}/server.json" 2>/dev/null | cfssljson --bare "${tmpdir}/server" - - # Generate client cert for the agent - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent" - - # Genearte client cert for the agent with an invalid OU - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent_invalid.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_bad_ou" - - # Generate client cert for the bouncer directly signed by the CA, it should be refused by crowdsec as uses the intermediate - cfssl gencert -ca "${tmpdir}/ca.pem" -ca-key "${tmpdir}/ca-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/agent_invalid" - - # Generate revoked client cert - for cert_name in "revoked_1" "revoked_2"; do - cfssl gencert -ca "${tmpdir}/inter.pem" -ca-key "${tmpdir}/inter-key.pem" -config "${CFDIR}/profiles.json" -profile=client "${CFDIR}/agent.json" 2>/dev/null | cfssljson --bare "${tmpdir}/${cert_name}" - cfssl certinfo -cert "${tmpdir}/${cert_name}.pem" | jq -r '.serial_number' > "${tmpdir}/serials_${cert_name}.txt" + # Root CA + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_root.json" \ + | cfssljson --bare "$tmpdir/root" + + # Intermediate CAs (valid or revoked) + for cert in "inter" "inter_rev"; do + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_intermediate.json" \ + | cfssljson --bare "$tmpdir/$cert" + + cfssl sign -loglevel 2 \ + -ca 
"$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile intermediate_ca "$tmpdir/$cert.csr" \ + | cfssljson --bare "$tmpdir/$cert" done - # Generate separate CRL blocks and concatenate them - for cert_name in "revoked_1" "revoked_2"; do - echo '-----BEGIN X509 CRL-----' > "${tmpdir}/crl_${cert_name}.pem" - cfssl gencrl "${tmpdir}/serials_${cert_name}.txt" "${tmpdir}/ca.pem" "${tmpdir}/ca-key.pem" >> "${tmpdir}/crl_${cert_name}.pem" - echo '-----END X509 CRL-----' >> "${tmpdir}/crl_${cert_name}.pem" + # Server cert for crowdsec with the intermediate + cfssl gencert -loglevel 2 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=server "$CFDIR/server.json" \ + | cfssljson --bare "$tmpdir/server" + + # Client certs (valid or revoked) + for cert in "leaf" "leaf_rev1" "leaf_rev2"; do + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/agent.json" \ + | cfssljson --bare "$tmpdir/$cert" done - cat "${tmpdir}/crl_revoked_1.pem" "${tmpdir}/crl_revoked_2.pem" >"${tmpdir}/crl.pem" - cat "${tmpdir}/ca.pem" "${tmpdir}/inter.pem" > "${tmpdir}/bundle.pem" + # Client cert (by revoked inter) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter_rev.pem" -ca-key "$tmpdir/inter_rev-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/agent.json" \ + | cfssljson --bare "$tmpdir/leaf_rev3" + + # Bad client cert (invalid OU) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + "$CFDIR/agent_invalid.json" \ + | cfssljson --bare "$tmpdir/leaf_bad_ou" + + # Bad client cert (directly signed by the CA, it should be refused by crowdsec as it uses the intermediate) + cfssl gencert -loglevel 3 \ + -ca "$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile=client \ + 
"$CFDIR/agent.json" \ + | cfssljson --bare "$tmpdir/leaf_invalid" + + truncate -s 0 "$tmpdir/crl.pem" + + # Revoke certs + { + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf_rev1.pem") \ + "$tmpdir/inter.pem" \ + "$tmpdir/inter-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf_rev2.pem") \ + "$tmpdir/inter.pem" \ + "$tmpdir/inter-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/inter_rev.pem") \ + "$tmpdir/root.pem" \ + "$tmpdir/root-key.pem" + echo '-----END X509 CRL-----' + + echo '-----BEGIN X509 CRL-----' + cfssl gencrl \ + <(cert_serial_number "$tmpdir/leaf.pem") \ + "$tmpdir/root.pem" \ + "$tmpdir/root-key.pem" + echo '-----END X509 CRL-----' + } >> "$tmpdir/crl.pem" + + cat "$tmpdir/root.pem" "$tmpdir/inter.pem" > "$tmpdir/bundle.pem" config_set ' .api.server.tls.cert_file=strenv(tmpdir) + "/server.pem" | @@ -62,7 +129,7 @@ setup_file() { # remove all machines for machine in $(cscli machines list -o json | jq -r '.[].machineId'); do - cscli machines delete "${machine}" >/dev/null 2>&1 + cscli machines delete "$machine" >/dev/null 2>&1 done config_disable_agent @@ -106,30 +173,32 @@ teardown() { } @test "invalid OU for agent" { - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | - .key_path=strenv(tmpdir) + "/agent_bad_ou-key.pem" | - .cert_path=strenv(tmpdir) + "/agent_bad_ou.pem" | + .key_path=strenv(tmpdir) + "/leaf_bad_ou-key.pem" | + .cert_path=strenv(tmpdir) + "/leaf_bad_ou.pem" | .url="https://127.0.0.1:8080" ' - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + config_set "$CONFIG_DIR/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start rune -0 cscli machines list -o json assert_output '[]' } 
@test "we have exactly one machine registered with TLS" { - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | - .key_path=strenv(tmpdir) + "/agent-key.pem" | - .cert_path=strenv(tmpdir) + "/agent.pem" | + .key_path=strenv(tmpdir) + "/leaf-key.pem" | + .cert_path=strenv(tmpdir) + "/leaf.pem" | .url="https://127.0.0.1:8080" ' - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + config_set "$CONFIG_DIR/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start rune -0 cscli lapi status + # second connection, test the tls cache + rune -0 cscli lapi status rune -0 cscli machines list -o json rune -0 jq -c '[. | length, .[0].machineId[0:32], .[0].isValidated, .[0].ipAddress, .[0].auth_type]' <(output) @@ -154,24 +223,24 @@ teardown() { # TLS cannot be used with a unix socket - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + "/bundle.pem" ' rune -1 cscli lapi status assert_stderr --partial "loading api client: cannot use TLS with a unix socket" - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' del(.ca_cert_path) | - .key_path=strenv(tmpdir) + "/agent-key.pem" + .key_path=strenv(tmpdir) + "/leaf-key.pem" ' rune -1 cscli lapi status assert_stderr --partial "loading api client: cannot use TLS with a unix socket" - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' del(.key_path) | - .cert_path=strenv(tmpdir) + "/agent.pem" + .cert_path=strenv(tmpdir) + "/leaf.pem" ' rune -1 cscli lapi status @@ -181,13 +250,13 @@ teardown() { } @test "invalid cert for agent" { - config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + 
"/bundle.pem" | - .key_path=strenv(tmpdir) + "/agent_invalid-key.pem" | - .cert_path=strenv(tmpdir) + "/agent_invalid.pem" | + .key_path=strenv(tmpdir) + "/leaf_invalid-key.pem" | + .cert_path=strenv(tmpdir) + "/leaf_invalid.pem" | .url="https://127.0.0.1:8080" ' - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + config_set "$CONFIG_DIR/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start rune -1 cscli lapi status rune -0 cscli machines list -o json @@ -196,22 +265,35 @@ teardown() { @test "revoked cert for agent" { # we have two certificates revoked by different CRL blocks - for cert_name in "revoked_1" "revoked_2"; do + # we connect twice to test the cache too + for cert in "leaf_rev1" "leaf_rev2" "leaf_rev1" "leaf_rev2"; do truncate_log - cert_name="$cert_name" config_set "${CONFIG_DIR}/local_api_credentials.yaml" ' + cert="$cert" config_set "$CONFIG_DIR/local_api_credentials.yaml" ' .ca_cert_path=strenv(tmpdir) + "/bundle.pem" | - .key_path=strenv(tmpdir) + "/" + strenv(cert_name) + "-key.pem" | - .cert_path=strenv(tmpdir) + "/" + strenv(cert_name) + ".pem" | + .key_path=strenv(tmpdir) + "/" + strenv(cert) + "-key.pem" | + .cert_path=strenv(tmpdir) + "/" + strenv(cert) + ".pem" | .url="https://127.0.0.1:8080" ' - config_set "${CONFIG_DIR}/local_api_credentials.yaml" 'del(.login,.password)' + config_set "$CONFIG_DIR/local_api_credentials.yaml" 'del(.login,.password)' ./instance-crowdsec start rune -1 cscli lapi status - assert_log --partial "client certificate is revoked by CRL" - assert_log --partial "client certificate for CN=localhost OU=[agent-ou] is revoked" + assert_log --partial "certificate revoked by CRL" rune -0 cscli machines list -o json assert_output '[]' ./instance-crowdsec stop done } + +# vvv this test must be last, or it can break the ones that follow + +@test "allowed_ou can't contain an empty string" { + config_set ' + .common.log_media="stdout" | + 
.api.server.tls.agents_allowed_ou=["agent-ou", ""] + ' + rune -1 wait-for "$CROWDSEC" + assert_stderr --partial "allowed_ou configuration contains invalid empty string" +} + +# ^^^ this test must be last, or it can break the ones that follow diff --git a/test/bats/testdata/cfssl/agent.json b/test/bats/testdata/cfssl/agent.json index 693e3aa512b..47b342e5a40 100644 --- a/test/bats/testdata/cfssl/agent.json +++ b/test/bats/testdata/cfssl/agent.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,5 +12,5 @@ "OU": "agent-ou", "ST": "France" } - ] - } \ No newline at end of file + ] +} diff --git a/test/bats/testdata/cfssl/agent_invalid.json b/test/bats/testdata/cfssl/agent_invalid.json index c61d4dee677..eb7db8d96fb 100644 --- a/test/bats/testdata/cfssl/agent_invalid.json +++ b/test/bats/testdata/cfssl/agent_invalid.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,5 +12,5 @@ "OU": "this-is-not-the-ou-youre-looking-for", "ST": "France" } - ] - } \ No newline at end of file + ] +} diff --git a/test/bats/testdata/cfssl/bouncer.json b/test/bats/testdata/cfssl/bouncer.json index 9a07f576610..bf642c48ad8 100644 --- a/test/bats/testdata/cfssl/bouncer.json +++ b/test/bats/testdata/cfssl/bouncer.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,5 +12,5 @@ "OU": "bouncer-ou", "ST": "France" } - ] - } \ No newline at end of file + ] +} diff --git a/test/bats/testdata/cfssl/bouncer_invalid.json b/test/bats/testdata/cfssl/bouncer_invalid.json index c61d4dee677..eb7db8d96fb 
100644 --- a/test/bats/testdata/cfssl/bouncer_invalid.json +++ b/test/bats/testdata/cfssl/bouncer_invalid.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,5 +12,5 @@ "OU": "this-is-not-the-ou-youre-looking-for", "ST": "France" } - ] - } \ No newline at end of file + ] +} diff --git a/test/bats/testdata/cfssl/ca.json b/test/bats/testdata/cfssl/ca.json deleted file mode 100644 index ed907e0375b..00000000000 --- a/test/bats/testdata/cfssl/ca.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "CN": "CrowdSec Test CA", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ - { - "C": "FR", - "L": "Paris", - "O": "Crowdsec", - "OU": "Crowdsec", - "ST": "France" - } - ] -} \ No newline at end of file diff --git a/test/bats/testdata/cfssl/intermediate.json b/test/bats/testdata/cfssl/ca_intermediate.json similarity index 53% rename from test/bats/testdata/cfssl/intermediate.json rename to test/bats/testdata/cfssl/ca_intermediate.json index 3996ce6e189..34f1583da06 100644 --- a/test/bats/testdata/cfssl/intermediate.json +++ b/test/bats/testdata/cfssl/ca_intermediate.json @@ -1,10 +1,10 @@ { - "CN": "CrowdSec Test CA Intermediate", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "CrowdSec Test CA Intermediate", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,8 +12,8 @@ "OU": "Crowdsec Intermediate", "ST": "France" } - ], - "ca": { + ], + "ca": { "expiry": "42720h" } - } \ No newline at end of file +} diff --git a/test/bats/testdata/cfssl/ca_root.json b/test/bats/testdata/cfssl/ca_root.json new file mode 100644 index 00000000000..a0d64796637 --- /dev/null +++ b/test/bats/testdata/cfssl/ca_root.json @@ -0,0 +1,16 @@ +{ + "CN": "CrowdSec Test CA", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "FR", + "L": "Paris", + 
"O": "Crowdsec", + "OU": "Crowdsec", + "ST": "France" + } + ] +} diff --git a/test/bats/testdata/cfssl/profiles.json b/test/bats/testdata/cfssl/profiles.json index d0dfced4a47..47611beb64c 100644 --- a/test/bats/testdata/cfssl/profiles.json +++ b/test/bats/testdata/cfssl/profiles.json @@ -1,44 +1,37 @@ { - "signing": { - "default": { + "signing": { + "default": { + "expiry": "8760h" + }, + "profiles": { + "intermediate_ca": { + "usages": [ + "signing", + "key encipherment", + "cert sign", + "crl sign", + "server auth", + "client auth" + ], + "expiry": "8760h", + "ca_constraint": { + "is_ca": true, + "max_path_len": 0, + "max_path_len_zero": true + } + }, + "server": { + "usages": [ + "server auth" + ], "expiry": "8760h" }, - "profiles": { - "intermediate_ca": { - "usages": [ - "signing", - "digital signature", - "key encipherment", - "cert sign", - "crl sign", - "server auth", - "client auth" - ], - "expiry": "8760h", - "ca_constraint": { - "is_ca": true, - "max_path_len": 0, - "max_path_len_zero": true - } - }, - "server": { - "usages": [ - "signing", - "digital signing", - "key encipherment", - "server auth" - ], - "expiry": "8760h" - }, - "client": { - "usages": [ - "signing", - "digital signature", - "key encipherment", - "client auth" - ], - "expiry": "8760h" - } + "client": { + "usages": [ + "client auth" + ], + "expiry": "8760h" } } - } \ No newline at end of file + } +} diff --git a/test/bats/testdata/cfssl/server.json b/test/bats/testdata/cfssl/server.json index 37018259e95..cce97037ca7 100644 --- a/test/bats/testdata/cfssl/server.json +++ b/test/bats/testdata/cfssl/server.json @@ -1,10 +1,10 @@ { - "CN": "localhost", - "key": { - "algo": "rsa", - "size": 2048 - }, - "names": [ + "CN": "localhost", + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ { "C": "FR", "L": "Paris", @@ -12,9 +12,9 @@ "OU": "Crowdsec Server", "ST": "France" } - ], - "hosts": [ - "127.0.0.1", - "localhost" - ] - } \ No newline at end of file + ], + "hosts": [ + 
"127.0.0.1", + "localhost" + ] +} diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 3e6db0f12ff..ac651c68c4f 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -155,6 +155,11 @@ assert_log() { } export -f assert_log +cert_serial_number() { + cfssl certinfo -cert "$1" | jq -r '.serial_number' +} +export -f cert_serial_number + # Compare ignoring the key order, and allow "expected" without quoted identifiers. # Preserve the output variable in case the following commands require it. assert_json() { From d4e6b75517a65513f2e1df6991d7cd30fd67d0b4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 26 Jun 2024 11:06:44 +0200 Subject: [PATCH 200/581] CI: use go 1.22.4 (#3095) * CI: use go 1.22.4 * fix alpine build --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 4 ++-- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 941d4d15f13..e537aee8d1b 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 8cc544523ef..659102ced56 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: 
"Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 87101be0159..1d12a371430 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 0d9906d11f0..e72973e1b1f 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 9bca2122513..d8011b16247 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index cee77161a8e..ed3f7b4a4c2 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index daf6312447b..5c2ef0e0b0d 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 41e84189383..7f192a85b8f 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index bf8d5fe5f1a..a5ce1b37df3 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.3" + go-version: "1.22.4" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index faa50f3f79a..45443e971ac 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.3-alpine3.18 AS build +FROM golang:1.22.4-alpine3.20 AS build ARG BUILD_VERSION @@ -20,7 +20,7 @@ RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold core COPY . . 
-RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ +RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_SOURCE" && \ cd crowdsec-v* && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ diff --git a/Dockerfile.debian b/Dockerfile.debian index 0ef1727f3e6..6fc5a15f766 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.3-bookworm AS build +FROM golang:1.22.4-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0fc53242a9f..77015d18ef3 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.3' + version: '1.22.4' - pwsh: | choco install -y make From 206211ce53554eb1bb6e947836818ec3af52af54 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 26 Jun 2024 12:16:17 +0200 Subject: [PATCH 201/581] lint: import statement order (#3085) * lint: import statement order * lint --- cmd/crowdsec/lapiclient.go | 2 +- cmd/notification-file/main.go | 11 ++-- cmd/notification-sentinel/main.go | 7 ++- pkg/acquisition/acquisition.go | 6 +-- pkg/acquisition/modules/appsec/appsec.go | 54 +++++++++---------- .../modules/appsec/appsec_hooks_test.go | 8 +-- .../modules/appsec/appsec_lnx_test.go | 6 +-- .../modules/appsec/appsec_remediation_test.go | 5 +- .../modules/appsec/appsec_rules_test.go | 8 ++- .../modules/appsec/appsec_runner.go | 10 ++-- pkg/acquisition/modules/appsec/appsec_test.go | 8 +-- pkg/acquisition/modules/appsec/rx_operator.go | 5 +- pkg/acquisition/modules/appsec/utils.go | 11 ++-- .../modules/cloudwatch/cloudwatch_test.go | 11 ++-- pkg/acquisition/modules/docker/docker_test.go | 11 ++-- .../modules/journalctl/journalctl_test.go | 9 ++-- .../modules/kinesis/kinesis_test.go | 11 ++-- .../modules/kubernetesaudit/k8s_audit_test.go | 6 +-- .../loki/internal/lokiclient/loki_client.go | 9 ++-- 
pkg/acquisition/modules/s3/s3_test.go | 9 ++-- .../internal/parser/rfc5424/parse_test.go | 4 +- pkg/acquisition/modules/syslog/syslog_test.go | 8 +-- pkg/alertcontext/alertcontext_test.go | 5 +- pkg/apiserver/apic_metrics.go | 2 +- pkg/apiserver/controllers/v1/utils.go | 2 +- pkg/appsec/appsec.go | 5 +- pkg/appsec/appsec_rules_collection.go | 6 +-- pkg/appsec/loader.go | 6 +-- pkg/csconfig/config.go | 3 +- pkg/csplugin/helpers.go | 3 +- pkg/exprhelpers/crowdsec_cti.go | 5 +- pkg/exprhelpers/debugger_test.go | 3 +- pkg/exprhelpers/expr_lib.go | 3 +- pkg/exprhelpers/jsonextract.go | 8 ++- pkg/leakybucket/bayesian.go | 3 +- pkg/leakybucket/bucket.go | 9 ++-- pkg/leakybucket/conditional.go | 1 + pkg/leakybucket/manager_run_test.go | 11 ++-- pkg/leakybucket/timemachine.go | 3 +- pkg/leakybucket/trigger.go | 3 +- pkg/parser/whitelist.go | 3 +- pkg/types/event.go | 2 +- 42 files changed, 151 insertions(+), 154 deletions(-) diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index ae23850eb0a..6cc0fba9515 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -10,8 +10,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go index 467bdd4a4ff..7fc529cff41 100644 --- a/cmd/notification-file/main.go +++ b/cmd/notification-file/main.go @@ -11,10 +11,11 @@ import ( "sync" "time" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) var ( @@ -69,7 +70,7 @@ func (r *LogRotate) rotateLogs(cfg PluginConfig) { } // Reopen the FileWriter FileWriter.Close() - FileWriter, err = 
os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { logger.Error("Failed to reopen log file", "error", err) } @@ -173,7 +174,7 @@ func WriteToFileWithCtx(ctx context.Context, cfg PluginConfig, log string) error // The file has been rotated outside our control logger.Info("Log file has been rotated or missing attempting to reopen it") FileWriter.Close() - FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + FileWriter, err = os.OpenFile(cfg.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { return err } @@ -213,7 +214,7 @@ func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (* return &protobufs.Empty{}, err } FileWriteMutex = &sync.Mutex{} - FileWriter, err = os.OpenFile(d.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + FileWriter, err = os.OpenFile(d.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) if err != nil { logger.Error("Failed to open log file", "error", err) return &protobufs.Empty{}, err @@ -230,7 +231,7 @@ func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (* } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/cmd/notification-sentinel/main.go b/cmd/notification-sentinel/main.go index 9c34e63a289..a29e941f80c 100644 --- a/cmd/notification-sentinel/main.go +++ b/cmd/notification-sentinel/main.go @@ -11,10 +11,11 @@ import ( "strings" "time" - "github.com/crowdsecurity/crowdsec/pkg/protobufs" "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) type PluginConfig struct { @@ -54,7 +55,6 @@ func (s *SentinelPlugin) getAuthorizationHeader(now string, length int, pluginNa } 
func (s *SentinelPlugin) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { - if _, ok := s.PluginConfigByName[notification.Name]; !ok { return nil, fmt.Errorf("invalid plugin config name %s", notification.Name) } @@ -73,7 +73,6 @@ func (s *SentinelPlugin) Notify(ctx context.Context, notification *protobufs.Not now := time.Now().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT") authorization, err := s.getAuthorizationHeader(now, len(notification.Text), notification.Name) - if err != nil { return &protobufs.Empty{}, err } @@ -113,7 +112,7 @@ func (s *SentinelPlugin) Configure(ctx context.Context, config *protobufs.Config } func main() { - var handshake = plugin.HandshakeConfig{ + handshake := plugin.HandshakeConfig{ ProtocolVersion: 1, MagicCookieKey: "CROWDSEC_PLUGIN_KEY", MagicCookieValue: os.Getenv("CROWDSEC_PLUGIN_KEY"), diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 6a91423c158..5e4a663eb9e 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -30,9 +30,8 @@ import ( s3acquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/s3" syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog" wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog" - "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -124,7 +123,6 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics /* configure the actual datasource */ if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) - } return &dataSrc, nil } @@ -181,7 +179,6 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr func 
GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { if prom == nil { return configuration.METRICS_FULL - } if !prom.Enabled { return configuration.METRICS_NONE @@ -194,7 +191,6 @@ func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { return configuration.METRICS_FULL } return configuration.METRICS_FULL - } // LoadAcquisitionFromFile unmarshals the configuration item and checks its availability diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index b141ee6d666..5b0661a21b7 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -3,6 +3,7 @@ package appsecacquisition import ( "context" "encoding/json" + "errors" "fmt" "net" "net/http" @@ -10,17 +11,18 @@ import ( "sync" "time" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/trace" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" ) const ( @@ -28,9 +30,7 @@ const ( OutOfBand = "outofband" ) -var ( - DefaultAuthCacheDuration = (1 * time.Minute) -) +var DefaultAuthCacheDuration = (1 * time.Minute) // configuration structure of the acquis for the application security engine type AppsecSourceConfig struct { @@ -94,10 +94,9 @@ type BodyResponse struct { } func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { - err := yaml.UnmarshalStrict(yamlConfig, &w.config) if err != nil { - return fmt.Errorf("Cannot parse appsec configuration: 
%w", err) + return fmt.Errorf("cannot parse appsec configuration: %w", err) } if w.config.ListenAddr == "" && w.config.ListenSocket == "" { @@ -122,7 +121,7 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { } if w.config.AppsecConfig == "" && w.config.AppsecConfigPath == "" { - return fmt.Errorf("appsec_config or appsec_config_path must be set") + return errors.New("appsec_config or appsec_config_path must be set") } if w.config.Name == "" { @@ -177,26 +176,25 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe if w.config.AppsecConfigPath != "" { err := appsecCfg.LoadByPath(w.config.AppsecConfigPath) if err != nil { - return fmt.Errorf("unable to load appsec_config : %s", err) + return fmt.Errorf("unable to load appsec_config: %w", err) } } else if w.config.AppsecConfig != "" { err := appsecCfg.Load(w.config.AppsecConfig) if err != nil { - return fmt.Errorf("unable to load appsec_config : %s", err) + return fmt.Errorf("unable to load appsec_config: %w", err) } } else { - return fmt.Errorf("no appsec_config provided") + return errors.New("no appsec_config provided") } w.AppsecRuntime, err = appsecCfg.Build() if err != nil { - return fmt.Errorf("unable to build appsec_config : %s", err) + return fmt.Errorf("unable to build appsec_config: %w", err) } err = w.AppsecRuntime.ProcessOnLoadRules() - if err != nil { - return fmt.Errorf("unable to process on load rules : %s", err) + return fmt.Errorf("unable to process on load rules: %w", err) } w.AppsecRunners = make([]AppsecRunner, w.config.Routines) @@ -207,15 +205,15 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe wrt := *w.AppsecRuntime wrt.Logger = w.logger.Dup().WithField("runner_uuid", appsecRunnerUUID) runner := AppsecRunner{ - inChan: w.InChan, - UUID: appsecRunnerUUID, - logger: w.logger.WithField("runner_uuid", appsecRunnerUUID), + inChan: w.InChan, + UUID: appsecRunnerUUID, + logger: w.logger.WithField("runner_uuid", 
appsecRunnerUUID), AppsecRuntime: &wrt, Labels: w.config.Labels, } err := runner.Init(appsecCfg.GetDataDir()) if err != nil { - return fmt.Errorf("unable to initialize runner : %s", err) + return fmt.Errorf("unable to initialize runner: %w", err) } w.AppsecRunners[nbRoutine] = runner } @@ -228,7 +226,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe } func (w *AppsecSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { - return fmt.Errorf("AppSec datasource does not support command line acquisition") + return errors.New("AppSec datasource does not support command line acquisition") } func (w *AppsecSource) GetMode() string { @@ -240,7 +238,7 @@ func (w *AppsecSource) GetName() string { } func (w *AppsecSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - return fmt.Errorf("AppSec datasource does not support command line acquisition") + return errors.New("AppSec datasource does not support command line acquisition") } func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { @@ -262,7 +260,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) _ = os.RemoveAll(w.config.ListenSocket) listener, err := net.Listen("unix", w.config.ListenSocket) if err != nil { - return fmt.Errorf("Appsec server failed: %w", err) + return fmt.Errorf("appsec server failed: %w", err) } defer listener.Close() if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { @@ -271,7 +269,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) err = w.server.Serve(listener) } if err != nil && err != http.ErrServerClosed { - return fmt.Errorf("Appsec server failed: %w", err) + return fmt.Errorf("appsec server failed: %w", err) } } return nil @@ -287,7 +285,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } if err != nil && err != http.ErrServerClosed { - return 
fmt.Errorf("Appsec server failed: %w", err) + return fmt.Errorf("appsec server failed: %w", err) } } return nil @@ -334,7 +332,6 @@ func (w *AppsecSource) IsAuth(apiKey string) bool { defer resp.Body.Close() return resp.StatusCode == http.StatusOK - } // should this be in the runner ? @@ -401,5 +398,4 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { } else { rw.Write(body) } - } diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index 65fba33ae81..c549d2ef1d1 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -5,12 +5,13 @@ import ( "net/url" "testing" - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestAppsecOnMatchHooks(t *testing.T) { @@ -41,7 +42,6 @@ func TestAppsecOnMatchHooks(t *testing.T) { require.Equal(t, 403, responses[0].BouncerHTTPResponseCode) require.Equal(t, 403, responses[0].UserHTTPResponseCode) require.Equal(t, appsec.BanRemediation, responses[0].Action) - }, }, { diff --git a/pkg/acquisition/modules/appsec/appsec_lnx_test.go b/pkg/acquisition/modules/appsec/appsec_lnx_test.go index 93edc9d9ec3..3e40a1f970c 100644 --- a/pkg/acquisition/modules/appsec/appsec_lnx_test.go +++ b/pkg/acquisition/modules/appsec/appsec_lnx_test.go @@ -6,15 +6,15 @@ package appsecacquisition import ( "testing" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" 
"github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" ) func TestAppsecRuleTransformsOthers(t *testing.T) { - log.SetLevel(log.TraceLevel) tests := []appsecRuleTest{ { diff --git a/pkg/acquisition/modules/appsec/appsec_remediation_test.go b/pkg/acquisition/modules/appsec/appsec_remediation_test.go index a7f117389b3..06016b6251f 100644 --- a/pkg/acquisition/modules/appsec/appsec_remediation_test.go +++ b/pkg/acquisition/modules/appsec/appsec_remediation_test.go @@ -5,14 +5,14 @@ import ( "net/url" "testing" + "github.com/stretchr/testify/require" + "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/stretchr/testify/require" ) func TestAppsecDefaultPassRemediation(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic non-matching rule", @@ -152,7 +152,6 @@ func TestAppsecDefaultPassRemediation(t *testing.T) { } func TestAppsecDefaultRemediation(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic matching rule", diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go index b25e4465f0f..909f16357ed 100644 --- a/pkg/acquisition/modules/appsec/appsec_rules_test.go +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -5,15 +5,15 @@ import ( "net/url" "testing" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/require" ) func TestAppsecRuleMatches(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic matching rule", @@ -368,7 +368,6 @@ toto } func TestAppsecRuleTransforms(t *testing.T) { - log.SetLevel(log.TraceLevel) tests := []appsecRuleTest{ { @@ -568,7 +567,6 
@@ func TestAppsecRuleTransforms(t *testing.T) { } func TestAppsecRuleZones(t *testing.T) { - log.SetLevel(log.TraceLevel) tests := []appsecRuleTest{ { diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index c015db74d82..ed49d6a7b41 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -6,16 +6,17 @@ import ( "slices" "time" - "github.com/crowdsecurity/coraza/v3" - corazatypes "github.com/crowdsecurity/coraza/v3/types" - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + "github.com/crowdsecurity/coraza/v3" + corazatypes "github.com/crowdsecurity/coraza/v3/types" + // load body processors via init() _ "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec/bodyprocessors" + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/types" ) // that's the runtime structure of the Application security engine as seen from the acquis @@ -178,7 +179,6 @@ func (r *AppsecRunner) processRequest(tx appsec.ExtendedTransaction, request *ap } in, err = request.Tx.ProcessRequestBody() - if err != nil { r.logger.Errorf("unable to process request body : %s", err) return err diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index c769ea3d0fd..d2079b43726 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -4,12 +4,13 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/davecgh/go-spew/spew" "github.com/google/uuid" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + 
"github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/types" ) type appsecRuleTest struct { @@ -120,5 +121,4 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { log.Infof("events : %s", spew.Sdump(OutputEvents)) log.Infof("responses : %s", spew.Sdump(OutputResponses)) test.output_asserts(OutputEvents, OutputResponses, appsecResponse, http_status) - } diff --git a/pkg/acquisition/modules/appsec/rx_operator.go b/pkg/acquisition/modules/appsec/rx_operator.go index 73060037657..4b16296fd40 100644 --- a/pkg/acquisition/modules/appsec/rx_operator.go +++ b/pkg/acquisition/modules/appsec/rx_operator.go @@ -5,10 +5,11 @@ import ( "strconv" "unicode/utf8" - "github.com/crowdsecurity/coraza/v3/experimental/plugins" - "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" "github.com/wasilibs/go-re2" "github.com/wasilibs/go-re2/experimental" + + "github.com/crowdsecurity/coraza/v3/experimental/plugins" + "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" ) type rx struct { diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 02ded2a2437..15de8046716 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -7,17 +7,19 @@ import ( "strconv" "time" + "github.com/oschwald/geoip2-golang" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/coraza/v3/collection" "github.com/crowdsecurity/coraza/v3/types/variables" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/oschwald/geoip2-golang" - "github.com/prometheus/client_golang/prometheus" - log 
"github.com/sirupsen/logrus" ) var appsecMetaKeys = []string{ @@ -368,5 +370,4 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR } return nil - } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 89a2b56bc00..bab7593f26f 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -10,15 +10,16 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) /* @@ -51,7 +52,7 @@ func checkForLocalStackAvailability() error { _, err := net.Dial("tcp", v) if err != nil { - return fmt.Errorf("while dialing %s : %s : aws endpoint isn't available", v, err) + return fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) } return nil diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index e1cc4db96ad..e332569fb3a 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -11,17 +11,17 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" dockerTypes "github.com/docker/docker/api/types" dockerContainer "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "gopkg.in/tomb.v2" 
- "github.com/stretchr/testify/assert" + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) const testContainerName = "docker_test" @@ -379,5 +379,4 @@ func TestParseLabels(t *testing.T) { assert.Equal(t, test.expected, labels) }) } - } diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index f381a227534..53e2d0802ad 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -8,14 +8,15 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index ae601020621..46e404aa49b 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -12,16 +12,17 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + 
"github.com/crowdsecurity/crowdsec/pkg/types" ) func getLocalStackEndpoint() (string, error) { @@ -30,7 +31,7 @@ func getLocalStackEndpoint() (string, error) { v = strings.TrimPrefix(v, "http://") _, err := net.Dial("tcp", v) if err != nil { - return "", fmt.Errorf("while dialing %s : %s : aws endpoint isn't available", v, err) + return "", fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) } } return endpoint, nil diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index b6e6f6b03e9..020bd4c91a0 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -6,12 +6,13 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { @@ -46,7 +47,6 @@ listen_addr: 0.0.0.0`, err := f.UnmarshalConfig([]byte(test.config)) assert.Contains(t, err.Error(), test.expectedErr) - }) } } diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 359fef5bb96..420da6e391c 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -12,10 +12,11 @@ import ( "strconv" "time" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/gorilla/websocket" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) type LokiClient struct { @@ -74,6 +75,7 @@ func (lc *LokiClient) resetFailStart() { } lc.fail_start = 
time.Time{} } + func (lc *LokiClient) shouldRetry() bool { if lc.fail_start.IsZero() { lc.Logger.Warningf("loki is not available, will retry for %s", lc.config.FailMaxDuration) @@ -185,7 +187,6 @@ func (lc *LokiClient) getURLFor(endpoint string, params map[string]string) strin u.RawQuery = queryParams.Encode() u.Path, err = url.JoinPath(lc.config.LokiPrefix, u.Path, endpoint) - if err != nil { return "" } @@ -254,8 +255,8 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) { requestHeader.Add(k, v) } lc.Logger.Infof("Connecting to %s", u) - conn, _, err := dialer.Dial(u, requestHeader) + conn, _, err := dialer.Dial(u, requestHeader) if err != nil { lc.Logger.Errorf("Error connecting to websocket, err: %s", err) return responseChan, errors.New("error connecting to websocket") @@ -264,8 +265,8 @@ func (lc *LokiClient) Tail(ctx context.Context) (chan *LokiResponse, error) { lc.t.Go(func() error { for { jsonResponse := &LokiResponse{} - err = conn.ReadJSON(jsonResponse) + err = conn.ReadJSON(jsonResponse) if err != nil { lc.Logger.Errorf("Error reading from websocket: %s", err) return fmt.Errorf("websocket error: %w", err) diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index e94521d187f..93e166dfec5 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -14,11 +14,12 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/sqs/sqsiface" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { @@ -266,10 +267,8 @@ func TestDSNAcquis(t *testing.T) { time.Sleep(2 * time.Second) 
done <- true assert.Equal(t, test.expectedCount, linesRead) - }) } - } func TestListPolling(t *testing.T) { @@ -333,7 +332,6 @@ prefix: foo/ }() err = f.StreamingAcquisition(out, &tb) - if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -414,7 +412,6 @@ sqs_name: test }() err = f.StreamingAcquisition(out, &tb) - if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go index eed72244867..0938e947fe7 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go @@ -4,9 +4,9 @@ import ( "testing" "time" - "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/cstest" ) func TestPri(t *testing.T) { diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 0e823ecd32a..1750f375138 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -7,14 +7,14 @@ import ( "testing" "time" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "gopkg.in/tomb.v2" + "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" - - "github.com/stretchr/testify/assert" ) func TestConfigure(t *testing.T) { diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index 8b598eab86c..c111d1bbcfb 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -4,10 +4,11 @@ import ( "fmt" "testing" - "github.com/crowdsecurity/crowdsec/pkg/models" - "github.com/crowdsecurity/crowdsec/pkg/types" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestNewAlertContext(t *testing.T) { diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 128ce5a9639..b8e23629e1e 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -2,10 +2,10 @@ package apiserver import ( "context" + "slices" "time" log "github.com/sirupsen/logrus" - "slices" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" diff --git a/pkg/apiserver/controllers/v1/utils.go b/pkg/apiserver/controllers/v1/utils.go index 2fcf8099e67..3cd53d217cc 100644 --- a/pkg/apiserver/controllers/v1/utils.go +++ b/pkg/apiserver/controllers/v1/utils.go @@ -9,7 +9,7 @@ import ( jwt "github.com/appleboy/gin-jwt/v2" "github.com/gin-gonic/gin" - middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database/ent" ) diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 2c971fb36c5..e43a2ad6710 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -8,11 +8,12 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v2" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v2" ) type Hook struct { diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index 2024673c330..09c1670de70 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -6,10 +6,10 @@ import ( "path/filepath" "strings" + log "github.com/sirupsen/logrus" + 
"github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - - log "github.com/sirupsen/logrus" ) type AppsecCollection struct { @@ -51,9 +51,7 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro ret := make([]AppsecCollection, 0) for _, appsecRule := range appsecRules { - tmpMatch, err := exprhelpers.Match(pattern, appsecRule.Name) - if err != nil { logger.Errorf("unable to match %s with %s : %s", appsecRule.Name, pattern, err) continue diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 82b8d440771..56ec23e3671 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -3,9 +3,10 @@ package appsec import ( "os" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this @@ -13,7 +14,6 @@ var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probabl var hub *cwhub.Hub //FIXME: this is a temporary hack to make the hub available in the package func LoadAppsecRules(hubInstance *cwhub.Hub) error { - hub = hubInstance appsecRules = make(map[string]AppsecCollectionConfig) @@ -23,7 +23,6 @@ func LoadAppsecRules(hubInstance *cwhub.Hub) error { } content, err := os.ReadFile(hubAppsecRuleItem.State.LocalPath) - if err != nil { log.Warnf("unable to read file %s : %s", hubAppsecRuleItem.State.LocalPath, err) continue @@ -32,7 +31,6 @@ func LoadAppsecRules(hubInstance *cwhub.Hub) error { var rule AppsecCollectionConfig err = yaml.UnmarshalStrict(content, &rule) - if err != nil { log.Warnf("unable to unmarshal file %s : %s", hubAppsecRuleItem.State.LocalPath, err) continue diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index e007e042bd5..ed71af4029a 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -12,10 +12,11 @@ import ( 
log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/yamlpatch" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" ) // defaultConfigDir is the base path to all configuration files, to be overridden in the Makefile */ diff --git a/pkg/csplugin/helpers.go b/pkg/csplugin/helpers.go index 75ee773b808..915f17e5dd3 100644 --- a/pkg/csplugin/helpers.go +++ b/pkg/csplugin/helpers.go @@ -5,9 +5,10 @@ import ( "os" "text/template" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" - log "github.com/sirupsen/logrus" ) var helpers = template.FuncMap{ diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 4103bad1d43..ccd67b27a49 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -6,9 +6,10 @@ import ( "time" "github.com/bluele/gcache" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/cticlient" "github.com/crowdsecurity/crowdsec/pkg/types" - log "github.com/sirupsen/logrus" ) var CTIUrl = "https://cti.api.crowdsec.net" @@ -111,7 +112,7 @@ func CrowdsecCTI(params ...any) (any, error) { return &cticlient.SmokeItem{}, cticlient.ErrLimit default: ctiClient.Logger.Warnf("CTI API error : %s", err) - return &cticlient.SmokeItem{}, fmt.Errorf("unexpected error : %v", err) + return &cticlient.SmokeItem{}, fmt.Errorf("unexpected error: %w", err) } } diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 6832b4efaa8..5f239885539 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -6,9 +6,10 @@ import ( "testing" "github.com/antonmedv/expr" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/davecgh/go-spew/spew" log 
"github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) type ExprDbgTest struct { diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index 19b25e25895..b90c1986153 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -4,8 +4,9 @@ import ( "net" "time" - "github.com/crowdsecurity/crowdsec/pkg/cticlient" "github.com/oschwald/geoip2-golang" + + "github.com/crowdsecurity/crowdsec/pkg/cticlient" ) type exprCustomFunc struct { diff --git a/pkg/exprhelpers/jsonextract.go b/pkg/exprhelpers/jsonextract.go index 6edb34e36e6..64ed97873d6 100644 --- a/pkg/exprhelpers/jsonextract.go +++ b/pkg/exprhelpers/jsonextract.go @@ -7,7 +7,6 @@ import ( "strings" "github.com/buger/jsonparser" - log "github.com/sirupsen/logrus" ) @@ -15,11 +14,11 @@ import ( func JsonExtractLib(params ...any) (any, error) { jsblob := params[0].(string) target := params[1].([]string) + value, dataType, _, err := jsonparser.Get( jsonparser.StringToBytes(jsblob), target..., ) - if err != nil { if errors.Is(err, jsonparser.KeyPathNotFoundError) { log.Debugf("%+v doesn't exist", target) @@ -93,7 +92,6 @@ func jsonExtractType(jsblob string, target string, t jsonparser.ValueType) ([]by jsonparser.StringToBytes(jsblob), fullpath..., ) - if err != nil { if errors.Is(err, jsonparser.KeyPathNotFoundError) { log.Debugf("Key %+v doesn't exist", target) @@ -115,8 +113,8 @@ func jsonExtractType(jsblob string, target string, t jsonparser.ValueType) ([]by func JsonExtractSlice(params ...any) (any, error) { jsblob := params[0].(string) target := params[1].(string) - value, err := jsonExtractType(jsblob, target, jsonparser.Array) + value, err := jsonExtractType(jsblob, target, jsonparser.Array) if err != nil { log.Errorf("JsonExtractSlice : %s", err) return []interface{}(nil), nil @@ -136,8 +134,8 @@ func JsonExtractSlice(params ...any) (any, error) { func JsonExtractObject(params ...any) (any, error) { jsblob := params[0].(string) target := 
params[1].(string) - value, err := jsonExtractType(jsblob, target, jsonparser.Object) + value, err := jsonExtractType(jsblob, target, jsonparser.Object) if err != nil { log.Errorf("JsonExtractObject: %s", err) return map[string]interface{}(nil), nil diff --git a/pkg/leakybucket/bayesian.go b/pkg/leakybucket/bayesian.go index b8d20a488f9..e56eb097ba4 100644 --- a/pkg/leakybucket/bayesian.go +++ b/pkg/leakybucket/bayesian.go @@ -5,6 +5,7 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -109,7 +110,7 @@ func (b *BayesianEvent) bayesianUpdate(c *BayesianBucket, msg types.Event, l *Le l.logger.Debugf("running condition expression: %s", b.rawCondition.ConditionalFilterName) ret, err := exprhelpers.Run(b.conditionalFilterRuntime, map[string]interface{}{"evt": &msg, "queue": l.Queue, "leaky": l}, l.logger, l.BucketConfig.Debug) if err != nil { - return fmt.Errorf("unable to run conditional filter: %s", err) + return fmt.Errorf("unable to run conditional filter: %w", err) } l.logger.Tracef("bayesian bucket expression %s returned : %v", b.rawCondition.ConditionalFilterName, ret) diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go index afb5377aa4f..e981551af8f 100644 --- a/pkg/leakybucket/bucket.go +++ b/pkg/leakybucket/bucket.go @@ -6,15 +6,16 @@ import ( "sync/atomic" "time" - "github.com/crowdsecurity/go-cs-lib/trace" - - "github.com/crowdsecurity/crowdsec/pkg/time/rate" - "github.com/crowdsecurity/crowdsec/pkg/types" "github.com/davecgh/go-spew/spew" "github.com/mohae/deepcopy" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/time/rate" + "github.com/crowdsecurity/crowdsec/pkg/types" ) // those constants are now defined in types/constants diff --git a/pkg/leakybucket/conditional.go 
b/pkg/leakybucket/conditional.go index 5ff69e60a26..0e4076c305c 100644 --- a/pkg/leakybucket/conditional.go +++ b/pkg/leakybucket/conditional.go @@ -6,6 +6,7 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) diff --git a/pkg/leakybucket/manager_run_test.go b/pkg/leakybucket/manager_run_test.go index 27b665f750c..f3fe08b697a 100644 --- a/pkg/leakybucket/manager_run_test.go +++ b/pkg/leakybucket/manager_run_test.go @@ -5,9 +5,10 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) func expectBucketCount(buckets *Buckets, expected int) error { @@ -20,7 +21,6 @@ func expectBucketCount(buckets *Buckets, expected int) error { return fmt.Errorf("expected %d live buckets, got %d", expected, count) } return nil - } func TestGCandDump(t *testing.T) { @@ -29,7 +29,7 @@ func TestGCandDump(t *testing.T) { tomb = &tomb.Tomb{} ) - var Holders = []BucketFactory{ + Holders := []BucketFactory{ //one overflowing soon + bh { Name: "test_counter_fast", @@ -80,7 +80,7 @@ func TestGCandDump(t *testing.T) { log.Printf("Pouring to bucket") - var in = types.Event{Parsed: map[string]string{"something": "something"}} + in := types.Event{Parsed: map[string]string{"something": "something"}} //pour an item that will go to leaky + counter ok, err := PourItemToHolders(in, Holders, buckets) if err != nil { @@ -156,7 +156,7 @@ func TestShutdownBuckets(t *testing.T) { log.Printf("Pouring to bucket") - var in = types.Event{Parsed: map[string]string{"something": "something"}} + in := types.Event{Parsed: map[string]string{"something": "something"}} //pour an item that will go to leaky + counter ok, err := PourItemToHolders(in, Holders, buckets) if err != nil { @@ -178,5 +178,4 @@ func TestShutdownBuckets(t *testing.T) { if err := expectBucketCount(buckets, 
2); err != nil { t.Fatal(err) } - } diff --git a/pkg/leakybucket/timemachine.go b/pkg/leakybucket/timemachine.go index 266a8be7c69..e72bb1a464c 100644 --- a/pkg/leakybucket/timemachine.go +++ b/pkg/leakybucket/timemachine.go @@ -3,8 +3,9 @@ package leakybucket import ( "time" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TimeMachinePour(l *Leaky, msg types.Event) { diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go index d50d7ecc732..b6af1431888 100644 --- a/pkg/leakybucket/trigger.go +++ b/pkg/leakybucket/trigger.go @@ -3,8 +3,9 @@ package leakybucket import ( "time" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) type Trigger struct { diff --git a/pkg/parser/whitelist.go b/pkg/parser/whitelist.go index f3739a49438..fd1c2a0546a 100644 --- a/pkg/parser/whitelist.go +++ b/pkg/parser/whitelist.go @@ -6,9 +6,10 @@ import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" + "github.com/prometheus/client_golang/prometheus" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/prometheus/client_golang/prometheus" ) type Whitelist struct { diff --git a/pkg/types/event.go b/pkg/types/event.go index 90464aadf2c..7d8ef5825a2 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -4,9 +4,9 @@ import ( "net" "time" + "github.com/antonmedv/expr/vm" log "github.com/sirupsen/logrus" - "github.com/antonmedv/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/models" ) From b081065c8ecb7560587fd76e48e253b0f787f43c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 1 Jul 2024 11:54:49 +0200 Subject: [PATCH 202/581] lint: replace "github.com/pkg/errors" in apiserver (#3097) --- .golangci.yml | 1 - pkg/apiserver/controllers/v1/errors.go | 18 +++++++++--------- 2 files changed, 9 
insertions(+), 10 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d89c8e9ed01..66c720381de 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -48,7 +48,6 @@ linters-settings: desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" files: - "!**/pkg/database/*.go" - - "!**/pkg/apiserver/controllers/v1/errors.go" yaml: files: - "!**/pkg/acquisition/acquisition.go" diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index b85b811f8a7..9004528e1b1 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -1,35 +1,35 @@ package v1 import ( + "errors" "net/http" "github.com/gin-gonic/gin" - "github.com/pkg/errors" "github.com/crowdsecurity/crowdsec/pkg/database" ) func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { - switch errors.Cause(err) { - case database.ItemNotFound: + switch { + case errors.Is(err, database.ItemNotFound): gctx.JSON(http.StatusNotFound, gin.H{"message": err.Error()}) return - case database.UserExists: + case errors.Is(err, database.UserExists): gctx.JSON(http.StatusForbidden, gin.H{"message": err.Error()}) return - case database.HashError: + case errors.Is(err, database.HashError): gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) return - case database.InsertFail: + case errors.Is(err, database.InsertFail): gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return - case database.QueryFail: + case errors.Is(err, database.QueryFail): gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return - case database.ParseTimeFail: + case errors.Is(err, database.ParseTimeFail): gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return - case database.ParseDurationFail: + case errors.Is(err, database.ParseDurationFail): gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return default: From 1acc35442c8410e55a38918024eb4c48e3ccdd79 Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 1 Jul 2024 12:14:55 +0200 Subject: [PATCH 203/581] lapi detailed metrics: db schema (#3099) --- pkg/database/ent/bouncer.go | 37 +- pkg/database/ent/bouncer/bouncer.go | 24 + pkg/database/ent/bouncer/where.go | 240 +++++ pkg/database/ent/bouncer_create.go | 54 ++ pkg/database/ent/bouncer_update.go | 156 +++ pkg/database/ent/client.go | 152 ++- pkg/database/ent/ent.go | 2 + pkg/database/ent/hook/hook.go | 12 + pkg/database/ent/machine.go | 65 +- pkg/database/ent/machine/machine.go | 30 + pkg/database/ent/machine/where.go | 260 +++++ pkg/database/ent/machine_create.go | 75 ++ pkg/database/ent/machine_update.go | 229 +++++ pkg/database/ent/metric.go | 154 +++ pkg/database/ent/metric/metric.go | 104 ++ pkg/database/ent/metric/where.go | 330 +++++++ pkg/database/ent/metric_create.go | 246 +++++ pkg/database/ent/metric_delete.go | 88 ++ pkg/database/ent/metric_query.go | 526 ++++++++++ pkg/database/ent/metric_update.go | 228 +++++ pkg/database/ent/migrate/schema.go | 31 + pkg/database/ent/mutation.go | 1187 ++++++++++++++++++++++- pkg/database/ent/predicate/predicate.go | 3 + pkg/database/ent/schema/bouncer.go | 3 + pkg/database/ent/schema/machine.go | 11 + pkg/database/ent/schema/metric.go | 43 + pkg/database/ent/tx.go | 3 + 27 files changed, 4267 insertions(+), 26 deletions(-) create mode 100644 pkg/database/ent/metric.go create mode 100644 pkg/database/ent/metric/metric.go create mode 100644 pkg/database/ent/metric/where.go create mode 100644 pkg/database/ent/metric_create.go create mode 100644 pkg/database/ent/metric_delete.go create mode 100644 pkg/database/ent/metric_query.go create mode 100644 pkg/database/ent/metric_update.go create mode 100644 pkg/database/ent/schema/metric.go diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index d7597d2a449..3b4d619e384 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -36,7 +36,13 @@ type Bouncer 
struct { // LastPull holds the value of the "last_pull" field. LastPull *time.Time `json:"last_pull"` // AuthType holds the value of the "auth_type" field. - AuthType string `json:"auth_type"` + AuthType string `json:"auth_type"` + // Osname holds the value of the "osname" field. + Osname string `json:"osname,omitempty"` + // Osversion holds the value of the "osversion" field. + Osversion string `json:"osversion,omitempty"` + // Featureflags holds the value of the "featureflags" field. + Featureflags string `json:"featureflags,omitempty"` selectValues sql.SelectValues } @@ -49,7 +55,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullBool) case bouncer.FieldID: values[i] = new(sql.NullInt64) - case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType: + case bouncer.FieldName, bouncer.FieldAPIKey, bouncer.FieldIPAddress, bouncer.FieldType, bouncer.FieldVersion, bouncer.FieldAuthType, bouncer.FieldOsname, bouncer.FieldOsversion, bouncer.FieldFeatureflags: values[i] = new(sql.NullString) case bouncer.FieldCreatedAt, bouncer.FieldUpdatedAt, bouncer.FieldLastPull: values[i] = new(sql.NullTime) @@ -135,6 +141,24 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.AuthType = value.String } + case bouncer.FieldOsname: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osname", values[i]) + } else if value.Valid { + b.Osname = value.String + } + case bouncer.FieldOsversion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osversion", values[i]) + } else if value.Valid { + b.Osversion = value.String + } + case bouncer.FieldFeatureflags: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field featureflags", values[i]) + } else if value.Valid { + b.Featureflags = 
value.String + } default: b.selectValues.Set(columns[i], values[i]) } @@ -201,6 +225,15 @@ func (b *Bouncer) String() string { builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(b.AuthType) + builder.WriteString(", ") + builder.WriteString("osname=") + builder.WriteString(b.Osname) + builder.WriteString(", ") + builder.WriteString("osversion=") + builder.WriteString(b.Osversion) + builder.WriteString(", ") + builder.WriteString("featureflags=") + builder.WriteString(b.Featureflags) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index 59afb199cb5..a6f62aeadd5 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -33,6 +33,12 @@ const ( FieldLastPull = "last_pull" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" + // FieldOsname holds the string denoting the osname field in the database. + FieldOsname = "osname" + // FieldOsversion holds the string denoting the osversion field in the database. + FieldOsversion = "osversion" + // FieldFeatureflags holds the string denoting the featureflags field in the database. + FieldFeatureflags = "featureflags" // Table holds the table name of the bouncer in the database. Table = "bouncers" ) @@ -50,6 +56,9 @@ var Columns = []string{ FieldVersion, FieldLastPull, FieldAuthType, + FieldOsname, + FieldOsversion, + FieldFeatureflags, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -132,3 +141,18 @@ func ByLastPull(opts ...sql.OrderTermOption) OrderOption { func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() } + +// ByOsname orders the results by the osname field. 
+func ByOsname(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsname, opts...).ToFunc() +} + +// ByOsversion orders the results by the osversion field. +func ByOsversion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsversion, opts...).ToFunc() +} + +// ByFeatureflags orders the results by the featureflags field. +func ByFeatureflags(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFeatureflags, opts...).ToFunc() +} diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index e3c5752331e..e02199bc0a9 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -104,6 +104,21 @@ func AuthType(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldAuthType, v)) } +// Osname applies equality check predicate on the "osname" field. It's identical to OsnameEQ. +func Osname(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsname, v)) +} + +// Osversion applies equality check predicate on the "osversion" field. It's identical to OsversionEQ. +func Osversion(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsversion, v)) +} + +// Featureflags applies equality check predicate on the "featureflags" field. It's identical to FeatureflagsEQ. +func Featureflags(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldFeatureflags, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) @@ -664,6 +679,231 @@ func AuthTypeContainsFold(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldContainsFold(FieldAuthType, v)) } +// OsnameEQ applies the EQ predicate on the "osname" field. 
+func OsnameEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsname, v)) +} + +// OsnameNEQ applies the NEQ predicate on the "osname" field. +func OsnameNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldOsname, v)) +} + +// OsnameIn applies the In predicate on the "osname" field. +func OsnameIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldOsname, vs...)) +} + +// OsnameNotIn applies the NotIn predicate on the "osname" field. +func OsnameNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldOsname, vs...)) +} + +// OsnameGT applies the GT predicate on the "osname" field. +func OsnameGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldOsname, v)) +} + +// OsnameGTE applies the GTE predicate on the "osname" field. +func OsnameGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldOsname, v)) +} + +// OsnameLT applies the LT predicate on the "osname" field. +func OsnameLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldOsname, v)) +} + +// OsnameLTE applies the LTE predicate on the "osname" field. +func OsnameLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldOsname, v)) +} + +// OsnameContains applies the Contains predicate on the "osname" field. +func OsnameContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldOsname, v)) +} + +// OsnameHasPrefix applies the HasPrefix predicate on the "osname" field. +func OsnameHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldOsname, v)) +} + +// OsnameHasSuffix applies the HasSuffix predicate on the "osname" field. +func OsnameHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldOsname, v)) +} + +// OsnameIsNil applies the IsNil predicate on the "osname" field. 
+func OsnameIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldOsname)) +} + +// OsnameNotNil applies the NotNil predicate on the "osname" field. +func OsnameNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldOsname)) +} + +// OsnameEqualFold applies the EqualFold predicate on the "osname" field. +func OsnameEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldOsname, v)) +} + +// OsnameContainsFold applies the ContainsFold predicate on the "osname" field. +func OsnameContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldOsname, v)) +} + +// OsversionEQ applies the EQ predicate on the "osversion" field. +func OsversionEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldOsversion, v)) +} + +// OsversionNEQ applies the NEQ predicate on the "osversion" field. +func OsversionNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldOsversion, v)) +} + +// OsversionIn applies the In predicate on the "osversion" field. +func OsversionIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldOsversion, vs...)) +} + +// OsversionNotIn applies the NotIn predicate on the "osversion" field. +func OsversionNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldOsversion, vs...)) +} + +// OsversionGT applies the GT predicate on the "osversion" field. +func OsversionGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldOsversion, v)) +} + +// OsversionGTE applies the GTE predicate on the "osversion" field. +func OsversionGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldOsversion, v)) +} + +// OsversionLT applies the LT predicate on the "osversion" field. 
+func OsversionLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldOsversion, v)) +} + +// OsversionLTE applies the LTE predicate on the "osversion" field. +func OsversionLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldOsversion, v)) +} + +// OsversionContains applies the Contains predicate on the "osversion" field. +func OsversionContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldOsversion, v)) +} + +// OsversionHasPrefix applies the HasPrefix predicate on the "osversion" field. +func OsversionHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldOsversion, v)) +} + +// OsversionHasSuffix applies the HasSuffix predicate on the "osversion" field. +func OsversionHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldOsversion, v)) +} + +// OsversionIsNil applies the IsNil predicate on the "osversion" field. +func OsversionIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldOsversion)) +} + +// OsversionNotNil applies the NotNil predicate on the "osversion" field. +func OsversionNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldOsversion)) +} + +// OsversionEqualFold applies the EqualFold predicate on the "osversion" field. +func OsversionEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldOsversion, v)) +} + +// OsversionContainsFold applies the ContainsFold predicate on the "osversion" field. +func OsversionContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldOsversion, v)) +} + +// FeatureflagsEQ applies the EQ predicate on the "featureflags" field. +func FeatureflagsEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldFeatureflags, v)) +} + +// FeatureflagsNEQ applies the NEQ predicate on the "featureflags" field. 
+func FeatureflagsNEQ(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldFeatureflags, v)) +} + +// FeatureflagsIn applies the In predicate on the "featureflags" field. +func FeatureflagsIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsNotIn applies the NotIn predicate on the "featureflags" field. +func FeatureflagsNotIn(vs ...string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsGT applies the GT predicate on the "featureflags" field. +func FeatureflagsGT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGT(FieldFeatureflags, v)) +} + +// FeatureflagsGTE applies the GTE predicate on the "featureflags" field. +func FeatureflagsGTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldGTE(FieldFeatureflags, v)) +} + +// FeatureflagsLT applies the LT predicate on the "featureflags" field. +func FeatureflagsLT(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLT(FieldFeatureflags, v)) +} + +// FeatureflagsLTE applies the LTE predicate on the "featureflags" field. +func FeatureflagsLTE(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldLTE(FieldFeatureflags, v)) +} + +// FeatureflagsContains applies the Contains predicate on the "featureflags" field. +func FeatureflagsContains(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContains(FieldFeatureflags, v)) +} + +// FeatureflagsHasPrefix applies the HasPrefix predicate on the "featureflags" field. +func FeatureflagsHasPrefix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasPrefix(FieldFeatureflags, v)) +} + +// FeatureflagsHasSuffix applies the HasSuffix predicate on the "featureflags" field. 
+func FeatureflagsHasSuffix(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldHasSuffix(FieldFeatureflags, v)) +} + +// FeatureflagsIsNil applies the IsNil predicate on the "featureflags" field. +func FeatureflagsIsNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldIsNull(FieldFeatureflags)) +} + +// FeatureflagsNotNil applies the NotNil predicate on the "featureflags" field. +func FeatureflagsNotNil() predicate.Bouncer { + return predicate.Bouncer(sql.FieldNotNull(FieldFeatureflags)) +} + +// FeatureflagsEqualFold applies the EqualFold predicate on the "featureflags" field. +func FeatureflagsEqualFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEqualFold(FieldFeatureflags, v)) +} + +// FeatureflagsContainsFold applies the ContainsFold predicate on the "featureflags" field. +func FeatureflagsContainsFold(v string) predicate.Bouncer { + return predicate.Bouncer(sql.FieldContainsFold(FieldFeatureflags, v)) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Bouncer) predicate.Bouncer { return predicate.Bouncer(sql.AndPredicates(predicates...)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index f2dfc767872..29b23f87cf1 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -136,6 +136,48 @@ func (bc *BouncerCreate) SetNillableAuthType(s *string) *BouncerCreate { return bc } +// SetOsname sets the "osname" field. +func (bc *BouncerCreate) SetOsname(s string) *BouncerCreate { + bc.mutation.SetOsname(s) + return bc +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableOsname(s *string) *BouncerCreate { + if s != nil { + bc.SetOsname(*s) + } + return bc +} + +// SetOsversion sets the "osversion" field. 
+func (bc *BouncerCreate) SetOsversion(s string) *BouncerCreate { + bc.mutation.SetOsversion(s) + return bc +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableOsversion(s *string) *BouncerCreate { + if s != nil { + bc.SetOsversion(*s) + } + return bc +} + +// SetFeatureflags sets the "featureflags" field. +func (bc *BouncerCreate) SetFeatureflags(s string) *BouncerCreate { + bc.mutation.SetFeatureflags(s) + return bc +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableFeatureflags(s *string) *BouncerCreate { + if s != nil { + bc.SetFeatureflags(*s) + } + return bc +} + // Mutation returns the BouncerMutation object of the builder. func (bc *BouncerCreate) Mutation() *BouncerMutation { return bc.mutation @@ -275,6 +317,18 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) _node.AuthType = value } + if value, ok := bc.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + _node.Osname = value + } + if value, ok := bc.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + _node.Osversion = value + } + if value, ok := bc.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + _node.Featureflags = value + } return _node, _spec } diff --git a/pkg/database/ent/bouncer_update.go b/pkg/database/ent/bouncer_update.go index 31dd0bd708e..620b006a49a 100644 --- a/pkg/database/ent/bouncer_update.go +++ b/pkg/database/ent/bouncer_update.go @@ -156,6 +156,66 @@ func (bu *BouncerUpdate) SetNillableAuthType(s *string) *BouncerUpdate { return bu } +// SetOsname sets the "osname" field. 
+func (bu *BouncerUpdate) SetOsname(s string) *BouncerUpdate { + bu.mutation.SetOsname(s) + return bu +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableOsname(s *string) *BouncerUpdate { + if s != nil { + bu.SetOsname(*s) + } + return bu +} + +// ClearOsname clears the value of the "osname" field. +func (bu *BouncerUpdate) ClearOsname() *BouncerUpdate { + bu.mutation.ClearOsname() + return bu +} + +// SetOsversion sets the "osversion" field. +func (bu *BouncerUpdate) SetOsversion(s string) *BouncerUpdate { + bu.mutation.SetOsversion(s) + return bu +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableOsversion(s *string) *BouncerUpdate { + if s != nil { + bu.SetOsversion(*s) + } + return bu +} + +// ClearOsversion clears the value of the "osversion" field. +func (bu *BouncerUpdate) ClearOsversion() *BouncerUpdate { + bu.mutation.ClearOsversion() + return bu +} + +// SetFeatureflags sets the "featureflags" field. +func (bu *BouncerUpdate) SetFeatureflags(s string) *BouncerUpdate { + bu.mutation.SetFeatureflags(s) + return bu +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (bu *BouncerUpdate) SetNillableFeatureflags(s *string) *BouncerUpdate { + if s != nil { + bu.SetFeatureflags(*s) + } + return bu +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (bu *BouncerUpdate) ClearFeatureflags() *BouncerUpdate { + bu.mutation.ClearFeatureflags() + return bu +} + // Mutation returns the BouncerMutation object of the builder. 
func (bu *BouncerUpdate) Mutation() *BouncerMutation { return bu.mutation @@ -242,6 +302,24 @@ func (bu *BouncerUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := bu.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } + if value, ok := bu.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + } + if bu.mutation.OsnameCleared() { + _spec.ClearField(bouncer.FieldOsname, field.TypeString) + } + if value, ok := bu.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + } + if bu.mutation.OsversionCleared() { + _spec.ClearField(bouncer.FieldOsversion, field.TypeString) + } + if value, ok := bu.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + } + if bu.mutation.FeatureflagsCleared() { + _spec.ClearField(bouncer.FieldFeatureflags, field.TypeString) + } if n, err = sqlgraph.UpdateNodes(ctx, bu.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{bouncer.Label} @@ -390,6 +468,66 @@ func (buo *BouncerUpdateOne) SetNillableAuthType(s *string) *BouncerUpdateOne { return buo } +// SetOsname sets the "osname" field. +func (buo *BouncerUpdateOne) SetOsname(s string) *BouncerUpdateOne { + buo.mutation.SetOsname(s) + return buo +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableOsname(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetOsname(*s) + } + return buo +} + +// ClearOsname clears the value of the "osname" field. +func (buo *BouncerUpdateOne) ClearOsname() *BouncerUpdateOne { + buo.mutation.ClearOsname() + return buo +} + +// SetOsversion sets the "osversion" field. +func (buo *BouncerUpdateOne) SetOsversion(s string) *BouncerUpdateOne { + buo.mutation.SetOsversion(s) + return buo +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. 
+func (buo *BouncerUpdateOne) SetNillableOsversion(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetOsversion(*s) + } + return buo +} + +// ClearOsversion clears the value of the "osversion" field. +func (buo *BouncerUpdateOne) ClearOsversion() *BouncerUpdateOne { + buo.mutation.ClearOsversion() + return buo +} + +// SetFeatureflags sets the "featureflags" field. +func (buo *BouncerUpdateOne) SetFeatureflags(s string) *BouncerUpdateOne { + buo.mutation.SetFeatureflags(s) + return buo +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (buo *BouncerUpdateOne) SetNillableFeatureflags(s *string) *BouncerUpdateOne { + if s != nil { + buo.SetFeatureflags(*s) + } + return buo +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (buo *BouncerUpdateOne) ClearFeatureflags() *BouncerUpdateOne { + buo.mutation.ClearFeatureflags() + return buo +} + // Mutation returns the BouncerMutation object of the builder. func (buo *BouncerUpdateOne) Mutation() *BouncerMutation { return buo.mutation @@ -506,6 +644,24 @@ func (buo *BouncerUpdateOne) sqlSave(ctx context.Context) (_node *Bouncer, err e if value, ok := buo.mutation.AuthType(); ok { _spec.SetField(bouncer.FieldAuthType, field.TypeString, value) } + if value, ok := buo.mutation.Osname(); ok { + _spec.SetField(bouncer.FieldOsname, field.TypeString, value) + } + if buo.mutation.OsnameCleared() { + _spec.ClearField(bouncer.FieldOsname, field.TypeString) + } + if value, ok := buo.mutation.Osversion(); ok { + _spec.SetField(bouncer.FieldOsversion, field.TypeString, value) + } + if buo.mutation.OsversionCleared() { + _spec.ClearField(bouncer.FieldOsversion, field.TypeString) + } + if value, ok := buo.mutation.Featureflags(); ok { + _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) + } + if buo.mutation.FeatureflagsCleared() { + _spec.ClearField(bouncer.FieldFeatureflags, field.TypeString) + } _node = &Bouncer{config: buo.config} 
_spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 5318109ed42..59686102ebe 100644 --- a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) // Client is the client that holds all ent builders. @@ -46,6 +47,8 @@ type Client struct { Machine *MachineClient // Meta is the client for interacting with the Meta builders. Meta *MetaClient + // Metric is the client for interacting with the Metric builders. + Metric *MetricClient } // NewClient creates a new client configured with the given options. @@ -65,6 +68,7 @@ func (c *Client) init() { c.Lock = NewLockClient(c.config) c.Machine = NewMachineClient(c.config) c.Meta = NewMetaClient(c.config) + c.Metric = NewMetricClient(c.config) } type ( @@ -165,6 +169,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), + Metric: NewMetricClient(cfg), }, nil } @@ -192,6 +197,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) Lock: NewLockClient(cfg), Machine: NewMachineClient(cfg), Meta: NewMetaClient(cfg), + Metric: NewMetricClient(cfg), }, nil } @@ -222,7 +228,7 @@ func (c *Client) Close() error { func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, - c.Meta, + c.Meta, c.Metric, } { n.Use(hooks...) 
} @@ -233,7 +239,7 @@ func (c *Client) Use(hooks ...Hook) { func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, - c.Meta, + c.Meta, c.Metric, } { n.Intercept(interceptors...) } @@ -258,6 +264,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.Machine.mutate(ctx, m) case *MetaMutation: return c.Meta.mutate(ctx, m) + case *MetricMutation: + return c.Metric.mutate(ctx, m) default: return nil, fmt.Errorf("ent: unknown mutation type %T", m) } @@ -1455,13 +1463,147 @@ func (c *MetaClient) mutate(ctx context.Context, m *MetaMutation) (Value, error) } } +// MetricClient is a client for the Metric schema. +type MetricClient struct { + config +} + +// NewMetricClient returns a client for the Metric from the given config. +func NewMetricClient(c config) *MetricClient { + return &MetricClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `metric.Hooks(f(g(h())))`. +func (c *MetricClient) Use(hooks ...Hook) { + c.hooks.Metric = append(c.hooks.Metric, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `metric.Intercept(f(g(h())))`. +func (c *MetricClient) Intercept(interceptors ...Interceptor) { + c.inters.Metric = append(c.inters.Metric, interceptors...) +} + +// Create returns a builder for creating a Metric entity. +func (c *MetricClient) Create() *MetricCreate { + mutation := newMetricMutation(c.config, OpCreate) + return &MetricCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Metric entities. 
+func (c *MetricClient) CreateBulk(builders ...*MetricCreate) *MetricCreateBulk { + return &MetricCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *MetricClient) MapCreateBulk(slice any, setFunc func(*MetricCreate, int)) *MetricCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &MetricCreateBulk{err: fmt.Errorf("calling to MetricClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*MetricCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &MetricCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Metric. +func (c *MetricClient) Update() *MetricUpdate { + mutation := newMetricMutation(c.config, OpUpdate) + return &MetricUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *MetricClient) UpdateOne(m *Metric) *MetricUpdateOne { + mutation := newMetricMutation(c.config, OpUpdateOne, withMetric(m)) + return &MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *MetricClient) UpdateOneID(id int) *MetricUpdateOne { + mutation := newMetricMutation(c.config, OpUpdateOne, withMetricID(id)) + return &MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Metric. +func (c *MetricClient) Delete() *MetricDelete { + mutation := newMetricMutation(c.config, OpDelete) + return &MetricDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *MetricClient) DeleteOne(m *Metric) *MetricDeleteOne { + return c.DeleteOneID(m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *MetricClient) DeleteOneID(id int) *MetricDeleteOne { + builder := c.Delete().Where(metric.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &MetricDeleteOne{builder} +} + +// Query returns a query builder for Metric. +func (c *MetricClient) Query() *MetricQuery { + return &MetricQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeMetric}, + inters: c.Interceptors(), + } +} + +// Get returns a Metric entity by its id. +func (c *MetricClient) Get(ctx context.Context, id int) (*Metric, error) { + return c.Query().Where(metric.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *MetricClient) GetX(ctx context.Context, id int) *Metric { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *MetricClient) Hooks() []Hook { + return c.hooks.Metric +} + +// Interceptors returns the client interceptors. +func (c *MetricClient) Interceptors() []Interceptor { + return c.inters.Metric +} + +func (c *MetricClient) mutate(ctx context.Context, m *MetricMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&MetricCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&MetricUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&MetricUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&MetricDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Metric mutation op: %q", m.Op()) + } +} + // hooks and interceptors per client, for fast access. 
type ( hooks struct { - Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta []ent.Hook + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta, + Metric []ent.Hook } inters struct { - Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, - Meta []ent.Interceptor + Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta, + Metric []ent.Interceptor } ) diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index cb98ee9301c..2a5ad188197 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) // ent aliases to avoid import conflicts in user's code. @@ -88,6 +89,7 @@ func checkColumn(table, column string) error { lock.Table: lock.ValidColumn, machine.Table: machine.ValidColumn, meta.Table: meta.ValidColumn, + metric.Table: metric.ValidColumn, }) }) return columnCheck(table, column) diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go index fdc31539679..62cc07820d0 100644 --- a/pkg/database/ent/hook/hook.go +++ b/pkg/database/ent/hook/hook.go @@ -105,6 +105,18 @@ func (f MetaFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetaMutation", m) } +// The MetricFunc type is an adapter to allow the use of ordinary +// function as Metric mutator. +type MetricFunc func(context.Context, *ent.MetricMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f MetricFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.MetricMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MetricMutation", m) +} + // Condition is a hook condition function. 
type Condition func(context.Context, ent.Mutation) bool diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 475eab72ecd..fddb2e6a8b3 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -3,6 +3,7 @@ package ent import ( + "encoding/json" "fmt" "strings" "time" @@ -10,6 +11,7 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) // Machine is the model entity for the Machine schema. @@ -41,6 +43,16 @@ type Machine struct { Status string `json:"status,omitempty"` // AuthType holds the value of the "auth_type" field. AuthType string `json:"auth_type"` + // Osname holds the value of the "osname" field. + Osname string `json:"osname,omitempty"` + // Osversion holds the value of the "osversion" field. + Osversion string `json:"osversion,omitempty"` + // Featureflags holds the value of the "featureflags" field. + Featureflags string `json:"featureflags,omitempty"` + // Hubstate holds the value of the "hubstate" field. + Hubstate map[string]schema.ItemState `json:"hubstate,omitempty"` + // Datasources holds the value of the "datasources" field. + Datasources map[string]int64 `json:"datasources,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the MachineQuery when eager-loading is set. 
Edges MachineEdges `json:"edges"` @@ -70,11 +82,13 @@ func (*Machine) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case machine.FieldHubstate, machine.FieldDatasources: + values[i] = new([]byte) case machine.FieldIsValidated: values[i] = new(sql.NullBool) case machine.FieldID: values[i] = new(sql.NullInt64) - case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType: + case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: values[i] = new(sql.NullString) case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: values[i] = new(sql.NullTime) @@ -173,6 +187,40 @@ func (m *Machine) assignValues(columns []string, values []any) error { } else if value.Valid { m.AuthType = value.String } + case machine.FieldOsname: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osname", values[i]) + } else if value.Valid { + m.Osname = value.String + } + case machine.FieldOsversion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field osversion", values[i]) + } else if value.Valid { + m.Osversion = value.String + } + case machine.FieldFeatureflags: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field featureflags", values[i]) + } else if value.Valid { + m.Featureflags = value.String + } + case machine.FieldHubstate: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field hubstate", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, 
&m.Hubstate); err != nil { + return fmt.Errorf("unmarshal field hubstate: %w", err) + } + } + case machine.FieldDatasources: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field datasources", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &m.Datasources); err != nil { + return fmt.Errorf("unmarshal field datasources: %w", err) + } + } default: m.selectValues.Set(columns[i], values[i]) } @@ -252,6 +300,21 @@ func (m *Machine) String() string { builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(m.AuthType) + builder.WriteString(", ") + builder.WriteString("osname=") + builder.WriteString(m.Osname) + builder.WriteString(", ") + builder.WriteString("osversion=") + builder.WriteString(m.Osversion) + builder.WriteString(", ") + builder.WriteString("featureflags=") + builder.WriteString(m.Featureflags) + builder.WriteString(", ") + builder.WriteString("hubstate=") + builder.WriteString(fmt.Sprintf("%v", m.Hubstate)) + builder.WriteString(", ") + builder.WriteString("datasources=") + builder.WriteString(fmt.Sprintf("%v", m.Datasources)) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index d7dece9f8ef..179059edd4d 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -38,6 +38,16 @@ const ( FieldStatus = "status" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" + // FieldOsname holds the string denoting the osname field in the database. + FieldOsname = "osname" + // FieldOsversion holds the string denoting the osversion field in the database. + FieldOsversion = "osversion" + // FieldFeatureflags holds the string denoting the featureflags field in the database. + FieldFeatureflags = "featureflags" + // FieldHubstate holds the string denoting the hubstate field in the database. 
+ FieldHubstate = "hubstate" + // FieldDatasources holds the string denoting the datasources field in the database. + FieldDatasources = "datasources" // EdgeAlerts holds the string denoting the alerts edge name in mutations. EdgeAlerts = "alerts" // Table holds the table name of the machine in the database. @@ -66,6 +76,11 @@ var Columns = []string{ FieldIsValidated, FieldStatus, FieldAuthType, + FieldOsname, + FieldOsversion, + FieldFeatureflags, + FieldHubstate, + FieldDatasources, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -163,6 +178,21 @@ func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() } +// ByOsname orders the results by the osname field. +func ByOsname(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsname, opts...).ToFunc() +} + +// ByOsversion orders the results by the osversion field. +func ByOsversion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOsversion, opts...).ToFunc() +} + +// ByFeatureflags orders the results by the featureflags field. +func ByFeatureflags(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFeatureflags, opts...).ToFunc() +} + // ByAlertsCount orders the results by alerts count. func ByAlertsCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index 24c9ab154c8..aca66135f5c 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -115,6 +115,21 @@ func AuthType(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) } +// Osname applies equality check predicate on the "osname" field. It's identical to OsnameEQ. 
+func Osname(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsname, v)) +} + +// Osversion applies equality check predicate on the "osversion" field. It's identical to OsversionEQ. +func Osversion(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsversion, v)) +} + +// Featureflags applies equality check predicate on the "featureflags" field. It's identical to FeatureflagsEQ. +func Featureflags(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldFeatureflags, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldCreatedAt, v)) @@ -790,6 +805,251 @@ func AuthTypeContainsFold(v string) predicate.Machine { return predicate.Machine(sql.FieldContainsFold(FieldAuthType, v)) } +// OsnameEQ applies the EQ predicate on the "osname" field. +func OsnameEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsname, v)) +} + +// OsnameNEQ applies the NEQ predicate on the "osname" field. +func OsnameNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldOsname, v)) +} + +// OsnameIn applies the In predicate on the "osname" field. +func OsnameIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldOsname, vs...)) +} + +// OsnameNotIn applies the NotIn predicate on the "osname" field. +func OsnameNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldOsname, vs...)) +} + +// OsnameGT applies the GT predicate on the "osname" field. +func OsnameGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldOsname, v)) +} + +// OsnameGTE applies the GTE predicate on the "osname" field. +func OsnameGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldOsname, v)) +} + +// OsnameLT applies the LT predicate on the "osname" field. 
+func OsnameLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldOsname, v)) +} + +// OsnameLTE applies the LTE predicate on the "osname" field. +func OsnameLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldOsname, v)) +} + +// OsnameContains applies the Contains predicate on the "osname" field. +func OsnameContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldOsname, v)) +} + +// OsnameHasPrefix applies the HasPrefix predicate on the "osname" field. +func OsnameHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldOsname, v)) +} + +// OsnameHasSuffix applies the HasSuffix predicate on the "osname" field. +func OsnameHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldOsname, v)) +} + +// OsnameIsNil applies the IsNil predicate on the "osname" field. +func OsnameIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldOsname)) +} + +// OsnameNotNil applies the NotNil predicate on the "osname" field. +func OsnameNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldOsname)) +} + +// OsnameEqualFold applies the EqualFold predicate on the "osname" field. +func OsnameEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldOsname, v)) +} + +// OsnameContainsFold applies the ContainsFold predicate on the "osname" field. +func OsnameContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldOsname, v)) +} + +// OsversionEQ applies the EQ predicate on the "osversion" field. +func OsversionEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldOsversion, v)) +} + +// OsversionNEQ applies the NEQ predicate on the "osversion" field. 
+func OsversionNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldOsversion, v)) +} + +// OsversionIn applies the In predicate on the "osversion" field. +func OsversionIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldOsversion, vs...)) +} + +// OsversionNotIn applies the NotIn predicate on the "osversion" field. +func OsversionNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldOsversion, vs...)) +} + +// OsversionGT applies the GT predicate on the "osversion" field. +func OsversionGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldOsversion, v)) +} + +// OsversionGTE applies the GTE predicate on the "osversion" field. +func OsversionGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldOsversion, v)) +} + +// OsversionLT applies the LT predicate on the "osversion" field. +func OsversionLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldOsversion, v)) +} + +// OsversionLTE applies the LTE predicate on the "osversion" field. +func OsversionLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldOsversion, v)) +} + +// OsversionContains applies the Contains predicate on the "osversion" field. +func OsversionContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldOsversion, v)) +} + +// OsversionHasPrefix applies the HasPrefix predicate on the "osversion" field. +func OsversionHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldOsversion, v)) +} + +// OsversionHasSuffix applies the HasSuffix predicate on the "osversion" field. +func OsversionHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldOsversion, v)) +} + +// OsversionIsNil applies the IsNil predicate on the "osversion" field. 
+func OsversionIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldOsversion)) +} + +// OsversionNotNil applies the NotNil predicate on the "osversion" field. +func OsversionNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldOsversion)) +} + +// OsversionEqualFold applies the EqualFold predicate on the "osversion" field. +func OsversionEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldOsversion, v)) +} + +// OsversionContainsFold applies the ContainsFold predicate on the "osversion" field. +func OsversionContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldOsversion, v)) +} + +// FeatureflagsEQ applies the EQ predicate on the "featureflags" field. +func FeatureflagsEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldEQ(FieldFeatureflags, v)) +} + +// FeatureflagsNEQ applies the NEQ predicate on the "featureflags" field. +func FeatureflagsNEQ(v string) predicate.Machine { + return predicate.Machine(sql.FieldNEQ(FieldFeatureflags, v)) +} + +// FeatureflagsIn applies the In predicate on the "featureflags" field. +func FeatureflagsIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsNotIn applies the NotIn predicate on the "featureflags" field. +func FeatureflagsNotIn(vs ...string) predicate.Machine { + return predicate.Machine(sql.FieldNotIn(FieldFeatureflags, vs...)) +} + +// FeatureflagsGT applies the GT predicate on the "featureflags" field. +func FeatureflagsGT(v string) predicate.Machine { + return predicate.Machine(sql.FieldGT(FieldFeatureflags, v)) +} + +// FeatureflagsGTE applies the GTE predicate on the "featureflags" field. +func FeatureflagsGTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldGTE(FieldFeatureflags, v)) +} + +// FeatureflagsLT applies the LT predicate on the "featureflags" field. 
+func FeatureflagsLT(v string) predicate.Machine { + return predicate.Machine(sql.FieldLT(FieldFeatureflags, v)) +} + +// FeatureflagsLTE applies the LTE predicate on the "featureflags" field. +func FeatureflagsLTE(v string) predicate.Machine { + return predicate.Machine(sql.FieldLTE(FieldFeatureflags, v)) +} + +// FeatureflagsContains applies the Contains predicate on the "featureflags" field. +func FeatureflagsContains(v string) predicate.Machine { + return predicate.Machine(sql.FieldContains(FieldFeatureflags, v)) +} + +// FeatureflagsHasPrefix applies the HasPrefix predicate on the "featureflags" field. +func FeatureflagsHasPrefix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasPrefix(FieldFeatureflags, v)) +} + +// FeatureflagsHasSuffix applies the HasSuffix predicate on the "featureflags" field. +func FeatureflagsHasSuffix(v string) predicate.Machine { + return predicate.Machine(sql.FieldHasSuffix(FieldFeatureflags, v)) +} + +// FeatureflagsIsNil applies the IsNil predicate on the "featureflags" field. +func FeatureflagsIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldFeatureflags)) +} + +// FeatureflagsNotNil applies the NotNil predicate on the "featureflags" field. +func FeatureflagsNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldFeatureflags)) +} + +// FeatureflagsEqualFold applies the EqualFold predicate on the "featureflags" field. +func FeatureflagsEqualFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldEqualFold(FieldFeatureflags, v)) +} + +// FeatureflagsContainsFold applies the ContainsFold predicate on the "featureflags" field. +func FeatureflagsContainsFold(v string) predicate.Machine { + return predicate.Machine(sql.FieldContainsFold(FieldFeatureflags, v)) +} + +// HubstateIsNil applies the IsNil predicate on the "hubstate" field. 
+func HubstateIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldHubstate)) +} + +// HubstateNotNil applies the NotNil predicate on the "hubstate" field. +func HubstateNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldHubstate)) +} + +// DatasourcesIsNil applies the IsNil predicate on the "datasources" field. +func DatasourcesIsNil() predicate.Machine { + return predicate.Machine(sql.FieldIsNull(FieldDatasources)) +} + +// DatasourcesNotNil applies the NotNil predicate on the "datasources" field. +func DatasourcesNotNil() predicate.Machine { + return predicate.Machine(sql.FieldNotNull(FieldDatasources)) +} + // HasAlerts applies the HasEdge predicate on the "alerts" edge. func HasAlerts() predicate.Machine { return predicate.Machine(func(s *sql.Selector) { diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index 2e4cf9f1500..4ae0e5a9d1f 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -12,6 +12,7 @@ import ( "entgo.io/ent/schema/field" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) // MachineCreate is the builder for creating a Machine entity. @@ -165,6 +166,60 @@ func (mc *MachineCreate) SetNillableAuthType(s *string) *MachineCreate { return mc } +// SetOsname sets the "osname" field. +func (mc *MachineCreate) SetOsname(s string) *MachineCreate { + mc.mutation.SetOsname(s) + return mc +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (mc *MachineCreate) SetNillableOsname(s *string) *MachineCreate { + if s != nil { + mc.SetOsname(*s) + } + return mc +} + +// SetOsversion sets the "osversion" field. 
+func (mc *MachineCreate) SetOsversion(s string) *MachineCreate { + mc.mutation.SetOsversion(s) + return mc +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (mc *MachineCreate) SetNillableOsversion(s *string) *MachineCreate { + if s != nil { + mc.SetOsversion(*s) + } + return mc +} + +// SetFeatureflags sets the "featureflags" field. +func (mc *MachineCreate) SetFeatureflags(s string) *MachineCreate { + mc.mutation.SetFeatureflags(s) + return mc +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (mc *MachineCreate) SetNillableFeatureflags(s *string) *MachineCreate { + if s != nil { + mc.SetFeatureflags(*s) + } + return mc +} + +// SetHubstate sets the "hubstate" field. +func (mc *MachineCreate) SetHubstate(ms map[string]schema.ItemState) *MachineCreate { + mc.mutation.SetHubstate(ms) + return mc +} + +// SetDatasources sets the "datasources" field. +func (mc *MachineCreate) SetDatasources(m map[string]int64) *MachineCreate { + mc.mutation.SetDatasources(m) + return mc +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (mc *MachineCreate) AddAlertIDs(ids ...int) *MachineCreate { mc.mutation.AddAlertIDs(ids...) 
@@ -339,6 +394,26 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { _spec.SetField(machine.FieldAuthType, field.TypeString, value) _node.AuthType = value } + if value, ok := mc.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + _node.Osname = value + } + if value, ok := mc.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, field.TypeString, value) + _node.Osversion = value + } + if value, ok := mc.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + _node.Featureflags = value + } + if value, ok := mc.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + _node.Hubstate = value + } + if value, ok := mc.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + _node.Datasources = value + } if nodes := mc.mutation.AlertsIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index 5fbd15220f9..aa0f02542c1 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -14,6 +14,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) // MachineUpdate is the builder for updating Machine entities. @@ -191,6 +192,90 @@ func (mu *MachineUpdate) SetNillableAuthType(s *string) *MachineUpdate { return mu } +// SetOsname sets the "osname" field. +func (mu *MachineUpdate) SetOsname(s string) *MachineUpdate { + mu.mutation.SetOsname(s) + return mu +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. 
+func (mu *MachineUpdate) SetNillableOsname(s *string) *MachineUpdate { + if s != nil { + mu.SetOsname(*s) + } + return mu +} + +// ClearOsname clears the value of the "osname" field. +func (mu *MachineUpdate) ClearOsname() *MachineUpdate { + mu.mutation.ClearOsname() + return mu +} + +// SetOsversion sets the "osversion" field. +func (mu *MachineUpdate) SetOsversion(s string) *MachineUpdate { + mu.mutation.SetOsversion(s) + return mu +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableOsversion(s *string) *MachineUpdate { + if s != nil { + mu.SetOsversion(*s) + } + return mu +} + +// ClearOsversion clears the value of the "osversion" field. +func (mu *MachineUpdate) ClearOsversion() *MachineUpdate { + mu.mutation.ClearOsversion() + return mu +} + +// SetFeatureflags sets the "featureflags" field. +func (mu *MachineUpdate) SetFeatureflags(s string) *MachineUpdate { + mu.mutation.SetFeatureflags(s) + return mu +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (mu *MachineUpdate) SetNillableFeatureflags(s *string) *MachineUpdate { + if s != nil { + mu.SetFeatureflags(*s) + } + return mu +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (mu *MachineUpdate) ClearFeatureflags() *MachineUpdate { + mu.mutation.ClearFeatureflags() + return mu +} + +// SetHubstate sets the "hubstate" field. +func (mu *MachineUpdate) SetHubstate(ms map[string]schema.ItemState) *MachineUpdate { + mu.mutation.SetHubstate(ms) + return mu +} + +// ClearHubstate clears the value of the "hubstate" field. +func (mu *MachineUpdate) ClearHubstate() *MachineUpdate { + mu.mutation.ClearHubstate() + return mu +} + +// SetDatasources sets the "datasources" field. +func (mu *MachineUpdate) SetDatasources(m map[string]int64) *MachineUpdate { + mu.mutation.SetDatasources(m) + return mu +} + +// ClearDatasources clears the value of the "datasources" field. 
+func (mu *MachineUpdate) ClearDatasources() *MachineUpdate { + mu.mutation.ClearDatasources() + return mu +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (mu *MachineUpdate) AddAlertIDs(ids ...int) *MachineUpdate { mu.mutation.AddAlertIDs(ids...) @@ -335,6 +420,36 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } + if value, ok := mu.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + } + if mu.mutation.OsnameCleared() { + _spec.ClearField(machine.FieldOsname, field.TypeString) + } + if value, ok := mu.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, field.TypeString, value) + } + if mu.mutation.OsversionCleared() { + _spec.ClearField(machine.FieldOsversion, field.TypeString) + } + if value, ok := mu.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + } + if mu.mutation.FeatureflagsCleared() { + _spec.ClearField(machine.FieldFeatureflags, field.TypeString) + } + if value, ok := mu.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + } + if mu.mutation.HubstateCleared() { + _spec.ClearField(machine.FieldHubstate, field.TypeJSON) + } + if value, ok := mu.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + } + if mu.mutation.DatasourcesCleared() { + _spec.ClearField(machine.FieldDatasources, field.TypeJSON) + } if mu.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -562,6 +677,90 @@ func (muo *MachineUpdateOne) SetNillableAuthType(s *string) *MachineUpdateOne { return muo } +// SetOsname sets the "osname" field. 
+func (muo *MachineUpdateOne) SetOsname(s string) *MachineUpdateOne { + muo.mutation.SetOsname(s) + return muo +} + +// SetNillableOsname sets the "osname" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableOsname(s *string) *MachineUpdateOne { + if s != nil { + muo.SetOsname(*s) + } + return muo +} + +// ClearOsname clears the value of the "osname" field. +func (muo *MachineUpdateOne) ClearOsname() *MachineUpdateOne { + muo.mutation.ClearOsname() + return muo +} + +// SetOsversion sets the "osversion" field. +func (muo *MachineUpdateOne) SetOsversion(s string) *MachineUpdateOne { + muo.mutation.SetOsversion(s) + return muo +} + +// SetNillableOsversion sets the "osversion" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableOsversion(s *string) *MachineUpdateOne { + if s != nil { + muo.SetOsversion(*s) + } + return muo +} + +// ClearOsversion clears the value of the "osversion" field. +func (muo *MachineUpdateOne) ClearOsversion() *MachineUpdateOne { + muo.mutation.ClearOsversion() + return muo +} + +// SetFeatureflags sets the "featureflags" field. +func (muo *MachineUpdateOne) SetFeatureflags(s string) *MachineUpdateOne { + muo.mutation.SetFeatureflags(s) + return muo +} + +// SetNillableFeatureflags sets the "featureflags" field if the given value is not nil. +func (muo *MachineUpdateOne) SetNillableFeatureflags(s *string) *MachineUpdateOne { + if s != nil { + muo.SetFeatureflags(*s) + } + return muo +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (muo *MachineUpdateOne) ClearFeatureflags() *MachineUpdateOne { + muo.mutation.ClearFeatureflags() + return muo +} + +// SetHubstate sets the "hubstate" field. +func (muo *MachineUpdateOne) SetHubstate(ms map[string]schema.ItemState) *MachineUpdateOne { + muo.mutation.SetHubstate(ms) + return muo +} + +// ClearHubstate clears the value of the "hubstate" field. 
+func (muo *MachineUpdateOne) ClearHubstate() *MachineUpdateOne { + muo.mutation.ClearHubstate() + return muo +} + +// SetDatasources sets the "datasources" field. +func (muo *MachineUpdateOne) SetDatasources(m map[string]int64) *MachineUpdateOne { + muo.mutation.SetDatasources(m) + return muo +} + +// ClearDatasources clears the value of the "datasources" field. +func (muo *MachineUpdateOne) ClearDatasources() *MachineUpdateOne { + muo.mutation.ClearDatasources() + return muo +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by IDs. func (muo *MachineUpdateOne) AddAlertIDs(ids ...int) *MachineUpdateOne { muo.mutation.AddAlertIDs(ids...) @@ -736,6 +935,36 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if value, ok := muo.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } + if value, ok := muo.mutation.Osname(); ok { + _spec.SetField(machine.FieldOsname, field.TypeString, value) + } + if muo.mutation.OsnameCleared() { + _spec.ClearField(machine.FieldOsname, field.TypeString) + } + if value, ok := muo.mutation.Osversion(); ok { + _spec.SetField(machine.FieldOsversion, field.TypeString, value) + } + if muo.mutation.OsversionCleared() { + _spec.ClearField(machine.FieldOsversion, field.TypeString) + } + if value, ok := muo.mutation.Featureflags(); ok { + _spec.SetField(machine.FieldFeatureflags, field.TypeString, value) + } + if muo.mutation.FeatureflagsCleared() { + _spec.ClearField(machine.FieldFeatureflags, field.TypeString) + } + if value, ok := muo.mutation.Hubstate(); ok { + _spec.SetField(machine.FieldHubstate, field.TypeJSON, value) + } + if muo.mutation.HubstateCleared() { + _spec.ClearField(machine.FieldHubstate, field.TypeJSON) + } + if value, ok := muo.mutation.Datasources(); ok { + _spec.SetField(machine.FieldDatasources, field.TypeJSON, value) + } + if muo.mutation.DatasourcesCleared() { + _spec.ClearField(machine.FieldDatasources, field.TypeJSON) + } if 
muo.mutation.AlertsCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/pkg/database/ent/metric.go b/pkg/database/ent/metric.go new file mode 100644 index 00000000000..236d54da25d --- /dev/null +++ b/pkg/database/ent/metric.go @@ -0,0 +1,154 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +// Metric is the model entity for the Metric schema. +type Metric struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Type of the metrics source: LP=logprocessor, RC=remediation + GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` + // Source of the metrics: machine id, bouncer name... + // It must come from the auth middleware. + GeneratedBy string `json:"generated_by,omitempty"` + // When the metrics are collected/calculated at the source + CollectedAt time.Time `json:"collected_at,omitempty"` + // When the metrics are sent to the console + PushedAt *time.Time `json:"pushed_at,omitempty"` + // The actual metrics (item0) + Payload string `json:"payload,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Metric) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case metric.FieldID: + values[i] = new(sql.NullInt64) + case metric.FieldGeneratedType, metric.FieldGeneratedBy, metric.FieldPayload: + values[i] = new(sql.NullString) + case metric.FieldCollectedAt, metric.FieldPushedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Metric fields. 
+func (m *Metric) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case metric.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + m.ID = int(value.Int64) + case metric.FieldGeneratedType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field generated_type", values[i]) + } else if value.Valid { + m.GeneratedType = metric.GeneratedType(value.String) + } + case metric.FieldGeneratedBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field generated_by", values[i]) + } else if value.Valid { + m.GeneratedBy = value.String + } + case metric.FieldCollectedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field collected_at", values[i]) + } else if value.Valid { + m.CollectedAt = value.Time + } + case metric.FieldPushedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field pushed_at", values[i]) + } else if value.Valid { + m.PushedAt = new(time.Time) + *m.PushedAt = value.Time + } + case metric.FieldPayload: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field payload", values[i]) + } else if value.Valid { + m.Payload = value.String + } + default: + m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Metric. +// This includes values selected through modifiers, order, etc. +func (m *Metric) Value(name string) (ent.Value, error) { + return m.selectValues.Get(name) +} + +// Update returns a builder for updating this Metric. 
+// Note that you need to call Metric.Unwrap() before calling this method if this Metric +// was returned from a transaction, and the transaction was committed or rolled back. +func (m *Metric) Update() *MetricUpdateOne { + return NewMetricClient(m.config).UpdateOne(m) +} + +// Unwrap unwraps the Metric entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (m *Metric) Unwrap() *Metric { + _tx, ok := m.config.driver.(*txDriver) + if !ok { + panic("ent: Metric is not a transactional entity") + } + m.config.driver = _tx.drv + return m +} + +// String implements the fmt.Stringer. +func (m *Metric) String() string { + var builder strings.Builder + builder.WriteString("Metric(") + builder.WriteString(fmt.Sprintf("id=%v, ", m.ID)) + builder.WriteString("generated_type=") + builder.WriteString(fmt.Sprintf("%v", m.GeneratedType)) + builder.WriteString(", ") + builder.WriteString("generated_by=") + builder.WriteString(m.GeneratedBy) + builder.WriteString(", ") + builder.WriteString("collected_at=") + builder.WriteString(m.CollectedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := m.PushedAt; v != nil { + builder.WriteString("pushed_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("payload=") + builder.WriteString(m.Payload) + builder.WriteByte(')') + return builder.String() +} + +// Metrics is a parsable slice of Metric. +type Metrics []*Metric diff --git a/pkg/database/ent/metric/metric.go b/pkg/database/ent/metric/metric.go new file mode 100644 index 00000000000..879f1006d64 --- /dev/null +++ b/pkg/database/ent/metric/metric.go @@ -0,0 +1,104 @@ +// Code generated by ent, DO NOT EDIT. + +package metric + +import ( + "fmt" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the metric type in the database. 
+ Label = "metric" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldGeneratedType holds the string denoting the generated_type field in the database. + FieldGeneratedType = "generated_type" + // FieldGeneratedBy holds the string denoting the generated_by field in the database. + FieldGeneratedBy = "generated_by" + // FieldCollectedAt holds the string denoting the collected_at field in the database. + FieldCollectedAt = "collected_at" + // FieldPushedAt holds the string denoting the pushed_at field in the database. + FieldPushedAt = "pushed_at" + // FieldPayload holds the string denoting the payload field in the database. + FieldPayload = "payload" + // Table holds the table name of the metric in the database. + Table = "metrics" +) + +// Columns holds all SQL columns for metric fields. +var Columns = []string{ + FieldID, + FieldGeneratedType, + FieldGeneratedBy, + FieldCollectedAt, + FieldPushedAt, + FieldPayload, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// GeneratedType defines the type for the "generated_type" enum field. +type GeneratedType string + +// GeneratedType values. +const ( + GeneratedTypeLP GeneratedType = "LP" + GeneratedTypeRC GeneratedType = "RC" +) + +func (gt GeneratedType) String() string { + return string(gt) +} + +// GeneratedTypeValidator is a validator for the "generated_type" field enum values. It is called by the builders before save. +func GeneratedTypeValidator(gt GeneratedType) error { + switch gt { + case GeneratedTypeLP, GeneratedTypeRC: + return nil + default: + return fmt.Errorf("metric: invalid enum value for generated_type field: %q", gt) + } +} + +// OrderOption defines the ordering options for the Metric queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. 
+func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByGeneratedType orders the results by the generated_type field. +func ByGeneratedType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGeneratedType, opts...).ToFunc() +} + +// ByGeneratedBy orders the results by the generated_by field. +func ByGeneratedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGeneratedBy, opts...).ToFunc() +} + +// ByCollectedAt orders the results by the collected_at field. +func ByCollectedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCollectedAt, opts...).ToFunc() +} + +// ByPushedAt orders the results by the pushed_at field. +func ByPushedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPushedAt, opts...).ToFunc() +} + +// ByPayload orders the results by the payload field. +func ByPayload(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPayload, opts...).ToFunc() +} diff --git a/pkg/database/ent/metric/where.go b/pkg/database/ent/metric/where.go new file mode 100644 index 00000000000..e49f80f3411 --- /dev/null +++ b/pkg/database/ent/metric/where.go @@ -0,0 +1,330 @@ +// Code generated by ent, DO NOT EDIT. + +package metric + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...int) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldID, id)) +} + +// GeneratedBy applies equality check predicate on the "generated_by" field. It's identical to GeneratedByEQ. +func GeneratedBy(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) +} + +// CollectedAt applies equality check predicate on the "collected_at" field. It's identical to CollectedAtEQ. +func CollectedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +} + +// PushedAt applies equality check predicate on the "pushed_at" field. It's identical to PushedAtEQ. +func PushedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPushedAt, v)) +} + +// Payload applies equality check predicate on the "payload" field. It's identical to PayloadEQ. +func Payload(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPayload, v)) +} + +// GeneratedTypeEQ applies the EQ predicate on the "generated_type" field. 
+func GeneratedTypeEQ(v GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedType, v)) +} + +// GeneratedTypeNEQ applies the NEQ predicate on the "generated_type" field. +func GeneratedTypeNEQ(v GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldGeneratedType, v)) +} + +// GeneratedTypeIn applies the In predicate on the "generated_type" field. +func GeneratedTypeIn(vs ...GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldGeneratedType, vs...)) +} + +// GeneratedTypeNotIn applies the NotIn predicate on the "generated_type" field. +func GeneratedTypeNotIn(vs ...GeneratedType) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldGeneratedType, vs...)) +} + +// GeneratedByEQ applies the EQ predicate on the "generated_by" field. +func GeneratedByEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) +} + +// GeneratedByNEQ applies the NEQ predicate on the "generated_by" field. +func GeneratedByNEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldGeneratedBy, v)) +} + +// GeneratedByIn applies the In predicate on the "generated_by" field. +func GeneratedByIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldGeneratedBy, vs...)) +} + +// GeneratedByNotIn applies the NotIn predicate on the "generated_by" field. +func GeneratedByNotIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldGeneratedBy, vs...)) +} + +// GeneratedByGT applies the GT predicate on the "generated_by" field. +func GeneratedByGT(v string) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldGeneratedBy, v)) +} + +// GeneratedByGTE applies the GTE predicate on the "generated_by" field. +func GeneratedByGTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldGeneratedBy, v)) +} + +// GeneratedByLT applies the LT predicate on the "generated_by" field. 
+func GeneratedByLT(v string) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldGeneratedBy, v)) +} + +// GeneratedByLTE applies the LTE predicate on the "generated_by" field. +func GeneratedByLTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldGeneratedBy, v)) +} + +// GeneratedByContains applies the Contains predicate on the "generated_by" field. +func GeneratedByContains(v string) predicate.Metric { + return predicate.Metric(sql.FieldContains(FieldGeneratedBy, v)) +} + +// GeneratedByHasPrefix applies the HasPrefix predicate on the "generated_by" field. +func GeneratedByHasPrefix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasPrefix(FieldGeneratedBy, v)) +} + +// GeneratedByHasSuffix applies the HasSuffix predicate on the "generated_by" field. +func GeneratedByHasSuffix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasSuffix(FieldGeneratedBy, v)) +} + +// GeneratedByEqualFold applies the EqualFold predicate on the "generated_by" field. +func GeneratedByEqualFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldEqualFold(FieldGeneratedBy, v)) +} + +// GeneratedByContainsFold applies the ContainsFold predicate on the "generated_by" field. +func GeneratedByContainsFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldContainsFold(FieldGeneratedBy, v)) +} + +// CollectedAtEQ applies the EQ predicate on the "collected_at" field. +func CollectedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +} + +// CollectedAtNEQ applies the NEQ predicate on the "collected_at" field. +func CollectedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldCollectedAt, v)) +} + +// CollectedAtIn applies the In predicate on the "collected_at" field. 
+func CollectedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldCollectedAt, vs...)) +} + +// CollectedAtNotIn applies the NotIn predicate on the "collected_at" field. +func CollectedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldCollectedAt, vs...)) +} + +// CollectedAtGT applies the GT predicate on the "collected_at" field. +func CollectedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldCollectedAt, v)) +} + +// CollectedAtGTE applies the GTE predicate on the "collected_at" field. +func CollectedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldCollectedAt, v)) +} + +// CollectedAtLT applies the LT predicate on the "collected_at" field. +func CollectedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldCollectedAt, v)) +} + +// CollectedAtLTE applies the LTE predicate on the "collected_at" field. +func CollectedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldCollectedAt, v)) +} + +// PushedAtEQ applies the EQ predicate on the "pushed_at" field. +func PushedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPushedAt, v)) +} + +// PushedAtNEQ applies the NEQ predicate on the "pushed_at" field. +func PushedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldPushedAt, v)) +} + +// PushedAtIn applies the In predicate on the "pushed_at" field. +func PushedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldPushedAt, vs...)) +} + +// PushedAtNotIn applies the NotIn predicate on the "pushed_at" field. +func PushedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldPushedAt, vs...)) +} + +// PushedAtGT applies the GT predicate on the "pushed_at" field. 
+func PushedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldPushedAt, v)) +} + +// PushedAtGTE applies the GTE predicate on the "pushed_at" field. +func PushedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldPushedAt, v)) +} + +// PushedAtLT applies the LT predicate on the "pushed_at" field. +func PushedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldPushedAt, v)) +} + +// PushedAtLTE applies the LTE predicate on the "pushed_at" field. +func PushedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldPushedAt, v)) +} + +// PushedAtIsNil applies the IsNil predicate on the "pushed_at" field. +func PushedAtIsNil() predicate.Metric { + return predicate.Metric(sql.FieldIsNull(FieldPushedAt)) +} + +// PushedAtNotNil applies the NotNil predicate on the "pushed_at" field. +func PushedAtNotNil() predicate.Metric { + return predicate.Metric(sql.FieldNotNull(FieldPushedAt)) +} + +// PayloadEQ applies the EQ predicate on the "payload" field. +func PayloadEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldPayload, v)) +} + +// PayloadNEQ applies the NEQ predicate on the "payload" field. +func PayloadNEQ(v string) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldPayload, v)) +} + +// PayloadIn applies the In predicate on the "payload" field. +func PayloadIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldPayload, vs...)) +} + +// PayloadNotIn applies the NotIn predicate on the "payload" field. +func PayloadNotIn(vs ...string) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldPayload, vs...)) +} + +// PayloadGT applies the GT predicate on the "payload" field. +func PayloadGT(v string) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldPayload, v)) +} + +// PayloadGTE applies the GTE predicate on the "payload" field. 
+func PayloadGTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldPayload, v)) +} + +// PayloadLT applies the LT predicate on the "payload" field. +func PayloadLT(v string) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldPayload, v)) +} + +// PayloadLTE applies the LTE predicate on the "payload" field. +func PayloadLTE(v string) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldPayload, v)) +} + +// PayloadContains applies the Contains predicate on the "payload" field. +func PayloadContains(v string) predicate.Metric { + return predicate.Metric(sql.FieldContains(FieldPayload, v)) +} + +// PayloadHasPrefix applies the HasPrefix predicate on the "payload" field. +func PayloadHasPrefix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasPrefix(FieldPayload, v)) +} + +// PayloadHasSuffix applies the HasSuffix predicate on the "payload" field. +func PayloadHasSuffix(v string) predicate.Metric { + return predicate.Metric(sql.FieldHasSuffix(FieldPayload, v)) +} + +// PayloadEqualFold applies the EqualFold predicate on the "payload" field. +func PayloadEqualFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldEqualFold(FieldPayload, v)) +} + +// PayloadContainsFold applies the ContainsFold predicate on the "payload" field. +func PayloadContainsFold(v string) predicate.Metric { + return predicate.Metric(sql.FieldContainsFold(FieldPayload, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Metric) predicate.Metric { + return predicate.Metric(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Metric) predicate.Metric { + return predicate.Metric(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.Metric) predicate.Metric { + return predicate.Metric(sql.NotPredicates(p)) +} diff --git a/pkg/database/ent/metric_create.go b/pkg/database/ent/metric_create.go new file mode 100644 index 00000000000..8fa656db427 --- /dev/null +++ b/pkg/database/ent/metric_create.go @@ -0,0 +1,246 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +// MetricCreate is the builder for creating a Metric entity. +type MetricCreate struct { + config + mutation *MetricMutation + hooks []Hook +} + +// SetGeneratedType sets the "generated_type" field. +func (mc *MetricCreate) SetGeneratedType(mt metric.GeneratedType) *MetricCreate { + mc.mutation.SetGeneratedType(mt) + return mc +} + +// SetGeneratedBy sets the "generated_by" field. +func (mc *MetricCreate) SetGeneratedBy(s string) *MetricCreate { + mc.mutation.SetGeneratedBy(s) + return mc +} + +// SetCollectedAt sets the "collected_at" field. +func (mc *MetricCreate) SetCollectedAt(t time.Time) *MetricCreate { + mc.mutation.SetCollectedAt(t) + return mc +} + +// SetPushedAt sets the "pushed_at" field. +func (mc *MetricCreate) SetPushedAt(t time.Time) *MetricCreate { + mc.mutation.SetPushedAt(t) + return mc +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (mc *MetricCreate) SetNillablePushedAt(t *time.Time) *MetricCreate { + if t != nil { + mc.SetPushedAt(*t) + } + return mc +} + +// SetPayload sets the "payload" field. +func (mc *MetricCreate) SetPayload(s string) *MetricCreate { + mc.mutation.SetPayload(s) + return mc +} + +// Mutation returns the MetricMutation object of the builder. +func (mc *MetricCreate) Mutation() *MetricMutation { + return mc.mutation +} + +// Save creates the Metric in the database. 
+func (mc *MetricCreate) Save(ctx context.Context) (*Metric, error) { + return withHooks(ctx, mc.sqlSave, mc.mutation, mc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (mc *MetricCreate) SaveX(ctx context.Context) *Metric { + v, err := mc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (mc *MetricCreate) Exec(ctx context.Context) error { + _, err := mc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mc *MetricCreate) ExecX(ctx context.Context) { + if err := mc.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (mc *MetricCreate) check() error { + if _, ok := mc.mutation.GeneratedType(); !ok { + return &ValidationError{Name: "generated_type", err: errors.New(`ent: missing required field "Metric.generated_type"`)} + } + if v, ok := mc.mutation.GeneratedType(); ok { + if err := metric.GeneratedTypeValidator(v); err != nil { + return &ValidationError{Name: "generated_type", err: fmt.Errorf(`ent: validator failed for field "Metric.generated_type": %w`, err)} + } + } + if _, ok := mc.mutation.GeneratedBy(); !ok { + return &ValidationError{Name: "generated_by", err: errors.New(`ent: missing required field "Metric.generated_by"`)} + } + if _, ok := mc.mutation.CollectedAt(); !ok { + return &ValidationError{Name: "collected_at", err: errors.New(`ent: missing required field "Metric.collected_at"`)} + } + if _, ok := mc.mutation.Payload(); !ok { + return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Metric.payload"`)} + } + return nil +} + +func (mc *MetricCreate) sqlSave(ctx context.Context) (*Metric, error) { + if err := mc.check(); err != nil { + return nil, err + } + _node, _spec := mc.createSpec() + if err := sqlgraph.CreateNode(ctx, mc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), 
wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + mc.mutation.id = &_node.ID + mc.mutation.done = true + return _node, nil +} + +func (mc *MetricCreate) createSpec() (*Metric, *sqlgraph.CreateSpec) { + var ( + _node = &Metric{config: mc.config} + _spec = sqlgraph.NewCreateSpec(metric.Table, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + ) + if value, ok := mc.mutation.GeneratedType(); ok { + _spec.SetField(metric.FieldGeneratedType, field.TypeEnum, value) + _node.GeneratedType = value + } + if value, ok := mc.mutation.GeneratedBy(); ok { + _spec.SetField(metric.FieldGeneratedBy, field.TypeString, value) + _node.GeneratedBy = value + } + if value, ok := mc.mutation.CollectedAt(); ok { + _spec.SetField(metric.FieldCollectedAt, field.TypeTime, value) + _node.CollectedAt = value + } + if value, ok := mc.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + _node.PushedAt = &value + } + if value, ok := mc.mutation.Payload(); ok { + _spec.SetField(metric.FieldPayload, field.TypeString, value) + _node.Payload = value + } + return _node, _spec +} + +// MetricCreateBulk is the builder for creating many Metric entities in bulk. +type MetricCreateBulk struct { + config + err error + builders []*MetricCreate +} + +// Save creates the Metric entities in the database. 
+func (mcb *MetricCreateBulk) Save(ctx context.Context) ([]*Metric, error) { + if mcb.err != nil { + return nil, mcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(mcb.builders)) + nodes := make([]*Metric, len(mcb.builders)) + mutators := make([]Mutator, len(mcb.builders)) + for i := range mcb.builders { + func(i int, root context.Context) { + builder := mcb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*MetricMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, mcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, mcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, mcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (mcb *MetricCreateBulk) SaveX(ctx context.Context) []*Metric { + v, err := mcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (mcb *MetricCreateBulk) Exec(ctx context.Context) error { + _, err := mcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (mcb *MetricCreateBulk) ExecX(ctx context.Context) { + if err := mcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/metric_delete.go b/pkg/database/ent/metric_delete.go new file mode 100644 index 00000000000..d6606680a6a --- /dev/null +++ b/pkg/database/ent/metric_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricDelete is the builder for deleting a Metric entity. +type MetricDelete struct { + config + hooks []Hook + mutation *MetricMutation +} + +// Where appends a list predicates to the MetricDelete builder. +func (md *MetricDelete) Where(ps ...predicate.Metric) *MetricDelete { + md.mutation.Where(ps...) + return md +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (md *MetricDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, md.sqlExec, md.mutation, md.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (md *MetricDelete) ExecX(ctx context.Context) int { + n, err := md.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (md *MetricDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(metric.Table, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + if ps := md.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, md.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + md.mutation.done = true + return affected, err +} + +// MetricDeleteOne is the builder for deleting a single Metric entity. +type MetricDeleteOne struct { + md *MetricDelete +} + +// Where appends a list predicates to the MetricDelete builder. +func (mdo *MetricDeleteOne) Where(ps ...predicate.Metric) *MetricDeleteOne { + mdo.md.mutation.Where(ps...) + return mdo +} + +// Exec executes the deletion query. +func (mdo *MetricDeleteOne) Exec(ctx context.Context) error { + n, err := mdo.md.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{metric.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (mdo *MetricDeleteOne) ExecX(ctx context.Context) { + if err := mdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/metric_query.go b/pkg/database/ent/metric_query.go new file mode 100644 index 00000000000..6e1c6f08b4a --- /dev/null +++ b/pkg/database/ent/metric_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricQuery is the builder for querying Metric entities. +type MetricQuery struct { + config + ctx *QueryContext + order []metric.OrderOption + inters []Interceptor + predicates []predicate.Metric + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the MetricQuery builder. +func (mq *MetricQuery) Where(ps ...predicate.Metric) *MetricQuery { + mq.predicates = append(mq.predicates, ps...) + return mq +} + +// Limit the number of records to be returned by this query. +func (mq *MetricQuery) Limit(limit int) *MetricQuery { + mq.ctx.Limit = &limit + return mq +} + +// Offset to start from. +func (mq *MetricQuery) Offset(offset int) *MetricQuery { + mq.ctx.Offset = &offset + return mq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (mq *MetricQuery) Unique(unique bool) *MetricQuery { + mq.ctx.Unique = &unique + return mq +} + +// Order specifies how the records should be ordered. +func (mq *MetricQuery) Order(o ...metric.OrderOption) *MetricQuery { + mq.order = append(mq.order, o...) + return mq +} + +// First returns the first Metric entity from the query. +// Returns a *NotFoundError when no Metric was found. +func (mq *MetricQuery) First(ctx context.Context) (*Metric, error) { + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{metric.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (mq *MetricQuery) FirstX(ctx context.Context) *Metric { + node, err := mq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Metric ID from the query. +// Returns a *NotFoundError when no Metric ID was found. +func (mq *MetricQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{metric.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (mq *MetricQuery) FirstIDX(ctx context.Context) int { + id, err := mq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Metric entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Metric entity is found. +// Returns a *NotFoundError when no Metric entities are found. +func (mq *MetricQuery) Only(ctx context.Context) (*Metric, error) { + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{metric.Label} + default: + return nil, &NotSingularError{metric.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (mq *MetricQuery) OnlyX(ctx context.Context) *Metric { + node, err := mq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Metric ID in the query. +// Returns a *NotSingularError when more than one Metric ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (mq *MetricQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{metric.Label} + default: + err = &NotSingularError{metric.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (mq *MetricQuery) OnlyIDX(ctx context.Context) int { + id, err := mq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Metrics. +func (mq *MetricQuery) All(ctx context.Context) ([]*Metric, error) { + ctx = setContextOp(ctx, mq.ctx, "All") + if err := mq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Metric, *MetricQuery]() + return withInterceptors[[]*Metric](ctx, mq, qr, mq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (mq *MetricQuery) AllX(ctx context.Context) []*Metric { + nodes, err := mq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Metric IDs. +func (mq *MetricQuery) IDs(ctx context.Context) (ids []int, err error) { + if mq.ctx.Unique == nil && mq.path != nil { + mq.Unique(true) + } + ctx = setContextOp(ctx, mq.ctx, "IDs") + if err = mq.Select(metric.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (mq *MetricQuery) IDsX(ctx context.Context) []int { + ids, err := mq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (mq *MetricQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, mq.ctx, "Count") + if err := mq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, mq, querierCount[*MetricQuery](), mq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (mq *MetricQuery) CountX(ctx context.Context) int { + count, err := mq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (mq *MetricQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, mq.ctx, "Exist") + switch _, err := mq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (mq *MetricQuery) ExistX(ctx context.Context) bool { + exist, err := mq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the MetricQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (mq *MetricQuery) Clone() *MetricQuery { + if mq == nil { + return nil + } + return &MetricQuery{ + config: mq.config, + ctx: mq.ctx.Clone(), + order: append([]metric.OrderOption{}, mq.order...), + inters: append([]Interceptor{}, mq.inters...), + predicates: append([]predicate.Metric{}, mq.predicates...), + // clone intermediate query. + sql: mq.sql.Clone(), + path: mq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Metric.Query(). 
+// GroupBy(metric.FieldGeneratedType). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (mq *MetricQuery) GroupBy(field string, fields ...string) *MetricGroupBy { + mq.ctx.Fields = append([]string{field}, fields...) + grbuild := &MetricGroupBy{build: mq} + grbuild.flds = &mq.ctx.Fields + grbuild.label = metric.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// GeneratedType metric.GeneratedType `json:"generated_type,omitempty"` +// } +// +// client.Metric.Query(). +// Select(metric.FieldGeneratedType). +// Scan(ctx, &v) +func (mq *MetricQuery) Select(fields ...string) *MetricSelect { + mq.ctx.Fields = append(mq.ctx.Fields, fields...) + sbuild := &MetricSelect{MetricQuery: mq} + sbuild.label = metric.Label + sbuild.flds, sbuild.scan = &mq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a MetricSelect configured with the given aggregations. +func (mq *MetricQuery) Aggregate(fns ...AggregateFunc) *MetricSelect { + return mq.Select().Aggregate(fns...) 
+} + +func (mq *MetricQuery) prepareQuery(ctx context.Context) error { + for _, inter := range mq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, mq); err != nil { + return err + } + } + } + for _, f := range mq.ctx.Fields { + if !metric.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if mq.path != nil { + prev, err := mq.path(ctx) + if err != nil { + return err + } + mq.sql = prev + } + return nil +} + +func (mq *MetricQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Metric, error) { + var ( + nodes = []*Metric{} + _spec = mq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Metric).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Metric{config: mq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, mq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (mq *MetricQuery) sqlCount(ctx context.Context) (int, error) { + _spec := mq.querySpec() + _spec.Node.Columns = mq.ctx.Fields + if len(mq.ctx.Fields) > 0 { + _spec.Unique = mq.ctx.Unique != nil && *mq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, mq.driver, _spec) +} + +func (mq *MetricQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + _spec.From = mq.sql + if unique := mq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if mq.path != nil { + _spec.Unique = true + } + if fields := mq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = 
append(_spec.Node.Columns, metric.FieldID) + for i := range fields { + if fields[i] != metric.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := mq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := mq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := mq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := mq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (mq *MetricQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(mq.driver.Dialect()) + t1 := builder.Table(metric.Table) + columns := mq.ctx.Fields + if len(columns) == 0 { + columns = metric.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if mq.sql != nil { + selector = mq.sql + selector.Select(selector.Columns(columns...)...) + } + if mq.ctx.Unique != nil && *mq.ctx.Unique { + selector.Distinct() + } + for _, p := range mq.predicates { + p(selector) + } + for _, p := range mq.order { + p(selector) + } + if offset := mq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := mq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// MetricGroupBy is the group-by builder for Metric entities. +type MetricGroupBy struct { + selector + build *MetricQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (mgb *MetricGroupBy) Aggregate(fns ...AggregateFunc) *MetricGroupBy { + mgb.fns = append(mgb.fns, fns...) + return mgb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (mgb *MetricGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + if err := mgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MetricQuery, *MetricGroupBy](ctx, mgb.build, mgb, mgb.build.inters, v) +} + +func (mgb *MetricGroupBy) sqlScan(ctx context.Context, root *MetricQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(mgb.fns)) + for _, fn := range mgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*mgb.flds)+len(mgb.fns)) + for _, f := range *mgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*mgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := mgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// MetricSelect is the builder for selecting fields of Metric entities. +type MetricSelect struct { + *MetricQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ms *MetricSelect) Aggregate(fns ...AggregateFunc) *MetricSelect { + ms.fns = append(ms.fns, fns...) + return ms +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ms *MetricSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ms.ctx, "Select") + if err := ms.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*MetricQuery, *MetricSelect](ctx, ms.MetricQuery, ms, ms.inters, v) +} + +func (ms *MetricSelect) sqlScan(ctx context.Context, root *MetricQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ms.fns)) + for _, fn := range ms.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ms.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ms.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/metric_update.go b/pkg/database/ent/metric_update.go new file mode 100644 index 00000000000..4da33dd6ce9 --- /dev/null +++ b/pkg/database/ent/metric_update.go @@ -0,0 +1,228 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// MetricUpdate is the builder for updating Metric entities. +type MetricUpdate struct { + config + hooks []Hook + mutation *MetricMutation +} + +// Where appends a list predicates to the MetricUpdate builder. +func (mu *MetricUpdate) Where(ps ...predicate.Metric) *MetricUpdate { + mu.mutation.Where(ps...) + return mu +} + +// SetPushedAt sets the "pushed_at" field. 
+func (mu *MetricUpdate) SetPushedAt(t time.Time) *MetricUpdate { + mu.mutation.SetPushedAt(t) + return mu +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (mu *MetricUpdate) SetNillablePushedAt(t *time.Time) *MetricUpdate { + if t != nil { + mu.SetPushedAt(*t) + } + return mu +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (mu *MetricUpdate) ClearPushedAt() *MetricUpdate { + mu.mutation.ClearPushedAt() + return mu +} + +// Mutation returns the MetricMutation object of the builder. +func (mu *MetricUpdate) Mutation() *MetricMutation { + return mu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (mu *MetricUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, mu.sqlSave, mu.mutation, mu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (mu *MetricUpdate) SaveX(ctx context.Context) int { + affected, err := mu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (mu *MetricUpdate) Exec(ctx context.Context) error { + _, err := mu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (mu *MetricUpdate) ExecX(ctx context.Context) { + if err := mu.Exec(ctx); err != nil { + panic(err) + } +} + +func (mu *MetricUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + if ps := mu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := mu.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + } + if mu.mutation.PushedAtCleared() { + _spec.ClearField(metric.FieldPushedAt, field.TypeTime) + } + if n, err = sqlgraph.UpdateNodes(ctx, mu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{metric.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + mu.mutation.done = true + return n, nil +} + +// MetricUpdateOne is the builder for updating a single Metric entity. +type MetricUpdateOne struct { + config + fields []string + hooks []Hook + mutation *MetricMutation +} + +// SetPushedAt sets the "pushed_at" field. +func (muo *MetricUpdateOne) SetPushedAt(t time.Time) *MetricUpdateOne { + muo.mutation.SetPushedAt(t) + return muo +} + +// SetNillablePushedAt sets the "pushed_at" field if the given value is not nil. +func (muo *MetricUpdateOne) SetNillablePushedAt(t *time.Time) *MetricUpdateOne { + if t != nil { + muo.SetPushedAt(*t) + } + return muo +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (muo *MetricUpdateOne) ClearPushedAt() *MetricUpdateOne { + muo.mutation.ClearPushedAt() + return muo +} + +// Mutation returns the MetricMutation object of the builder. +func (muo *MetricUpdateOne) Mutation() *MetricMutation { + return muo.mutation +} + +// Where appends a list predicates to the MetricUpdate builder. 
+func (muo *MetricUpdateOne) Where(ps ...predicate.Metric) *MetricUpdateOne { + muo.mutation.Where(ps...) + return muo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (muo *MetricUpdateOne) Select(field string, fields ...string) *MetricUpdateOne { + muo.fields = append([]string{field}, fields...) + return muo +} + +// Save executes the query and returns the updated Metric entity. +func (muo *MetricUpdateOne) Save(ctx context.Context) (*Metric, error) { + return withHooks(ctx, muo.sqlSave, muo.mutation, muo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (muo *MetricUpdateOne) SaveX(ctx context.Context) *Metric { + node, err := muo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (muo *MetricUpdateOne) Exec(ctx context.Context) error { + _, err := muo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (muo *MetricUpdateOne) ExecX(ctx context.Context) { + if err := muo.Exec(ctx); err != nil { + panic(err) + } +} + +func (muo *MetricUpdateOne) sqlSave(ctx context.Context) (_node *Metric, err error) { + _spec := sqlgraph.NewUpdateSpec(metric.Table, metric.Columns, sqlgraph.NewFieldSpec(metric.FieldID, field.TypeInt)) + id, ok := muo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Metric.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := muo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, metric.FieldID) + for _, f := range fields { + if !metric.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != metric.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := muo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := muo.mutation.PushedAt(); ok { + _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) + } + if muo.mutation.PushedAtCleared() { + _spec.ClearField(metric.FieldPushedAt, field.TypeTime) + } + _node = &Metric{config: muo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, muo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{metric.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + muo.mutation.done = true + return _node, nil +} diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 584e848f09e..5c32c472403 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -70,6 +70,9 @@ var ( {Name: "version", Type: field.TypeString, 
Nullable: true}, {Name: "last_pull", Type: field.TypeTime, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "api-key"}, + {Name: "osname", Type: field.TypeString, Nullable: true}, + {Name: "osversion", Type: field.TypeString, Nullable: true}, + {Name: "featureflags", Type: field.TypeString, Nullable: true}, } // BouncersTable holds the schema information for the "bouncers" table. BouncersTable = &schema.Table{ @@ -204,6 +207,11 @@ var ( {Name: "is_validated", Type: field.TypeBool, Default: false}, {Name: "status", Type: field.TypeString, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "password"}, + {Name: "osname", Type: field.TypeString, Nullable: true}, + {Name: "osversion", Type: field.TypeString, Nullable: true}, + {Name: "featureflags", Type: field.TypeString, Nullable: true}, + {Name: "hubstate", Type: field.TypeJSON, Nullable: true}, + {Name: "datasources", Type: field.TypeJSON, Nullable: true}, } // MachinesTable holds the schema information for the "machines" table. MachinesTable = &schema.Table{ @@ -241,6 +249,28 @@ var ( }, }, } + // MetricsColumns holds the columns for the "metrics" table. + MetricsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "generated_type", Type: field.TypeEnum, Enums: []string{"LP", "RC"}}, + {Name: "generated_by", Type: field.TypeString}, + {Name: "collected_at", Type: field.TypeTime}, + {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, + {Name: "payload", Type: field.TypeString}, + } + // MetricsTable holds the schema information for the "metrics" table. + MetricsTable = &schema.Table{ + Name: "metrics", + Columns: MetricsColumns, + PrimaryKey: []*schema.Column{MetricsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "metric_generated_type_generated_by_collected_at", + Unique: true, + Columns: []*schema.Column{MetricsColumns[1], MetricsColumns[2], MetricsColumns[3]}, + }, + }, + } // Tables holds all the tables in the schema. 
Tables = []*schema.Table{ AlertsTable, @@ -251,6 +281,7 @@ var ( LocksTable, MachinesTable, MetaTable, + MetricsTable, } ) diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index c012e870c8f..8d109ece379 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -19,7 +19,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/lock" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" ) const ( @@ -39,6 +41,7 @@ const ( TypeLock = "Lock" TypeMachine = "Machine" TypeMeta = "Meta" + TypeMetric = "Metric" ) // AlertMutation represents an operation that mutates the Alert nodes in the graph. @@ -2392,6 +2395,9 @@ type BouncerMutation struct { version *string last_pull *time.Time auth_type *string + osname *string + osversion *string + featureflags *string clearedFields map[string]struct{} done bool oldValue func(context.Context) (*Bouncer, error) @@ -2908,6 +2914,153 @@ func (m *BouncerMutation) ResetAuthType() { m.auth_type = nil } +// SetOsname sets the "osname" field. +func (m *BouncerMutation) SetOsname(s string) { + m.osname = &s +} + +// Osname returns the value of the "osname" field in the mutation. +func (m *BouncerMutation) Osname() (r string, exists bool) { + v := m.osname + if v == nil { + return + } + return *v, true +} + +// OldOsname returns the old "osname" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BouncerMutation) OldOsname(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsname is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsname requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsname: %w", err) + } + return oldValue.Osname, nil +} + +// ClearOsname clears the value of the "osname" field. +func (m *BouncerMutation) ClearOsname() { + m.osname = nil + m.clearedFields[bouncer.FieldOsname] = struct{}{} +} + +// OsnameCleared returns if the "osname" field was cleared in this mutation. +func (m *BouncerMutation) OsnameCleared() bool { + _, ok := m.clearedFields[bouncer.FieldOsname] + return ok +} + +// ResetOsname resets all changes to the "osname" field. +func (m *BouncerMutation) ResetOsname() { + m.osname = nil + delete(m.clearedFields, bouncer.FieldOsname) +} + +// SetOsversion sets the "osversion" field. +func (m *BouncerMutation) SetOsversion(s string) { + m.osversion = &s +} + +// Osversion returns the value of the "osversion" field in the mutation. +func (m *BouncerMutation) Osversion() (r string, exists bool) { + v := m.osversion + if v == nil { + return + } + return *v, true +} + +// OldOsversion returns the old "osversion" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BouncerMutation) OldOsversion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsversion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsversion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsversion: %w", err) + } + return oldValue.Osversion, nil +} + +// ClearOsversion clears the value of the "osversion" field. +func (m *BouncerMutation) ClearOsversion() { + m.osversion = nil + m.clearedFields[bouncer.FieldOsversion] = struct{}{} +} + +// OsversionCleared returns if the "osversion" field was cleared in this mutation. +func (m *BouncerMutation) OsversionCleared() bool { + _, ok := m.clearedFields[bouncer.FieldOsversion] + return ok +} + +// ResetOsversion resets all changes to the "osversion" field. +func (m *BouncerMutation) ResetOsversion() { + m.osversion = nil + delete(m.clearedFields, bouncer.FieldOsversion) +} + +// SetFeatureflags sets the "featureflags" field. +func (m *BouncerMutation) SetFeatureflags(s string) { + m.featureflags = &s +} + +// Featureflags returns the value of the "featureflags" field in the mutation. +func (m *BouncerMutation) Featureflags() (r string, exists bool) { + v := m.featureflags + if v == nil { + return + } + return *v, true +} + +// OldFeatureflags returns the old "featureflags" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BouncerMutation) OldFeatureflags(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFeatureflags is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFeatureflags requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFeatureflags: %w", err) + } + return oldValue.Featureflags, nil +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (m *BouncerMutation) ClearFeatureflags() { + m.featureflags = nil + m.clearedFields[bouncer.FieldFeatureflags] = struct{}{} +} + +// FeatureflagsCleared returns if the "featureflags" field was cleared in this mutation. +func (m *BouncerMutation) FeatureflagsCleared() bool { + _, ok := m.clearedFields[bouncer.FieldFeatureflags] + return ok +} + +// ResetFeatureflags resets all changes to the "featureflags" field. +func (m *BouncerMutation) ResetFeatureflags() { + m.featureflags = nil + delete(m.clearedFields, bouncer.FieldFeatureflags) +} + // Where appends a list predicates to the BouncerMutation builder. func (m *BouncerMutation) Where(ps ...predicate.Bouncer) { m.predicates = append(m.predicates, ps...) @@ -2942,7 +3095,7 @@ func (m *BouncerMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *BouncerMutation) Fields() []string { - fields := make([]string, 0, 10) + fields := make([]string, 0, 13) if m.created_at != nil { fields = append(fields, bouncer.FieldCreatedAt) } @@ -2973,6 +3126,15 @@ func (m *BouncerMutation) Fields() []string { if m.auth_type != nil { fields = append(fields, bouncer.FieldAuthType) } + if m.osname != nil { + fields = append(fields, bouncer.FieldOsname) + } + if m.osversion != nil { + fields = append(fields, bouncer.FieldOsversion) + } + if m.featureflags != nil { + fields = append(fields, bouncer.FieldFeatureflags) + } return fields } @@ -3001,6 +3163,12 @@ func (m *BouncerMutation) Field(name string) (ent.Value, bool) { return m.LastPull() case bouncer.FieldAuthType: return m.AuthType() + case bouncer.FieldOsname: + return m.Osname() + case bouncer.FieldOsversion: + return m.Osversion() + case bouncer.FieldFeatureflags: + return m.Featureflags() } return nil, false } @@ -3030,6 +3198,12 @@ func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldLastPull(ctx) case bouncer.FieldAuthType: return m.OldAuthType(ctx) + case bouncer.FieldOsname: + return m.OldOsname(ctx) + case bouncer.FieldOsversion: + return m.OldOsversion(ctx) + case bouncer.FieldFeatureflags: + return m.OldFeatureflags(ctx) } return nil, fmt.Errorf("unknown Bouncer field %s", name) } @@ -3109,6 +3283,27 @@ func (m *BouncerMutation) SetField(name string, value ent.Value) error { } m.SetAuthType(v) return nil + case bouncer.FieldOsname: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsname(v) + return nil + case bouncer.FieldOsversion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsversion(v) + return nil + case bouncer.FieldFeatureflags: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFeatureflags(v) + return nil } 
return fmt.Errorf("unknown Bouncer field %s", name) } @@ -3151,6 +3346,15 @@ func (m *BouncerMutation) ClearedFields() []string { if m.FieldCleared(bouncer.FieldLastPull) { fields = append(fields, bouncer.FieldLastPull) } + if m.FieldCleared(bouncer.FieldOsname) { + fields = append(fields, bouncer.FieldOsname) + } + if m.FieldCleared(bouncer.FieldOsversion) { + fields = append(fields, bouncer.FieldOsversion) + } + if m.FieldCleared(bouncer.FieldFeatureflags) { + fields = append(fields, bouncer.FieldFeatureflags) + } return fields } @@ -3177,6 +3381,15 @@ func (m *BouncerMutation) ClearField(name string) error { case bouncer.FieldLastPull: m.ClearLastPull() return nil + case bouncer.FieldOsname: + m.ClearOsname() + return nil + case bouncer.FieldOsversion: + m.ClearOsversion() + return nil + case bouncer.FieldFeatureflags: + m.ClearFeatureflags() + return nil } return fmt.Errorf("unknown Bouncer nullable field %s", name) } @@ -3215,6 +3428,15 @@ func (m *BouncerMutation) ResetField(name string) error { case bouncer.FieldAuthType: m.ResetAuthType() return nil + case bouncer.FieldOsname: + m.ResetOsname() + return nil + case bouncer.FieldOsversion: + m.ResetOsversion() + return nil + case bouncer.FieldFeatureflags: + m.ResetFeatureflags() + return nil } return fmt.Errorf("unknown Bouncer field %s", name) } @@ -6318,6 +6540,11 @@ type MachineMutation struct { isValidated *bool status *string auth_type *string + osname *string + osversion *string + featureflags *string + hubstate *map[string]schema.ItemState + datasources *map[string]int64 clearedFields map[string]struct{} alerts map[int]struct{} removedalerts map[int]struct{} @@ -6922,6 +7149,251 @@ func (m *MachineMutation) ResetAuthType() { m.auth_type = nil } +// SetOsname sets the "osname" field. +func (m *MachineMutation) SetOsname(s string) { + m.osname = &s +} + +// Osname returns the value of the "osname" field in the mutation. 
+func (m *MachineMutation) Osname() (r string, exists bool) { + v := m.osname + if v == nil { + return + } + return *v, true +} + +// OldOsname returns the old "osname" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldOsname(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsname is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsname requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsname: %w", err) + } + return oldValue.Osname, nil +} + +// ClearOsname clears the value of the "osname" field. +func (m *MachineMutation) ClearOsname() { + m.osname = nil + m.clearedFields[machine.FieldOsname] = struct{}{} +} + +// OsnameCleared returns if the "osname" field was cleared in this mutation. +func (m *MachineMutation) OsnameCleared() bool { + _, ok := m.clearedFields[machine.FieldOsname] + return ok +} + +// ResetOsname resets all changes to the "osname" field. +func (m *MachineMutation) ResetOsname() { + m.osname = nil + delete(m.clearedFields, machine.FieldOsname) +} + +// SetOsversion sets the "osversion" field. +func (m *MachineMutation) SetOsversion(s string) { + m.osversion = &s +} + +// Osversion returns the value of the "osversion" field in the mutation. +func (m *MachineMutation) Osversion() (r string, exists bool) { + v := m.osversion + if v == nil { + return + } + return *v, true +} + +// OldOsversion returns the old "osversion" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MachineMutation) OldOsversion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOsversion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOsversion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOsversion: %w", err) + } + return oldValue.Osversion, nil +} + +// ClearOsversion clears the value of the "osversion" field. +func (m *MachineMutation) ClearOsversion() { + m.osversion = nil + m.clearedFields[machine.FieldOsversion] = struct{}{} +} + +// OsversionCleared returns if the "osversion" field was cleared in this mutation. +func (m *MachineMutation) OsversionCleared() bool { + _, ok := m.clearedFields[machine.FieldOsversion] + return ok +} + +// ResetOsversion resets all changes to the "osversion" field. +func (m *MachineMutation) ResetOsversion() { + m.osversion = nil + delete(m.clearedFields, machine.FieldOsversion) +} + +// SetFeatureflags sets the "featureflags" field. +func (m *MachineMutation) SetFeatureflags(s string) { + m.featureflags = &s +} + +// Featureflags returns the value of the "featureflags" field in the mutation. +func (m *MachineMutation) Featureflags() (r string, exists bool) { + v := m.featureflags + if v == nil { + return + } + return *v, true +} + +// OldFeatureflags returns the old "featureflags" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldFeatureflags(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFeatureflags is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFeatureflags requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFeatureflags: %w", err) + } + return oldValue.Featureflags, nil +} + +// ClearFeatureflags clears the value of the "featureflags" field. +func (m *MachineMutation) ClearFeatureflags() { + m.featureflags = nil + m.clearedFields[machine.FieldFeatureflags] = struct{}{} +} + +// FeatureflagsCleared returns if the "featureflags" field was cleared in this mutation. +func (m *MachineMutation) FeatureflagsCleared() bool { + _, ok := m.clearedFields[machine.FieldFeatureflags] + return ok +} + +// ResetFeatureflags resets all changes to the "featureflags" field. +func (m *MachineMutation) ResetFeatureflags() { + m.featureflags = nil + delete(m.clearedFields, machine.FieldFeatureflags) +} + +// SetHubstate sets the "hubstate" field. +func (m *MachineMutation) SetHubstate(ms map[string]schema.ItemState) { + m.hubstate = &ms +} + +// Hubstate returns the value of the "hubstate" field in the mutation. +func (m *MachineMutation) Hubstate() (r map[string]schema.ItemState, exists bool) { + v := m.hubstate + if v == nil { + return + } + return *v, true +} + +// OldHubstate returns the old "hubstate" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldHubstate(ctx context.Context) (v map[string]schema.ItemState, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHubstate is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHubstate requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHubstate: %w", err) + } + return oldValue.Hubstate, nil +} + +// ClearHubstate clears the value of the "hubstate" field. +func (m *MachineMutation) ClearHubstate() { + m.hubstate = nil + m.clearedFields[machine.FieldHubstate] = struct{}{} +} + +// HubstateCleared returns if the "hubstate" field was cleared in this mutation. +func (m *MachineMutation) HubstateCleared() bool { + _, ok := m.clearedFields[machine.FieldHubstate] + return ok +} + +// ResetHubstate resets all changes to the "hubstate" field. +func (m *MachineMutation) ResetHubstate() { + m.hubstate = nil + delete(m.clearedFields, machine.FieldHubstate) +} + +// SetDatasources sets the "datasources" field. +func (m *MachineMutation) SetDatasources(value map[string]int64) { + m.datasources = &value +} + +// Datasources returns the value of the "datasources" field in the mutation. +func (m *MachineMutation) Datasources() (r map[string]int64, exists bool) { + v := m.datasources + if v == nil { + return + } + return *v, true +} + +// OldDatasources returns the old "datasources" field's value of the Machine entity. +// If the Machine object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MachineMutation) OldDatasources(ctx context.Context) (v map[string]int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDatasources is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDatasources requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDatasources: %w", err) + } + return oldValue.Datasources, nil +} + +// ClearDatasources clears the value of the "datasources" field. +func (m *MachineMutation) ClearDatasources() { + m.datasources = nil + m.clearedFields[machine.FieldDatasources] = struct{}{} +} + +// DatasourcesCleared returns if the "datasources" field was cleared in this mutation. +func (m *MachineMutation) DatasourcesCleared() bool { + _, ok := m.clearedFields[machine.FieldDatasources] + return ok +} + +// ResetDatasources resets all changes to the "datasources" field. +func (m *MachineMutation) ResetDatasources() { + m.datasources = nil + delete(m.clearedFields, machine.FieldDatasources) +} + // AddAlertIDs adds the "alerts" edge to the Alert entity by ids. func (m *MachineMutation) AddAlertIDs(ids ...int) { if m.alerts == nil { @@ -7010,7 +7482,7 @@ func (m *MachineMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *MachineMutation) Fields() []string { - fields := make([]string, 0, 12) + fields := make([]string, 0, 17) if m.created_at != nil { fields = append(fields, machine.FieldCreatedAt) } @@ -7047,6 +7519,21 @@ func (m *MachineMutation) Fields() []string { if m.auth_type != nil { fields = append(fields, machine.FieldAuthType) } + if m.osname != nil { + fields = append(fields, machine.FieldOsname) + } + if m.osversion != nil { + fields = append(fields, machine.FieldOsversion) + } + if m.featureflags != nil { + fields = append(fields, machine.FieldFeatureflags) + } + if m.hubstate != nil { + fields = append(fields, machine.FieldHubstate) + } + if m.datasources != nil { + fields = append(fields, machine.FieldDatasources) + } return fields } @@ -7079,6 +7566,16 @@ func (m *MachineMutation) Field(name string) (ent.Value, bool) { return m.Status() case machine.FieldAuthType: return m.AuthType() + case machine.FieldOsname: + return m.Osname() + case machine.FieldOsversion: + return m.Osversion() + case machine.FieldFeatureflags: + return m.Featureflags() + case machine.FieldHubstate: + return m.Hubstate() + case machine.FieldDatasources: + return m.Datasources() } return nil, false } @@ -7112,6 +7609,16 @@ func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldStatus(ctx) case machine.FieldAuthType: return m.OldAuthType(ctx) + case machine.FieldOsname: + return m.OldOsname(ctx) + case machine.FieldOsversion: + return m.OldOsversion(ctx) + case machine.FieldFeatureflags: + return m.OldFeatureflags(ctx) + case machine.FieldHubstate: + return m.OldHubstate(ctx) + case machine.FieldDatasources: + return m.OldDatasources(ctx) } return nil, fmt.Errorf("unknown Machine field %s", name) } @@ -7205,22 +7712,57 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error { } m.SetAuthType(v) return nil - } - return fmt.Errorf("unknown Machine field %s", name) -} - -// AddedFields returns all numeric fields that were 
incremented/decremented during -// this mutation. -func (m *MachineMutation) AddedFields() []string { - return nil -} - -// AddedField returns the numeric value that was incremented/decremented on a field -// with the given name. The second boolean return value indicates that this field -// was not set, or was not defined in the schema. -func (m *MachineMutation) AddedField(name string) (ent.Value, bool) { - return nil, false -} + case machine.FieldOsname: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsname(v) + return nil + case machine.FieldOsversion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOsversion(v) + return nil + case machine.FieldFeatureflags: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFeatureflags(v) + return nil + case machine.FieldHubstate: + v, ok := value.(map[string]schema.ItemState) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHubstate(v) + return nil + case machine.FieldDatasources: + v, ok := value.(map[string]int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDatasources(v) + return nil + } + return fmt.Errorf("unknown Machine field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *MachineMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *MachineMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} // AddField adds the value to the field with the given name. 
It returns an error if // the field is not defined in the schema, or if the type mismatched the field @@ -7250,6 +7792,21 @@ func (m *MachineMutation) ClearedFields() []string { if m.FieldCleared(machine.FieldStatus) { fields = append(fields, machine.FieldStatus) } + if m.FieldCleared(machine.FieldOsname) { + fields = append(fields, machine.FieldOsname) + } + if m.FieldCleared(machine.FieldOsversion) { + fields = append(fields, machine.FieldOsversion) + } + if m.FieldCleared(machine.FieldFeatureflags) { + fields = append(fields, machine.FieldFeatureflags) + } + if m.FieldCleared(machine.FieldHubstate) { + fields = append(fields, machine.FieldHubstate) + } + if m.FieldCleared(machine.FieldDatasources) { + fields = append(fields, machine.FieldDatasources) + } return fields } @@ -7279,6 +7836,21 @@ func (m *MachineMutation) ClearField(name string) error { case machine.FieldStatus: m.ClearStatus() return nil + case machine.FieldOsname: + m.ClearOsname() + return nil + case machine.FieldOsversion: + m.ClearOsversion() + return nil + case machine.FieldFeatureflags: + m.ClearFeatureflags() + return nil + case machine.FieldHubstate: + m.ClearHubstate() + return nil + case machine.FieldDatasources: + m.ClearDatasources() + return nil } return fmt.Errorf("unknown Machine nullable field %s", name) } @@ -7323,6 +7895,21 @@ func (m *MachineMutation) ResetField(name string) error { case machine.FieldAuthType: m.ResetAuthType() return nil + case machine.FieldOsname: + m.ResetOsname() + return nil + case machine.FieldOsversion: + m.ResetOsversion() + return nil + case machine.FieldFeatureflags: + m.ResetFeatureflags() + return nil + case machine.FieldHubstate: + m.ResetHubstate() + return nil + case machine.FieldDatasources: + m.ResetDatasources() + return nil } return fmt.Errorf("unknown Machine field %s", name) } @@ -8044,3 +8631,567 @@ func (m *MetaMutation) ResetEdge(name string) error { } return fmt.Errorf("unknown Meta edge %s", name) } + +// MetricMutation represents an 
operation that mutates the Metric nodes in the graph. +type MetricMutation struct { + config + op Op + typ string + id *int + generated_type *metric.GeneratedType + generated_by *string + collected_at *time.Time + pushed_at *time.Time + payload *string + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Metric, error) + predicates []predicate.Metric +} + +var _ ent.Mutation = (*MetricMutation)(nil) + +// metricOption allows management of the mutation configuration using functional options. +type metricOption func(*MetricMutation) + +// newMetricMutation creates new mutation for the Metric entity. +func newMetricMutation(c config, op Op, opts ...metricOption) *MetricMutation { + m := &MetricMutation{ + config: c, + op: op, + typ: TypeMetric, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withMetricID sets the ID field of the mutation. +func withMetricID(id int) metricOption { + return func(m *MetricMutation) { + var ( + err error + once sync.Once + value *Metric + ) + m.oldValue = func(ctx context.Context) (*Metric, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Metric.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withMetric sets the old Metric of the mutation. +func withMetric(node *Metric) metricOption { + return func(m *MetricMutation) { + m.oldValue = func(context.Context) (*Metric, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m MetricMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
+func (m MetricMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *MetricMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *MetricMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Metric.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetGeneratedType sets the "generated_type" field. +func (m *MetricMutation) SetGeneratedType(mt metric.GeneratedType) { + m.generated_type = &mt +} + +// GeneratedType returns the value of the "generated_type" field in the mutation. +func (m *MetricMutation) GeneratedType() (r metric.GeneratedType, exists bool) { + v := m.generated_type + if v == nil { + return + } + return *v, true +} + +// OldGeneratedType returns the old "generated_type" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldGeneratedType(ctx context.Context) (v metric.GeneratedType, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGeneratedType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGeneratedType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGeneratedType: %w", err) + } + return oldValue.GeneratedType, nil +} + +// ResetGeneratedType resets all changes to the "generated_type" field. +func (m *MetricMutation) ResetGeneratedType() { + m.generated_type = nil +} + +// SetGeneratedBy sets the "generated_by" field. +func (m *MetricMutation) SetGeneratedBy(s string) { + m.generated_by = &s +} + +// GeneratedBy returns the value of the "generated_by" field in the mutation. +func (m *MetricMutation) GeneratedBy() (r string, exists bool) { + v := m.generated_by + if v == nil { + return + } + return *v, true +} + +// OldGeneratedBy returns the old "generated_by" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MetricMutation) OldGeneratedBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGeneratedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGeneratedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGeneratedBy: %w", err) + } + return oldValue.GeneratedBy, nil +} + +// ResetGeneratedBy resets all changes to the "generated_by" field. +func (m *MetricMutation) ResetGeneratedBy() { + m.generated_by = nil +} + +// SetCollectedAt sets the "collected_at" field. 
+func (m *MetricMutation) SetCollectedAt(t time.Time) { + m.collected_at = &t +} + +// CollectedAt returns the value of the "collected_at" field in the mutation. +func (m *MetricMutation) CollectedAt() (r time.Time, exists bool) { + v := m.collected_at + if v == nil { + return + } + return *v, true +} + +// OldCollectedAt returns the old "collected_at" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *MetricMutation) OldCollectedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCollectedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCollectedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCollectedAt: %w", err) + } + return oldValue.CollectedAt, nil +} + +// ResetCollectedAt resets all changes to the "collected_at" field. +func (m *MetricMutation) ResetCollectedAt() { + m.collected_at = nil +} + +// SetPushedAt sets the "pushed_at" field. +func (m *MetricMutation) SetPushedAt(t time.Time) { + m.pushed_at = &t +} + +// PushedAt returns the value of the "pushed_at" field in the mutation. +func (m *MetricMutation) PushedAt() (r time.Time, exists bool) { + v := m.pushed_at + if v == nil { + return + } + return *v, true +} + +// OldPushedAt returns the old "pushed_at" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldPushedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPushedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPushedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPushedAt: %w", err) + } + return oldValue.PushedAt, nil +} + +// ClearPushedAt clears the value of the "pushed_at" field. +func (m *MetricMutation) ClearPushedAt() { + m.pushed_at = nil + m.clearedFields[metric.FieldPushedAt] = struct{}{} +} + +// PushedAtCleared returns if the "pushed_at" field was cleared in this mutation. +func (m *MetricMutation) PushedAtCleared() bool { + _, ok := m.clearedFields[metric.FieldPushedAt] + return ok +} + +// ResetPushedAt resets all changes to the "pushed_at" field. +func (m *MetricMutation) ResetPushedAt() { + m.pushed_at = nil + delete(m.clearedFields, metric.FieldPushedAt) +} + +// SetPayload sets the "payload" field. +func (m *MetricMutation) SetPayload(s string) { + m.payload = &s +} + +// Payload returns the value of the "payload" field in the mutation. +func (m *MetricMutation) Payload() (r string, exists bool) { + v := m.payload + if v == nil { + return + } + return *v, true +} + +// OldPayload returns the old "payload" field's value of the Metric entity. +// If the Metric object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *MetricMutation) OldPayload(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPayload is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPayload requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPayload: %w", err) + } + return oldValue.Payload, nil +} + +// ResetPayload resets all changes to the "payload" field. +func (m *MetricMutation) ResetPayload() { + m.payload = nil +} + +// Where appends a list predicates to the MetricMutation builder. +func (m *MetricMutation) Where(ps ...predicate.Metric) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the MetricMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *MetricMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Metric, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *MetricMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *MetricMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Metric). +func (m *MetricMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *MetricMutation) Fields() []string { + fields := make([]string, 0, 5) + if m.generated_type != nil { + fields = append(fields, metric.FieldGeneratedType) + } + if m.generated_by != nil { + fields = append(fields, metric.FieldGeneratedBy) + } + if m.collected_at != nil { + fields = append(fields, metric.FieldCollectedAt) + } + if m.pushed_at != nil { + fields = append(fields, metric.FieldPushedAt) + } + if m.payload != nil { + fields = append(fields, metric.FieldPayload) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *MetricMutation) Field(name string) (ent.Value, bool) { + switch name { + case metric.FieldGeneratedType: + return m.GeneratedType() + case metric.FieldGeneratedBy: + return m.GeneratedBy() + case metric.FieldCollectedAt: + return m.CollectedAt() + case metric.FieldPushedAt: + return m.PushedAt() + case metric.FieldPayload: + return m.Payload() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *MetricMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case metric.FieldGeneratedType: + return m.OldGeneratedType(ctx) + case metric.FieldGeneratedBy: + return m.OldGeneratedBy(ctx) + case metric.FieldCollectedAt: + return m.OldCollectedAt(ctx) + case metric.FieldPushedAt: + return m.OldPushedAt(ctx) + case metric.FieldPayload: + return m.OldPayload(ctx) + } + return nil, fmt.Errorf("unknown Metric field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *MetricMutation) SetField(name string, value ent.Value) error { + switch name { + case metric.FieldGeneratedType: + v, ok := value.(metric.GeneratedType) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGeneratedType(v) + return nil + case metric.FieldGeneratedBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGeneratedBy(v) + return nil + case metric.FieldCollectedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCollectedAt(v) + return nil + case metric.FieldPushedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPushedAt(v) + return nil + case metric.FieldPayload: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPayload(v) + return nil + } + return fmt.Errorf("unknown Metric field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *MetricMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *MetricMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *MetricMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Metric numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *MetricMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(metric.FieldPushedAt) { + fields = append(fields, metric.FieldPushedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *MetricMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *MetricMutation) ClearField(name string) error { + switch name { + case metric.FieldPushedAt: + m.ClearPushedAt() + return nil + } + return fmt.Errorf("unknown Metric nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *MetricMutation) ResetField(name string) error { + switch name { + case metric.FieldGeneratedType: + m.ResetGeneratedType() + return nil + case metric.FieldGeneratedBy: + m.ResetGeneratedBy() + return nil + case metric.FieldCollectedAt: + m.ResetCollectedAt() + return nil + case metric.FieldPushedAt: + m.ResetPushedAt() + return nil + case metric.FieldPayload: + m.ResetPayload() + return nil + } + return fmt.Errorf("unknown Metric field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *MetricMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *MetricMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *MetricMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *MetricMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *MetricMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *MetricMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *MetricMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Metric unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *MetricMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Metric edge %s", name) +} diff --git a/pkg/database/ent/predicate/predicate.go b/pkg/database/ent/predicate/predicate.go index ad2e6d3f327..8ad03e2fc48 100644 --- a/pkg/database/ent/predicate/predicate.go +++ b/pkg/database/ent/predicate/predicate.go @@ -29,3 +29,6 @@ type Machine func(*sql.Selector) // Meta is the predicate function for meta builders. type Meta func(*sql.Selector) + +// Metric is the predicate function for metric builders. 
+type Metric func(*sql.Selector) diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 242b5f5fe4a..599c4c404fc 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -30,6 +30,9 @@ func (Bouncer) Fields() []ent.Field { field.String("version").Optional().StructTag(`json:"version"`), field.Time("last_pull").Nillable().Optional().StructTag(`json:"last_pull"`), field.String("auth_type").StructTag(`json:"auth_type"`).Default(types.ApiKeyAuthType), + field.String("osname").Optional(), + field.String("osversion").Optional(), + field.String("featureflags").Optional(), } } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 7b4d97ed35c..6fdcea2d824 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -8,6 +8,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +// ItemState is defined here instead of using pkg/models/HubItem to avoid introducing a dependency +type ItemState struct { + Status string `json:"status,omitempty"` + Version string `json:"version,omitempty"` +} + // Machine holds the schema definition for the Machine entity. 
type Machine struct { ent.Schema @@ -38,6 +44,11 @@ func (Machine) Fields() []ent.Field { Default(false), field.String("status").Optional(), field.String("auth_type").Default(types.PasswordAuthType).StructTag(`json:"auth_type"`), + field.String("osname").Optional(), + field.String("osversion").Optional(), + field.String("featureflags").Optional(), + field.JSON("hubstate", map[string]ItemState{}).Optional(), + field.JSON("datasources", map[string]int64{}).Optional(), } } diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go new file mode 100644 index 00000000000..9de3f21f464 --- /dev/null +++ b/pkg/database/ent/schema/metric.go @@ -0,0 +1,43 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Metric is actually a set of metrics collected by a device +// (logprocessor, bouncer, etc) at a given time. +type Metric struct { + ent.Schema +} + +func (Metric) Fields() []ent.Field { + return []ent.Field{ + field.Enum("generated_type"). + Values("LP", "RC"). + Immutable(). + Comment("Type of the metrics source: LP=logprocessor, RC=remediation"), + field.String("generated_by"). + Immutable(). + Comment("Source of the metrics: machine id, bouncer name...\nIt must come from the auth middleware."), + field.Time("collected_at"). + Immutable(). + Comment("When the metrics are collected/calculated at the source"), + field.Time("pushed_at"). + Nillable(). + Optional(). + Comment("When the metrics are sent to the console"), + field.String("payload"). + Immutable(). + Comment("The actual metrics (item0)"), + } +} + +func (Metric) Indexes() []ent.Index { + return []ent.Index{ + // Don't store the same metrics multiple times. + index.Fields("generated_type", "generated_by", "collected_at"). 
+ Unique(), + } +} diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go index 27b39c12502..bf8221ce4a5 100644 --- a/pkg/database/ent/tx.go +++ b/pkg/database/ent/tx.go @@ -28,6 +28,8 @@ type Tx struct { Machine *MachineClient // Meta is the client for interacting with the Meta builders. Meta *MetaClient + // Metric is the client for interacting with the Metric builders. + Metric *MetricClient // lazily loaded. client *Client @@ -167,6 +169,7 @@ func (tx *Tx) init() { tx.Lock = NewLockClient(tx.config) tx.Machine = NewMachineClient(tx.config) tx.Meta = NewMetaClient(tx.config) + tx.Metric = NewMetricClient(tx.config) } // txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. From 27935f6d4cf0ce8a32ff0462c46738645db8aa6d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 1 Jul 2024 16:35:34 +0200 Subject: [PATCH 204/581] fix "cscli [machines|bouncers] delete" autocompletion (#3101) * fix "cscli [machines|bouncers] delete" autocompletion * func test --- cmd/crowdsec-cli/bouncers.go | 20 +++++++++++++++++++- cmd/crowdsec-cli/machines.go | 20 +++++++++++++++++++- test/bats/10_bouncers.bats | 12 ++++++++++++ test/bats/30_machines.bats | 12 ++++++++++++ 4 files changed, 62 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 0c96cc5eee4..3da9575146e 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -206,10 +206,28 @@ cscli bouncers add MyBouncerName --key `, return cmd } -func (cli *cliBouncers) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +func (cli *cliBouncers) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + // need to load config and db because PersistentPreRunE is not called for completions + + var err error + + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + cobra.CompError("unable 
to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + if err != nil { + cobra.CompError("unable to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + bouncers, err := cli.db.ListBouncers() if err != nil { cobra.CompError("unable to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp } ret := []string{} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 9014d3d803a..746045d0eab 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -349,10 +349,28 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return nil } -func (cli *cliMachines) deleteValid(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { +func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + // need to load config and db because PersistentPreRunE is not called for completions + + var err error + + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + if err != nil { + cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + machines, err := cli.db.ListMachines() if err != nil { cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp } ret := []string{} diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 5bf4b5358db..9e795e584b4 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -42,6 +42,18 @@ teardown() { assert_json '[]' } +@test "bouncers delete has autocompletion" { + rune -0 cscli bouncers add foo1 + rune -0 
cscli bouncers add foo2 + rune -0 cscli bouncers add bar + rune -0 cscli bouncers add baz + rune -0 cscli __complete bouncers delete 'foo' + assert_line --index 0 'foo1' + assert_line --index 1 'foo2' + refute_line 'bar' + refute_line 'baz' +} + @test "cscli bouncers list" { export API_KEY=bouncerkey rune -0 cscli bouncers add ciTestBouncer --key "$API_KEY" diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 1d65151b6c8..1af5e97dcb4 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,6 +62,18 @@ teardown() { assert_output 1 } +@test "machines delete has autocompletion" { + rune -0 cscli machines add -a -f /dev/null foo1 + rune -0 cscli machines add -a -f /dev/null foo2 + rune -0 cscli machines add -a -f /dev/null bar + rune -0 cscli machines add -a -f /dev/null baz + rune -0 cscli __complete machines delete 'foo' + assert_line --index 0 'foo1' + assert_line --index 1 'foo2' + refute_line 'bar' + refute_line 'baz' +} + @test "heartbeat is initially null" { rune -0 cscli machines add foo --auto --file /dev/null rune -0 cscli machines list -o json From b1cf83975e614a34f2fc7b26a3ba130fa3ec0cb4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:08:41 +0200 Subject: [PATCH 205/581] db refact: drop column machines.status (#3102) --- pkg/database/ent/machine.go | 13 +---- pkg/database/ent/machine/machine.go | 8 --- pkg/database/ent/machine/where.go | 80 ----------------------------- pkg/database/ent/machine_create.go | 18 ------- pkg/database/ent/machine_update.go | 52 ------------------- pkg/database/ent/migrate/schema.go | 1 - pkg/database/ent/mutation.go | 75 +-------------------------- pkg/database/ent/runtime.go | 2 +- pkg/database/ent/schema/machine.go | 1 - 9 files changed, 3 insertions(+), 247 deletions(-) diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index fddb2e6a8b3..24c9fdb57e6 100644 --- a/pkg/database/ent/machine.go +++ 
b/pkg/database/ent/machine.go @@ -39,8 +39,6 @@ type Machine struct { Version string `json:"version,omitempty"` // IsValidated holds the value of the "isValidated" field. IsValidated bool `json:"isValidated,omitempty"` - // Status holds the value of the "status" field. - Status string `json:"status,omitempty"` // AuthType holds the value of the "auth_type" field. AuthType string `json:"auth_type"` // Osname holds the value of the "osname" field. @@ -88,7 +86,7 @@ func (*Machine) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullBool) case machine.FieldID: values[i] = new(sql.NullInt64) - case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldStatus, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: + case machine.FieldMachineId, machine.FieldPassword, machine.FieldIpAddress, machine.FieldScenarios, machine.FieldVersion, machine.FieldAuthType, machine.FieldOsname, machine.FieldOsversion, machine.FieldFeatureflags: values[i] = new(sql.NullString) case machine.FieldCreatedAt, machine.FieldUpdatedAt, machine.FieldLastPush, machine.FieldLastHeartbeat: values[i] = new(sql.NullTime) @@ -175,12 +173,6 @@ func (m *Machine) assignValues(columns []string, values []any) error { } else if value.Valid { m.IsValidated = value.Bool } - case machine.FieldStatus: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field status", values[i]) - } else if value.Valid { - m.Status = value.String - } case machine.FieldAuthType: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field auth_type", values[i]) @@ -295,9 +287,6 @@ func (m *Machine) String() string { builder.WriteString("isValidated=") builder.WriteString(fmt.Sprintf("%v", m.IsValidated)) builder.WriteString(", ") - builder.WriteString("status=") - builder.WriteString(m.Status) - 
builder.WriteString(", ") builder.WriteString("auth_type=") builder.WriteString(m.AuthType) builder.WriteString(", ") diff --git a/pkg/database/ent/machine/machine.go b/pkg/database/ent/machine/machine.go index 179059edd4d..009e6e19c35 100644 --- a/pkg/database/ent/machine/machine.go +++ b/pkg/database/ent/machine/machine.go @@ -34,8 +34,6 @@ const ( FieldVersion = "version" // FieldIsValidated holds the string denoting the isvalidated field in the database. FieldIsValidated = "is_validated" - // FieldStatus holds the string denoting the status field in the database. - FieldStatus = "status" // FieldAuthType holds the string denoting the auth_type field in the database. FieldAuthType = "auth_type" // FieldOsname holds the string denoting the osname field in the database. @@ -74,7 +72,6 @@ var Columns = []string{ FieldScenarios, FieldVersion, FieldIsValidated, - FieldStatus, FieldAuthType, FieldOsname, FieldOsversion, @@ -168,11 +165,6 @@ func ByIsValidated(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldIsValidated, opts...).ToFunc() } -// ByStatus orders the results by the status field. -func ByStatus(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldStatus, opts...).ToFunc() -} - // ByAuthType orders the results by the auth_type field. func ByAuthType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldAuthType, opts...).ToFunc() diff --git a/pkg/database/ent/machine/where.go b/pkg/database/ent/machine/where.go index aca66135f5c..de523510f33 100644 --- a/pkg/database/ent/machine/where.go +++ b/pkg/database/ent/machine/where.go @@ -105,11 +105,6 @@ func IsValidated(v bool) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldIsValidated, v)) } -// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. 
-func Status(v string) predicate.Machine { - return predicate.Machine(sql.FieldEQ(FieldStatus, v)) -} - // AuthType applies equality check predicate on the "auth_type" field. It's identical to AuthTypeEQ. func AuthType(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) @@ -665,81 +660,6 @@ func IsValidatedNEQ(v bool) predicate.Machine { return predicate.Machine(sql.FieldNEQ(FieldIsValidated, v)) } -// StatusEQ applies the EQ predicate on the "status" field. -func StatusEQ(v string) predicate.Machine { - return predicate.Machine(sql.FieldEQ(FieldStatus, v)) -} - -// StatusNEQ applies the NEQ predicate on the "status" field. -func StatusNEQ(v string) predicate.Machine { - return predicate.Machine(sql.FieldNEQ(FieldStatus, v)) -} - -// StatusIn applies the In predicate on the "status" field. -func StatusIn(vs ...string) predicate.Machine { - return predicate.Machine(sql.FieldIn(FieldStatus, vs...)) -} - -// StatusNotIn applies the NotIn predicate on the "status" field. -func StatusNotIn(vs ...string) predicate.Machine { - return predicate.Machine(sql.FieldNotIn(FieldStatus, vs...)) -} - -// StatusGT applies the GT predicate on the "status" field. -func StatusGT(v string) predicate.Machine { - return predicate.Machine(sql.FieldGT(FieldStatus, v)) -} - -// StatusGTE applies the GTE predicate on the "status" field. -func StatusGTE(v string) predicate.Machine { - return predicate.Machine(sql.FieldGTE(FieldStatus, v)) -} - -// StatusLT applies the LT predicate on the "status" field. -func StatusLT(v string) predicate.Machine { - return predicate.Machine(sql.FieldLT(FieldStatus, v)) -} - -// StatusLTE applies the LTE predicate on the "status" field. -func StatusLTE(v string) predicate.Machine { - return predicate.Machine(sql.FieldLTE(FieldStatus, v)) -} - -// StatusContains applies the Contains predicate on the "status" field. 
-func StatusContains(v string) predicate.Machine { - return predicate.Machine(sql.FieldContains(FieldStatus, v)) -} - -// StatusHasPrefix applies the HasPrefix predicate on the "status" field. -func StatusHasPrefix(v string) predicate.Machine { - return predicate.Machine(sql.FieldHasPrefix(FieldStatus, v)) -} - -// StatusHasSuffix applies the HasSuffix predicate on the "status" field. -func StatusHasSuffix(v string) predicate.Machine { - return predicate.Machine(sql.FieldHasSuffix(FieldStatus, v)) -} - -// StatusIsNil applies the IsNil predicate on the "status" field. -func StatusIsNil() predicate.Machine { - return predicate.Machine(sql.FieldIsNull(FieldStatus)) -} - -// StatusNotNil applies the NotNil predicate on the "status" field. -func StatusNotNil() predicate.Machine { - return predicate.Machine(sql.FieldNotNull(FieldStatus)) -} - -// StatusEqualFold applies the EqualFold predicate on the "status" field. -func StatusEqualFold(v string) predicate.Machine { - return predicate.Machine(sql.FieldEqualFold(FieldStatus, v)) -} - -// StatusContainsFold applies the ContainsFold predicate on the "status" field. -func StatusContainsFold(v string) predicate.Machine { - return predicate.Machine(sql.FieldContainsFold(FieldStatus, v)) -} - // AuthTypeEQ applies the EQ predicate on the "auth_type" field. func AuthTypeEQ(v string) predicate.Machine { return predicate.Machine(sql.FieldEQ(FieldAuthType, v)) diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index 4ae0e5a9d1f..a68f7a23966 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -138,20 +138,6 @@ func (mc *MachineCreate) SetNillableIsValidated(b *bool) *MachineCreate { return mc } -// SetStatus sets the "status" field. -func (mc *MachineCreate) SetStatus(s string) *MachineCreate { - mc.mutation.SetStatus(s) - return mc -} - -// SetNillableStatus sets the "status" field if the given value is not nil. 
-func (mc *MachineCreate) SetNillableStatus(s *string) *MachineCreate { - if s != nil { - mc.SetStatus(*s) - } - return mc -} - // SetAuthType sets the "auth_type" field. func (mc *MachineCreate) SetAuthType(s string) *MachineCreate { mc.mutation.SetAuthType(s) @@ -386,10 +372,6 @@ func (mc *MachineCreate) createSpec() (*Machine, *sqlgraph.CreateSpec) { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) _node.IsValidated = value } - if value, ok := mc.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - _node.Status = value - } if value, ok := mc.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) _node.AuthType = value diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index aa0f02542c1..c9a4f0b72ff 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -158,26 +158,6 @@ func (mu *MachineUpdate) SetNillableIsValidated(b *bool) *MachineUpdate { return mu } -// SetStatus sets the "status" field. -func (mu *MachineUpdate) SetStatus(s string) *MachineUpdate { - mu.mutation.SetStatus(s) - return mu -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (mu *MachineUpdate) SetNillableStatus(s *string) *MachineUpdate { - if s != nil { - mu.SetStatus(*s) - } - return mu -} - -// ClearStatus clears the value of the "status" field. -func (mu *MachineUpdate) ClearStatus() *MachineUpdate { - mu.mutation.ClearStatus() - return mu -} - // SetAuthType sets the "auth_type" field. 
func (mu *MachineUpdate) SetAuthType(s string) *MachineUpdate { mu.mutation.SetAuthType(s) @@ -411,12 +391,6 @@ func (mu *MachineUpdate) sqlSave(ctx context.Context) (n int, err error) { if value, ok := mu.mutation.IsValidated(); ok { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } - if value, ok := mu.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - } - if mu.mutation.StatusCleared() { - _spec.ClearField(machine.FieldStatus, field.TypeString) - } if value, ok := mu.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } @@ -643,26 +617,6 @@ func (muo *MachineUpdateOne) SetNillableIsValidated(b *bool) *MachineUpdateOne { return muo } -// SetStatus sets the "status" field. -func (muo *MachineUpdateOne) SetStatus(s string) *MachineUpdateOne { - muo.mutation.SetStatus(s) - return muo -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (muo *MachineUpdateOne) SetNillableStatus(s *string) *MachineUpdateOne { - if s != nil { - muo.SetStatus(*s) - } - return muo -} - -// ClearStatus clears the value of the "status" field. -func (muo *MachineUpdateOne) ClearStatus() *MachineUpdateOne { - muo.mutation.ClearStatus() - return muo -} - // SetAuthType sets the "auth_type" field. 
func (muo *MachineUpdateOne) SetAuthType(s string) *MachineUpdateOne { muo.mutation.SetAuthType(s) @@ -926,12 +880,6 @@ func (muo *MachineUpdateOne) sqlSave(ctx context.Context) (_node *Machine, err e if value, ok := muo.mutation.IsValidated(); ok { _spec.SetField(machine.FieldIsValidated, field.TypeBool, value) } - if value, ok := muo.mutation.Status(); ok { - _spec.SetField(machine.FieldStatus, field.TypeString, value) - } - if muo.mutation.StatusCleared() { - _spec.ClearField(machine.FieldStatus, field.TypeString) - } if value, ok := muo.mutation.AuthType(); ok { _spec.SetField(machine.FieldAuthType, field.TypeString, value) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 5c32c472403..5b436830192 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -205,7 +205,6 @@ var ( {Name: "scenarios", Type: field.TypeString, Nullable: true, Size: 100000}, {Name: "version", Type: field.TypeString, Nullable: true}, {Name: "is_validated", Type: field.TypeBool, Default: false}, - {Name: "status", Type: field.TypeString, Nullable: true}, {Name: "auth_type", Type: field.TypeString, Default: "password"}, {Name: "osname", Type: field.TypeString, Nullable: true}, {Name: "osversion", Type: field.TypeString, Nullable: true}, diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 8d109ece379..045ecb3c9af 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -6538,7 +6538,6 @@ type MachineMutation struct { scenarios *string version *string isValidated *bool - status *string auth_type *string osname *string osversion *string @@ -7064,55 +7063,6 @@ func (m *MachineMutation) ResetIsValidated() { m.isValidated = nil } -// SetStatus sets the "status" field. -func (m *MachineMutation) SetStatus(s string) { - m.status = &s -} - -// Status returns the value of the "status" field in the mutation. 
-func (m *MachineMutation) Status() (r string, exists bool) { - v := m.status - if v == nil { - return - } - return *v, true -} - -// OldStatus returns the old "status" field's value of the Machine entity. -// If the Machine object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *MachineMutation) OldStatus(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldStatus is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldStatus requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldStatus: %w", err) - } - return oldValue.Status, nil -} - -// ClearStatus clears the value of the "status" field. -func (m *MachineMutation) ClearStatus() { - m.status = nil - m.clearedFields[machine.FieldStatus] = struct{}{} -} - -// StatusCleared returns if the "status" field was cleared in this mutation. -func (m *MachineMutation) StatusCleared() bool { - _, ok := m.clearedFields[machine.FieldStatus] - return ok -} - -// ResetStatus resets all changes to the "status" field. -func (m *MachineMutation) ResetStatus() { - m.status = nil - delete(m.clearedFields, machine.FieldStatus) -} - // SetAuthType sets the "auth_type" field. func (m *MachineMutation) SetAuthType(s string) { m.auth_type = &s @@ -7482,7 +7432,7 @@ func (m *MachineMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *MachineMutation) Fields() []string { - fields := make([]string, 0, 17) + fields := make([]string, 0, 16) if m.created_at != nil { fields = append(fields, machine.FieldCreatedAt) } @@ -7513,9 +7463,6 @@ func (m *MachineMutation) Fields() []string { if m.isValidated != nil { fields = append(fields, machine.FieldIsValidated) } - if m.status != nil { - fields = append(fields, machine.FieldStatus) - } if m.auth_type != nil { fields = append(fields, machine.FieldAuthType) } @@ -7562,8 +7509,6 @@ func (m *MachineMutation) Field(name string) (ent.Value, bool) { return m.Version() case machine.FieldIsValidated: return m.IsValidated() - case machine.FieldStatus: - return m.Status() case machine.FieldAuthType: return m.AuthType() case machine.FieldOsname: @@ -7605,8 +7550,6 @@ func (m *MachineMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldVersion(ctx) case machine.FieldIsValidated: return m.OldIsValidated(ctx) - case machine.FieldStatus: - return m.OldStatus(ctx) case machine.FieldAuthType: return m.OldAuthType(ctx) case machine.FieldOsname: @@ -7698,13 +7641,6 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error { } m.SetIsValidated(v) return nil - case machine.FieldStatus: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetStatus(v) - return nil case machine.FieldAuthType: v, ok := value.(string) if !ok { @@ -7789,9 +7725,6 @@ func (m *MachineMutation) ClearedFields() []string { if m.FieldCleared(machine.FieldVersion) { fields = append(fields, machine.FieldVersion) } - if m.FieldCleared(machine.FieldStatus) { - fields = append(fields, machine.FieldStatus) - } if m.FieldCleared(machine.FieldOsname) { fields = append(fields, machine.FieldOsname) } @@ -7833,9 +7766,6 @@ func (m *MachineMutation) ClearField(name string) error { case machine.FieldVersion: m.ClearVersion() return nil - case machine.FieldStatus: - m.ClearStatus() - return nil case 
machine.FieldOsname: m.ClearOsname() return nil @@ -7889,9 +7819,6 @@ func (m *MachineMutation) ResetField(name string) error { case machine.FieldIsValidated: m.ResetIsValidated() return nil - case machine.FieldStatus: - m.ResetStatus() - return nil case machine.FieldAuthType: m.ResetAuthType() return nil diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 8d50d916029..15413490633 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -151,7 +151,7 @@ func init() { // machine.DefaultIsValidated holds the default value on creation for the isValidated field. machine.DefaultIsValidated = machineDescIsValidated.Default.(bool) // machineDescAuthType is the schema descriptor for auth_type field. - machineDescAuthType := machineFields[11].Descriptor() + machineDescAuthType := machineFields[10].Descriptor() // machine.DefaultAuthType holds the default value on creation for the auth_type field. machine.DefaultAuthType = machineDescAuthType.Default.(string) metaFields := schema.Meta{}.Fields() diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 6fdcea2d824..071586f0c84 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -42,7 +42,6 @@ func (Machine) Fields() []ent.Field { field.String("version").Optional(), field.Bool("isValidated"). 
Default(false), - field.String("status").Optional(), field.String("auth_type").Default(types.PasswordAuthType).StructTag(`json:"auth_type"`), field.String("osname").Optional(), field.String("osversion").Optional(), From 4635d04b22308b71c061bdd36f88078ea87a2345 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:49:47 +0200 Subject: [PATCH 206/581] lapi detailed metrics: API spec + models (#3100) * lapi detailed metrics: API spec + models * models+db: split hubstate by item type --- pkg/database/ent/machine.go | 2 +- pkg/database/ent/machine_create.go | 2 +- pkg/database/ent/machine_update.go | 4 +- pkg/database/ent/mutation.go | 10 +- pkg/database/ent/schema/machine.go | 2 +- pkg/models/all_metrics.go | 234 +++++++++++++++++++ pkg/models/base_metrics.go | 210 +++++++++++++++++ pkg/models/console_options.go | 27 +++ pkg/models/detailed_metrics.go | 173 ++++++++++++++ pkg/models/hub_item.go | 56 +++++ pkg/models/hub_items.go | 83 +++++++ pkg/models/lapi_metrics.go | 157 +++++++++++++ pkg/models/localapi_swagger.yaml | 220 +++++++++++++++++ pkg/models/log_processors_metrics.go | 219 +++++++++++++++++ pkg/models/metrics_detail_item.go | 158 +++++++++++++ pkg/models/metrics_labels.go | 27 +++ pkg/models/metrics_meta.go | 88 +++++++ pkg/models/o_sversion.go | 88 +++++++ pkg/models/remediation_components_metrics.go | 139 +++++++++++ pkg/models/success_response.go | 73 ++++++ 20 files changed, 1962 insertions(+), 10 deletions(-) create mode 100644 pkg/models/all_metrics.go create mode 100644 pkg/models/base_metrics.go create mode 100644 pkg/models/console_options.go create mode 100644 pkg/models/detailed_metrics.go create mode 100644 pkg/models/hub_item.go create mode 100644 pkg/models/hub_items.go create mode 100644 pkg/models/lapi_metrics.go create mode 100644 pkg/models/log_processors_metrics.go create mode 100644 pkg/models/metrics_detail_item.go create mode 100644 pkg/models/metrics_labels.go create mode 100644 
pkg/models/metrics_meta.go create mode 100644 pkg/models/o_sversion.go create mode 100644 pkg/models/remediation_components_metrics.go create mode 100644 pkg/models/success_response.go diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 24c9fdb57e6..76127065791 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -48,7 +48,7 @@ type Machine struct { // Featureflags holds the value of the "featureflags" field. Featureflags string `json:"featureflags,omitempty"` // Hubstate holds the value of the "hubstate" field. - Hubstate map[string]schema.ItemState `json:"hubstate,omitempty"` + Hubstate map[string][]schema.ItemState `json:"hubstate,omitempty"` // Datasources holds the value of the "datasources" field. Datasources map[string]int64 `json:"datasources,omitempty"` // Edges holds the relations/edges for other nodes in the graph. diff --git a/pkg/database/ent/machine_create.go b/pkg/database/ent/machine_create.go index a68f7a23966..fba8400798c 100644 --- a/pkg/database/ent/machine_create.go +++ b/pkg/database/ent/machine_create.go @@ -195,7 +195,7 @@ func (mc *MachineCreate) SetNillableFeatureflags(s *string) *MachineCreate { } // SetHubstate sets the "hubstate" field. -func (mc *MachineCreate) SetHubstate(ms map[string]schema.ItemState) *MachineCreate { +func (mc *MachineCreate) SetHubstate(ms map[string][]schema.ItemState) *MachineCreate { mc.mutation.SetHubstate(ms) return mc } diff --git a/pkg/database/ent/machine_update.go b/pkg/database/ent/machine_update.go index c9a4f0b72ff..531baabf0d6 100644 --- a/pkg/database/ent/machine_update.go +++ b/pkg/database/ent/machine_update.go @@ -233,7 +233,7 @@ func (mu *MachineUpdate) ClearFeatureflags() *MachineUpdate { } // SetHubstate sets the "hubstate" field. 
-func (mu *MachineUpdate) SetHubstate(ms map[string]schema.ItemState) *MachineUpdate { +func (mu *MachineUpdate) SetHubstate(ms map[string][]schema.ItemState) *MachineUpdate { mu.mutation.SetHubstate(ms) return mu } @@ -692,7 +692,7 @@ func (muo *MachineUpdateOne) ClearFeatureflags() *MachineUpdateOne { } // SetHubstate sets the "hubstate" field. -func (muo *MachineUpdateOne) SetHubstate(ms map[string]schema.ItemState) *MachineUpdateOne { +func (muo *MachineUpdateOne) SetHubstate(ms map[string][]schema.ItemState) *MachineUpdateOne { muo.mutation.SetHubstate(ms) return muo } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 045ecb3c9af..573e0b5daa9 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -6542,7 +6542,7 @@ type MachineMutation struct { osname *string osversion *string featureflags *string - hubstate *map[string]schema.ItemState + hubstate *map[string][]schema.ItemState datasources *map[string]int64 clearedFields map[string]struct{} alerts map[int]struct{} @@ -7247,12 +7247,12 @@ func (m *MachineMutation) ResetFeatureflags() { } // SetHubstate sets the "hubstate" field. -func (m *MachineMutation) SetHubstate(ms map[string]schema.ItemState) { +func (m *MachineMutation) SetHubstate(ms map[string][]schema.ItemState) { m.hubstate = &ms } // Hubstate returns the value of the "hubstate" field in the mutation. -func (m *MachineMutation) Hubstate() (r map[string]schema.ItemState, exists bool) { +func (m *MachineMutation) Hubstate() (r map[string][]schema.ItemState, exists bool) { v := m.hubstate if v == nil { return @@ -7263,7 +7263,7 @@ func (m *MachineMutation) Hubstate() (r map[string]schema.ItemState, exists bool // OldHubstate returns the old "hubstate" field's value of the Machine entity. // If the Machine object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *MachineMutation) OldHubstate(ctx context.Context) (v map[string]schema.ItemState, err error) { +func (m *MachineMutation) OldHubstate(ctx context.Context) (v map[string][]schema.ItemState, err error) { if !m.op.Is(OpUpdateOne) { return v, errors.New("OldHubstate is only allowed on UpdateOne operations") } @@ -7670,7 +7670,7 @@ func (m *MachineMutation) SetField(name string, value ent.Value) error { m.SetFeatureflags(v) return nil case machine.FieldHubstate: - v, ok := value.(map[string]schema.ItemState) + v, ok := value.(map[string][]schema.ItemState) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 071586f0c84..1566cf70b32 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -46,7 +46,7 @@ func (Machine) Fields() []ent.Field { field.String("osname").Optional(), field.String("osversion").Optional(), field.String("featureflags").Optional(), - field.JSON("hubstate", map[string]ItemState{}).Optional(), + field.JSON("hubstate", map[string][]ItemState{}).Optional(), field.JSON("datasources", map[string]int64{}).Optional(), } } diff --git a/pkg/models/all_metrics.go b/pkg/models/all_metrics.go new file mode 100644 index 00000000000..5865070e8ef --- /dev/null +++ b/pkg/models/all_metrics.go @@ -0,0 +1,234 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// AllMetrics AllMetrics +// +// swagger:model AllMetrics +type AllMetrics struct { + + // lapi + Lapi *LapiMetrics `json:"lapi,omitempty"` + + // log processors metrics + LogProcessors []*LogProcessorsMetrics `json:"log_processors"` + + // remediation components metrics + RemediationComponents []*RemediationComponentsMetrics `json:"remediation_components"` +} + +// Validate validates this all metrics +func (m *AllMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLapi(formats); err != nil { + res = append(res, err) + } + + if err := m.validateLogProcessors(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRemediationComponents(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *AllMetrics) validateLapi(formats strfmt.Registry) error { + if swag.IsZero(m.Lapi) { // not required + return nil + } + + if m.Lapi != nil { + if err := m.Lapi.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("lapi") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("lapi") + } + return err + } + } + + return nil +} + +func (m *AllMetrics) validateLogProcessors(formats strfmt.Registry) error { + if swag.IsZero(m.LogProcessors) { // not required + return nil + } + + for i := 0; i < len(m.LogProcessors); i++ { + if swag.IsZero(m.LogProcessors[i]) { // not required + continue + } + + if m.LogProcessors[i] != nil { + if err := m.LogProcessors[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("log_processors" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *AllMetrics) validateRemediationComponents(formats strfmt.Registry) error { + if swag.IsZero(m.RemediationComponents) { // not required + return nil + } + + for i := 0; i < len(m.RemediationComponents); i++ { + if swag.IsZero(m.RemediationComponents[i]) { // not required + continue + } + + if m.RemediationComponents[i] != nil { + if err := m.RemediationComponents[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this all metrics based on the context it is used +func (m *AllMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLapi(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateLogProcessors(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateRemediationComponents(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AllMetrics) contextValidateLapi(ctx context.Context, formats strfmt.Registry) error { + + if m.Lapi != nil { + + if swag.IsZero(m.Lapi) { // not required + return nil + } + + if err := m.Lapi.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("lapi") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("lapi") + } + return err + } + } + + return nil +} + +func (m *AllMetrics) contextValidateLogProcessors(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.LogProcessors); i++ { + + if m.LogProcessors[i] != nil { + + if swag.IsZero(m.LogProcessors[i]) { // not required + return nil + } + + if err := m.LogProcessors[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("log_processors" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *AllMetrics) contextValidateRemediationComponents(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.RemediationComponents); i++ { + + if m.RemediationComponents[i] != nil { + + if swag.IsZero(m.RemediationComponents[i]) { // not required + return nil + } + + if err := m.RemediationComponents[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("remediation_components" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("remediation_components" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *AllMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AllMetrics) UnmarshalBinary(b []byte) error { + var res AllMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/base_metrics.go b/pkg/models/base_metrics.go new file mode 100644 index 00000000000..154d9004afe --- /dev/null +++ b/pkg/models/base_metrics.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BaseMetrics BaseMetrics +// +// swagger:model BaseMetrics +type BaseMetrics struct { + + // feature flags (expected to be empty for remediation components) + FeatureFlags []string `json:"feature_flags"` + + // metrics details + Metrics []*DetailedMetrics `json:"metrics"` + + // os + Os *OSversion `json:"os,omitempty"` + + // UTC timestamp of the startup of the software + // Required: true + UtcStartupTimestamp *int64 `json:"utc_startup_timestamp"` + + // version of the remediation component + // Required: true + Version *string `json:"version"` +} + +// Validate validates this base metrics +func (m *BaseMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMetrics(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOs(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUtcStartupTimestamp(formats); err != nil { + res = append(res, err) + } + + if err := 
m.validateVersion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BaseMetrics) validateMetrics(formats strfmt.Registry) error { + if swag.IsZero(m.Metrics) { // not required + return nil + } + + for i := 0; i < len(m.Metrics); i++ { + if swag.IsZero(m.Metrics[i]) { // not required + continue + } + + if m.Metrics[i] != nil { + if err := m.Metrics[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("metrics" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("metrics" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BaseMetrics) validateOs(formats strfmt.Registry) error { + if swag.IsZero(m.Os) { // not required + return nil + } + + if m.Os != nil { + if err := m.Os.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("os") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("os") + } + return err + } + } + + return nil +} + +func (m *BaseMetrics) validateUtcStartupTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("utc_startup_timestamp", "body", m.UtcStartupTimestamp); err != nil { + return err + } + + return nil +} + +func (m *BaseMetrics) validateVersion(formats strfmt.Registry) error { + + if err := validate.Required("version", "body", m.Version); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this base metrics based on the context it is used +func (m *BaseMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMetrics(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateOs(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BaseMetrics) contextValidateMetrics(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Metrics); i++ { + + if m.Metrics[i] != nil { + + if swag.IsZero(m.Metrics[i]) { // not required + return nil + } + + if err := m.Metrics[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("metrics" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("metrics" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BaseMetrics) contextValidateOs(ctx context.Context, formats strfmt.Registry) error { + + if m.Os != nil { + + if swag.IsZero(m.Os) { // not required + return nil + } + + if err := m.Os.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("os") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("os") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BaseMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BaseMetrics) UnmarshalBinary(b []byte) error { + var res BaseMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/console_options.go b/pkg/models/console_options.go new file mode 100644 index 00000000000..87983ab1762 --- /dev/null +++ b/pkg/models/console_options.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// ConsoleOptions ConsoleOptions +// +// swagger:model ConsoleOptions +type ConsoleOptions []string + +// Validate validates this console options +func (m ConsoleOptions) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this console options based on context it is used +func (m ConsoleOptions) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/pkg/models/detailed_metrics.go b/pkg/models/detailed_metrics.go new file mode 100644 index 00000000000..9e605ed8c88 --- /dev/null +++ b/pkg/models/detailed_metrics.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// DetailedMetrics DetailedMetrics +// +// swagger:model DetailedMetrics +type DetailedMetrics struct { + + // items + // Required: true + Items []*MetricsDetailItem `json:"items"` + + // meta + // Required: true + Meta *MetricsMeta `json:"meta"` +} + +// Validate validates this detailed metrics +func (m *DetailedMetrics) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateItems(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMeta(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *DetailedMetrics) validateItems(formats strfmt.Registry) error { + + if err := validate.Required("items", "body", m.Items); err != nil { + return err + } + + for i := 0; i < len(m.Items); i++ { + if swag.IsZero(m.Items[i]) { // not required + continue + } + + if m.Items[i] != nil { + if err := m.Items[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("items" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("items" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *DetailedMetrics) validateMeta(formats strfmt.Registry) error { + + if err := validate.Required("meta", "body", m.Meta); err != nil { + return err + } + + if m.Meta != nil { + if err := m.Meta.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +// ContextValidate validate this detailed metrics based on the context it is used +func (m *DetailedMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateItems(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMeta(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *DetailedMetrics) contextValidateItems(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Items); i++ { + + if m.Items[i] != nil { + + if swag.IsZero(m.Items[i]) { // not required + return nil + } + + if err := m.Items[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("items" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("items" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *DetailedMetrics) contextValidateMeta(ctx context.Context, formats strfmt.Registry) error { + + if m.Meta != nil { + + if err := m.Meta.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meta") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("meta") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *DetailedMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *DetailedMetrics) UnmarshalBinary(b []byte) error { + var res DetailedMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/hub_item.go b/pkg/models/hub_item.go new file mode 100644 index 00000000000..c2bac3702c2 --- /dev/null +++ b/pkg/models/hub_item.go @@ -0,0 +1,56 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// HubItem HubItem +// +// swagger:model HubItem +type HubItem struct { + + // name of the hub item + Name string `json:"name,omitempty"` + + // status of the hub item (official, custom, tainted, etc.) 
+ Status string `json:"status,omitempty"` + + // version of the hub item + Version string `json:"version,omitempty"` +} + +// Validate validates this hub item +func (m *HubItem) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this hub item based on context it is used +func (m *HubItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *HubItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *HubItem) UnmarshalBinary(b []byte) error { + var res HubItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/hub_items.go b/pkg/models/hub_items.go new file mode 100644 index 00000000000..82388d5b97e --- /dev/null +++ b/pkg/models/hub_items.go @@ -0,0 +1,83 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// HubItems HubItems +// +// swagger:model HubItems +type HubItems map[string][]HubItem + +// Validate validates this hub items +func (m HubItems) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.Required(k, "body", m[k]); err != nil { + return err + } + + for i := 0; i < len(m[k]); i++ { + + if err := m[k][i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(k + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(k + "." 
+ strconv.Itoa(i)) + } + return err + } + + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this hub items based on the context it is used +func (m HubItems) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for k := range m { + + for i := 0; i < len(m[k]); i++ { + + if swag.IsZero(m[k][i]) { // not required + return nil + } + + if err := m[k][i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(k + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(k + "." + strconv.Itoa(i)) + } + return err + } + + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/lapi_metrics.go b/pkg/models/lapi_metrics.go new file mode 100644 index 00000000000..b56d92ef1f8 --- /dev/null +++ b/pkg/models/lapi_metrics.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// LapiMetrics LapiMetrics +// +// swagger:model LapiMetrics +type LapiMetrics struct { + BaseMetrics + + // console options + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *LapiMetrics) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.ConsoleOptions = dataAO1.ConsoleOptions + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m LapiMetrics) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + ConsoleOptions ConsoleOptions `json:"console_options,omitempty"` + } + + dataAO1.ConsoleOptions = m.ConsoleOptions + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this lapi metrics +func (m *LapiMetrics) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateConsoleOptions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LapiMetrics) validateConsoleOptions(formats strfmt.Registry) error { + + if swag.IsZero(m.ConsoleOptions) { // not required + return nil + } + + if err := m.ConsoleOptions.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("console_options") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("console_options") + } + return err + } + + return nil +} + +// ContextValidate validate this lapi metrics based on the context it is used +func (m *LapiMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateConsoleOptions(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *LapiMetrics) contextValidateConsoleOptions(ctx context.Context, formats strfmt.Registry) error { + + if err := m.ConsoleOptions.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("console_options") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("console_options") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LapiMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LapiMetrics) UnmarshalBinary(b []byte) error { + var res LapiMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index d167da9b6af..d726f452a16 100644 --- a/pkg/models/localapi_swagger.yaml +++ 
b/pkg/models/localapi_swagger.yaml @@ -684,6 +684,36 @@ paths: $ref: "#/definitions/ErrorResponse" security: - JWTAuthorizer: [] + /usage-metrics: + post: + description: Post usage metrics from a LP or a bouncer + summary: Send usage metrics + tags: + - Remediation component + - watchers + operationId: usage-metrics + produces: + - application/json + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/AllMetrics' + description: 'All metrics' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/SuccessResponse' + headers: {} + '400': + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - APIKeyAuthorizer: [] + - JWTAuthorizer: [] definitions: WatcherRegistrationRequest: title: WatcherRegistrationRequest @@ -994,6 +1024,186 @@ definitions: type: string value: type: string + RemediationComponentsMetrics: + title: RemediationComponentsMetrics + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + type: + type: string + description: type of the remediation component + name: + type: string + description: name of the remediation component + last_pull: + type: integer + description: last pull date + LogProcessorsMetrics: + title: LogProcessorsMetrics + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + hub_items: + $ref: '#/definitions/HubItems' + datasources: + type: object + description: Number of datasources per type + additionalProperties: + type: integer + name: + type: string + description: name of the log processor + last_push: + type: integer + description: last push date + last_update: + type: integer + description: last update date + required: + - hub_items + - datasources + LapiMetrics: + title: LapiMetrics + type: object + allOf: + - $ref: '#/definitions/BaseMetrics' + - properties: + console_options: + $ref: '#/definitions/ConsoleOptions' + AllMetrics: + title: AllMetrics + type: object + 
properties: + remediation_components: + type: array + items: + $ref: '#/definitions/RemediationComponentsMetrics' + description: remediation components metrics + log_processors: + type: array + items: + $ref: '#/definitions/LogProcessorsMetrics' + description: log processors metrics + lapi: + $ref: '#/definitions/LapiMetrics' + BaseMetrics: + title: BaseMetrics + type: object + properties: + version: + type: string + description: version of the remediation component + os: + $ref: '#/definitions/OSversion' + metrics: + type: array + items: + $ref: '#/definitions/DetailedMetrics' + description: metrics details + feature_flags: + type: array + items: + type: string + description: feature flags (expected to be empty for remediation components) + utc_startup_timestamp: + type: integer + description: UTC timestamp of the startup of the software + required: + - version + - utc_startup_timestamp + OSversion: + title: OSversion + type: object + properties: + name: + type: string + description: name of the OS + version: + type: string + description: version of the OS + required: + - name + - version + DetailedMetrics: + type: object + title: DetailedMetrics + properties: + items: + type: array + items: + $ref: '#/definitions/MetricsDetailItem' + meta: + $ref: '#/definitions/MetricsMeta' + required: + - meta + - items + MetricsDetailItem: + title: MetricsDetailItem + type: object + properties: + name: + type: string + description: name of the metric + value: + type: number + description: value of the metric + unit: + type: string + description: unit of the metric + labels: + $ref: '#/definitions/MetricsLabels' + description: labels of the metric + required: + - name + - value + - unit + MetricsMeta: + title: MetricsMeta + type: object + properties: + window_size_seconds: + type: integer + description: Size, in seconds, of the window used to compute the metric + utc_now_timestamp: + type: integer + description: UTC timestamp of the current time + required: + - 
window_size_seconds + - utc_now_timestamp + MetricsLabels: + title: MetricsLabels + type: object + additionalProperties: + type: string + description: label of the metric + ConsoleOptions: + title: ConsoleOptions + type: array + items: + type: string + description: enabled console options + HubItems: + title: HubItems + type: object + additionalProperties: + type: array + items: + $ref: '#/definitions/HubItem' + HubItem: + title: HubItem + type: object + properties: + name: + type: string + description: name of the hub item + version: + type: string + description: version of the hub item + status: + type: string + description: status of the hub item (official, custom, tainted, etc.) ErrorResponse: type: "object" required: @@ -1007,6 +1217,16 @@ definitions: description: "more detail on individual errors" title: "error response" description: "error response return by the API" + SuccessResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "message" + title: "success response" + description: "success response return by the API" tags: - name: Remediation component description: 'Operations about decisions : bans, captcha, rate-limit etc.' diff --git a/pkg/models/log_processors_metrics.go b/pkg/models/log_processors_metrics.go new file mode 100644 index 00000000000..05b688fb994 --- /dev/null +++ b/pkg/models/log_processors_metrics.go @@ -0,0 +1,219 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// LogProcessorsMetrics LogProcessorsMetrics +// +// swagger:model LogProcessorsMetrics +type LogProcessorsMetrics struct { + BaseMetrics + + // Number of datasources per type + // Required: true + Datasources map[string]int64 `json:"datasources"` + + // hub items + // Required: true + HubItems HubItems `json:"hub_items"` + + // last push date + LastPush int64 `json:"last_push,omitempty"` + + // last update date + LastUpdate int64 `json:"last_update,omitempty"` + + // name of the log processor + Name string `json:"name,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *LogProcessorsMetrics) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + Datasources map[string]int64 `json:"datasources"` + + HubItems HubItems `json:"hub_items"` + + LastPush int64 `json:"last_push,omitempty"` + + LastUpdate int64 `json:"last_update,omitempty"` + + Name string `json:"name,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.Datasources = dataAO1.Datasources + + m.HubItems = dataAO1.HubItems + + m.LastPush = dataAO1.LastPush + + m.LastUpdate = dataAO1.LastUpdate + + m.Name = dataAO1.Name + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m LogProcessorsMetrics) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + Datasources map[string]int64 `json:"datasources"` + + HubItems HubItems `json:"hub_items"` + + LastPush int64 
`json:"last_push,omitempty"` + + LastUpdate int64 `json:"last_update,omitempty"` + + Name string `json:"name,omitempty"` + } + + dataAO1.Datasources = m.Datasources + + dataAO1.HubItems = m.HubItems + + dataAO1.LastPush = m.LastPush + + dataAO1.LastUpdate = m.LastUpdate + + dataAO1.Name = m.Name + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this log processors metrics +func (m *LogProcessorsMetrics) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDatasources(formats); err != nil { + res = append(res, err) + } + + if err := m.validateHubItems(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogProcessorsMetrics) validateDatasources(formats strfmt.Registry) error { + + if err := validate.Required("datasources", "body", m.Datasources); err != nil { + return err + } + + return nil +} + +func (m *LogProcessorsMetrics) validateHubItems(formats strfmt.Registry) error { + + if err := validate.Required("hub_items", "body", m.HubItems); err != nil { + return err + } + + if m.HubItems != nil { + if err := m.HubItems.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hub_items") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("hub_items") + } + return err + } + } + + return nil +} + +// ContextValidate validate this log processors metrics based on the context it is used +func (m *LogProcessorsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateHubItems(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *LogProcessorsMetrics) contextValidateHubItems(ctx context.Context, formats strfmt.Registry) error { + + if err := m.HubItems.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hub_items") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("hub_items") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *LogProcessorsMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *LogProcessorsMetrics) UnmarshalBinary(b []byte) error { + var res LogProcessorsMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_detail_item.go b/pkg/models/metrics_detail_item.go new file mode 100644 index 00000000000..889f7e263d2 --- /dev/null +++ b/pkg/models/metrics_detail_item.go @@ -0,0 +1,158 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// MetricsDetailItem MetricsDetailItem +// +// swagger:model MetricsDetailItem +type MetricsDetailItem struct { + + // labels of the metric + Labels MetricsLabels `json:"labels,omitempty"` + + // name of the metric + // Required: true + Name *string `json:"name"` + + // unit of the metric + // Required: true + Unit *string `json:"unit"` + + // value of the metric + // Required: true + Value *float64 `json:"value"` +} + +// Validate validates this metrics detail item +func (m *MetricsDetailItem) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLabels(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUnit(formats); err != nil { + res = append(res, err) + } + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *MetricsDetailItem) validateLabels(formats strfmt.Registry) error { + if swag.IsZero(m.Labels) { // not required + return nil + } + + if m.Labels != nil { + if err := m.Labels.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + } + + return nil +} + +func (m *MetricsDetailItem) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *MetricsDetailItem) validateUnit(formats strfmt.Registry) error { + + if err := validate.Required("unit", "body", m.Unit); err != nil { + return err + } + + return nil +} + +func (m *MetricsDetailItem) validateValue(formats strfmt.Registry) error { + + if err := validate.Required("value", "body", m.Value); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this metrics detail item based on the context it is used +func (m *MetricsDetailItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLabels(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *MetricsDetailItem) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { + + if swag.IsZero(m.Labels) { // not required + return nil + } + + if err := m.Labels.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsDetailItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsDetailItem) UnmarshalBinary(b []byte) error { + var res MetricsDetailItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/metrics_labels.go b/pkg/models/metrics_labels.go new file mode 100644 index 00000000000..d807a88bc8d --- /dev/null +++ b/pkg/models/metrics_labels.go @@ -0,0 +1,27 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" +) + +// MetricsLabels MetricsLabels +// +// swagger:model MetricsLabels +type MetricsLabels map[string]string + +// Validate validates this metrics labels +func (m MetricsLabels) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this metrics labels based on context it is used +func (m MetricsLabels) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/pkg/models/metrics_meta.go b/pkg/models/metrics_meta.go new file mode 100644 index 00000000000..b021617e4d9 --- /dev/null +++ b/pkg/models/metrics_meta.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// MetricsMeta MetricsMeta +// +// swagger:model MetricsMeta +type MetricsMeta struct { + + // UTC timestamp of the current time + // Required: true + UtcNowTimestamp *int64 `json:"utc_now_timestamp"` + + // Size, in seconds, of the window used to compute the metric + // Required: true + WindowSizeSeconds *int64 `json:"window_size_seconds"` +} + +// Validate validates this metrics meta +func (m *MetricsMeta) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateUtcNowTimestamp(formats); err != nil { + res = append(res, err) + } + + if err := m.validateWindowSizeSeconds(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *MetricsMeta) validateUtcNowTimestamp(formats strfmt.Registry) error { + + if err := validate.Required("utc_now_timestamp", "body", m.UtcNowTimestamp); err != nil { + return err + } + + return nil +} + +func (m *MetricsMeta) validateWindowSizeSeconds(formats strfmt.Registry) error { + + if err := validate.Required("window_size_seconds", "body", m.WindowSizeSeconds); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this metrics meta based on context it is used +func (m *MetricsMeta) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MetricsMeta) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MetricsMeta) UnmarshalBinary(b []byte) error { + var res MetricsMeta + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/o_sversion.go b/pkg/models/o_sversion.go new file mode 100644 index 00000000000..eb670409c90 --- /dev/null +++ b/pkg/models/o_sversion.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// OSversion OSversion +// +// swagger:model OSversion +type OSversion struct { + + // name of the OS + // Required: true + Name *string `json:"name"` + + // version of the OS + // Required: true + Version *string `json:"version"` +} + +// Validate validates this o sversion +func (m *OSversion) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateVersion(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *OSversion) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *OSversion) validateVersion(formats strfmt.Registry) error { + + if err := validate.Required("version", "body", m.Version); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this o sversion based on context it is used +func (m *OSversion) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *OSversion) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *OSversion) UnmarshalBinary(b []byte) error { + var res OSversion + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/remediation_components_metrics.go b/pkg/models/remediation_components_metrics.go new file mode 100644 index 00000000000..ba3845d872a --- /dev/null +++ 
b/pkg/models/remediation_components_metrics.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// RemediationComponentsMetrics RemediationComponentsMetrics +// +// swagger:model RemediationComponentsMetrics +type RemediationComponentsMetrics struct { + BaseMetrics + + // last pull date + LastPull int64 `json:"last_pull,omitempty"` + + // name of the remediation component + Name string `json:"name,omitempty"` + + // type of the remediation component + Type string `json:"type,omitempty"` +} + +// UnmarshalJSON unmarshals this object from a JSON structure +func (m *RemediationComponentsMetrics) UnmarshalJSON(raw []byte) error { + // AO0 + var aO0 BaseMetrics + if err := swag.ReadJSON(raw, &aO0); err != nil { + return err + } + m.BaseMetrics = aO0 + + // AO1 + var dataAO1 struct { + LastPull int64 `json:"last_pull,omitempty"` + + Name string `json:"name,omitempty"` + + Type string `json:"type,omitempty"` + } + if err := swag.ReadJSON(raw, &dataAO1); err != nil { + return err + } + + m.LastPull = dataAO1.LastPull + + m.Name = dataAO1.Name + + m.Type = dataAO1.Type + + return nil +} + +// MarshalJSON marshals this object to a JSON structure +func (m RemediationComponentsMetrics) MarshalJSON() ([]byte, error) { + _parts := make([][]byte, 0, 2) + + aO0, err := swag.WriteJSON(m.BaseMetrics) + if err != nil { + return nil, err + } + _parts = append(_parts, aO0) + var dataAO1 struct { + LastPull int64 `json:"last_pull,omitempty"` + + Name string `json:"name,omitempty"` + + Type string `json:"type,omitempty"` + } + + dataAO1.LastPull = m.LastPull + + dataAO1.Name = m.Name + + dataAO1.Type = m.Type + + jsonDataAO1, errAO1 := swag.WriteJSON(dataAO1) + if errAO1 != nil { + return nil, 
errAO1 + } + _parts = append(_parts, jsonDataAO1) + return swag.ConcatJSON(_parts...), nil +} + +// Validate validates this remediation components metrics +func (m *RemediationComponentsMetrics) Validate(formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.Validate(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this remediation components metrics based on the context it is used +func (m *RemediationComponentsMetrics) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + // validation for a type composition with BaseMetrics + if err := m.BaseMetrics.ContextValidate(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// MarshalBinary interface implementation +func (m *RemediationComponentsMetrics) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RemediationComponentsMetrics) UnmarshalBinary(b []byte) error { + var res RemediationComponentsMetrics + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/success_response.go b/pkg/models/success_response.go new file mode 100644 index 00000000000..e8fc281c090 --- /dev/null +++ b/pkg/models/success_response.go @@ -0,0 +1,73 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// SuccessResponse success response +// +// success response return by the API +// +// swagger:model SuccessResponse +type SuccessResponse struct { + + // message + // Required: true + Message *string `json:"message"` +} + +// Validate validates this success response +func (m *SuccessResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMessage(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SuccessResponse) validateMessage(formats strfmt.Registry) error { + + if err := validate.Required("message", "body", m.Message); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this success response based on context it is used +func (m *SuccessResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SuccessResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SuccessResponse) UnmarshalBinary(b []byte) error { + var res SuccessResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} From 9cebcf96b4f8465c29e69b53cd9b872d5d99223e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 3 Jul 2024 17:20:20 +0200 Subject: [PATCH 207/581] command cscli [machines|bouncers] inspect (#3103) * command cscli [machines|bouncers] inspect * lint --- .golangci.yml | 2 + cmd/crowdsec-cli/bouncers.go | 217 ++++++++++++++++---- cmd/crowdsec-cli/bouncers_table.go | 33 ---- 
cmd/crowdsec-cli/machines.go | 304 +++++++++++++++++++++++++---- cmd/crowdsec-cli/machines_table.go | 33 ---- cmd/crowdsec-cli/support.go | 22 +-- pkg/database/ent/helpers.go | 55 ++++++ pkg/database/ent/schema/machine.go | 1 + test/bats/30_machines.bats | 7 +- 9 files changed, 527 insertions(+), 147 deletions(-) delete mode 100644 cmd/crowdsec-cli/bouncers_table.go delete mode 100644 cmd/crowdsec-cli/machines_table.go create mode 100644 pkg/database/ent/helpers.go diff --git a/.golangci.yml b/.golangci.yml index 66c720381de..855c73f9af3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -143,6 +143,8 @@ linters-settings: disabled: true - name: struct-tag disabled: true + - name: redundant-import-alias + disabled: true - name: time-equal disabled: true - name: var-naming diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 3da9575146e..0673473d72a 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "os" "slices" "strings" @@ -12,12 +13,16 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -79,13 +84,92 @@ Note: This command requires database direct access, so is intended to be run on cmd.AddCommand(cli.newAddCmd()) cmd.AddCommand(cli.newDeleteCmd()) cmd.AddCommand(cli.newPruneCmd()) + cmd.AddCommand(cli.newInspectCmd()) return cmd } -func (cli *cliBouncers) list() error { - out := color.Output +func (cli *cliBouncers) listHuman(out 
io.Writer, bouncers ent.Bouncers) { + t := newLightTable(out).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) + + for _, b := range bouncers { + revoked := emoji.CheckMark + if b.Revoked { + revoked = emoji.Prohibited + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) + } + + fmt.Fprintln(out, t.Render()) +} + +// bouncerInfo contains only the data we want for inspect/list +type bouncerInfo struct { + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Name string `json:"name"` + Revoked bool `json:"revoked"` + IPAddress string `json:"ip_address"` + Type string `json:"type"` + Version string `json:"version"` + LastPull *time.Time `json:"last_pull"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` +} + +func newBouncerInfo(b *ent.Bouncer) bouncerInfo { + return bouncerInfo{ + CreatedAt: b.CreatedAt, + UpdatedAt: b.UpdatedAt, + Name: b.Name, + Revoked: b.Revoked, + IPAddress: b.IPAddress, + Type: b.Type, + Version: b.Version, + LastPull: b.LastPull, + AuthType: b.AuthType, + OS: b.GetOSNameAndVersion(), + Featureflags: b.GetFeatureFlagList(), + } +} + +func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { + csvwriter := csv.NewWriter(out) + + if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { + return fmt.Errorf("failed to write raw header: %w", err) + } + + for _, b := range bouncers { + valid := "validated" + if b.Revoked { + valid = "pending" + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { + 
return fmt.Errorf("failed to write raw: %w", err) + } + } + + csvwriter.Flush() + return nil +} + + +func (cli *cliBouncers) list(out io.Writer) error { bouncers, err := cli.db.ListBouncers() if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) @@ -93,40 +177,23 @@ func (cli *cliBouncers) list() error { switch cli.cfg().Cscli.Output { case "human": - getBouncersTable(out, bouncers) + cli.listHuman(out, bouncers) case "json": + info := make([]bouncerInfo, 0, len(bouncers)) + for _, b := range bouncers { + info = append(info, newBouncerInfo(b)) + } + enc := json.NewEncoder(out) enc.SetIndent("", " ") - if err := enc.Encode(bouncers); err != nil { - return fmt.Errorf("failed to marshal: %w", err) + if err := enc.Encode(info); err != nil { + return errors.New("failed to marshal") } return nil case "raw": - csvwriter := csv.NewWriter(out) - - if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { - return fmt.Errorf("failed to write raw header: %w", err) - } - - for _, b := range bouncers { - valid := "validated" - if b.Revoked { - valid = "pending" - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { - return fmt.Errorf("failed to write raw: %w", err) - } - } - - csvwriter.Flush() + return cli.listCSV(out, bouncers) } return nil @@ -140,7 +207,7 @@ func (cli *cliBouncers) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list() + return cli.list(color.Output) }, } @@ -206,13 +273,14 @@ cscli bouncers add MyBouncerName --key `, return cmd } -func (cli *cliBouncers) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - // need to load config and db because PersistentPreRunE is not 
called for completions - +// validBouncerID returns a list of bouncer IDs for command completion +func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var err error cfg := cli.cfg() + // need to load config and db because PersistentPreRunE is not called for completions + if err = require.LAPI(cfg); err != nil { cobra.CompError("unable to list bouncers " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp @@ -261,7 +329,7 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, - ValidArgsFunction: cli.deleteValid, + ValidArgsFunction: cli.validBouncerID, RunE: func(_ *cobra.Command, args []string) error { return cli.delete(args) }, @@ -292,7 +360,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { return nil } - getBouncersTable(color.Output, bouncers) + cli.listHuman(color.Output, bouncers) if !force { if yes, err := askYesNo( @@ -341,3 +409,84 @@ cscli bouncers prune -d 45m --force`, return cmd } + +func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { + t := newTable(out).Writer + + t.SetTitle("Bouncer: " + bouncer.Name) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + lastPull := "" + if bouncer.LastPull != nil { + lastPull = bouncer.LastPull.String() + } + + t.AppendRows([]table.Row{ + {"Created At", bouncer.CreatedAt}, + {"Last Update", bouncer.UpdatedAt}, + {"Revoked?", bouncer.Revoked}, + {"IP Address", bouncer.IPAddress}, + {"Type", bouncer.Type}, + {"Version", bouncer.Version}, + {"Last Pull", lastPull}, + {"Auth type", bouncer.AuthType}, + {"OS", bouncer.GetOSNameAndVersion()}, + }) + + for _, ff := range bouncer.GetFeatureFlagList() { + t.AppendRow(table.Row{"Feature Flags", ff}) + } + + fmt.Fprintln(out, t.Render()) +} + +func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { + out := 
color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, bouncer) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newBouncerInfo(bouncer)); err != nil { + return errors.New("failed to marshal") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + return nil +} + + +func (cli *cliBouncers) newInspectCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "inspect [bouncer_name]", + Short: "inspect a bouncer by name", + Example: `cscli bouncers inspect "bouncer1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validBouncerID, + RunE: func(cmd *cobra.Command, args []string) error { + bouncerName := args[0] + + b, err := cli.db.Ent.Bouncer.Query(). + Where(bouncer.Name(bouncerName)). + Only(cmd.Context()) + if err != nil { + return fmt.Errorf("unable to read bouncer data '%s': %w", bouncerName, err) + } + + return cli.inspect(b) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/bouncers_table.go b/cmd/crowdsec-cli/bouncers_table.go deleted file mode 100644 index c32762ba266..00000000000 --- a/cmd/crowdsec-cli/bouncers_table.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "io" - "time" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/emoji" -) - -func getBouncersTable(out io.Writer, bouncers []*ent.Bouncer) { - t := newLightTable(out) - t.SetHeaders("Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - - for _, b := range bouncers { - revoked := emoji.CheckMark - if 
b.Revoked { - revoked = emoji.Prohibited - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - t.AddRow(b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType) - } - - t.Render() -} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 746045d0eab..8796d3de9b8 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "math/big" "os" "slices" @@ -16,6 +17,7 @@ import ( "github.com/fatih/color" "github.com/go-openapi/strfmt" "github.com/google/uuid" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" @@ -24,8 +26,10 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -147,13 +151,126 @@ Note: This command requires database direct access, so is intended to be run on cmd.AddCommand(cli.newDeleteCmd()) cmd.AddCommand(cli.newValidateCmd()) cmd.AddCommand(cli.newPruneCmd()) + cmd.AddCommand(cli.newInspectCmd()) return cmd } -func (cli *cliMachines) list() error { - out := color.Output +func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { + state := machine.Hubstate + + if len(state) == 0 { + fmt.Println("No hub items found for this machine") + return + } + + // group state rows by type for multiple tables + rowsByType := make(map[string][]table.Row) + + for itemType, items := range state { + for _, item := range items { + if _, ok := rowsByType[itemType]; !ok { + rowsByType[itemType] = make([]table.Row, 0) + } + + row := table.Row{item.Name, item.Status, item.Version} + rowsByType[itemType] = 
append(rowsByType[itemType], row) + } + } + + for itemType, rows := range rowsByType { + t := newTable(out).Writer + t.AppendHeader(table.Row{"Name", "Status", "Version"}) + t.SetTitle(itemType) + t.AppendRows(rows) + fmt.Fprintln(out, t.Render()) + } +} + +func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { + t := newLightTable(out).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) + + for _, m := range machines { + validated := emoji.Prohibited + if m.IsValidated { + validated = emoji.CheckMark + } + + hb, active := getLastHeartbeat(m) + if !active { + hb = emoji.Warning + " " + hb + } + + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.GetOSNameAndVersion(), m.AuthType, hb}) + } + + fmt.Fprintln(out, t.Render()) +} + +// machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. +type machineInfo struct { + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + LastPush *time.Time `json:"last_push,omitempty"` + LastHeartbeat *time.Time `json:"last_heartbeat,omitempty"` + MachineId string `json:"machineId,omitempty"` + IpAddress string `json:"ipAddress,omitempty"` + Version string `json:"version,omitempty"` + IsValidated bool `json:"isValidated,omitempty"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` + Datasources map[string]int64 `json:"datasources,omitempty"` +} + +func newMachineInfo(m *ent.Machine) machineInfo { + return machineInfo{ + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + LastPush: m.LastPush, + LastHeartbeat: m.LastHeartbeat, + MachineId: m.MachineId, + IpAddress: m.IpAddress, + Version: m.Version, + IsValidated: m.IsValidated, + AuthType: m.AuthType, + OS: m.GetOSNameAndVersion(), + Featureflags: 
m.GetFeatureFlagList(), + Datasources: m.Datasources, + } +} + +func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + for _, m := range machines { + validated := "false" + if m.IsValidated { + validated = "true" + } + + hb := "-" + if m.LastHeartbeat != nil { + hb = m.LastHeartbeat.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion)}); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } + } + + csvwriter.Flush() + + return nil +} +func (cli *cliMachines) list(out io.Writer) error { machines, err := cli.db.ListMachines() if err != nil { return fmt.Errorf("unable to list machines: %w", err) @@ -161,38 +278,23 @@ func (cli *cliMachines) list() error { switch cli.cfg().Cscli.Output { case "human": - getAgentsTable(out, machines) + cli.listHuman(out, machines) case "json": + info := make([]machineInfo, 0, len(machines)) + for _, m := range machines { + info = append(info, newMachineInfo(m)) + } + enc := json.NewEncoder(out) enc.SetIndent("", " ") - if err := enc.Encode(machines); err != nil { + if err := enc.Encode(info); err != nil { return errors.New("failed to marshal") } return nil case "raw": - csvwriter := csv.NewWriter(out) - - err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat"}) - if err != nil { - return fmt.Errorf("failed to write header: %w", err) - } - - for _, m := range machines { - validated := "false" - if m.IsValidated { - validated = "true" - } - - hb, _ := getLastHeartbeat(m) - - if err := csvwriter.Write([]string{m.MachineId, 
m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb}); err != nil { - return fmt.Errorf("failed to write raw output: %w", err) - } - } - - csvwriter.Flush() + return cli.listCSV(out, machines) } return nil @@ -207,7 +309,7 @@ func (cli *cliMachines) newListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list() + return cli.list(color.Output) }, } @@ -349,13 +451,14 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return nil } -func (cli *cliMachines) deleteValid(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - // need to load config and db because PersistentPreRunE is not called for completions - +// validMachineID returns a list of machine IDs for command completion +func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var err error cfg := cli.cfg() + // need to load config and db because PersistentPreRunE is not called for completions + if err = require.LAPI(cfg); err != nil { cobra.CompError("unable to list machines " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp @@ -405,7 +508,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { Args: cobra.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, - ValidArgsFunction: cli.deleteValid, + ValidArgsFunction: cli.validMachineID, RunE: func(_ *cobra.Command, args []string) error { return cli.delete(args) }, @@ -417,7 +520,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { if duration < 2*time.Minute && !notValidOnly { if yes, err := askYesNo( - "The duration you provided is less than 2 minutes. " + + "The duration you provided is less than 2 minutes. 
"+ "This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil { return err } else if !yes { @@ -442,11 +545,11 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b return nil } - getAgentsTable(color.Output, machines) + cli.listHuman(color.Output, machines) if !force { if yes, err := askYesNo( - "You are about to PERMANENTLY remove the above machines from the database. " + + "You are about to PERMANENTLY remove the above machines from the database. "+ "These will NOT be recoverable. Continue?", false); err != nil { return err } else if !yes { @@ -460,7 +563,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b return fmt.Errorf("unable to prune machines: %w", err) } - fmt.Fprintf(os.Stderr, "successfully delete %d machines\n", deleted) + fmt.Fprintf(os.Stderr, "successfully deleted %d machines\n", deleted) return nil } @@ -521,3 +624,136 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { return cmd } + +func (*cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { + t := newTable(out).Writer + + t.SetTitle("Machine: " + machine.MachineId) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + t.AppendRows([]table.Row{ + {"IP Address", machine.IpAddress}, + {"Created At", machine.CreatedAt}, + {"Last Update", machine.UpdatedAt}, + {"Last Heartbeat", machine.LastHeartbeat}, + {"Validated?", machine.IsValidated}, + {"CrowdSec version", machine.Version}, + {"OS", machine.GetOSNameAndVersion()}, + {"Auth type", machine.AuthType}, + }) + + for dsName, dsCount := range machine.Datasources { + t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) + } + + for _, ff := range machine.GetFeatureFlagList() { + t.AppendRow(table.Row{"Feature Flags", ff}) + } + + for _, coll := range machine.Hubstate[cwhub.COLLECTIONS] { + t.AppendRow(table.Row{"Collections", coll.Name}) + } + + 
fmt.Fprintln(out, t.Render()) +} + +func (cli *cliMachines) inspect(machine *ent.Machine) error { + out := color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newMachineInfo(machine)); err != nil { + return errors.New("failed to marshal") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + + return nil +} + +func (cli *cliMachines) inspectHub(machine *ent.Machine) error { + out := color.Output + + switch cli.cfg().Cscli.Output { + case "human": + cli.inspectHubHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(machine.Hubstate); err != nil { + return errors.New("failed to marshal") + } + + return nil + case "raw": + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"type", "name", "status", "version"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + rows := make([][]string, 0) + + for itemType, items := range machine.Hubstate { + for _, item := range items { + rows = append(rows, []string{itemType, item.Name, item.Status, item.Version}) + } + } + + for _, row := range rows { + if err := csvwriter.Write(row); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } + } + + csvwriter.Flush() + } + + return nil +} + +func (cli *cliMachines) newInspectCmd() *cobra.Command { + var showHub bool + + cmd := &cobra.Command{ + Use: "inspect [machine_name]", + Short: "inspect a machine by name", + Example: `cscli machines inspect "machine1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validMachineID, + RunE: func(_ *cobra.Command, args []string) error { + machineID := args[0] + machine, err := cli.db.QueryMachineByID(machineID) + if err != nil { + return 
fmt.Errorf("unable to read machine data '%s': %w", machineID, err) + } + + if showHub { + return cli.inspectHub(machine) + } + + return cli.inspect(machine) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&showHub, "hub", "H", false, "show hub state") + + return cmd +} diff --git a/cmd/crowdsec-cli/machines_table.go b/cmd/crowdsec-cli/machines_table.go deleted file mode 100644 index 18e16bbde3a..00000000000 --- a/cmd/crowdsec-cli/machines_table.go +++ /dev/null @@ -1,33 +0,0 @@ -package main - -import ( - "io" - "time" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/emoji" -) - -func getAgentsTable(out io.Writer, machines []*ent.Machine) { - t := newLightTable(out) - t.SetHeaders("Name", "IP Address", "Last Update", "Status", "Version", "Auth Type", "Last Heartbeat") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - - for _, m := range machines { - validated := emoji.Prohibited - if m.IsValidated { - validated = emoji.CheckMark - } - - hb, active := getLastHeartbeat(m) - if !active { - hb = emoji.Warning + " " + hb - } - - t.AddRow(m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb) - } - - t.Render() -} diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 061733ef8d3..1f98768f778 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -193,12 +193,9 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) - bouncers, err := db.ListBouncers() - if err != nil { - return fmt.Errorf("unable to list bouncers: %w", err) - } - - getBouncersTable(out, bouncers) + // call the "cscli bouncers 
list" command directly, skip any preRun + cm := cliBouncers{db: db, cfg: cli.cfg} + cm.list(out) stripped := stripAnsiString(out.String()) @@ -216,12 +213,9 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) - machines, err := db.ListMachines() - if err != nil { - return fmt.Errorf("unable to list machines: %w", err) - } - - getAgentsTable(out, machines) + // call the "cscli machines list" command directly, skip any preRun + cm := cliMachines{db: db, cfg: cli.cfg} + cm.list(out) stripped := stripAnsiString(out.String()) @@ -617,6 +611,10 @@ cscli support dump -f /tmp/crowdsec-support.zip Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { + output := cli.cfg().Cscli.Output + if output != "human" { + return fmt.Errorf("output format %s not supported for this command", output) + } return cli.dump(cmd.Context(), outFile) }, } diff --git a/pkg/database/ent/helpers.go b/pkg/database/ent/helpers.go new file mode 100644 index 00000000000..c6cdbd7f32b --- /dev/null +++ b/pkg/database/ent/helpers.go @@ -0,0 +1,55 @@ +package ent + +import ( + "strings" +) + +func (m *Machine) GetOSNameAndVersion() string { + ret := m.Osname + if m.Osversion != "" { + if ret != "" { + ret += "/" + } + + ret += m.Osversion + } + + if ret == "" { + return "?" + } + + return ret +} + +func (b *Bouncer) GetOSNameAndVersion() string { + ret := b.Osname + if b.Osversion != "" { + if ret != "" { + ret += "/" + } + + ret += b.Osversion + } + + if ret == "" { + return "?" 
+ } + + return ret +} + +func (m *Machine) GetFeatureFlagList() []string { + if m.Featureflags == "" { + return nil + } + + return strings.Split(m.Featureflags, ",") +} + +func (b *Bouncer) GetFeatureFlagList() []string { + if b.Featureflags == "" { + return nil + } + + return strings.Split(b.Featureflags, ",") +} diff --git a/pkg/database/ent/schema/machine.go b/pkg/database/ent/schema/machine.go index 1566cf70b32..5b68f61b1a0 100644 --- a/pkg/database/ent/schema/machine.go +++ b/pkg/database/ent/schema/machine.go @@ -10,6 +10,7 @@ import ( // ItemState is defined here instead of using pkg/models/HubItem to avoid introducing a dependency type ItemState struct { + Name string `json:"name,omitempty"` Status string `json:"status,omitempty"` Version string `json:"version,omitempty"` } diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 1af5e97dcb4..f8b63fb3173 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,7 +62,7 @@ teardown() { assert_output 1 } -@test "machines delete has autocompletion" { +@test "machines [delete|inspect] has autocompletion" { rune -0 cscli machines add -a -f /dev/null foo1 rune -0 cscli machines add -a -f /dev/null foo2 rune -0 cscli machines add -a -f /dev/null bar @@ -72,6 +72,11 @@ teardown() { assert_line --index 1 'foo2' refute_line 'bar' refute_line 'baz' + rune -0 cscli __complete machines inspect 'foo' + assert_line --index 0 'foo1' + assert_line --index 1 'foo2' + refute_line 'bar' + refute_line 'baz' } @test "heartbeat is initially null" { From 90e3299373c81e4b755becd475a1708f0fe424c9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 4 Jul 2024 13:42:02 +0200 Subject: [PATCH 208/581] cscli refact: extract table wrapper to own package (#3105) * cscli refact: extract table wrapper to own package * lint --- cmd/crowdsec-cli/alerts.go | 14 ++- cmd/crowdsec-cli/alerts_table.go | 13 +- cmd/crowdsec-cli/bouncers.go | 5 +- cmd/crowdsec-cli/console.go 
| 2 +- cmd/crowdsec-cli/console_table.go | 10 +- .../{prettytable.go => cstable/cstable.go} | 46 +++++-- cmd/crowdsec-cli/decisions_table.go | 3 +- cmd/crowdsec-cli/hub.go | 2 +- cmd/crowdsec-cli/hubtest.go | 27 ++++- cmd/crowdsec-cli/hubtest_table.go | 44 +++---- cmd/crowdsec-cli/item_metrics.go | 10 +- cmd/crowdsec-cli/itemcli.go | 4 +- cmd/crowdsec-cli/items.go | 8 +- cmd/crowdsec-cli/machines.go | 11 +- cmd/crowdsec-cli/metrics.go | 12 +- cmd/crowdsec-cli/metrics_table.go | 113 +++++++++--------- cmd/crowdsec-cli/notifications.go | 2 +- cmd/crowdsec-cli/notifications_table.go | 12 +- cmd/crowdsec-cli/support.go | 4 +- cmd/crowdsec-cli/table/align.go | 12 -- cmd/crowdsec-cli/tables.go | 32 ----- cmd/crowdsec-cli/utils_table.go | 32 ++--- 22 files changed, 218 insertions(+), 200 deletions(-) rename cmd/crowdsec-cli/{prettytable.go => cstable/cstable.go} (77%) delete mode 100644 cmd/crowdsec-cli/table/align.go delete mode 100644 cmd/crowdsec-cli/tables.go diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 0fe9852519c..0bb310739d9 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -21,6 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/cwversion" @@ -54,7 +55,8 @@ func DecisionsFromAlert(alert *models.Alert) string { } func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { - switch cli.cfg().Cscli.Output { + cfg := cli.cfg() + switch cfg.Cscli.Output { case "raw": csvwriter := csv.NewWriter(os.Stdout) header := []string{"id", "scope", "value", "reason", "country", "as", "decisions", "created_at"} @@ -104,7 +106,7 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi return nil } - alertsTable(color.Output, alerts, 
printMachine) + alertsTable(color.Output, cfg.Cscli.Color, alerts, printMachine) } return nil @@ -138,7 +140,9 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return err } - alertDecisionsTable(color.Output, alert) + cfg := cli.cfg() + + alertDecisionsTable(color.Output, cfg.Cscli.Color, alert) if len(alert.Meta) > 0 { fmt.Printf("\n - Context :\n") @@ -146,7 +150,7 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return alert.Meta[i].Key < alert.Meta[j].Key }) - table := newTable(color.Output) + table := cstable.New(color.Output, cfg.Cscli.Color) table.SetRowLines(false) table.SetHeaders("Key", "Value") @@ -171,7 +175,7 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro fmt.Printf("\n - Events :\n") for _, event := range alert.Events { - alertEventTable(color.Output, event) + alertEventTable(color.Output, cfg.Cscli.Color, event) } } diff --git a/cmd/crowdsec-cli/alerts_table.go b/cmd/crowdsec-cli/alerts_table.go index fbde4d2aaa9..29383457ced 100644 --- a/cmd/crowdsec-cli/alerts_table.go +++ b/cmd/crowdsec-cli/alerts_table.go @@ -9,11 +9,12 @@ import ( log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/models" ) -func alertsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { - t := newTable(out) +func alertsTable(out io.Writer, wantColor string, alerts *models.GetAlertsResponse, printMachine bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) header := []string{"ID", "value", "reason", "country", "as", "decisions", "created_at"} @@ -51,9 +52,9 @@ func alertsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine b t.Render() } -func alertDecisionsTable(out io.Writer, alert *models.Alert) { +func alertDecisionsTable(out io.Writer, wantColor string, alert *models.Alert) { foundActive := false - t := newTable(out) + t := 
cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("ID", "scope:value", "action", "expiration", "created_at") @@ -90,10 +91,10 @@ func alertDecisionsTable(out io.Writer, alert *models.Alert) { } } -func alertEventTable(out io.Writer, event *models.Event) { +func alertEventTable(out io.Writer, wantColor string, event *models.Event) { fmt.Fprintf(out, "\n- Date: %s\n", *event.Timestamp) - t := newTable(out) + t := cstable.New(out, wantColor) t.SetHeaders("Key", "Value") sort.Slice(event.Meta, func(i, j int) bool { return event.Meta[i].Key < event.Meta[j].Key diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 0673473d72a..d304b1b7867 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -17,6 +17,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -90,7 +91,7 @@ Note: This command requires database direct access, so is intended to be run on } func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { - t := newLightTable(out).Writer + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) for _, b := range bouncers { @@ -411,7 +412,7 @@ cscli bouncers prune -d 45m --force`, } func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { - t := newTable(out).Writer + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer t.SetTitle("Bouncer: " + bouncer.Name) diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/console.go index 3c7df395b30..979c9f0ea60 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/console.go @@ -276,7 +276,7 @@ func (cli *cliConsole) newStatusCmd() 
*cobra.Command { consoleCfg := cfg.API.Server.ConsoleConfig switch cfg.Cscli.Output { case "human": - cmdConsoleStatusTable(color.Output, *consoleCfg) + cmdConsoleStatusTable(color.Output, cfg.Cscli.Color, *consoleCfg) case "json": out := map[string](*bool){ csconfig.SEND_MANUAL_SCENARIOS: consoleCfg.ShareManualDecisions, diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/console_table.go index 4623f3bb62a..94976618573 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/console_table.go @@ -3,17 +3,19 @@ package main import ( "io" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) -func cmdConsoleStatusTable(out io.Writer, consoleCfg csconfig.ConsoleConfig) { - t := newTable(out) +func cmdConsoleStatusTable(out io.Writer, wantColor string, consoleCfg csconfig.ConsoleConfig) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Option Name", "Activated", "Description") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) for _, option := range csconfig.CONSOLE_CONFIGS { activated := emoji.CrossMark diff --git a/cmd/crowdsec-cli/prettytable.go b/cmd/crowdsec-cli/cstable/cstable.go similarity index 77% rename from cmd/crowdsec-cli/prettytable.go rename to cmd/crowdsec-cli/cstable/cstable.go index f17472722f1..f7ddb604d84 100644 --- a/cmd/crowdsec-cli/prettytable.go +++ b/cmd/crowdsec-cli/cstable/cstable.go @@ -1,4 +1,4 @@ -package main +package cstable // transisional file to keep (minimal) backwards compatibility with the old table // we can migrate the code to the new dependency later, it can already use the Writer interface @@ -6,11 +6,36 @@ package main import ( "fmt" "io" + "os" 
"github.com/jedib0t/go-pretty/v6/table" "github.com/jedib0t/go-pretty/v6/text" + isatty "github.com/mattn/go-isatty" ) +func RenderTitle(out io.Writer, title string) { + if out == nil { + panic("renderTableTitle: out is nil") + } + + if title == "" { + return + } + + fmt.Fprintln(out, title) +} + +func shouldWeColorize(wantColor string) bool { + switch wantColor { + case "yes": + return true + case "no": + return false + default: + return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) + } +} + type Table struct { Writer table.Writer output io.Writer @@ -18,7 +43,7 @@ type Table struct { alignHeader []text.Align } -func newTable(out io.Writer) *Table { +func New(out io.Writer, wantColor string) *Table { if out == nil { panic("newTable: out is nil") } @@ -26,14 +51,14 @@ func newTable(out io.Writer) *Table { t := table.NewWriter() // colorize output, use unicode box characters - fancy := shouldWeColorize() + fancy := shouldWeColorize(wantColor) - color := table.ColorOptions{} + colorOptions := table.ColorOptions{} if fancy { - color.Header = text.Colors{text.Italic} - color.Border = text.Colors{text.FgHiBlack} - color.Separator = text.Colors{text.FgHiBlack} + colorOptions.Header = text.Colors{text.Italic} + colorOptions.Border = text.Colors{text.FgHiBlack} + colorOptions.Separator = text.Colors{text.FgHiBlack} } // no upper/lower case transformations @@ -46,7 +71,7 @@ func newTable(out io.Writer) *Table { style := table.Style{ Box: box, - Color: color, + Color: colorOptions, Format: format, HTML: table.DefaultHTMLOptions, Options: table.OptionsDefault, @@ -63,8 +88,8 @@ func newTable(out io.Writer) *Table { } } -func newLightTable(output io.Writer) *Table { - t := newTable(output) +func NewLight(output io.Writer, wantColor string) *Table { + t := New(output, wantColor) s := t.Writer.Style() s.Box.Left = "" s.Box.LeftSeparator = "" @@ -100,6 +125,7 @@ func (t *Table) setColumnConfigs() { WidthMaxEnforcer: text.WrapSoft, }) } + 
t.Writer.SetColumnConfigs(configs) } diff --git a/cmd/crowdsec-cli/decisions_table.go b/cmd/crowdsec-cli/decisions_table.go index 10021e4dd4b..02952f93b85 100644 --- a/cmd/crowdsec-cli/decisions_table.go +++ b/cmd/crowdsec-cli/decisions_table.go @@ -5,11 +5,12 @@ import ( "io" "strconv" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/models" ) func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { - t := newTable(out) + t := cstable.New(out, cli.cfg().Cscli.Color) t.SetRowLines(false) header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"} diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 737b93d8da8..7e00eb64b33 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -72,7 +72,7 @@ func (cli *cliHub) list(all bool) error { } } - err = listItems(color.Output, cwhub.ItemTypes, items, true, cfg.Cscli.Output) + err = listItems(color.Output, cfg.Cscli.Color, cwhub.ItemTypes, items, true, cfg.Cscli.Output) if err != nil { return err } diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go index 8796fa48a17..2a4635d39f1 100644 --- a/cmd/crowdsec-cli/hubtest.go +++ b/cmd/crowdsec-cli/hubtest.go @@ -371,7 +371,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { switch cfg.Cscli.Output { case "human": - hubTestResultTable(color.Output, testResult) + hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) case "json": jsonResult := make(map[string][]string, 0) jsonResult["success"] = make([]string, 0) @@ -480,7 +480,7 @@ func (cli *cliHubTest) NewListCmd() *cobra.Command { switch cfg.Cscli.Output { case "human": - hubTestListTable(color.Output, hubPtr.Tests) + hubTestListTable(color.Output, cfg.Cscli.Color, hubPtr.Tests) case "json": j, err := json.MarshalIndent(hubPtr.Tests, " ", " ") if err != nil { @@ -505,7 +505,9 @@ func (cli *cliHubTest) 
coverage(showScenarioCov bool, showParserCov bool, showAp if err := HubTest.LoadAllTests(); err != nil { return fmt.Errorf("unable to load all tests: %+v", err) } + var err error + scenarioCoverage := []hubtest.Coverage{} parserCoverage := []hubtest.Coverage{} appsecRuleCoverage := []hubtest.Coverage{} @@ -521,12 +523,15 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp if err != nil { return fmt.Errorf("while getting parser coverage: %w", err) } + parserTested := 0 + for _, test := range parserCoverage { if test.TestsCount > 0 { parserTested++ } } + parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) } @@ -537,6 +542,7 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp } scenarioTested := 0 + for _, test := range scenarioCoverage { if test.TestsCount > 0 { scenarioTested++ @@ -553,11 +559,13 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp } appsecRuleTested := 0 + for _, test := range appsecRuleCoverage { if test.TestsCount > 0 { appsecRuleTested++ } } + appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) } @@ -572,30 +580,34 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp case showAppsecCov: fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) } + return nil } switch cfg.Cscli.Output { case "human": if showParserCov || showAll { - hubTestParserCoverageTable(color.Output, parserCoverage) + hubTestParserCoverageTable(color.Output, cfg.Cscli.Color, parserCoverage) } if showScenarioCov || showAll { - hubTestScenarioCoverageTable(color.Output, scenarioCoverage) + hubTestScenarioCoverageTable(color.Output, cfg.Cscli.Color, scenarioCoverage) } if showAppsecCov || showAll { - hubTestAppsecRuleCoverageTable(color.Output, appsecRuleCoverage) + hubTestAppsecRuleCoverageTable(color.Output, cfg.Cscli.Color, 
appsecRuleCoverage) } fmt.Println() + if showParserCov || showAll { fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) } + if showScenarioCov || showAll { fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) } + if showAppsecCov || showAll { fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) } @@ -604,16 +616,21 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp if err != nil { return err } + fmt.Printf("%s", dump) + dump, err = json.MarshalIndent(scenarioCoverage, "", " ") if err != nil { return err } + fmt.Printf("%s", dump) + dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") if err != nil { return err } + fmt.Printf("%s", dump) default: return errors.New("only human/json output modes are supported") diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/hubtest_table.go index 1b76e75263e..1fa0f990be2 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/hubtest_table.go @@ -4,16 +4,18 @@ import ( "fmt" "io" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func hubTestResultTable(out io.Writer, testResult map[string]bool) { - t := newLightTable(out) +func hubTestResultTable(out io.Writer, wantColor string, testResult map[string]bool) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Test", "Result") - t.SetHeaderAlignment(table.AlignLeft) - t.SetAlignment(table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft) + t.SetAlignment(text.AlignLeft) for testName, success := range testResult { status := emoji.CheckMarkButton @@ -27,11 +29,11 @@ func hubTestResultTable(out io.Writer, testResult map[string]bool) { t.Render() } -func hubTestListTable(out io.Writer, tests []*hubtest.HubTestItem) { - t := newLightTable(out) +func 
hubTestListTable(out io.Writer, wantColor string, tests []*hubtest.HubTestItem) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Name", "Path") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft) for _, test := range tests { t.AddRow(test.Name, test.Path) @@ -40,11 +42,11 @@ func hubTestListTable(out io.Writer, tests []*hubtest.HubTestItem) { t.Render() } -func hubTestParserCoverageTable(out io.Writer, coverage []hubtest.Coverage) { - t := newLightTable(out) +func hubTestParserCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Parser", "Status", "Number of tests") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) parserTested := 0 @@ -61,11 +63,11 @@ func hubTestParserCoverageTable(out io.Writer, coverage []hubtest.Coverage) { t.Render() } -func hubTestAppsecRuleCoverageTable(out io.Writer, coverage []hubtest.Coverage) { - t := newLightTable(out) +func hubTestAppsecRuleCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Appsec Rule", "Status", "Number of tests") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) parserTested := 0 @@ -82,11 +84,11 @@ func hubTestAppsecRuleCoverageTable(out io.Writer, coverage []hubtest.Coverage) t.Render() } -func hubTestScenarioCoverageTable(out io.Writer, 
coverage []hubtest.Coverage) { - t := newLightTable(out) +func hubTestScenarioCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Scenario", "Status", "Number of tests") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) parserTested := 0 diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/item_metrics.go index 9459968790b..b4b8c3c26b5 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/item_metrics.go @@ -18,23 +18,23 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func ShowMetrics(prometheusURL string, hubItem *cwhub.Item) error { +func ShowMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: metrics := GetParserMetric(prometheusURL, hubItem.Name) - parserMetricsTable(color.Output, hubItem.Name, metrics) + parserMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.SCENARIOS: metrics := GetScenarioMetric(prometheusURL, hubItem.Name) - scenarioMetricsTable(color.Output, hubItem.Name, metrics) + scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: for _, sub := range hubItem.SubItems() { - if err := ShowMetrics(prometheusURL, sub); err != nil { + if err := ShowMetrics(prometheusURL, sub, wantColor); err != nil { return err } } case cwhub.APPSEC_RULES: metrics := GetAppsecRuleMetric(prometheusURL, hubItem.Name) - appsecMetricsTable(color.Output, hubItem.Name, metrics) + appsecMetricsTable(color.Output, wantColor, hubItem.Name, metrics) default: // no metrics for this item type } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 55396a10995..64c18ae89b1 100644 --- 
a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -381,7 +381,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff continue } - if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl); err != nil { + if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { return err } @@ -442,7 +442,7 @@ func (cli cliItem) list(args []string, all bool) error { return err } - return listItems(color.Output, []string{cli.name}, items, false, cfg.Cscli.Output) + return listItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, false, cfg.Cscli.Output) } func (cli cliItem) newListCmd() *cobra.Command { diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index 9af432c32c1..b0c03922166 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -54,7 +54,7 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b return items, nil } -func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { +func listItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { switch output { case "human": nothingToDisplay := true @@ -64,7 +64,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item continue } - listHubItemTable(out, "\n"+strings.ToUpper(itemType), items[itemType]) + listHubItemTable(out, wantColor, "\n"+strings.ToUpper(itemType), items[itemType]) nothingToDisplay = false } @@ -143,7 +143,7 @@ func listItems(out io.Writer, itemTypes []string, items map[string][]*cwhub.Item return nil } -func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string) error { +func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { switch output { case "human", "raw": 
enc := yaml.NewEncoder(os.Stdout) @@ -174,7 +174,7 @@ func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusUR if showMetrics { fmt.Printf("\nCurrent metrics: \n") - if err := ShowMetrics(prometheusURL, item); err != nil { + if err := ShowMetrics(prometheusURL, item, wantColor); err != nil { return err } } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 8796d3de9b8..2da5e20229c 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -24,6 +24,7 @@ import ( "github.com/crowdsecurity/machineid" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -156,7 +157,7 @@ Note: This command requires database direct access, so is intended to be run on return cmd } -func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { +func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { state := machine.Hubstate if len(state) == 0 { @@ -179,7 +180,7 @@ func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { } for itemType, rows := range rowsByType { - t := newTable(out).Writer + t := cstable.New(out, cli.cfg().Cscli.Color).Writer t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) @@ -188,7 +189,7 @@ func (*cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { } func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { - t := newLightTable(out).Writer + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) for _, m := range machines { @@ -625,8 +626,8 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { return cmd } -func (*cliMachines) inspectHuman(out io.Writer, machine 
*ent.Machine) { - t := newTable(out).Writer +func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { + t := cstable.New(out, cli.cfg().Cscli.Color).Writer t.SetTitle("Machine: " + machine.MachineId) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics.go index 7858a7a7b2b..a9457bb849e 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics.go @@ -19,6 +19,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) type ( @@ -49,7 +51,7 @@ var ( ) type metricSection interface { - Table(out io.Writer, noUnit bool, showEmpty bool) + Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) Description() (string, string) } @@ -263,7 +265,7 @@ func NewCLIMetrics(cfg configGetter) *cliMetrics { } } -func (ms metricStore) Format(out io.Writer, sections []string, formatType string, noUnit bool) error { +func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { // copy only the sections we want want := map[string]metricSection{} @@ -282,7 +284,7 @@ func (ms metricStore) Format(out io.Writer, sections []string, formatType string switch formatType { case "human": for _, section := range maptools.SortedKeys(want) { - want[section].Table(out, noUnit, showEmpty) + want[section].Table(out, wantColor, noUnit, showEmpty) } case "json": x, err := json.MarshalIndent(want, "", " ") @@ -331,7 +333,7 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { } } - return ms.Format(color.Output, sections, cfg.Cscli.Output, noUnit) + return ms.Format(color.Output, cfg.Cscli.Color, sections, cfg.Cscli.Output, noUnit) } func (cli *cliMetrics) NewCommand() *cobra.Command { @@ -449,7 +451,7 @@ func (cli *cliMetrics) list() error { switch cli.cfg().Cscli.Output { case "human": - t := newTable(color.Output) + t := cstable.New(color.Output, 
cli.cfg().Cscli.Color) t.SetRowLines(true) t.SetHeaders("Type", "Title", "Description") diff --git a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics_table.go index f42d5bdaf91..38a4d3bbcc4 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics_table.go @@ -7,17 +7,18 @@ import ( "sort" "strconv" + "github.com/jedib0t/go-pretty/v6/text" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/maptools" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) // ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. var ErrNilTable = errors.New("nil table") -func lapiMetricsToTable(t *Table, stats map[string]map[string]map[string]int) int { +func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count // sort keys to keep consistent order when printing machineKeys := []string{} @@ -55,7 +56,7 @@ func lapiMetricsToTable(t *Table, stats map[string]map[string]map[string]int) in return numRows } -func wlMetricsToTable(t *Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { +func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -93,7 +94,7 @@ func wlMetricsToTable(t *Table, stats map[string]map[string]map[string]int, noUn return numRows, nil } -func metricsToTable(t *Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { +func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -145,11 +146,11 @@ func (s statBucket) Process(bucket, metric string, val int) { s[bucket][metric] += val } -func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s 
statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} @@ -157,7 +158,7 @@ func (s statBucket) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -178,11 +179,11 @@ func (s statAcquis) Process(source, metric string, val int) { s[source][metric] += val } -func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} @@ -190,7 +191,7 @@ func (s statAcquis) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -208,11 +209,11 @@ func (s statAppsecEngine) Process(appsecEngine, metric string, val int) 
{ s[appsecEngine][metric] += val } -func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Appsec Engine", "Processed", "Blocked") - t.SetAlignment(table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft) keys := []string{"processed", "blocked"} @@ -220,7 +221,7 @@ func (s statAppsecEngine) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -242,19 +243,19 @@ func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, s[appsecEngine][appsecRule][metric] += val } -func (s statAppsecRule) Table(out io.Writer, noUnit bool, showEmpty bool) { +func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { for appsecEngine, appsecEngineRulesStats := range s { - t := newTable(out) + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Rule ID", "Triggered") - t.SetAlignment(table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft) keys := []string{"triggered"} if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { - renderTableTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) + cstable.RenderTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) t.Render() } } @@ -277,17 +278,17 @@ func (s statWhitelist) Process(whitelist, reason, metric string, val int) { s[whitelist][reason][metric] += val } -func (s statWhitelist) Table(out io.Writer, noUnit bool, showEmpty bool) { 
- t := newTable(out) +func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -307,11 +308,11 @@ func (s statParser) Process(parser, metric string, val int) { s[parser][metric] += val } -func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) keys := []string{"hits", "parsed", "unparsed"} @@ -319,7 +320,7 @@ func (s statParser) Table(out io.Writer, noUnit bool, showEmpty bool) { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -339,11 +340,11 @@ func (s statStash) Process(name, mtype string, val int) { } } -func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Name", "Type", "Items") - 
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ numRows := 0 @@ -363,7 +364,7 @@ func (s statStash) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -381,11 +382,11 @@ func (s statLapi) Process(route, method string, val int) { s[route][method] += val } -func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Route", "Method", "Hits") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ numRows := 0 @@ -415,7 +416,7 @@ func (s statLapi) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -437,17 +438,17 @@ func (s statLapiMachine) Process(machine, route, method string, val int) { s[machine][route][method] += val } -func (s statLapiMachine) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Machine", "Route", "Method", "Hits") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) numRows := lapiMetricsToTable(t, s) if numRows > 0 || 
showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -469,17 +470,17 @@ func (s statLapiBouncer) Process(bouncer, route, method string, val int) { s[bouncer][route][method] += val } -func (s statLapiBouncer) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Bouncer", "Route", "Method", "Hits") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) numRows := lapiMetricsToTable(t, s) if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -509,11 +510,11 @@ func (s statLapiDecision) Process(bouncer, fam string, val int) { s[bouncer] = x } -func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) numRows := 0 @@ -529,7 +530,7 @@ func (s statLapiDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -552,11 +553,11 @@ func (s statDecision) Process(reason, origin, action string, val int) { s[reason][origin][action] += val } -func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statDecision) 
Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Reason", "Origin", "Action", "Count") - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) numRows := 0 @@ -577,7 +578,7 @@ func (s statDecision) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } @@ -591,11 +592,11 @@ func (s statAlert) Process(reason string, val int) { s[reason] += val } -func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { - t := newTable(out) +func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) t.SetRowLines(false) t.SetHeaders("Reason", "Count") - t.SetAlignment(table.AlignLeft, table.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft) numRows := 0 @@ -610,7 +611,7 @@ func (s statAlert) Table(out io.Writer, noUnit bool, showEmpty bool) { if numRows > 0 || showEmpty { title, _ := s.Description() - renderTableTitle(out, "\n"+title+":") + cstable.RenderTitle(out, "\n"+title+":") t.Render() } } diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index 768d6a1c47e..4d757fca44e 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -167,7 +167,7 @@ func (cli *cliNotifications) NewListCmd() *cobra.Command { } if cfg.Cscli.Output == "human" { - notificationListTable(color.Output, ncfgs) + notificationListTable(color.Output, cfg.Cscli.Color, ncfgs) } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(ncfgs, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/notifications_table.go index b96c8ca4783..2976797bd8a 100644 --- 
a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/notifications_table.go @@ -5,15 +5,17 @@ import ( "sort" "strings" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) -func notificationListTable(out io.Writer, ncfgs map[string]NotificationsCfg) { - t := newLightTable(out) +func notificationListTable(out io.Writer, wantColor string, ncfgs map[string]NotificationsCfg) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Active", "Name", "Type", "Profile name") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) keys := make([]string, 0, len(ncfgs)) for k := range ncfgs { diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 1f98768f778..b026d31412e 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -94,7 +94,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { return err } - if err := ms.Format(humanMetrics, nil, "human", false); err != nil { + if err := ms.Format(humanMetrics, cfg.Cscli.Color, nil, "human", false); err != nil { return fmt.Errorf("could not format prometheus metrics: %w", err) } @@ -173,7 +173,7 @@ func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub, itemType str return fmt.Errorf("could not collect %s list: %w", itemType, err) } - if err := listItems(out, []string{itemType}, items, false, "human"); err != nil { + if err := listItems(out, cli.cfg().Cscli.Color, []string{itemType}, items, false, "human"); err != nil { return fmt.Errorf("could not list %s: %w", itemType, err) } diff --git 
a/cmd/crowdsec-cli/table/align.go b/cmd/crowdsec-cli/table/align.go deleted file mode 100644 index e0582007c57..00000000000 --- a/cmd/crowdsec-cli/table/align.go +++ /dev/null @@ -1,12 +0,0 @@ -package table - -import ( - "github.com/jedib0t/go-pretty/v6/text" -) - -// temporary, backward compatibility only - -const ( - AlignLeft = text.AlignLeft - AlignRight = text.AlignRight -) diff --git a/cmd/crowdsec-cli/tables.go b/cmd/crowdsec-cli/tables.go deleted file mode 100644 index e6dba0c2644..00000000000 --- a/cmd/crowdsec-cli/tables.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - - isatty "github.com/mattn/go-isatty" -) - -func shouldWeColorize() bool { - switch csConfig.Cscli.Color { - case "yes": - return true - case "no": - return false - default: - return isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) - } -} - -func renderTableTitle(out io.Writer, title string) { - if out == nil { - panic("renderTableTitle: out is nil") - } - - if title == "" { - return - } - - fmt.Fprintln(out, title) -} diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go index d7d26a65c12..c0043e47ee3 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/utils_table.go @@ -5,28 +5,30 @@ import ( "io" "strconv" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/table" + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) -func listHubItemTable(out io.Writer, title string, items []*cwhub.Item) { - t := newLightTable(out) +func listHubItemTable(out io.Writer, wantColor string, title string, items []*cwhub.Item) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path") - t.SetHeaderAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) - 
t.SetAlignment(table.AlignLeft, table.AlignLeft, table.AlignLeft, table.AlignLeft) + t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) for _, item := range items { status := fmt.Sprintf("%v %s", item.State.Emoji(), item.State.Text()) t.AddRow(item.Name, status, item.State.LocalVersion, item.State.LocalPath) } - renderTableTitle(out, title) + cstable.RenderTitle(out, title) t.Render() } -func appsecMetricsTable(out io.Writer, itemName string, metrics map[string]int) { - t := newTable(out) +func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { + t := cstable.NewLight(out, wantColor) t.SetHeaders("Inband Hits", "Outband Hits") t.AddRow( @@ -34,16 +36,16 @@ func appsecMetricsTable(out io.Writer, itemName string, metrics map[string]int) strconv.Itoa(metrics["outband_hits"]), ) - renderTableTitle(out, fmt.Sprintf("\n - (AppSec Rule) %s:", itemName)) + cstable.RenderTitle(out, fmt.Sprintf("\n - (AppSec Rule) %s:", itemName)) t.Render() } -func scenarioMetricsTable(out io.Writer, itemName string, metrics map[string]int) { +func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { if metrics["instantiation"] == 0 { return } - t := newTable(out) + t := cstable.New(out, wantColor) t.SetHeaders("Current Count", "Overflows", "Instantiated", "Poured", "Expired") t.AddRow( @@ -54,12 +56,12 @@ func scenarioMetricsTable(out io.Writer, itemName string, metrics map[string]int strconv.Itoa(metrics["underflow"]), ) - renderTableTitle(out, fmt.Sprintf("\n - (Scenario) %s:", itemName)) + cstable.RenderTitle(out, fmt.Sprintf("\n - (Scenario) %s:", itemName)) t.Render() } -func parserMetricsTable(out io.Writer, itemName string, metrics map[string]map[string]int) { - t := newTable(out) +func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics 
map[string]map[string]int) { + t := cstable.New(out, wantColor) t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") // don't show table if no hits @@ -79,7 +81,7 @@ func parserMetricsTable(out io.Writer, itemName string, metrics map[string]map[s } if showTable { - renderTableTitle(out, fmt.Sprintf("\n - (Parser) %s:", itemName)) + cstable.RenderTitle(out, fmt.Sprintf("\n - (Parser) %s:", itemName)) t.Render() } } From 7cc33b585433b5f94437821b732fc09511a7176a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 4 Jul 2024 14:43:35 +0200 Subject: [PATCH 209/581] cscli refact: extract metrics to own package (#3106) --- cmd/crowdsec-cli/item_metrics.go | 36 ----------------- cmd/crowdsec-cli/main.go | 4 +- cmd/crowdsec-cli/{ => metrics}/metrics.go | 7 +++- .../{ => metrics}/metrics_table.go | 2 +- cmd/crowdsec-cli/metrics/number.go | 40 +++++++++++++++++++ cmd/crowdsec-cli/support.go | 3 +- 6 files changed, 51 insertions(+), 41 deletions(-) rename cmd/crowdsec-cli/{ => metrics}/metrics.go (98%) rename cmd/crowdsec-cli/{ => metrics}/metrics_table.go (99%) create mode 100644 cmd/crowdsec-cli/metrics/number.go diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/item_metrics.go index b4b8c3c26b5..f00ae08b00b 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/item_metrics.go @@ -1,8 +1,6 @@ package main import ( - "fmt" - "math" "net/http" "strconv" "strings" @@ -291,37 +289,3 @@ func GetPrometheusMetric(url string) []*prom2json.Family { return result } - -type unit struct { - value int64 - symbol string -} - -var ranges = []unit{ - {value: 1e18, symbol: "E"}, - {value: 1e15, symbol: "P"}, - {value: 1e12, symbol: "T"}, - {value: 1e9, symbol: "G"}, - {value: 1e6, symbol: "M"}, - {value: 1e3, symbol: "k"}, - {value: 1, symbol: ""}, -} - -func formatNumber(num int) string { - goodUnit := unit{} - - for _, u := range ranges { - if int64(num) >= u.value { - goodUnit = u - break - } - } - - if goodUnit.value == 
1 { - return fmt.Sprintf("%d%s", num, goodUnit.symbol) - } - - res := math.Round(float64(num)/float64(goodUnit.value)*100) / 100 - - return fmt.Sprintf("%.2f%s", res, goodUnit.symbol) -} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index a712960810a..bd9d8988132 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,6 +14,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -250,7 +252,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIMetrics(cli.cfg).NewCommand()) + cmd.AddCommand(metrics.NewCLI(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/metrics.go b/cmd/crowdsec-cli/metrics/metrics.go similarity index 98% rename from cmd/crowdsec-cli/metrics.go rename to cmd/crowdsec-cli/metrics/metrics.go index a9457bb849e..967b57de707 100644 --- a/cmd/crowdsec-cli/metrics.go +++ b/cmd/crowdsec-cli/metrics/metrics.go @@ -1,4 +1,4 @@ -package main +package metrics import ( "encoding/json" @@ -21,6 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) type ( @@ -255,11 +256,13 @@ func (ms metricStore) Fetch(url string) error { return nil } +type configGetter func() *csconfig.Config + type cliMetrics struct { cfg configGetter } -func NewCLIMetrics(cfg configGetter) *cliMetrics { +func NewCLI(cfg configGetter) *cliMetrics { return &cliMetrics{ cfg: cfg, } diff --git 
a/cmd/crowdsec-cli/metrics_table.go b/cmd/crowdsec-cli/metrics/metrics_table.go similarity index 99% rename from cmd/crowdsec-cli/metrics_table.go rename to cmd/crowdsec-cli/metrics/metrics_table.go index 38a4d3bbcc4..4e423722f3e 100644 --- a/cmd/crowdsec-cli/metrics_table.go +++ b/cmd/crowdsec-cli/metrics/metrics_table.go @@ -1,4 +1,4 @@ -package main +package metrics import ( "errors" diff --git a/cmd/crowdsec-cli/metrics/number.go b/cmd/crowdsec-cli/metrics/number.go new file mode 100644 index 00000000000..a60d3db2a9b --- /dev/null +++ b/cmd/crowdsec-cli/metrics/number.go @@ -0,0 +1,40 @@ +package metrics + +import ( + "fmt" + "math" +) + +type unit struct { + value int64 + symbol string +} + +var ranges = []unit{ + {value: 1e18, symbol: "E"}, + {value: 1e15, symbol: "P"}, + {value: 1e12, symbol: "T"}, + {value: 1e9, symbol: "G"}, + {value: 1e6, symbol: "M"}, + {value: 1e3, symbol: "k"}, + {value: 1, symbol: ""}, +} + +func formatNumber(num int) string { + goodUnit := unit{} + + for _, u := range ranges { + if int64(num) >= u.value { + goodUnit = u + break + } + } + + if goodUnit.value == 1 { + return fmt.Sprintf("%d%s", num, goodUnit.symbol) + } + + res := math.Round(float64(num)/float64(goodUnit.value)*100) / 100 + + return fmt.Sprintf("%.2f%s", res, goodUnit.symbol) +} diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index b026d31412e..1b33ef38ada 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -22,6 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -88,7 +89,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { humanMetrics := new(bytes.Buffer) - ms := NewMetricStore() + ms := metrics.NewMetricStore() if err := ms.Fetch(cfg.Cscli.PrometheusUrl); 
err != nil { return err From ea2098f6e6e9a62608fde690234db9f307c5dbce Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 4 Jul 2024 15:55:47 +0200 Subject: [PATCH 210/581] refact cscli metrics: split stat types to own files (#3107) --- cmd/crowdsec-cli/metrics/list.go | 78 +++ cmd/crowdsec-cli/metrics/metrics.go | 444 ------------- cmd/crowdsec-cli/metrics/metrics_table.go | 617 ------------------- cmd/crowdsec-cli/metrics/show.go | 104 ++++ cmd/crowdsec-cli/metrics/statacquis.go | 45 ++ cmd/crowdsec-cli/metrics/statalert.go | 45 ++ cmd/crowdsec-cli/metrics/statappsecengine.go | 42 ++ cmd/crowdsec-cli/metrics/statappsecrule.go | 48 ++ cmd/crowdsec-cli/metrics/statbucket.go | 43 ++ cmd/crowdsec-cli/metrics/statdecision.go | 60 ++ cmd/crowdsec-cli/metrics/statlapi.go | 67 ++ cmd/crowdsec-cli/metrics/statlapibouncer.go | 43 ++ cmd/crowdsec-cli/metrics/statlapidecision.go | 65 ++ cmd/crowdsec-cli/metrics/statlapimachine.go | 43 ++ cmd/crowdsec-cli/metrics/statparser.go | 44 ++ cmd/crowdsec-cli/metrics/statstash.go | 61 ++ cmd/crowdsec-cli/metrics/statwhitelist.go | 44 ++ cmd/crowdsec-cli/metrics/store.go | 264 ++++++++ cmd/crowdsec-cli/metrics/table.go | 130 ++++ 19 files changed, 1226 insertions(+), 1061 deletions(-) create mode 100644 cmd/crowdsec-cli/metrics/list.go delete mode 100644 cmd/crowdsec-cli/metrics/metrics_table.go create mode 100644 cmd/crowdsec-cli/metrics/show.go create mode 100644 cmd/crowdsec-cli/metrics/statacquis.go create mode 100644 cmd/crowdsec-cli/metrics/statalert.go create mode 100644 cmd/crowdsec-cli/metrics/statappsecengine.go create mode 100644 cmd/crowdsec-cli/metrics/statappsecrule.go create mode 100644 cmd/crowdsec-cli/metrics/statbucket.go create mode 100644 cmd/crowdsec-cli/metrics/statdecision.go create mode 100644 cmd/crowdsec-cli/metrics/statlapi.go create mode 100644 cmd/crowdsec-cli/metrics/statlapibouncer.go create mode 100644 cmd/crowdsec-cli/metrics/statlapidecision.go create mode 100644 
cmd/crowdsec-cli/metrics/statlapimachine.go create mode 100644 cmd/crowdsec-cli/metrics/statparser.go create mode 100644 cmd/crowdsec-cli/metrics/statstash.go create mode 100644 cmd/crowdsec-cli/metrics/statwhitelist.go create mode 100644 cmd/crowdsec-cli/metrics/store.go create mode 100644 cmd/crowdsec-cli/metrics/table.go diff --git a/cmd/crowdsec-cli/metrics/list.go b/cmd/crowdsec-cli/metrics/list.go new file mode 100644 index 00000000000..ba68aa4b64d --- /dev/null +++ b/cmd/crowdsec-cli/metrics/list.go @@ -0,0 +1,78 @@ +package metrics + +import ( + "encoding/json" + "fmt" + + "github.com/fatih/color" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +func (cli *cliMetrics) list() error { + type metricType struct { + Type string `json:"type" yaml:"type"` + Title string `json:"title" yaml:"title"` + Description string `json:"description" yaml:"description"` + } + + var allMetrics []metricType + + ms := NewMetricStore() + for _, section := range maptools.SortedKeys(ms) { + title, description := ms[section].Description() + allMetrics = append(allMetrics, metricType{ + Type: section, + Title: title, + Description: description, + }) + } + + switch cli.cfg().Cscli.Output { + case "human": + t := cstable.New(color.Output, cli.cfg().Cscli.Color) + t.SetRowLines(true) + t.SetHeaders("Type", "Title", "Description") + + for _, metric := range allMetrics { + t.AddRow(metric.Type, metric.Title, metric.Description) + } + + t.Render() + case "json": + x, err := json.MarshalIndent(allMetrics, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal metric types: %w", err) + } + + fmt.Println(string(x)) + case "raw": + x, err := yaml.Marshal(allMetrics) + if err != nil { + return fmt.Errorf("failed to marshal metric types: %w", err) + } + + fmt.Println(string(x)) + } + + return nil +} + +func (cli *cliMetrics) newListCmd() *cobra.Command { + cmd := 
&cobra.Command{ + Use: "list", + Short: "List available types of metrics.", + Long: `List available types of metrics.`, + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.list() + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/metrics/metrics.go b/cmd/crowdsec-cli/metrics/metrics.go index 967b57de707..52d623dc37e 100644 --- a/cmd/crowdsec-cli/metrics/metrics.go +++ b/cmd/crowdsec-cli/metrics/metrics.go @@ -1,261 +1,11 @@ package metrics import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "time" - - "github.com/fatih/color" - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/prom2json" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/go-cs-lib/maptools" - "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -type ( - statAcquis map[string]map[string]int - statParser map[string]map[string]int - statBucket map[string]map[string]int - statWhitelist map[string]map[string]map[string]int - statLapi map[string]map[string]int - statLapiMachine map[string]map[string]map[string]int - statLapiBouncer map[string]map[string]map[string]int - statLapiDecision map[string]struct { - NonEmpty int - Empty int - } - statDecision map[string]map[string]map[string]int - statAppsecEngine map[string]map[string]int - statAppsecRule map[string]map[string]map[string]int - statAlert map[string]int - statStash map[string]struct { - Type string - Count int - } -) - -var ( - ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") - ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") -) - -type metricSection interface { - Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) - Description() (string, string) -} - -type metricStore 
map[string]metricSection - -func NewMetricStore() metricStore { - return metricStore{ - "acquisition": statAcquis{}, - "scenarios": statBucket{}, - "parsers": statParser{}, - "lapi": statLapi{}, - "lapi-machine": statLapiMachine{}, - "lapi-bouncer": statLapiBouncer{}, - "lapi-decisions": statLapiDecision{}, - "decisions": statDecision{}, - "alerts": statAlert{}, - "stash": statStash{}, - "appsec-engine": statAppsecEngine{}, - "appsec-rule": statAppsecRule{}, - "whitelists": statWhitelist{}, - } -} - -func (ms metricStore) Fetch(url string) error { - mfChan := make(chan *dto.MetricFamily, 1024) - errChan := make(chan error, 1) - - // Start with the DefaultTransport for sane defaults. - transport := http.DefaultTransport.(*http.Transport).Clone() - // Conservatively disable HTTP keep-alives as this program will only - // ever need a single HTTP request. - transport.DisableKeepAlives = true - // Timeout early if the server doesn't even return the headers. - transport.ResponseHeaderTimeout = time.Minute - go func() { - defer trace.CatchPanic("crowdsec/ShowPrometheus") - - err := prom2json.FetchMetricFamilies(url, mfChan, transport) - if err != nil { - errChan <- fmt.Errorf("failed to fetch metrics: %w", err) - return - } - errChan <- nil - }() - - result := []*prom2json.Family{} - for mf := range mfChan { - result = append(result, prom2json.NewFamily(mf)) - } - - if err := <-errChan; err != nil { - return err - } - - log.Debugf("Finished reading metrics output, %d entries", len(result)) - /*walk*/ - - mAcquis := ms["acquisition"].(statAcquis) - mParser := ms["parsers"].(statParser) - mBucket := ms["scenarios"].(statBucket) - mLapi := ms["lapi"].(statLapi) - mLapiMachine := ms["lapi-machine"].(statLapiMachine) - mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) - mLapiDecision := ms["lapi-decisions"].(statLapiDecision) - mDecision := ms["decisions"].(statDecision) - mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) - mAppsecRule := 
ms["appsec-rule"].(statAppsecRule) - mAlert := ms["alerts"].(statAlert) - mStash := ms["stash"].(statStash) - mWhitelist := ms["whitelists"].(statWhitelist) - - for idx, fam := range result { - if !strings.HasPrefix(fam.Name, "cs_") { - continue - } - - log.Tracef("round %d", idx) - - for _, m := range fam.Metrics { - metric, ok := m.(prom2json.Metric) - if !ok { - log.Debugf("failed to convert metric to prom2json.Metric") - continue - } - - name, ok := metric.Labels["name"] - if !ok { - log.Debugf("no name in Metric %v", metric.Labels) - } - - source, ok := metric.Labels["source"] - if !ok { - log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name) - } else { - if srctype, ok := metric.Labels["type"]; ok { - source = srctype + ":" + source - } - } - - value := m.(prom2json.Metric).Value - machine := metric.Labels["machine"] - bouncer := metric.Labels["bouncer"] - - route := metric.Labels["route"] - method := metric.Labels["method"] - - reason := metric.Labels["reason"] - origin := metric.Labels["origin"] - action := metric.Labels["action"] - - appsecEngine := metric.Labels["appsec_engine"] - appsecRule := metric.Labels["rule_name"] - - mtype := metric.Labels["type"] - - fval, err := strconv.ParseFloat(value, 32) - if err != nil { - log.Errorf("Unexpected int value %s : %s", value, err) - } - - ival := int(fval) - - switch fam.Name { - // - // buckets - // - case "cs_bucket_created_total": - mBucket.Process(name, "instantiation", ival) - case "cs_buckets": - mBucket.Process(name, "curr_count", ival) - case "cs_bucket_overflowed_total": - mBucket.Process(name, "overflow", ival) - case "cs_bucket_poured_total": - mBucket.Process(name, "pour", ival) - mAcquis.Process(source, "pour", ival) - case "cs_bucket_underflowed_total": - mBucket.Process(name, "underflow", ival) - // - // parsers - // - case "cs_parser_hits_total": - mAcquis.Process(source, "reads", ival) - case "cs_parser_hits_ok_total": - mAcquis.Process(source, "parsed", ival) - case 
"cs_parser_hits_ko_total": - mAcquis.Process(source, "unparsed", ival) - case "cs_node_hits_total": - mParser.Process(name, "hits", ival) - case "cs_node_hits_ok_total": - mParser.Process(name, "parsed", ival) - case "cs_node_hits_ko_total": - mParser.Process(name, "unparsed", ival) - // - // whitelists - // - case "cs_node_wl_hits_total": - mWhitelist.Process(name, reason, "hits", ival) - case "cs_node_wl_hits_ok_total": - mWhitelist.Process(name, reason, "whitelisted", ival) - // track as well whitelisted lines at acquis level - mAcquis.Process(source, "whitelisted", ival) - // - // lapi - // - case "cs_lapi_route_requests_total": - mLapi.Process(route, method, ival) - case "cs_lapi_machine_requests_total": - mLapiMachine.Process(machine, route, method, ival) - case "cs_lapi_bouncer_requests_total": - mLapiBouncer.Process(bouncer, route, method, ival) - case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": - mLapiDecision.Process(bouncer, fam.Name, ival) - // - // decisions - // - case "cs_active_decisions": - mDecision.Process(reason, origin, action, ival) - case "cs_alerts": - mAlert.Process(reason, ival) - // - // stash - // - case "cs_cache_size": - mStash.Process(name, mtype, ival) - // - // appsec - // - case "cs_appsec_reqs_total": - mAppsecEngine.Process(appsecEngine, "processed", ival) - case "cs_appsec_block_total": - mAppsecEngine.Process(appsecEngine, "blocked", ival) - case "cs_appsec_rule_hits": - mAppsecRule.Process(appsecEngine, appsecRule, "triggered", ival) - default: - log.Debugf("unknown: %+v", fam.Name) - continue - } - } - } - - return nil -} - type configGetter func() *csconfig.Config type cliMetrics struct { @@ -268,77 +18,6 @@ func NewCLI(cfg configGetter) *cliMetrics { } } -func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { - // copy only the sections we want - want := map[string]metricSection{} - - // if explicitly asking for sections, we want to show empty 
tables - showEmpty := len(sections) > 0 - - // if no sections are specified, we want all of them - if len(sections) == 0 { - sections = maptools.SortedKeys(ms) - } - - for _, section := range sections { - want[section] = ms[section] - } - - switch formatType { - case "human": - for _, section := range maptools.SortedKeys(want) { - want[section].Table(out, wantColor, noUnit, showEmpty) - } - case "json": - x, err := json.MarshalIndent(want, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) - } - out.Write(x) - case "raw": - x, err := yaml.Marshal(want) - if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) - } - out.Write(x) - default: - return fmt.Errorf("unknown format type %s", formatType) - } - - return nil -} - -func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { - cfg := cli.cfg() - - if url != "" { - cfg.Cscli.PrometheusUrl = url - } - - if cfg.Prometheus == nil { - return ErrMissingConfig - } - - if !cfg.Prometheus.Enabled { - return ErrMetricsDisabled - } - - ms := NewMetricStore() - - if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { - return err - } - - // any section that we don't have in the store is an error - for _, section := range sections { - if _, ok := ms[section]; !ok { - return fmt.Errorf("unknown metrics type: %s", section) - } - } - - return ms.Format(color.Output, cfg.Cscli.Color, sections, cfg.Cscli.Output, noUnit) -} - func (cli *cliMetrics) NewCommand() *cobra.Command { var ( url string @@ -373,126 +52,3 @@ cscli metrics list`, return cmd } - -// expandAlias returns a list of sections. The input can be a list of sections or alias. 
-func (cli *cliMetrics) expandAlias(args []string) []string { - ret := []string{} - - for _, section := range args { - switch section { - case "engine": - ret = append(ret, "acquisition", "parsers", "scenarios", "stash", "whitelists") - case "lapi": - ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") - case "appsec": - ret = append(ret, "appsec-engine", "appsec-rule") - default: - ret = append(ret, section) - } - } - - return ret -} - -func (cli *cliMetrics) newShowCmd() *cobra.Command { - var ( - url string - noUnit bool - ) - - cmd := &cobra.Command{ - Use: "show [type]...", - Short: "Display all or part of the available metrics.", - Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, - Example: `# Show all Metrics, skip empty tables -cscli metrics show - -# Use an alias: "engine", "lapi" or "appsec" to show a group of metrics -cscli metrics show engine - -# Show some specific metrics, show empty tables, connect to a different url -cscli metrics show acquisition parsers scenarios stash --url http://lapi.local:6060/metrics - -# To list available metric types, use "cscli metrics list" -cscli metrics list; cscli metrics list -o json - -# Show metrics in json format -cscli metrics show acquisition parsers scenarios stash -o json`, - // Positional args are optional - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - args = cli.expandAlias(args) - return cli.show(args, url, noUnit) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&url, "url", "u", "", "Metrics url (http://:/metrics)") - flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") - - return cmd -} - -func (cli *cliMetrics) list() error { - type metricType struct { - Type string `json:"type" yaml:"type"` - Title string `json:"title" yaml:"title"` - Description string `json:"description" yaml:"description"` - } - - var allMetrics 
[]metricType - - ms := NewMetricStore() - for _, section := range maptools.SortedKeys(ms) { - title, description := ms[section].Description() - allMetrics = append(allMetrics, metricType{ - Type: section, - Title: title, - Description: description, - }) - } - - switch cli.cfg().Cscli.Output { - case "human": - t := cstable.New(color.Output, cli.cfg().Cscli.Color) - t.SetRowLines(true) - t.SetHeaders("Type", "Title", "Description") - - for _, metric := range allMetrics { - t.AddRow(metric.Type, metric.Title, metric.Description) - } - - t.Render() - case "json": - x, err := json.MarshalIndent(allMetrics, "", " ") - if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) - } - - fmt.Println(string(x)) - case "raw": - x, err := yaml.Marshal(allMetrics) - if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) - } - - fmt.Println(string(x)) - } - - return nil -} - -func (cli *cliMetrics) newListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "List available types of metrics.", - Long: `List available types of metrics.`, - Args: cobra.ExactArgs(0), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.list() - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/metrics/metrics_table.go b/cmd/crowdsec-cli/metrics/metrics_table.go deleted file mode 100644 index 4e423722f3e..00000000000 --- a/cmd/crowdsec-cli/metrics/metrics_table.go +++ /dev/null @@ -1,617 +0,0 @@ -package metrics - -import ( - "errors" - "fmt" - "io" - "sort" - "strconv" - - "github.com/jedib0t/go-pretty/v6/text" - log "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/maptools" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" -) - -// ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. 
-var ErrNilTable = errors.New("nil table") - -func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int) int { - // stats: machine -> route -> method -> count - // sort keys to keep consistent order when printing - machineKeys := []string{} - for k := range stats { - machineKeys = append(machineKeys, k) - } - - sort.Strings(machineKeys) - - numRows := 0 - - for _, machine := range machineKeys { - // oneRow: route -> method -> count - machineRow := stats[machine] - for routeName, route := range machineRow { - for methodName, count := range route { - row := []string{ - machine, - routeName, - methodName, - } - if count != 0 { - row = append(row, strconv.Itoa(count)) - } else { - row = append(row, "-") - } - - t.AddRow(row...) - - numRows++ - } - } - } - - return numRows -} - -func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { - if t == nil { - return 0, ErrNilTable - } - - numRows := 0 - - for _, name := range maptools.SortedKeys(stats) { - for _, reason := range maptools.SortedKeys(stats[name]) { - row := []string{ - name, - reason, - "-", - "-", - } - - for _, action := range maptools.SortedKeys(stats[name][reason]) { - value := stats[name][reason][action] - - switch action { - case "whitelisted": - row[3] = strconv.Itoa(value) - case "hits": - row[2] = strconv.Itoa(value) - default: - log.Debugf("unexpected counter '%s' for whitelists = %d", action, value) - } - } - - t.AddRow(row...) 
- - numRows++ - } - } - - return numRows, nil -} - -func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { - if t == nil { - return 0, ErrNilTable - } - - numRows := 0 - - for _, alabel := range maptools.SortedKeys(stats) { - astats, ok := stats[alabel] - if !ok { - continue - } - - row := []string{ - alabel, - } - - for _, sl := range keys { - if v, ok := astats[sl]; ok && v != 0 { - numberToShow := strconv.Itoa(v) - if !noUnit { - numberToShow = formatNumber(v) - } - - row = append(row, numberToShow) - } else { - row = append(row, "-") - } - } - - t.AddRow(row...) - - numRows++ - } - - return numRows, nil -} - -func (s statBucket) Description() (string, string) { - return "Scenario Metrics", - `Measure events in different scenarios. Current count is the number of buckets during metrics collection. ` + - `Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` -} - -func (s statBucket) Process(bucket, metric string, val int) { - if _, ok := s[bucket]; !ok { - s[bucket] = make(map[string]int) - } - - s[bucket][metric] += val -} - -func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} - - if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting scenario stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statAcquis) Description() (string, string) { - return "Acquisition Metrics", - `Measures the lines read, parsed, and 
unparsed per datasource. ` + - `Zero read lines indicate a misconfigured or inactive datasource. ` + - `Zero parsed lines mean the parser(s) failed. ` + - `Non-zero parsed lines are fine as crowdsec selects relevant lines.` -} - -func (s statAcquis) Process(source, metric string, val int) { - if _, ok := s[source]; !ok { - s[source] = make(map[string]int) - } - - s[source][metric] += val -} - -func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} - - if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting acquis stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statAppsecEngine) Description() (string, string) { - return "Appsec Metrics", - `Measures the number of parsed and blocked requests by the AppSec Component.` -} - -func (s statAppsecEngine) Process(appsecEngine, metric string, val int) { - if _, ok := s[appsecEngine]; !ok { - s[appsecEngine] = make(map[string]int) - } - - s[appsecEngine][metric] += val -} - -func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Appsec Engine", "Processed", "Blocked") - t.SetAlignment(text.AlignLeft, text.AlignLeft) - - keys := []string{"processed", "blocked"} - - if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting appsec stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, 
"\n"+title+":") - t.Render() - } -} - -func (s statAppsecRule) Description() (string, string) { - return "Appsec Rule Metrics", - `Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.` -} - -func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, val int) { - if _, ok := s[appsecEngine]; !ok { - s[appsecEngine] = make(map[string]map[string]int) - } - - if _, ok := s[appsecEngine][appsecRule]; !ok { - s[appsecEngine][appsecRule] = make(map[string]int) - } - - s[appsecEngine][appsecRule][metric] += val -} - -func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - for appsecEngine, appsecEngineRulesStats := range s { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Rule ID", "Triggered") - t.SetAlignment(text.AlignLeft, text.AlignLeft) - - keys := []string{"triggered"} - - if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { - log.Warningf("while collecting appsec rules stats: %s", err) - } else if numRows > 0 || showEmpty { - cstable.RenderTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) - t.Render() - } - } -} - -func (s statWhitelist) Description() (string, string) { - return "Whitelist Metrics", - `Tracks the number of events processed and possibly whitelisted by each parser whitelist.` -} - -func (s statWhitelist) Process(whitelist, reason, metric string, val int) { - if _, ok := s[whitelist]; !ok { - s[whitelist] = make(map[string]map[string]int) - } - - if _, ok := s[whitelist][reason]; !ok { - s[whitelist][reason] = make(map[string]int) - } - - s[whitelist][reason][metric] += val -} - -func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) 
- - if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { - log.Warningf("while collecting parsers stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statParser) Description() (string, string) { - return "Parser Metrics", - `Tracks the number of events processed by each parser and indicates success of failure. ` + - `Zero parsed lines means the parer(s) failed. ` + - `Non-zero unparsed lines are fine as crowdsec select relevant lines.` -} - -func (s statParser) Process(parser, metric string, val int) { - if _, ok := s[parser]; !ok { - s[parser] = make(map[string]int) - } - - s[parser][metric] += val -} - -func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - keys := []string{"hits", "parsed", "unparsed"} - - if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { - log.Warningf("while collecting parsers stats: %s", err) - } else if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statStash) Description() (string, string) { - return "Parser Stash Metrics", - `Tracks the status of stashes that might be created by various parsers and scenarios.` -} - -func (s statStash) Process(name, mtype string, val int) { - s[name] = struct { - Type string - Count int - }{ - Type: mtype, - Count: val, - } -} - -func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Name", "Type", "Items") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - // unfortunately, we can't reuse metricsToTable as the structure is too different 
:/ - numRows := 0 - - for _, alabel := range maptools.SortedKeys(s) { - astats := s[alabel] - - row := []string{ - alabel, - astats.Type, - strconv.Itoa(astats.Count), - } - t.AddRow(row...) - - numRows++ - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statLapi) Description() (string, string) { - return "Local API Metrics", - `Monitors the requests made to local API routes.` -} - -func (s statLapi) Process(route, method string, val int) { - if _, ok := s[route]; !ok { - s[route] = make(map[string]int) - } - - s[route][method] += val -} - -func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - // unfortunately, we can't reuse metricsToTable as the structure is too different :/ - numRows := 0 - - for _, alabel := range maptools.SortedKeys(s) { - astats := s[alabel] - - subKeys := []string{} - for skey := range astats { - subKeys = append(subKeys, skey) - } - - sort.Strings(subKeys) - - for _, sl := range subKeys { - row := []string{ - alabel, - sl, - strconv.Itoa(astats[sl]), - } - - t.AddRow(row...) 
- - numRows++ - } - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statLapiMachine) Description() (string, string) { - return "Local API Machines Metrics", - `Tracks the number of calls to the local API from each registered machine.` -} - -func (s statLapiMachine) Process(machine, route, method string, val int) { - if _, ok := s[machine]; !ok { - s[machine] = make(map[string]map[string]int) - } - - if _, ok := s[machine][route]; !ok { - s[machine][route] = make(map[string]int) - } - - s[machine][route][method] += val -} - -func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Machine", "Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - numRows := lapiMetricsToTable(t, s) - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statLapiBouncer) Description() (string, string) { - return "Local API Bouncers Metrics", - `Tracks total hits to remediation component related API routes.` -} - -func (s statLapiBouncer) Process(bouncer, route, method string, val int) { - if _, ok := s[bouncer]; !ok { - s[bouncer] = make(map[string]map[string]int) - } - - if _, ok := s[bouncer][route]; !ok { - s[bouncer][route] = make(map[string]int) - } - - s[bouncer][route][method] += val -} - -func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Bouncer", "Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - numRows := lapiMetricsToTable(t, s) - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func 
(s statLapiDecision) Description() (string, string) { - return "Local API Bouncers Decisions", - `Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.` -} - -func (s statLapiDecision) Process(bouncer, fam string, val int) { - if _, ok := s[bouncer]; !ok { - s[bouncer] = struct { - NonEmpty int - Empty int - }{} - } - - x := s[bouncer] - - switch fam { - case "cs_lapi_decisions_ko_total": - x.Empty += val - case "cs_lapi_decisions_ok_total": - x.NonEmpty += val - } - - s[bouncer] = x -} - -func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - numRows := 0 - - for bouncer, hits := range s { - t.AddRow( - bouncer, - strconv.Itoa(hits.Empty), - strconv.Itoa(hits.NonEmpty), - ) - - numRows++ - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statDecision) Description() (string, string) { - return "Local API Decisions", - `Provides information about all currently active decisions. 
` + - `Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` -} - -func (s statDecision) Process(reason, origin, action string, val int) { - if _, ok := s[reason]; !ok { - s[reason] = make(map[string]map[string]int) - } - - if _, ok := s[reason][origin]; !ok { - s[reason][origin] = make(map[string]int) - } - - s[reason][origin][action] += val -} - -func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Reason", "Origin", "Action", "Count") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - - numRows := 0 - - for reason, origins := range s { - for origin, actions := range origins { - for action, hits := range actions { - t.AddRow( - reason, - origin, - action, - strconv.Itoa(hits), - ) - - numRows++ - } - } - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} - -func (s statAlert) Description() (string, string) { - return "Local API Alerts", - `Tracks the total number of past and present alerts for the installed scenarios.` -} - -func (s statAlert) Process(reason string, val int) { - s[reason] += val -} - -func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Reason", "Count") - t.SetAlignment(text.AlignLeft, text.AlignLeft) - - numRows := 0 - - for scenario, hits := range s { - t.AddRow( - scenario, - strconv.Itoa(hits), - ) - - numRows++ - } - - if numRows > 0 || showEmpty { - title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() - } -} diff --git a/cmd/crowdsec-cli/metrics/show.go b/cmd/crowdsec-cli/metrics/show.go new file mode 100644 index 00000000000..46603034f24 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/show.go @@ -0,0 +1,104 @@ +package metrics + +import ( + 
"errors" + "fmt" + + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +var ( + ErrMissingConfig = errors.New("prometheus section missing, can't show metrics") + ErrMetricsDisabled = errors.New("prometheus is not enabled, can't show metrics") +) + +func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { + cfg := cli.cfg() + + if url != "" { + cfg.Cscli.PrometheusUrl = url + } + + if cfg.Prometheus == nil { + return ErrMissingConfig + } + + if !cfg.Prometheus.Enabled { + return ErrMetricsDisabled + } + + ms := NewMetricStore() + + if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + return err + } + + // any section that we don't have in the store is an error + for _, section := range sections { + if _, ok := ms[section]; !ok { + return fmt.Errorf("unknown metrics type: %s", section) + } + } + + return ms.Format(color.Output, cfg.Cscli.Color, sections, cfg.Cscli.Output, noUnit) +} + +// expandAlias returns a list of sections. The input can be a list of sections or alias. 
+func expandAlias(args []string) []string { + ret := []string{} + + for _, section := range args { + switch section { + case "engine": + ret = append(ret, "acquisition", "parsers", "scenarios", "stash", "whitelists") + case "lapi": + ret = append(ret, "alerts", "decisions", "lapi", "lapi-bouncer", "lapi-decisions", "lapi-machine") + case "appsec": + ret = append(ret, "appsec-engine", "appsec-rule") + default: + ret = append(ret, section) + } + } + + return ret +} + +func (cli *cliMetrics) newShowCmd() *cobra.Command { + var ( + url string + noUnit bool + ) + + cmd := &cobra.Command{ + Use: "show [type]...", + Short: "Display all or part of the available metrics.", + Long: `Fetch metrics from a Local API server and display them, optionally filtering on specific types.`, + Example: `# Show all Metrics, skip empty tables +cscli metrics show + +# Use an alias: "engine", "lapi" or "appsec" to show a group of metrics +cscli metrics show engine + +# Show some specific metrics, show empty tables, connect to a different url +cscli metrics show acquisition parsers scenarios stash --url http://lapi.local:6060/metrics + +# To list available metric types, use "cscli metrics list" +cscli metrics list; cscli metrics list -o json + +# Show metrics in json format +cscli metrics show acquisition parsers scenarios stash -o json`, + // Positional args are optional + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + args = expandAlias(args) + return cli.show(args, url, noUnit) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Metrics url (http://:/metrics)") + flags.BoolVar(&noUnit, "no-unit", false, "Show the real number instead of formatted with units") + + return cmd +} diff --git a/cmd/crowdsec-cli/metrics/statacquis.go b/cmd/crowdsec-cli/metrics/statacquis.go new file mode 100644 index 00000000000..4a8cf0f8934 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statacquis.go @@ -0,0 +1,45 @@ +package metrics + +import ( + "io" + + 
"github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statAcquis map[string]map[string]int + +func (s statAcquis) Description() (string, string) { + return "Acquisition Metrics", + `Measures the lines read, parsed, and unparsed per datasource. ` + + `Zero read lines indicate a misconfigured or inactive datasource. ` + + `Zero parsed lines mean the parser(s) failed. ` + + `Non-zero parsed lines are fine as crowdsec selects relevant lines.` +} + +func (s statAcquis) Process(source, metric string, val int) { + if _, ok := s[source]; !ok { + s[source] = make(map[string]int) + } + + s[source][metric] += val +} + +func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} + + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { + log.Warningf("while collecting acquis stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statalert.go b/cmd/crowdsec-cli/metrics/statalert.go new file mode 100644 index 00000000000..c8055910a3a --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statalert.go @@ -0,0 +1,45 @@ +package metrics + +import ( + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statAlert map[string]int + +func (s statAlert) Description() (string, string) { + return "Local API Alerts", + `Tracks the total number of past and present alerts for the installed scenarios.` +} + 
+func (s statAlert) Process(reason string, val int) { + s[reason] += val +} + +func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Reason", "Count") + t.SetAlignment(text.AlignLeft, text.AlignLeft) + + numRows := 0 + + for scenario, hits := range s { + t.AddRow( + scenario, + strconv.Itoa(hits), + ) + + numRows++ + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statappsecengine.go b/cmd/crowdsec-cli/metrics/statappsecengine.go new file mode 100644 index 00000000000..18ad03ef03f --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statappsecengine.go @@ -0,0 +1,42 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statAppsecEngine map[string]map[string]int + +func (s statAppsecEngine) Description() (string, string) { + return "Appsec Metrics", + `Measures the number of parsed and blocked requests by the AppSec Component.` +} + +func (s statAppsecEngine) Process(appsecEngine, metric string, val int) { + if _, ok := s[appsecEngine]; !ok { + s[appsecEngine] = make(map[string]int) + } + + s[appsecEngine][metric] += val +} + +func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Appsec Engine", "Processed", "Blocked") + t.SetAlignment(text.AlignLeft, text.AlignLeft) + + keys := []string{"processed", "blocked"} + + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { + log.Warningf("while collecting appsec stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git 
a/cmd/crowdsec-cli/metrics/statappsecrule.go b/cmd/crowdsec-cli/metrics/statappsecrule.go new file mode 100644 index 00000000000..17ec28de99e --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statappsecrule.go @@ -0,0 +1,48 @@ +package metrics + +import ( + "fmt" + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statAppsecRule map[string]map[string]map[string]int + +func (s statAppsecRule) Description() (string, string) { + return "Appsec Rule Metrics", + `Provides “per AppSec Component” information about the number of matches for loaded AppSec Rules.` +} + +func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, val int) { + if _, ok := s[appsecEngine]; !ok { + s[appsecEngine] = make(map[string]map[string]int) + } + + if _, ok := s[appsecEngine][appsecRule]; !ok { + s[appsecEngine][appsecRule] = make(map[string]int) + } + + s[appsecEngine][appsecRule][metric] += val +} + +func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + for appsecEngine, appsecEngineRulesStats := range s { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Rule ID", "Triggered") + t.SetAlignment(text.AlignLeft, text.AlignLeft) + + keys := []string{"triggered"} + + if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { + log.Warningf("while collecting appsec rules stats: %s", err) + } else if numRows > 0 || showEmpty { + cstable.RenderTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) + t.Render() + } + } +} diff --git a/cmd/crowdsec-cli/metrics/statbucket.go b/cmd/crowdsec-cli/metrics/statbucket.go new file mode 100644 index 00000000000..62ca4dee71d --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statbucket.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statBucket map[string]map[string]int + +func (s statBucket) Description() (string, string) { + return "Scenario Metrics", + `Measure events in different scenarios. Current count is the number of buckets during metrics collection. ` + + `Overflows are past event-producing buckets, while Expired are the ones that didn’t receive enough events to Overflow.` +} + +func (s statBucket) Process(bucket, metric string, val int) { + if _, ok := s[bucket]; !ok { + s[bucket] = make(map[string]int) + } + + s[bucket][metric] += val +} + +func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} + + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { + log.Warningf("while collecting scenario stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statdecision.go b/cmd/crowdsec-cli/metrics/statdecision.go new file mode 100644 index 00000000000..b1474d95f76 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statdecision.go @@ -0,0 +1,60 @@ +package metrics + +import ( + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statDecision map[string]map[string]map[string]int + +func (s statDecision) Description() (string, string) { + return "Local API Decisions", + `Provides information about all currently active decisions. 
` + + `Includes both local (crowdsec) and global decisions (CAPI), and lists subscriptions (lists).` +} + +func (s statDecision) Process(reason, origin, action string, val int) { + if _, ok := s[reason]; !ok { + s[reason] = make(map[string]map[string]int) + } + + if _, ok := s[reason][origin]; !ok { + s[reason][origin] = make(map[string]int) + } + + s[reason][origin][action] += val +} + +func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Reason", "Origin", "Action", "Count") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + numRows := 0 + + for reason, origins := range s { + for origin, actions := range origins { + for action, hits := range actions { + t.AddRow( + reason, + origin, + action, + strconv.Itoa(hits), + ) + + numRows++ + } + } + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statlapi.go b/cmd/crowdsec-cli/metrics/statlapi.go new file mode 100644 index 00000000000..f8a737e5c44 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statlapi.go @@ -0,0 +1,67 @@ +package metrics + +import ( + "io" + "sort" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statLapi map[string]map[string]int + +func (s statLapi) Description() (string, string) { + return "Local API Metrics", + `Monitors the requests made to local API routes.` +} + +func (s statLapi) Process(route, method string, val int) { + if _, ok := s[route]; !ok { + s[route] = make(map[string]int) + } + + s[route][method] += val +} + +func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Route", "Method", "Hits") + 
t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + + // unfortunately, we can't reuse metricsToTable as the structure is too different :/ + numRows := 0 + + for _, alabel := range maptools.SortedKeys(s) { + astats := s[alabel] + + subKeys := []string{} + for skey := range astats { + subKeys = append(subKeys, skey) + } + + sort.Strings(subKeys) + + for _, sl := range subKeys { + row := []string{ + alabel, + sl, + strconv.Itoa(astats[sl]), + } + + t.AddRow(row...) + + numRows++ + } + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statlapibouncer.go b/cmd/crowdsec-cli/metrics/statlapibouncer.go new file mode 100644 index 00000000000..e7483c6294d --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statlapibouncer.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statLapiBouncer map[string]map[string]map[string]int + +func (s statLapiBouncer) Description() (string, string) { + return "Local API Bouncers Metrics", + `Tracks total hits to remediation component related API routes.` +} + +func (s statLapiBouncer) Process(bouncer, route, method string, val int) { + if _, ok := s[bouncer]; !ok { + s[bouncer] = make(map[string]map[string]int) + } + + if _, ok := s[bouncer][route]; !ok { + s[bouncer][route] = make(map[string]int) + } + + s[bouncer][route][method] += val +} + +func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Bouncer", "Route", "Method", "Hits") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + numRows := lapiMetricsToTable(t, s) + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff 
--git a/cmd/crowdsec-cli/metrics/statlapidecision.go b/cmd/crowdsec-cli/metrics/statlapidecision.go new file mode 100644 index 00000000000..97e17fe8a49 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statlapidecision.go @@ -0,0 +1,65 @@ +package metrics + +import ( + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statLapiDecision map[string]struct { + NonEmpty int + Empty int +} + +func (s statLapiDecision) Description() (string, string) { + return "Local API Bouncers Decisions", + `Tracks the number of empty/non-empty answers from LAPI to bouncers that are working in "live" mode.` +} + +func (s statLapiDecision) Process(bouncer, fam string, val int) { + if _, ok := s[bouncer]; !ok { + s[bouncer] = struct { + NonEmpty int + Empty int + }{} + } + + x := s[bouncer] + + switch fam { + case "cs_lapi_decisions_ko_total": + x.Empty += val + case "cs_lapi_decisions_ok_total": + x.NonEmpty += val + } + + s[bouncer] = x +} + +func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + + numRows := 0 + + for bouncer, hits := range s { + t.AddRow( + bouncer, + strconv.Itoa(hits.Empty), + strconv.Itoa(hits.NonEmpty), + ) + + numRows++ + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statlapimachine.go b/cmd/crowdsec-cli/metrics/statlapimachine.go new file mode 100644 index 00000000000..6b9d9da207e --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statlapimachine.go @@ -0,0 +1,43 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statLapiMachine 
map[string]map[string]map[string]int + +func (s statLapiMachine) Description() (string, string) { + return "Local API Machines Metrics", + `Tracks the number of calls to the local API from each registered machine.` +} + +func (s statLapiMachine) Process(machine, route, method string, val int) { + if _, ok := s[machine]; !ok { + s[machine] = make(map[string]map[string]int) + } + + if _, ok := s[machine][route]; !ok { + s[machine][route] = make(map[string]int) + } + + s[machine][route][method] += val +} + +func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Machine", "Route", "Method", "Hits") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + numRows := lapiMetricsToTable(t, s) + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statparser.go b/cmd/crowdsec-cli/metrics/statparser.go new file mode 100644 index 00000000000..0ed7cedbe4b --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statparser.go @@ -0,0 +1,44 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statParser map[string]map[string]int + +func (s statParser) Description() (string, string) { + return "Parser Metrics", + `Tracks the number of events processed by each parser and indicates success of failure. ` + + `Zero parsed lines means the parer(s) failed. 
` + + `Non-zero unparsed lines are fine as crowdsec select relevant lines.` +} + +func (s statParser) Process(parser, metric string, val int) { + if _, ok := s[parser]; !ok { + s[parser] = make(map[string]int) + } + + s[parser][metric] += val +} + +func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + keys := []string{"hits", "parsed", "unparsed"} + + if numRows, err := metricsToTable(t, s, keys, noUnit); err != nil { + log.Warningf("while collecting parsers stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statstash.go b/cmd/crowdsec-cli/metrics/statstash.go new file mode 100644 index 00000000000..79c14b04fd6 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statstash.go @@ -0,0 +1,61 @@ +package metrics + +import ( + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/text" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statStash map[string]struct { + Type string + Count int +} + +func (s statStash) Description() (string, string) { + return "Parser Stash Metrics", + `Tracks the status of stashes that might be created by various parsers and scenarios.` +} + +func (s statStash) Process(name, mtype string, val int) { + s[name] = struct { + Type string + Count int + }{ + Type: mtype, + Count: val, + } +} + +func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Name", "Type", "Items") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + + // unfortunately, we can't reuse metricsToTable as the structure is 
too different :/ + numRows := 0 + + for _, alabel := range maptools.SortedKeys(s) { + astats := s[alabel] + + row := []string{ + alabel, + astats.Type, + strconv.Itoa(astats.Count), + } + t.AddRow(row...) + + numRows++ + } + + if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/statwhitelist.go b/cmd/crowdsec-cli/metrics/statwhitelist.go new file mode 100644 index 00000000000..89a016d22b0 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/statwhitelist.go @@ -0,0 +1,44 @@ +package metrics + +import ( + "io" + + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +type statWhitelist map[string]map[string]map[string]int + +func (s statWhitelist) Description() (string, string) { + return "Whitelist Metrics", + `Tracks the number of events processed and possibly whitelisted by each parser whitelist.` +} + +func (s statWhitelist) Process(whitelist, reason, metric string, val int) { + if _, ok := s[whitelist]; !ok { + s[whitelist] = make(map[string]map[string]int) + } + + if _, ok := s[whitelist][reason]; !ok { + s[whitelist][reason] = make(map[string]int) + } + + s[whitelist][reason][metric] += val +} + +func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + t := cstable.New(out, wantColor) + t.SetRowLines(false) + t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") + t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + + if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { + log.Warningf("while collecting parsers stats: %s", err) + } else if numRows > 0 || showEmpty { + title, _ := s.Description() + cstable.RenderTitle(out, "\n"+title+":") + t.Render() + } +} diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/metrics/store.go new file mode 100644 index 00000000000..48926488c07 
--- /dev/null +++ b/cmd/crowdsec-cli/metrics/store.go @@ -0,0 +1,264 @@ +package metrics + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prom2json" + log "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/go-cs-lib/trace" +) + +type metricSection interface { + Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) + Description() (string, string) +} + +type metricStore map[string]metricSection + +func NewMetricStore() metricStore { + return metricStore{ + "acquisition": statAcquis{}, + "scenarios": statBucket{}, + "parsers": statParser{}, + "lapi": statLapi{}, + "lapi-machine": statLapiMachine{}, + "lapi-bouncer": statLapiBouncer{}, + "lapi-decisions": statLapiDecision{}, + "decisions": statDecision{}, + "alerts": statAlert{}, + "stash": statStash{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, + "whitelists": statWhitelist{}, + } +} + +func (ms metricStore) Fetch(url string) error { + mfChan := make(chan *dto.MetricFamily, 1024) + errChan := make(chan error, 1) + + // Start with the DefaultTransport for sane defaults. + transport := http.DefaultTransport.(*http.Transport).Clone() + // Conservatively disable HTTP keep-alives as this program will only + // ever need a single HTTP request. + transport.DisableKeepAlives = true + // Timeout early if the server doesn't even return the headers. 
+ transport.ResponseHeaderTimeout = time.Minute + go func() { + defer trace.CatchPanic("crowdsec/ShowPrometheus") + + err := prom2json.FetchMetricFamilies(url, mfChan, transport) + if err != nil { + errChan <- fmt.Errorf("failed to fetch metrics: %w", err) + return + } + errChan <- nil + }() + + result := []*prom2json.Family{} + for mf := range mfChan { + result = append(result, prom2json.NewFamily(mf)) + } + + if err := <-errChan; err != nil { + return err + } + + log.Debugf("Finished reading metrics output, %d entries", len(result)) + /*walk*/ + + mAcquis := ms["acquisition"].(statAcquis) + mParser := ms["parsers"].(statParser) + mBucket := ms["scenarios"].(statBucket) + mLapi := ms["lapi"].(statLapi) + mLapiMachine := ms["lapi-machine"].(statLapiMachine) + mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) + mLapiDecision := ms["lapi-decisions"].(statLapiDecision) + mDecision := ms["decisions"].(statDecision) + mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) + mAppsecRule := ms["appsec-rule"].(statAppsecRule) + mAlert := ms["alerts"].(statAlert) + mStash := ms["stash"].(statStash) + mWhitelist := ms["whitelists"].(statWhitelist) + + for idx, fam := range result { + if !strings.HasPrefix(fam.Name, "cs_") { + continue + } + + log.Tracef("round %d", idx) + + for _, m := range fam.Metrics { + metric, ok := m.(prom2json.Metric) + if !ok { + log.Debugf("failed to convert metric to prom2json.Metric") + continue + } + + name, ok := metric.Labels["name"] + if !ok { + log.Debugf("no name in Metric %v", metric.Labels) + } + + source, ok := metric.Labels["source"] + if !ok { + log.Debugf("no source in Metric %v for %s", metric.Labels, fam.Name) + } else { + if srctype, ok := metric.Labels["type"]; ok { + source = srctype + ":" + source + } + } + + value := m.(prom2json.Metric).Value + machine := metric.Labels["machine"] + bouncer := metric.Labels["bouncer"] + + route := metric.Labels["route"] + method := metric.Labels["method"] + + reason := 
metric.Labels["reason"] + origin := metric.Labels["origin"] + action := metric.Labels["action"] + + appsecEngine := metric.Labels["appsec_engine"] + appsecRule := metric.Labels["rule_name"] + + mtype := metric.Labels["type"] + + fval, err := strconv.ParseFloat(value, 32) + if err != nil { + log.Errorf("Unexpected int value %s : %s", value, err) + } + + ival := int(fval) + + switch fam.Name { + // + // buckets + // + case "cs_bucket_created_total": + mBucket.Process(name, "instantiation", ival) + case "cs_buckets": + mBucket.Process(name, "curr_count", ival) + case "cs_bucket_overflowed_total": + mBucket.Process(name, "overflow", ival) + case "cs_bucket_poured_total": + mBucket.Process(name, "pour", ival) + mAcquis.Process(source, "pour", ival) + case "cs_bucket_underflowed_total": + mBucket.Process(name, "underflow", ival) + // + // parsers + // + case "cs_parser_hits_total": + mAcquis.Process(source, "reads", ival) + case "cs_parser_hits_ok_total": + mAcquis.Process(source, "parsed", ival) + case "cs_parser_hits_ko_total": + mAcquis.Process(source, "unparsed", ival) + case "cs_node_hits_total": + mParser.Process(name, "hits", ival) + case "cs_node_hits_ok_total": + mParser.Process(name, "parsed", ival) + case "cs_node_hits_ko_total": + mParser.Process(name, "unparsed", ival) + // + // whitelists + // + case "cs_node_wl_hits_total": + mWhitelist.Process(name, reason, "hits", ival) + case "cs_node_wl_hits_ok_total": + mWhitelist.Process(name, reason, "whitelisted", ival) + // track as well whitelisted lines at acquis level + mAcquis.Process(source, "whitelisted", ival) + // + // lapi + // + case "cs_lapi_route_requests_total": + mLapi.Process(route, method, ival) + case "cs_lapi_machine_requests_total": + mLapiMachine.Process(machine, route, method, ival) + case "cs_lapi_bouncer_requests_total": + mLapiBouncer.Process(bouncer, route, method, ival) + case "cs_lapi_decisions_ko_total", "cs_lapi_decisions_ok_total": + mLapiDecision.Process(bouncer, fam.Name, ival) + // 
+ // decisions + // + case "cs_active_decisions": + mDecision.Process(reason, origin, action, ival) + case "cs_alerts": + mAlert.Process(reason, ival) + // + // stash + // + case "cs_cache_size": + mStash.Process(name, mtype, ival) + // + // appsec + // + case "cs_appsec_reqs_total": + mAppsecEngine.Process(appsecEngine, "processed", ival) + case "cs_appsec_block_total": + mAppsecEngine.Process(appsecEngine, "blocked", ival) + case "cs_appsec_rule_hits": + mAppsecRule.Process(appsecEngine, appsecRule, "triggered", ival) + default: + log.Debugf("unknown: %+v", fam.Name) + continue + } + } + } + + return nil +} + +func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { + // copy only the sections we want + want := map[string]metricSection{} + + // if explicitly asking for sections, we want to show empty tables + showEmpty := len(sections) > 0 + + // if no sections are specified, we want all of them + if len(sections) == 0 { + sections = maptools.SortedKeys(ms) + } + + for _, section := range sections { + want[section] = ms[section] + } + + switch formatType { + case "human": + for _, section := range maptools.SortedKeys(want) { + want[section].Table(out, wantColor, noUnit, showEmpty) + } + case "json": + x, err := json.MarshalIndent(want, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal metrics: %w", err) + } + out.Write(x) + case "raw": + x, err := yaml.Marshal(want) + if err != nil { + return fmt.Errorf("failed to marshal metrics: %w", err) + } + out.Write(x) + default: + return fmt.Errorf("unknown format type %s", formatType) + } + + return nil +} diff --git a/cmd/crowdsec-cli/metrics/table.go b/cmd/crowdsec-cli/metrics/table.go new file mode 100644 index 00000000000..f51e905ba71 --- /dev/null +++ b/cmd/crowdsec-cli/metrics/table.go @@ -0,0 +1,130 @@ +package metrics + +import ( + "errors" + "sort" + "strconv" + + log "github.com/sirupsen/logrus" + + 
"github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +// ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. +var ErrNilTable = errors.New("nil table") + +func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int) int { + // stats: machine -> route -> method -> count + // sort keys to keep consistent order when printing + machineKeys := []string{} + for k := range stats { + machineKeys = append(machineKeys, k) + } + + sort.Strings(machineKeys) + + numRows := 0 + + for _, machine := range machineKeys { + // oneRow: route -> method -> count + machineRow := stats[machine] + for routeName, route := range machineRow { + for methodName, count := range route { + row := []string{ + machine, + routeName, + methodName, + } + if count != 0 { + row = append(row, strconv.Itoa(count)) + } else { + row = append(row, "-") + } + + t.AddRow(row...) + + numRows++ + } + } + } + + return numRows +} + +func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { + if t == nil { + return 0, ErrNilTable + } + + numRows := 0 + + for _, name := range maptools.SortedKeys(stats) { + for _, reason := range maptools.SortedKeys(stats[name]) { + row := []string{ + name, + reason, + "-", + "-", + } + + for _, action := range maptools.SortedKeys(stats[name][reason]) { + value := stats[name][reason][action] + + switch action { + case "whitelisted": + row[3] = strconv.Itoa(value) + case "hits": + row[2] = strconv.Itoa(value) + default: + log.Debugf("unexpected counter '%s' for whitelists = %d", action, value) + } + } + + t.AddRow(row...) 
+ + numRows++ + } + } + + return numRows, nil +} + +func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { + if t == nil { + return 0, ErrNilTable + } + + numRows := 0 + + for _, alabel := range maptools.SortedKeys(stats) { + astats, ok := stats[alabel] + if !ok { + continue + } + + row := []string{ + alabel, + } + + for _, sl := range keys { + if v, ok := astats[sl]; ok && v != 0 { + numberToShow := strconv.Itoa(v) + if !noUnit { + numberToShow = formatNumber(v) + } + + row = append(row, numberToShow) + } else { + row = append(row, "-") + } + } + + t.AddRow(row...) + + numRows++ + } + + return numRows, nil +} From 96a6eec1fbb4d6e49f73fa21d20c2cf7877efb67 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 5 Jul 2024 16:04:33 +0100 Subject: [PATCH 211/581] enhance: Add default_range_remediation (#3109) --- config/profiles.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/config/profiles.yaml b/config/profiles.yaml index 9d81c9298a2..c4982acd978 100644 --- a/config/profiles.yaml +++ b/config/profiles.yaml @@ -12,3 +12,18 @@ decisions: # - http_default # Set the required http parameters in /etc/crowdsec/notifications/http.yaml before enabling this. # - email_default # Set the required email parameters in /etc/crowdsec/notifications/email.yaml before enabling this. on_success: break +--- +name: default_range_remediation +#debug: true +filters: + - Alert.Remediation == true && Alert.GetScope() == "Range" +decisions: + - type: ban + duration: 4h +#duration_expr: Sprintf('%dh', (GetDecisionsCount(Alert.GetValue()) + 1) * 4) +# notifications: +# - slack_default # Set the webhook in /etc/crowdsec/notifications/slack.yaml before enabling this. +# - splunk_default # Set the splunk url and token in /etc/crowdsec/notifications/splunk.yaml before enabling this. +# - http_default # Set the required http parameters in /etc/crowdsec/notifications/http.yaml before enabling this. 
+# - email_default # Set the required email parameters in /etc/crowdsec/notifications/email.yaml before enabling this. +on_success: break From aa0c389d2b9681a4a068337a89846d8bb404bbf0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 9 Jul 2024 14:49:55 +0200 Subject: [PATCH 212/581] make: remove redundant go version check (#3118) It's already done by the toolchain with go.mod --- Makefile | 19 +++++++------------ mk/check_go_version.ps1 | 19 ------------------- mk/goversion.mk | 36 ------------------------------------ 3 files changed, 7 insertions(+), 67 deletions(-) delete mode 100644 mk/check_go_version.ps1 delete mode 100644 mk/goversion.mk diff --git a/Makefile b/Makefile index 324be04683d..97eb7ba0eea 100644 --- a/Makefile +++ b/Makefile @@ -25,10 +25,6 @@ BUILD_STATIC ?= 0 # List of plugins to build PLUGINS ?= $(patsubst ./cmd/notification-%,%,$(wildcard ./cmd/notification-*)) -# Can be overriden, if you can deal with the consequences -BUILD_REQUIRE_GO_MAJOR ?= 1 -BUILD_REQUIRE_GO_MINOR ?= 21 - #-------------------------------------- GO = go @@ -128,10 +124,10 @@ endif #-------------------------------------- .PHONY: build -build: pre-build goversion crowdsec cscli plugins ## Build crowdsec, cscli and plugins +build: build-info crowdsec cscli plugins ## Build crowdsec, cscli and plugins -.PHONY: pre-build -pre-build: ## Sanity checks and build information +.PHONY: build-info +build-info: ## Print build information $(info Building $(BUILD_VERSION) ($(BUILD_TAG)) $(BUILD_TYPE) for $(GOOS)/$(GOARCH)) ifneq (,$(RE2_FAIL)) @@ -195,11 +191,11 @@ clean: clean-debian clean-rpm testclean ## Remove build artifacts ) .PHONY: cscli -cscli: goversion ## Build cscli +cscli: ## Build cscli @$(MAKE) -C $(CSCLI_FOLDER) build $(MAKE_FLAGS) .PHONY: crowdsec -crowdsec: goversion ## Build crowdsec +crowdsec: ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) .PHONY: generate @@ -223,11 +219,11 @@ testenv: @echo 'NOTE: You 
need to run "make localstack" in a separate shell, "make localstack-stop" to terminate it' .PHONY: test -test: testenv goversion ## Run unit tests with localstack +test: testenv ## Run unit tests with localstack $(GOTEST) $(LD_OPTS) ./... .PHONY: go-acc -go-acc: testenv goversion ## Run unit tests with localstack + coverage +go-acc: testenv ## Run unit tests with localstack + coverage go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models -- $(LD_OPTS) check_docker: @@ -305,5 +301,4 @@ else include test/bats.mk endif -include mk/goversion.mk include mk/help.mk diff --git a/mk/check_go_version.ps1 b/mk/check_go_version.ps1 deleted file mode 100644 index 6060cb22751..00000000000 --- a/mk/check_go_version.ps1 +++ /dev/null @@ -1,19 +0,0 @@ -##This must be called with $(MINIMUM_SUPPORTED_GO_MAJOR_VERSION) $(MINIMUM_SUPPORTED_GO_MINOR_VERSION) in this order -$min_major=$args[0] -$min_minor=$args[1] -$goversion = (go env GOVERSION).replace("go","").split(".") -$goversion_major=$goversion[0] -$goversion_minor=$goversion[1] -$err_msg="Golang version $goversion_major.$goversion_minor is not supported, please use least $min_major.$min_minor" - -if ( $goversion_major -gt $min_major ) { - exit 0; -} -elseif ($goversion_major -lt $min_major) { - Write-Output $err_msg; - exit 1; -} -elseif ($goversion_minor -lt $min_minor) { - Write-Output $(GO_VERSION_VALIDATION_ERR_MSG); - exit 1; -} diff --git a/mk/goversion.mk b/mk/goversion.mk deleted file mode 100644 index 73e9a72e232..00000000000 --- a/mk/goversion.mk +++ /dev/null @@ -1,36 +0,0 @@ - -BUILD_GOVERSION = $(subst go,,$(shell $(GO) env GOVERSION)) - -go_major_minor = $(subst ., ,$(BUILD_GOVERSION)) -GO_MAJOR_VERSION = $(word 1, $(go_major_minor)) -GO_MINOR_VERSION = $(word 2, $(go_major_minor)) - -GO_VERSION_VALIDATION_ERR_MSG = Golang version ($(BUILD_GOVERSION)) is not supported, please use at least $(BUILD_REQUIRE_GO_MAJOR).$(BUILD_REQUIRE_GO_MINOR) - - -.PHONY: goversion -goversion: 
$(if $(findstring devel,$(shell $(GO) env GOVERSION)),goversion_devel,goversion_check) - - -.PHONY: goversion_devel -goversion_devel: - $(warning WARNING: You are using a development version of Golang ($(BUILD_GOVERSION)) which is not supported. For production environments, use a stable version (at least $(BUILD_REQUIRE_GO_MAJOR).$(BUILD_REQUIRE_GO_MINOR))) - $(info ) - - -.PHONY: goversion_check -goversion_check: -ifneq ($(OS), Windows_NT) - @if [ $(GO_MAJOR_VERSION) -gt $(BUILD_REQUIRE_GO_MAJOR) ]; then \ - exit 0; \ - elif [ $(GO_MAJOR_VERSION) -lt $(BUILD_REQUIRE_GO_MAJOR) ]; then \ - echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ - exit 1; \ - elif [ $(GO_MINOR_VERSION) -lt $(BUILD_REQUIRE_GO_MINOR) ] ; then \ - echo '$(GO_VERSION_VALIDATION_ERR_MSG)';\ - exit 1; \ - fi -else - # This needs Set-ExecutionPolicy -Scope CurrentUser Unrestricted - @$(CURDIR)/mk/check_go_version.ps1 $(BUILD_REQUIRE_GO_MAJOR) $(BUILD_REQUIRE_GO_MINOR) -endif From aab59b628794e933e7313d0cd25f5f746b0b2530 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 9 Jul 2024 14:51:31 +0200 Subject: [PATCH 213/581] typos (#3104) * typos * lint --- .golangci.yml | 8 -------- cmd/crowdsec-cli/metrics/statacquis.go | 2 +- cmd/crowdsec-cli/metrics/statparser.go | 2 +- cmd/crowdsec-cli/notifications.go | 2 +- 4 files changed, 3 insertions(+), 11 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 855c73f9af3..2ac39793731 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -407,18 +407,10 @@ issues: - errorlint text: "type switch on error will fail on wrapped errors. Use errors.As to check for specific errors" - - linters: - - errorlint - text: "type assertion on error will fail on wrapped errors. Use errors.Is to check for specific errors" - - linters: - errorlint text: "comparing with .* will fail on wrapped errors. Use errors.Is to check for a specific error" - - linters: - - errorlint - text: "switch on an error will fail on wrapped errors. 
Use errors.Is to check for specific errors" - - linters: - nosprintfhostport text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf" diff --git a/cmd/crowdsec-cli/metrics/statacquis.go b/cmd/crowdsec-cli/metrics/statacquis.go index 4a8cf0f8934..c004f64f17a 100644 --- a/cmd/crowdsec-cli/metrics/statacquis.go +++ b/cmd/crowdsec-cli/metrics/statacquis.go @@ -15,7 +15,7 @@ func (s statAcquis) Description() (string, string) { return "Acquisition Metrics", `Measures the lines read, parsed, and unparsed per datasource. ` + `Zero read lines indicate a misconfigured or inactive datasource. ` + - `Zero parsed lines mean the parser(s) failed. ` + + `Zero parsed lines means the parser(s) failed. ` + `Non-zero parsed lines are fine as crowdsec selects relevant lines.` } diff --git a/cmd/crowdsec-cli/metrics/statparser.go b/cmd/crowdsec-cli/metrics/statparser.go index 0ed7cedbe4b..d8d651f269f 100644 --- a/cmd/crowdsec-cli/metrics/statparser.go +++ b/cmd/crowdsec-cli/metrics/statparser.go @@ -14,7 +14,7 @@ type statParser map[string]map[string]int func (s statParser) Description() (string, string) { return "Parser Metrics", `Tracks the number of events processed by each parser and indicates success of failure. ` + - `Zero parsed lines means the parer(s) failed. ` + + `Zero parsed lines means the parser(s) failed. 
` + `Non-zero unparsed lines are fine as crowdsec select relevant lines.` } diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/notifications.go index 4d757fca44e..8c6b6631b33 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/notifications.go @@ -112,7 +112,7 @@ func (cli *cliNotifications) getPluginConfigs() (map[string]csplugin.PluginConfi func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, error) { cfg := cli.cfg() - // A bit of a tricky stuf now: reconcile profiles and notification plugins + // A bit of a tricky stuff now: reconcile profiles and notification plugins pcfgs, err := cli.getPluginConfigs() if err != nil { return nil, err From 31195ddf36288a4091039dc162c87cf469791ec8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 9 Jul 2024 15:30:38 +0200 Subject: [PATCH 214/581] update vagrant config for opensuse (#3119) --- .../experimental/{opensuse-15.4 => opensuse-15.6}/Vagrantfile | 3 ++- test/ansible/vagrant/experimental/opensuse-15.6/bootstrap | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) rename test/ansible/vagrant/experimental/{opensuse-15.4 => opensuse-15.6}/Vagrantfile (84%) create mode 100644 test/ansible/vagrant/experimental/opensuse-15.6/bootstrap diff --git a/test/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile b/test/ansible/vagrant/experimental/opensuse-15.6/Vagrantfile similarity index 84% rename from test/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile rename to test/ansible/vagrant/experimental/opensuse-15.6/Vagrantfile index 4a3ec307c4f..f2dc70816c9 100644 --- a/test/ansible/vagrant/experimental/opensuse-15.4/Vagrantfile +++ b/test/ansible/vagrant/experimental/opensuse-15.6/Vagrantfile @@ -1,7 +1,8 @@ # frozen_string_literal: true Vagrant.configure('2') do |config| - config.vm.box = 'opensuse/Leap-15.4.x86_64' + config.vm.box = 'opensuse/Leap-15.6.x86_64' + config.vm.box_version = "15.6.13.280" config.vm.define 
'crowdsec' config.vm.provision 'shell', path: 'bootstrap' diff --git a/test/ansible/vagrant/experimental/opensuse-15.6/bootstrap b/test/ansible/vagrant/experimental/opensuse-15.6/bootstrap new file mode 100644 index 00000000000..a43165d1828 --- /dev/null +++ b/test/ansible/vagrant/experimental/opensuse-15.6/bootstrap @@ -0,0 +1,3 @@ +#!/bin/sh + +zypper install -y kitty-terminfo From 6f5e970f502f06185401e8182c9976f11d5b47df Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 10 Jul 2024 12:57:16 +0200 Subject: [PATCH 215/581] func tests: update curl wrapper (#3121) --- test/bats/00_wait_for.bats | 1 - test/bats/01_crowdsec_lapi.bats | 1 - test/bats/01_cscli.bats | 1 - test/bats/07_setup.bats | 1 - test/bats/10_bouncers.bats | 2 +- test/bats/11_bouncers_tls.bats | 16 ++++++++------ test/bats/20_hub_items.bats | 4 ++-- test/bats/20_hub_scenarios.bats | 2 +- test/bats/30_machines_tls.bats | 1 - test/bats/90_decisions.bats | 1 - test/bats/97_ipv4_single.bats | 12 +++++------ test/bats/97_ipv6_single.bats | 16 +++++++------- test/bats/98_ipv4_range.bats | 16 +++++++------- test/bats/98_ipv6_range.bats | 24 ++++++++++----------- test/bats/99_lapi-stream-mode-scenario.bats | 24 ++++++++++----------- test/bats/99_lapi-stream-mode-scopes.bats | 8 +++---- test/bats/99_lapi-stream-mode.bats | 8 +++---- test/lib/setup_file.sh | 16 ++++++++------ 18 files changed, 76 insertions(+), 78 deletions(-) diff --git a/test/bats/00_wait_for.bats b/test/bats/00_wait_for.bats index ffc6802d9bc..94c65033bb4 100644 --- a/test/bats/00_wait_for.bats +++ b/test/bats/00_wait_for.bats @@ -68,4 +68,3 @@ setup() { 2 EOT } - diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 0f6c41cc53d..21e1d7a093e 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ b/test/bats/01_crowdsec_lapi.bats @@ -48,4 +48,3 @@ teardown() { rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:0"' rune -0 wait-for --err 
"CrowdSec Local API listening on 127.0.0.1:" "$CROWDSEC" -no-cs } - diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index a0878ad0e12..792274cc4f4 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -130,7 +130,6 @@ teardown() { EOT } - @test "cscli - required configuration paths" { config=$(cat "$CONFIG_YAML") configdir=$(config_get '.config_paths.config_dir') diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index 9e3f5533728..2106d3ab6b2 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -822,4 +822,3 @@ update-notifier-motd.timer enabled enabled assert_output "while unmarshaling setup file: yaml: line 2: could not find expected ':'" assert_stderr --partial "invalid setup file" } - diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 9e795e584b4..b6efbd06650 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -68,7 +68,7 @@ teardown() { assert_output --regexp 'ciTestBouncer.*api-key.*' # the first connection sets last_pull and ip address - rune -0 lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' rune -0 cscli bouncers list -o json rune -0 jq -r '.[] | .ip_address' <(output) assert_output 127.0.0.1 diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 765e93ebee2..849b3a5b35c 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -146,12 +146,13 @@ teardown() { } @test "simulate a bouncer request with a valid cert" { - rune -0 curl -f -s \ + rune -0 curl --fail-with-body -sS \ --cert "$tmpdir/leaf.pem" \ --key "$tmpdir/leaf-key.pem" \ --cacert "$tmpdir/bundle.pem" \ https://localhost:8080/v1/decisions\?ip=42.42.42.42 assert_output "null" + refute_stderr rune -0 cscli bouncers list -o json rune -0 jq '. 
| length' <(output) assert_output '1' @@ -162,22 +163,24 @@ teardown() { } @test "simulate a bouncer request with an invalid cert" { - rune -77 curl -f -s \ + rune -77 curl --fail-with-body -sS \ --cert "$tmpdir/leaf_invalid.pem" \ --key "$tmpdir/leaf_invalid-key.pem" \ --cacert "$tmpdir/root-key.pem" \ https://localhost:8080/v1/decisions\?ip=42.42.42.42 + assert_stderr --partial 'error setting certificate file' rune -0 cscli bouncers list -o json assert_output "[]" } @test "simulate a bouncer request with an invalid OU" { - rune -0 curl -s \ + rune -22 curl --fail-with-body -sS \ --cert "$tmpdir/leaf_bad_ou.pem" \ --key "$tmpdir/leaf_bad_ou-key.pem" \ --cacert "$tmpdir/bundle.pem" \ https://localhost:8080/v1/decisions\?ip=42.42.42.42 - assert_json '{message:"access forbidden"}' + assert_json '{message: "access forbidden"}' + assert_stderr --partial 'error: 403' rune -0 cscli bouncers list -o json assert_output "[]" } @@ -187,13 +190,14 @@ teardown() { # we connect twice to test the cache too for cert in "leaf_rev1" "leaf_rev2" "leaf_rev1" "leaf_rev2"; do truncate_log - rune -0 curl -s \ + rune -22 curl --fail-with-body -sS \ --cert "$tmpdir/$cert.pem" \ --key "$tmpdir/$cert-key.pem" \ --cacert "$tmpdir/bundle.pem" \ https://localhost:8080/v1/decisions\?ip=42.42.42.42 assert_log --partial "certificate revoked by CRL" - assert_output --partial "access forbidden" + assert_json '{message: "access forbidden"}' + assert_stderr --partial "error: 403" rune -0 cscli bouncers list -o json assert_output "[]" done diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index c6dbafc0911..1846b3e424c 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -46,7 +46,7 @@ teardown() { '. 
* {collections:{"crowdsecurity/sshd":{"versions":{"1.2":{"digest":$DIGEST, "deprecated": false}, "1.10": {"digest":$DIGEST, "deprecated": false}}}}}' \ ) echo "$new_hub" >"$INDEX_PATH" - + rune -0 cscli collections install crowdsecurity/sshd truncate -s 0 "$CONFIG_DIR/collections/sshd.yaml" @@ -78,7 +78,7 @@ teardown() { '. * {collections:{"crowdsecurity/sshd":{"versions":{"1.2.3.4":{"digest":"foo", "deprecated": false}}}}}' \ ) echo "$new_hub" >"$INDEX_PATH" - + rune -0 cscli collections install crowdsecurity/sshd rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics -o json # XXX: we are on the verbose side here... diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats index 3089e244cf1..4e4b28e7703 100644 --- a/test/bats/20_hub_scenarios.bats +++ b/test/bats/20_hub_scenarios.bats @@ -96,7 +96,7 @@ teardown() { # non-existent rune -1 cscli scenario install foo/bar assert_stderr --partial "can't find 'foo/bar' in scenarios" - + # not installed rune -0 cscli scenarios list crowdsecurity/ssh-bf assert_output --regexp 'crowdsecurity/ssh-bf.*disabled' diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index ef2915e3880..ef02d1b57c3 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -3,7 +3,6 @@ set -u - # root: root CA # inter: intermediate CA # inter_rev: intermediate CA revoked by root (CRL3) diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index be6e905356e..c7ed214ffc9 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -179,7 +179,6 @@ teardown() { # disarding only some invalid decisions - rune -0 cscli alerts delete --all truncate -s 0 "$LOGFILE" diff --git a/test/bats/97_ipv4_single.bats b/test/bats/97_ipv4_single.bats index f02d9ebd503..b709930e2e5 100644 --- a/test/bats/97_ipv4_single.bats +++ b/test/bats/97_ipv4_single.bats @@ -30,7 +30,7 @@ setup() { } @test "API - first decisions list: must be empty" { - rune -0 
lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' assert_output 'null' } @@ -46,7 +46,7 @@ setup() { } @test "API - all decisions" { - rune -0 lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' rune -0 jq -c '[ . | length, .[0].value ]' <(output) assert_output '[1,"1.2.3.4"]' } @@ -60,7 +60,7 @@ setup() { } @test "API - decision for 1.2.3.4" { - rune -0 lapi-get '/v1/decisions?ip=1.2.3.4' + rune -0 curl-with-key '/v1/decisions?ip=1.2.3.4' rune -0 jq -r '.[0].value' <(output) assert_output '1.2.3.4' } @@ -71,7 +71,7 @@ setup() { } @test "API - decision for 1.2.3.5" { - rune -0 lapi-get '/v1/decisions?ip=1.2.3.5' + rune -0 curl-with-key '/v1/decisions?ip=1.2.3.5' assert_output 'null' } @@ -83,7 +83,7 @@ setup() { } @test "API - decision for 1.2.3.0/24" { - rune -0 lapi-get '/v1/decisions?range=1.2.3.0/24' + rune -0 curl-with-key '/v1/decisions?range=1.2.3.0/24' assert_output 'null' } @@ -94,7 +94,7 @@ setup() { } @test "API - decisions where IP in 1.2.3.0/24" { - rune -0 lapi-get '/v1/decisions?range=1.2.3.0/24&contains=false' + rune -0 curl-with-key '/v1/decisions?range=1.2.3.0/24&contains=false' rune -0 jq -r '.[0].value' <(output) assert_output '1.2.3.4' } diff --git a/test/bats/97_ipv6_single.bats b/test/bats/97_ipv6_single.bats index 3cb1cedd0c2..c7aea030f9c 100644 --- a/test/bats/97_ipv6_single.bats +++ b/test/bats/97_ipv6_single.bats @@ -41,7 +41,7 @@ setup() { } @test "API - all decisions" { - rune -0 lapi-get "/v1/decisions" + rune -0 curl-with-key "/v1/decisions" rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -53,7 +53,7 @@ setup() { } @test "API - decisions for ip 1111:2222:3333:4444:5555:6666:7777:888" { - rune -0 lapi-get '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' + rune -0 curl-with-key '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8888' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -64,7 +64,7 @@ 
setup() { } @test "API - decisions for ip 1211:2222:3333:4444:5555:6666:7777:888" { - rune -0 lapi-get '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' + rune -0 curl-with-key '/v1/decisions?ip=1211:2222:3333:4444:5555:6666:7777:8888' assert_output 'null' } @@ -74,7 +74,7 @@ setup() { } @test "API - decisions for ip 1111:2222:3333:4444:5555:6666:7777:8887" { - rune -0 lapi-get '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' + rune -0 curl-with-key '/v1/decisions?ip=1111:2222:3333:4444:5555:6666:7777:8887' assert_output 'null' } @@ -84,7 +84,7 @@ setup() { } @test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' + rune -0 curl-with-key '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48' assert_output 'null' } @@ -95,7 +95,7 @@ setup() { } @test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&&contains=false' + rune -0 curl-with-key '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/48&&contains=false' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } @@ -106,7 +106,7 @@ setup() { } @test "API - decisions for range 1111:2222:3333:4444:5555:6666:7777:8888/64" { - rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' + rune -0 curl-with-key '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64' assert_output 'null' } @@ -117,7 +117,7 @@ setup() { } @test "API - decisions for ip/range in 1111:2222:3333:4444:5555:6666:7777:8888/64" { - rune -0 lapi-get '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&&contains=false' + rune -0 curl-with-key '/v1/decisions?range=1111:2222:3333:4444:5555:6666:7777:8888/64&&contains=false' rune -0 jq -r '.[].value' <(output) assert_output '1111:2222:3333:4444:5555:6666:7777:8888' } 
diff --git a/test/bats/98_ipv4_range.bats b/test/bats/98_ipv4_range.bats index a58f144b885..c85e40267f3 100644 --- a/test/bats/98_ipv4_range.bats +++ b/test/bats/98_ipv4_range.bats @@ -41,7 +41,7 @@ setup() { } @test "API - all decisions" { - rune -0 lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -55,7 +55,7 @@ setup() { } @test "API - decisions for ip 4.4.4." { - rune -0 lapi-get '/v1/decisions?ip=4.4.4.3' + rune -0 curl-with-key '/v1/decisions?ip=4.4.4.3' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -66,7 +66,7 @@ setup() { } @test "API - decisions for ip contained in 4.4.4." { - rune -0 lapi-get '/v1/decisions?ip=4.4.4.4&contains=false' + rune -0 curl-with-key '/v1/decisions?ip=4.4.4.4&contains=false' assert_output 'null' } @@ -76,7 +76,7 @@ setup() { } @test "API - decisions for ip 5.4.4." { - rune -0 lapi-get '/v1/decisions?ip=5.4.4.3' + rune -0 curl-with-key '/v1/decisions?ip=5.4.4.3' assert_output 'null' } @@ -86,7 +86,7 @@ setup() { } @test "API - decisions for range 4.4.0.0/1" { - rune -0 lapi-get '/v1/decisions?range=4.4.0.0/16' + rune -0 curl-with-key '/v1/decisions?range=4.4.0.0/16' assert_output 'null' } @@ -97,7 +97,7 @@ setup() { } @test "API - decisions for ip/range in 4.4.0.0/1" { - rune -0 lapi-get '/v1/decisions?range=4.4.0.0/16&contains=false' + rune -0 curl-with-key '/v1/decisions?range=4.4.0.0/16&contains=false' rune -0 jq -r '.[0].value' <(output) assert_output '4.4.4.0/24' } @@ -111,7 +111,7 @@ setup() { } @test "API - decisions for range 4.4.4.2/2" { - rune -0 lapi-get '/v1/decisions?range=4.4.4.2/28' + rune -0 curl-with-key '/v1/decisions?range=4.4.4.2/28' rune -0 jq -r '.[].value' <(output) assert_output '4.4.4.0/24' } @@ -122,6 +122,6 @@ setup() { } @test "API - decisions for range 4.4.3.2/2" { - rune -0 lapi-get '/v1/decisions?range=4.4.3.2/28' + rune -0 curl-with-key '/v1/decisions?range=4.4.3.2/28' assert_output 'null' } diff 
--git a/test/bats/98_ipv6_range.bats b/test/bats/98_ipv6_range.bats index 065f32b74ba..531122a5533 100644 --- a/test/bats/98_ipv6_range.bats +++ b/test/bats/98_ipv6_range.bats @@ -41,7 +41,7 @@ setup() { } @test "API - all decisions (2)" { - rune -0 lapi-get '/v1/decisions' + rune -0 curl-with-key '/v1/decisions' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -55,7 +55,7 @@ setup() { } @test "API - decisions for ip aaaa:2222:3333:4444:5555:6666:7777:8888" { - rune -0 lapi-get '/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' + rune -0 curl-with-key '/v1/decisions?ip=aaaa:2222:3333:4444:5555:6666:7777:8888' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -66,7 +66,7 @@ setup() { } @test "API - decisions for ip aaaa:2222:3333:4445:5555:6666:7777:8888" { - rune -0 lapi-get '/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' + rune -0 curl-with-key '/v1/decisions?ip=aaaa:2222:3333:4445:5555:6666:7777:8888' assert_output 'null' } @@ -76,7 +76,7 @@ setup() { } @test "API - decisions for ip aaa1:2222:3333:4444:5555:6666:7777:8887" { - rune -0 lapi-get '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' + rune -0 curl-with-key '/v1/decisions?ip=aaa1:2222:3333:4444:5555:6666:7777:8887' assert_output 'null' } @@ -89,7 +89,7 @@ setup() { } @test "API - decisions for range aaaa:2222:3333:4444:5555::/80" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4444:5555::/80' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -101,7 +101,7 @@ setup() { } @test "API - decisions for range aaaa:2222:3333:4441:5555::/80" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4441:5555::/80' assert_output 'null' } @@ -111,7 +111,7 @@ setup() { } @test "API - decisions for range aaa1:2222:3333:4444:5555::/80" { 
- rune -0 lapi-get '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' + rune -0 curl-with-key '/v1/decisions?range=aaa1:2222:3333:4444:5555::/80' assert_output 'null' } @@ -123,7 +123,7 @@ setup() { } @test "API - decisions for range aaaa:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48' assert_output 'null' } @@ -134,7 +134,7 @@ setup() { } @test "API - decisions for ip/range in aaaa:2222:3333:4444:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4444:5555:6666:7777:8888/48&contains=false' rune -0 jq -r '.[].value' <(output) assert_output 'aaaa:2222:3333:4444::/64' } @@ -145,7 +145,7 @@ setup() { } @test "API - decisions for ip/range in aaaa:2222:3333:4445:5555:6666:7777:8888/48" { - rune -0 lapi-get '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' + rune -0 curl-with-key '/v1/decisions?range=aaaa:2222:3333:4445:5555:6666:7777:8888/48' assert_output 'null' } @@ -163,7 +163,7 @@ setup() { } @test "API - decisions for ip in bbbb:db8:0000:0000:0000:6fff:ffff:ffff" { - rune -0 lapi-get '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' + rune -0 curl-with-key '/v1/decisions?ip=bbbb:db8:0000:0000:0000:6fff:ffff:ffff' rune -0 jq -r '.[].value' <(output) assert_output 'bbbb:db8::/81' } @@ -174,7 +174,7 @@ setup() { } @test "API - decisions for ip in bbbb:db8:0000:0000:0000:8fff:ffff:ffff" { - rune -0 lapi-get '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' + rune -0 curl-with-key '/v1/decisions?ip=bbbb:db8:0000:0000:0000:8fff:ffff:ffff' assert_output 'null' } diff --git a/test/bats/99_lapi-stream-mode-scenario.bats b/test/bats/99_lapi-stream-mode-scenario.bats index 1cd44c1ae80..32c346061d1 100644 --- 
a/test/bats/99_lapi-stream-mode-scenario.bats +++ b/test/bats/99_lapi-stream-mode-scenario.bats @@ -26,7 +26,6 @@ output_new_decisions() { jq -c '.new | map(select(.origin!="CAPI")) | .[] | del(.id) | (.. | .duration?) |= capture("(?<d>[[:digit:]]+h[[:digit:]]+m)").d' <(output) | sort } - @test "adding decisions with different duration, scenario, origin" { # origin: test rune -0 cscli decisions add -i 127.0.0.1 -d 1h -R crowdsecurity/test @@ -55,7 +54,7 @@ output_new_decisions() { } @test "test startup" { - rune -0 lapi-get "/v1/decisions/stream?startup=true" + rune -0 curl-with-key "/v1/decisions/stream?startup=true" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -64,7 +63,7 @@ output_new_decisions() { } @test "test startup with scenarios containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -73,7 +72,7 @@ output_new_decisions() { } @test "test startup with multiple scenarios containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"another_origin","scenario":"crowdsecurity/ssh_bf","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -82,12 +81,12 @@ output_new_decisions() { } @test "test startup with unknown scenarios containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=unknown" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=unknown" assert_output 
'{"deleted":null,"new":null}' } @test "test startup with scenarios containing and not containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=test&scenarios_not_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -96,7 +95,7 @@ output_new_decisions() { } @test "test startup with scenarios containing and not containing 2" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_containing=longest&scenarios_not_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -104,7 +103,7 @@ output_new_decisions() { } @test "test startup with scenarios not containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -113,7 +112,7 @@ output_new_decisions() { } @test "test startup with multiple scenarios not containing" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scenarios_not_containing=ssh_bf,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"4h59m","origin":"test","scenario":"crowdsecurity/longest","scope":"Ip","type":"ban","value":"127.0.0.1"} @@ -121,7 +120,7 @@ output_new_decisions() { } @test 
"test startup with origins parameter" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=another_origin" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&origins=another_origin" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"1h59m","origin":"another_origin","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -130,7 +129,7 @@ output_new_decisions() { } @test "test startup with multiple origins parameter" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=another_origin,test" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&origins=another_origin,test" rune -0 output_new_decisions assert_output - <<-EOT {"duration":"2h59m","origin":"test","scenario":"crowdsecurity/test","scope":"Ip","type":"ban","value":"127.0.0.2"} @@ -139,7 +138,7 @@ output_new_decisions() { } @test "test startup with unknown origins" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&origins=unknown" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&origins=unknown" assert_output '{"deleted":null,"new":null}' } @@ -223,4 +222,3 @@ output_new_decisions() { # NewChecks: []DecisionCheck{}, # }, #} - diff --git a/test/bats/99_lapi-stream-mode-scopes.bats b/test/bats/99_lapi-stream-mode-scopes.bats index a9ed494e69c..67badebea0e 100644 --- a/test/bats/99_lapi-stream-mode-scopes.bats +++ b/test/bats/99_lapi-stream-mode-scopes.bats @@ -29,28 +29,28 @@ setup() { } @test "stream start (implicit ip scope)" { - rune -0 lapi-get "/v1/decisions/stream?startup=true" + rune -0 curl-with-key "/v1/decisions/stream?startup=true" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' refute_output --partial 'toto' } @test "stream start (explicit ip scope)" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=ip" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scopes=ip" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' refute_output --partial 'toto' } @test "stream 
start (user scope)" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=user" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scopes=user" rune -0 jq -r '.new' <(output) refute_output --partial '1.2.3.6' assert_output --partial 'toto' } @test "stream start (user+ip scope)" { - rune -0 lapi-get "/v1/decisions/stream?startup=true&scopes=user,ip" + rune -0 curl-with-key "/v1/decisions/stream?startup=true&scopes=user,ip" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.6' assert_output --partial 'toto' diff --git a/test/bats/99_lapi-stream-mode.bats b/test/bats/99_lapi-stream-mode.bats index c683a6150f4..b3ee8a434ff 100644 --- a/test/bats/99_lapi-stream-mode.bats +++ b/test/bats/99_lapi-stream-mode.bats @@ -31,7 +31,7 @@ setup() { } @test "stream start" { - rune -0 lapi-get "/v1/decisions/stream?startup=true" + rune -0 curl-with-key "/v1/decisions/stream?startup=true" if is_db_mysql; then sleep 3; fi rune -0 jq -r '.new' <(output) assert_output --partial '1111:2222:3333:4444:5555:6666:7777:8888' @@ -42,7 +42,7 @@ setup() { @test "stream cont (add)" { rune -0 cscli decisions add -i '1.2.3.5' if is_db_mysql; then sleep 3; fi - rune -0 lapi-get "/v1/decisions/stream" + rune -0 curl-with-key "/v1/decisions/stream" rune -0 jq -r '.new' <(output) assert_output --partial '1.2.3.5' } @@ -50,13 +50,13 @@ setup() { @test "stream cont (del)" { rune -0 cscli decisions delete -i '1.2.3.4' if is_db_mysql; then sleep 3; fi - rune -0 lapi-get "/v1/decisions/stream" + rune -0 curl-with-key "/v1/decisions/stream" rune -0 jq -r '.deleted' <(output) assert_output --partial '1.2.3.4' } @test "stream restart" { - rune -0 lapi-get "/v1/decisions/stream?startup=true" + rune -0 curl-with-key "/v1/decisions/stream?startup=true" api_out=${output} rune -0 jq -r '.deleted' <(output) assert_output --partial '1.2.3.4' diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index ac651c68c4f..65c600d1769 100755 --- a/test/lib/setup_file.sh +++ 
b/test/lib/setup_file.sh @@ -283,15 +283,17 @@ rune() { export -f rune # call the lapi through unix socket with an API_KEY (authenticates as a bouncer) -lapi-get() { - [[ -z "$1" ]] && { fail "lapi-get: missing path"; } - [[ -z "$API_KEY" ]] && { fail "lapi-get: missing API_KEY"; } +# after $1, pass through extra arguments to curl +curl-with-key() { + [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } + local path=$1 + shift + [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } local socket socket=$(config_get '.api.server.listen_socket') - [[ -z "$socket" ]] && { fail "lapi-get: missing .api.server.listen_socket"; } + [[ -z "$socket" ]] && { fail "${FUNCNAME[0]}: missing .api.server.listen_socket"; } # curl needs a fake hostname when using a unix socket - curl -s -f -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$1" + curl -sS --fail-with-body -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$path" "$@" } -export -f lapi-get - +export -f curl-with-key From 1b0104186aea4981556d86708b364f59884f9723 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 11 Jul 2024 12:19:10 +0200 Subject: [PATCH 216/581] cscli machines/bouncers: dry helper code and move to cscli (#3123) --- cmd/crowdsec-cli/bouncers.go | 90 +++++++++++++++++++++++++----------- cmd/crowdsec-cli/machines.go | 10 ++-- pkg/database/ent/helpers.go | 58 ++++++----------------- 3 files changed, 81 insertions(+), 77 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index d304b1b7867..f7237b8216c 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -27,6 +27,40 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type featureflagProvider interface { + GetFeatureflags() string +} + +type osProvider interface { + GetOsname() string + GetOsversion() string +} + +func getOSNameAndVersion(o osProvider) string { + ret := o.GetOsname() + if o.GetOsversion() != "" 
{ + if ret != "" { + ret += "/" + } + + ret += o.GetOsversion() + } + + if ret == "" { + return "?" + } + + return ret +} + +func getFeatureFlagList(o featureflagProvider) []string { + if o.GetFeatureflags() == "" { + return nil + } + + return strings.Split(o.GetFeatureflags(), ",") +} + func askYesNo(message string, defaultAnswer bool) (bool, error) { var answer bool @@ -113,32 +147,32 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { // bouncerInfo contains only the data we want for inspect/list type bouncerInfo struct { - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - Name string `json:"name"` - Revoked bool `json:"revoked"` - IPAddress string `json:"ip_address"` - Type string `json:"type"` - Version string `json:"version"` - LastPull *time.Time `json:"last_pull"` - AuthType string `json:"auth_type"` - OS string `json:"os,omitempty"` - Featureflags []string `json:"featureflags,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Name string `json:"name"` + Revoked bool `json:"revoked"` + IPAddress string `json:"ip_address"` + Type string `json:"type"` + Version string `json:"version"` + LastPull *time.Time `json:"last_pull"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` } func newBouncerInfo(b *ent.Bouncer) bouncerInfo { return bouncerInfo{ - CreatedAt: b.CreatedAt, - UpdatedAt: b.UpdatedAt, - Name: b.Name, - Revoked: b.Revoked, - IPAddress: b.IPAddress, - Type: b.Type, - Version: b.Version, - LastPull: b.LastPull, - AuthType: b.AuthType, - OS: b.GetOSNameAndVersion(), - Featureflags: b.GetFeatureFlagList(), + CreatedAt: b.CreatedAt, + UpdatedAt: b.UpdatedAt, + Name: b.Name, + Revoked: b.Revoked, + IPAddress: b.IPAddress, + Type: b.Type, + Version: b.Version, + LastPull: b.LastPull, + AuthType: b.AuthType, + OS: getOSNameAndVersion(b), + Featureflags: getFeatureFlagList(b), } 
} @@ -166,10 +200,10 @@ func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { } csvwriter.Flush() + return nil } - func (cli *cliBouncers) list(out io.Writer) error { bouncers, err := cli.db.ListBouncers() if err != nil { @@ -342,7 +376,7 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { func (cli *cliBouncers) prune(duration time.Duration, force bool) error { if duration < 2*time.Minute { if yes, err := askYesNo( - "The duration you provided is less than 2 minutes. " + + "The duration you provided is less than 2 minutes. "+ "This may remove active bouncers. Continue?", false); err != nil { return err } else if !yes { @@ -365,7 +399,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { if !force { if yes, err := askYesNo( - "You are about to PERMANENTLY remove the above bouncers from the database. " + + "You are about to PERMANENTLY remove the above bouncers from the database. "+ "These will NOT be recoverable. Continue?", false); err != nil { return err } else if !yes { @@ -434,10 +468,10 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {"Version", bouncer.Version}, {"Last Pull", lastPull}, {"Auth type", bouncer.AuthType}, - {"OS", bouncer.GetOSNameAndVersion()}, + {"OS", getOSNameAndVersion(bouncer)}, }) - for _, ff := range bouncer.GetFeatureFlagList() { + for _, ff := range getFeatureFlagList(bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } @@ -463,10 +497,10 @@ func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { default: return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } + return nil } - func (cli *cliBouncers) newInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "inspect [bouncer_name]", diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 2da5e20229c..3489550b4fd 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -203,7 +203,7 @@ func (cli *cliMachines) 
listHuman(out io.Writer, machines ent.Machines) { hb = emoji.Warning + " " + hb } - t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.GetOSNameAndVersion(), m.AuthType, hb}) + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, getOSNameAndVersion(m), m.AuthType, hb}) } fmt.Fprintln(out, t.Render()) @@ -236,8 +236,8 @@ func newMachineInfo(m *ent.Machine) machineInfo { Version: m.Version, IsValidated: m.IsValidated, AuthType: m.AuthType, - OS: m.GetOSNameAndVersion(), - Featureflags: m.GetFeatureFlagList(), + OS: getOSNameAndVersion(m), + Featureflags: getFeatureFlagList(m), Datasources: m.Datasources, } } @@ -642,7 +642,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { {"Last Heartbeat", machine.LastHeartbeat}, {"Validated?", machine.IsValidated}, {"CrowdSec version", machine.Version}, - {"OS", machine.GetOSNameAndVersion()}, + {"OS", getOSNameAndVersion(machine)}, {"Auth type", machine.AuthType}, }) @@ -650,7 +650,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) } - for _, ff := range machine.GetFeatureFlagList() { + for _, ff := range getFeatureFlagList(machine) { t.AppendRow(table.Row{"Feature Flags", ff}) } diff --git a/pkg/database/ent/helpers.go b/pkg/database/ent/helpers.go index c6cdbd7f32b..9b30ce451e0 100644 --- a/pkg/database/ent/helpers.go +++ b/pkg/database/ent/helpers.go @@ -1,55 +1,25 @@ package ent -import ( - "strings" -) - -func (m *Machine) GetOSNameAndVersion() string { - ret := m.Osname - if m.Osversion != "" { - if ret != "" { - ret += "/" - } - - ret += m.Osversion - } - - if ret == "" { - return "?" 
- } - - return ret +func (m *Machine) GetOsname() string { + return m.Osname } -func (b *Bouncer) GetOSNameAndVersion() string { - ret := b.Osname - if b.Osversion != "" { - if ret != "" { - ret += "/" - } - - ret += b.Osversion - } - - if ret == "" { - return "?" - } - - return ret +func (b *Bouncer) GetOsname() string { + return b.Osname } -func (m *Machine) GetFeatureFlagList() []string { - if m.Featureflags == "" { - return nil - } +func (m *Machine) GetOsversion() string { + return m.Osversion +} - return strings.Split(m.Featureflags, ",") +func (b *Bouncer) GetOsversion() string { + return b.Osversion } -func (b *Bouncer) GetFeatureFlagList() []string { - if b.Featureflags == "" { - return nil - } +func (m *Machine) GetFeatureflags() string { + return m.Featureflags +} - return strings.Split(b.Featureflags, ",") +func (b *Bouncer) GetFeatureflags() string { + return b.Featureflags } From 06720538f5f35d97f986244e9fe349d129fb61ed Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 12 Jul 2024 17:02:44 +0200 Subject: [PATCH 217/581] revert "db: round created, updated... 
timestamps to 1 second" (#3127) this fixes functional tests with decision stream startup=true --- pkg/types/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 384dd00367e..712d44ba12d 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -66,7 +66,7 @@ func ConfigureLogger(clog *log.Logger) error { } func UtcNow() time.Time { - return time.Now().UTC().Round(time.Second) + return time.Now().UTC() } func IsNetworkFS(path string) (bool, string, error) { From f130ce677db5fa386d5491cd768b48e66f9c0585 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 15 Jul 2024 09:55:52 +0200 Subject: [PATCH 218/581] command "cscli metrics show bouncers" (#3126) * cscli metrics show bouncers * db metrics: increase payload size * func tests --- .../{metrics => climetrics}/list.go | 46 ++- .../{metrics => climetrics}/metrics.go | 8 +- .../{metrics => climetrics}/number.go | 13 +- .../{metrics => climetrics}/show.go | 22 +- .../{metrics => climetrics}/statacquis.go | 2 +- .../{metrics => climetrics}/statalert.go | 2 +- .../statappsecengine.go | 2 +- .../{metrics => climetrics}/statappsecrule.go | 2 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 340 ++++++++++++++++++ .../{metrics => climetrics}/statbucket.go | 2 +- .../{metrics => climetrics}/statdecision.go | 2 +- .../{metrics => climetrics}/statlapi.go | 2 +- .../statlapibouncer.go | 2 +- .../statlapidecision.go | 2 +- .../statlapimachine.go | 2 +- .../{metrics => climetrics}/statparser.go | 2 +- .../{metrics => climetrics}/statstash.go | 2 +- .../{metrics => climetrics}/statwhitelist.go | 2 +- .../{metrics => climetrics}/store.go | 67 ++-- .../{metrics => climetrics}/table.go | 9 +- cmd/crowdsec-cli/main.go | 4 +- cmd/crowdsec-cli/support.go | 10 +- pkg/database/ent/migrate/schema.go | 2 +- pkg/database/ent/schema/metric.go | 2 +- test/bats/08_metrics.bats | 18 +- test/bats/08_metrics_bouncer.bats | 327 +++++++++++++++++ 
test/bats/08_metrics_machines.bats | 101 ++++++ 27 files changed, 892 insertions(+), 103 deletions(-) rename cmd/crowdsec-cli/{metrics => climetrics}/list.go (61%) rename cmd/crowdsec-cli/{metrics => climetrics}/metrics.go (87%) rename cmd/crowdsec-cli/{metrics => climetrics}/number.go (74%) rename cmd/crowdsec-cli/{metrics => climetrics}/show.go (82%) rename cmd/crowdsec-cli/{metrics => climetrics}/statacquis.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statalert.go (97%) rename cmd/crowdsec-cli/{metrics => climetrics}/statappsecengine.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statappsecrule.go (98%) create mode 100644 cmd/crowdsec-cli/climetrics/statbouncer.go rename cmd/crowdsec-cli/{metrics => climetrics}/statbucket.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statdecision.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapi.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapibouncer.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapidecision.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statlapimachine.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statparser.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statstash.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/statwhitelist.go (98%) rename cmd/crowdsec-cli/{metrics => climetrics}/store.go (90%) rename cmd/crowdsec-cli/{metrics => climetrics}/table.go (94%) create mode 100644 test/bats/08_metrics_bouncer.bats create mode 100644 test/bats/08_metrics_machines.bats diff --git a/cmd/crowdsec-cli/metrics/list.go b/cmd/crowdsec-cli/climetrics/list.go similarity index 61% rename from cmd/crowdsec-cli/metrics/list.go rename to cmd/crowdsec-cli/climetrics/list.go index ba68aa4b64d..074f0603555 100644 --- a/cmd/crowdsec-cli/metrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "encoding/json" @@ -6,7 +6,9 @@ import ( 
"github.com/fatih/color" "github.com/spf13/cobra" - "gopkg.in/yaml.v3" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -32,17 +34,36 @@ func (cli *cliMetrics) list() error { }) } - switch cli.cfg().Cscli.Output { + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { case "human": - t := cstable.New(color.Output, cli.cfg().Cscli.Color) - t.SetRowLines(true) - t.SetHeaders("Type", "Title", "Description") + out := color.Output + t := cstable.New(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Type", "Title", "Description"}) + t.SetColumnConfigs([]table.ColumnConfig{ + { + Name: "Type", + AlignHeader: text.AlignCenter, + }, + { + Name: "Title", + AlignHeader: text.AlignCenter, + }, + { + Name: "Description", + AlignHeader: text.AlignCenter, + WidthMax: 60, + WidthMaxEnforcer: text.WrapSoft, + }, + }) + t.Style().Options.SeparateRows = true for _, metric := range allMetrics { - t.AddRow(metric.Type, metric.Title, metric.Description) + t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - t.Render() + fmt.Fprintln(out, t.Render()) case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { @@ -50,13 +71,8 @@ func (cli *cliMetrics) list() error { } fmt.Println(string(x)) - case "raw": - x, err := yaml.Marshal(allMetrics) - if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) - } - - fmt.Println(string(x)) + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } return nil diff --git a/cmd/crowdsec-cli/metrics/metrics.go b/cmd/crowdsec-cli/climetrics/metrics.go similarity index 87% rename from cmd/crowdsec-cli/metrics/metrics.go rename to cmd/crowdsec-cli/climetrics/metrics.go index 52d623dc37e..f3bc4874460 100644 --- a/cmd/crowdsec-cli/metrics/metrics.go +++ b/cmd/crowdsec-cli/climetrics/metrics.go @@ -1,4 +1,4 @@ -package metrics +package climetrics 
import ( "github.com/spf13/cobra" @@ -12,7 +12,7 @@ type cliMetrics struct { cfg configGetter } -func NewCLI(cfg configGetter) *cliMetrics { +func New(cfg configGetter) *cliMetrics { return &cliMetrics{ cfg: cfg, } @@ -38,8 +38,8 @@ cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers cscli metrics list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.show(nil, url, noUnit) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.show(cmd.Context(), nil, url, noUnit) }, } diff --git a/cmd/crowdsec-cli/metrics/number.go b/cmd/crowdsec-cli/climetrics/number.go similarity index 74% rename from cmd/crowdsec-cli/metrics/number.go rename to cmd/crowdsec-cli/climetrics/number.go index a60d3db2a9b..709b7cf853a 100644 --- a/cmd/crowdsec-cli/metrics/number.go +++ b/cmd/crowdsec-cli/climetrics/number.go @@ -1,8 +1,9 @@ -package metrics +package climetrics import ( "fmt" "math" + "strconv" ) type unit struct { @@ -20,11 +21,15 @@ var ranges = []unit{ {value: 1, symbol: ""}, } -func formatNumber(num int) string { - goodUnit := unit{} +func formatNumber(num int64, withUnit bool) string { + if !withUnit { + return strconv.FormatInt(num, 10) + } + + goodUnit := ranges[len(ranges)-1] for _, u := range ranges { - if int64(num) >= u.value { + if num >= u.value { goodUnit = u break } diff --git a/cmd/crowdsec-cli/metrics/show.go b/cmd/crowdsec-cli/climetrics/show.go similarity index 82% rename from cmd/crowdsec-cli/metrics/show.go rename to cmd/crowdsec-cli/climetrics/show.go index 46603034f24..7559463b66b 100644 --- a/cmd/crowdsec-cli/metrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -1,11 +1,16 @@ -package metrics +package climetrics import ( + "context" "errors" "fmt" + log "github.com/sirupsen/logrus" + "github.com/fatih/color" "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) var ( @@ -13,7 +18,7 @@ var ( ErrMetricsDisabled = 
errors.New("prometheus is not enabled, can't show metrics") ) -func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { +func (cli *cliMetrics) show(ctx context.Context, sections []string, url string, noUnit bool) error { cfg := cli.cfg() if url != "" { @@ -30,8 +35,13 @@ func (cli *cliMetrics) show(sections []string, url string, noUnit bool) error { ms := NewMetricStore() - if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { - return err + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + log.Warnf("unable to open database: %s", err) + } + + if err := ms.Fetch(ctx, cfg.Cscli.PrometheusUrl, db); err != nil { + log.Warn(err) } // any section that we don't have in the store is an error @@ -90,9 +100,9 @@ cscli metrics list; cscli metrics list -o json cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { args = expandAlias(args) - return cli.show(args, url, noUnit) + return cli.show(cmd.Context(), args, url, noUnit) }, } diff --git a/cmd/crowdsec-cli/metrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statacquis.go rename to cmd/crowdsec-cli/climetrics/statacquis.go index c004f64f17a..a18b8a2e2a2 100644 --- a/cmd/crowdsec-cli/metrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go similarity index 97% rename from cmd/crowdsec-cli/metrics/statalert.go rename to cmd/crowdsec-cli/climetrics/statalert.go index c8055910a3a..65009fa322d 100644 --- a/cmd/crowdsec-cli/metrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git 
a/cmd/crowdsec-cli/metrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statappsecengine.go rename to cmd/crowdsec-cli/climetrics/statappsecengine.go index 18ad03ef03f..e4e0048daaf 100644 --- a/cmd/crowdsec-cli/metrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statappsecrule.go rename to cmd/crowdsec-cli/climetrics/statappsecrule.go index 17ec28de99e..9647a111b77 100644 --- a/cmd/crowdsec-cli/metrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "fmt" diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go new file mode 100644 index 00000000000..0105e345313 --- /dev/null +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -0,0 +1,340 @@ +package climetrics + +import ( + "context" + "encoding/json" + "fmt" + "io" + "time" + + "github.com/jedib0t/go-pretty/v6/table" + "github.com/jedib0t/go-pretty/v6/text" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/maptools" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +// un-aggregated data, de-normalized. 
+type bouncerMetricItem struct { + bouncerName string + ipType string + origin string + name string + unit string + value float64 +} + +type statBouncer struct { + // oldest collection timestamp for each bouncer + oldestTS map[string]*time.Time + // we keep de-normalized metrics so we can iterate + // over them multiple times and keep the aggregation code simple + rawMetrics []bouncerMetricItem + aggregated map[string]map[string]map[string]map[string]int64 + aggregatedAllOrigin map[string]map[string]map[string]int64 +} + +var knownPlurals = map[string]string{ + "byte": "bytes", + "packet": "packets", + "ip": "IPs", +} + +func (s *statBouncer) MarshalJSON() ([]byte, error) { + return json.Marshal(s.aggregated) +} + +func (s *statBouncer) Description() (string, string) { + return "Bouncer Metrics", + `Network traffic blocked by bouncers.` +} + +func warnOnce(warningsLogged map[string]bool, msg string) { + if _, ok := warningsLogged[msg]; !ok { + log.Warningf(msg) + warningsLogged[msg] = true + } +} + +func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { + if db == nil { + return nil + } + + // query all bouncer metrics that have not been flushed + + metrics, err := db.Ent.Metric.Query(). 
+ Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeRC), + ).All(ctx) + if err != nil { + return fmt.Errorf("unable to fetch metrics: %w", err) + } + + s.oldestTS = make(map[string]*time.Time) + + // don't spam the user with the same warnings + warningsLogged := make(map[string]bool) + + for _, met := range metrics { + bouncerName := met.GeneratedBy + + collectedAt := met.CollectedAt + if s.oldestTS[bouncerName] == nil || collectedAt.Before(*s.oldestTS[bouncerName]) { + s.oldestTS[bouncerName] = &collectedAt + } + + type bouncerMetrics struct { + Metrics []models.DetailedMetrics `json:"metrics"` + } + + payload := bouncerMetrics{} + + err := json.Unmarshal([]byte(met.Payload), &payload) + if err != nil { + log.Warningf("while parsing metrics for %s: %s", bouncerName, err) + continue + } + + for _, m := range payload.Metrics { + for _, item := range m.Items { + labels := item.Labels + + // these are mandatory but we got pointers, so... + + valid := true + + if item.Name == nil { + warnOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + // no continue - keep checking the rest + valid = false + } + + if item.Unit == nil { + warnOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + valid = false + } + + if item.Value == nil { + warnOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + valid = false + } + + if !valid { + continue + } + + name := *item.Name + unit := *item.Unit + value := *item.Value + + rawMetric := bouncerMetricItem{ + bouncerName: bouncerName, + ipType: labels["ip_type"], + origin: labels["origin"], + name: name, + unit: unit, + value: value, + } + + s.rawMetrics = append(s.rawMetrics, rawMetric) + } + } + } + + s.aggregate() + + return nil +} + +func (s *statBouncer) aggregate() { + // [bouncer][origin][name][unit]value + if s.aggregated == nil { + s.aggregated = make(map[string]map[string]map[string]map[string]int64) + } + + if s.aggregatedAllOrigin == nil { 
+ s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) + } + + for _, raw := range s.rawMetrics { + if _, ok := s.aggregated[raw.bouncerName]; !ok { + s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin]; !ok { + s.aggregated[raw.bouncerName][raw.origin] = make(map[string]map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name]; !ok { + s.aggregated[raw.bouncerName][raw.origin][raw.name] = make(map[string]int64) + } + + if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = 0 + } + + s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName]; !ok { + s.aggregatedAllOrigin[raw.bouncerName] = make(map[string]map[string]int64) + } + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name]; !ok { + s.aggregatedAllOrigin[raw.bouncerName][raw.name] = make(map[string]int64) + } + + if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit]; !ok { + s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] = 0 + } + + s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] += int64(raw.value) + } +} + +// bouncerTable displays a table of metrics for a single bouncer +func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor string, noUnit bool) { + columns := make(map[string]map[string]bool) + for _, item := range s.rawMetrics { + if item.bouncerName != bouncerName { + continue + } + // build a map of the metric names and units, to display dynamic columns + if _, ok := columns[item.name]; !ok { + columns[item.name] = make(map[string]bool) + } + columns[item.name][item.unit] = true + } + + // no metrics for this bouncer, skip. how did we get here ? 
+ // anyway we can't honor the "showEmpty" flag in this case, + // we don't heven have the table headers + + if len(columns) == 0 { + return + } + + t := cstable.New(out, wantColor).Writer + header1 := table.Row{"Origin"} + header2 := table.Row{""} + colNum := 1 + + colCfg := []table.ColumnConfig{{ + Number:colNum, + AlignHeader: + text.AlignLeft, + Align: text.AlignLeft, + AlignFooter: text.AlignRight, + }} + + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + colNum += 1 + header1 = append(header1, name) + + // we don't add "s" to random words + if knownPlurals[unit] != "" { + unit = knownPlurals[unit] + } + + header2 = append(header2, unit) + colCfg = append(colCfg, table.ColumnConfig{ + Number: colNum, + AlignHeader: text.AlignCenter, + Align: text.AlignRight, + AlignFooter: text.AlignRight}, + ) + } + } + + t.AppendHeader(header1, table.RowConfig{AutoMerge: true}) + t.AppendHeader(header2) + + t.SetColumnConfigs(colCfg) + + numRows := 0 + + // sort all the ranges for stable output + + for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { + if origin == "" { + // if the metric has no origin (i.e. 
processed bytes/packets) + // we don't display it in the table body but it still gets aggreagted + // in the footer's totals + continue + } + + metrics := s.aggregated[bouncerName][origin] + + // some users don't know what capi is + if origin == "CAPI" { + origin += " (community blocklist)" + } + + row := table.Row{origin} + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + valStr := "-" + val, ok := metrics[name][unit] + if ok { + valStr = formatNumber(val, !noUnit) + } + row = append(row, valStr) + } + } + t.AppendRow(row) + + numRows += 1 + } + + totals := s.aggregatedAllOrigin[bouncerName] + + if numRows == 0 { + t.Style().Options.SeparateFooter = false + } + + footer := table.Row{"Total"} + for _, name := range maptools.SortedKeys(columns) { + for _, unit := range maptools.SortedKeys(columns[name]) { + footer = append(footer, formatNumber(totals[name][unit], !noUnit)) + } + } + + t.AppendFooter(footer) + + title, _ := s.Description() + title = fmt.Sprintf("%s (%s)", title, bouncerName) + if s.oldestTS != nil { + // if we change this to .Local() beware of tests + title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) + } + title += ":" + + // don't use SetTitle() because it draws the title inside table box + // TODO: newline position wrt other stat tables + cstable.RenderTitle(out, title) + fmt.Fprintln(out, t.Render()) +} + +// Table displays a table of metrics for each bouncer +func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool) { + bouncerNames := make(map[string]bool) + for _, item := range s.rawMetrics { + bouncerNames[item.bouncerName] = true + } + + nl := false + for _, bouncerName := range maptools.SortedKeys(bouncerNames) { + if nl { + // empty line between tables + fmt.Fprintln(out) + } + s.bouncerTable(out, bouncerName, wantColor, noUnit) + nl = true + } +} diff --git a/cmd/crowdsec-cli/metrics/statbucket.go 
b/cmd/crowdsec-cli/climetrics/statbucket.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statbucket.go rename to cmd/crowdsec-cli/climetrics/statbucket.go index 62ca4dee71d..836fa0ed1ab 100644 --- a/cmd/crowdsec-cli/metrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statdecision.go rename to cmd/crowdsec-cli/climetrics/statdecision.go index b1474d95f76..485644a55ba 100644 --- a/cmd/crowdsec-cli/metrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapi.go rename to cmd/crowdsec-cli/climetrics/statlapi.go index f8a737e5c44..7d8831aad74 100644 --- a/cmd/crowdsec-cli/metrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapibouncer.go rename to cmd/crowdsec-cli/climetrics/statlapibouncer.go index e7483c6294d..3ee35adfe9a 100644 --- a/cmd/crowdsec-cli/metrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapidecision.go rename to cmd/crowdsec-cli/climetrics/statlapidecision.go index 97e17fe8a49..5f4d2c07764 100644 --- a/cmd/crowdsec-cli/metrics/statlapidecision.go +++ 
b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statlapimachine.go rename to cmd/crowdsec-cli/climetrics/statlapimachine.go index 6b9d9da207e..2f81ccb5751 100644 --- a/cmd/crowdsec-cli/metrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statparser.go rename to cmd/crowdsec-cli/climetrics/statparser.go index d8d651f269f..58ce2248648 100644 --- a/cmd/crowdsec-cli/metrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statstash.go rename to cmd/crowdsec-cli/climetrics/statstash.go index 79c14b04fd6..9de3469bea1 100644 --- a/cmd/crowdsec-cli/metrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go similarity index 98% rename from cmd/crowdsec-cli/metrics/statwhitelist.go rename to cmd/crowdsec-cli/climetrics/statwhitelist.go index 89a016d22b0..6848452458b 100644 --- a/cmd/crowdsec-cli/metrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "io" diff --git a/cmd/crowdsec-cli/metrics/store.go b/cmd/crowdsec-cli/climetrics/store.go similarity index 90% rename from cmd/crowdsec-cli/metrics/store.go rename to 
cmd/crowdsec-cli/climetrics/store.go index 48926488c07..5de50558e89 100644 --- a/cmd/crowdsec-cli/metrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -1,6 +1,7 @@ -package metrics +package climetrics import ( + "context" "encoding/json" "fmt" "io" @@ -12,10 +13,11 @@ import ( dto "github.com/prometheus/client_model/go" "github.com/prometheus/prom2json" log "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/database" ) type metricSection interface { @@ -28,22 +30,31 @@ type metricStore map[string]metricSection func NewMetricStore() metricStore { return metricStore{ "acquisition": statAcquis{}, - "scenarios": statBucket{}, - "parsers": statParser{}, + "alerts": statAlert{}, + "bouncers": &statBouncer{}, + "appsec-engine": statAppsecEngine{}, + "appsec-rule": statAppsecRule{}, + "decisions": statDecision{}, "lapi": statLapi{}, - "lapi-machine": statLapiMachine{}, "lapi-bouncer": statLapiBouncer{}, "lapi-decisions": statLapiDecision{}, - "decisions": statDecision{}, - "alerts": statAlert{}, + "lapi-machine": statLapiMachine{}, + "parsers": statParser{}, + "scenarios": statBucket{}, "stash": statStash{}, - "appsec-engine": statAppsecEngine{}, - "appsec-rule": statAppsecRule{}, "whitelists": statWhitelist{}, } } -func (ms metricStore) Fetch(url string) error { +func (ms metricStore) Fetch(ctx context.Context, url string, db *database.Client) error { + if err := ms["bouncers"].(*statBouncer).Fetch(ctx, db); err != nil { + return err + } + + return ms.fetchPrometheusMetrics(url) +} + +func (ms metricStore) fetchPrometheusMetrics(url string) error { mfChan := make(chan *dto.MetricFamily, 1024) errChan := make(chan error, 1) @@ -59,7 +70,7 @@ func (ms metricStore) Fetch(url string) error { err := prom2json.FetchMetricFamilies(url, mfChan, transport) if err != nil { - errChan <- fmt.Errorf("failed to fetch metrics: %w", err) + errChan 
<- fmt.Errorf("while fetching metrics: %w", err) return } errChan <- nil @@ -75,19 +86,23 @@ func (ms metricStore) Fetch(url string) error { } log.Debugf("Finished reading metrics output, %d entries", len(result)) - /*walk*/ + ms.processPrometheusMetrics(result) + return nil +} + +func (ms metricStore) processPrometheusMetrics(result []*prom2json.Family) { mAcquis := ms["acquisition"].(statAcquis) - mParser := ms["parsers"].(statParser) - mBucket := ms["scenarios"].(statBucket) + mAlert := ms["alerts"].(statAlert) + mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) + mAppsecRule := ms["appsec-rule"].(statAppsecRule) + mDecision := ms["decisions"].(statDecision) mLapi := ms["lapi"].(statLapi) - mLapiMachine := ms["lapi-machine"].(statLapiMachine) mLapiBouncer := ms["lapi-bouncer"].(statLapiBouncer) mLapiDecision := ms["lapi-decisions"].(statLapiDecision) - mDecision := ms["decisions"].(statDecision) - mAppsecEngine := ms["appsec-engine"].(statAppsecEngine) - mAppsecRule := ms["appsec-rule"].(statAppsecRule) - mAlert := ms["alerts"].(statAlert) + mLapiMachine := ms["lapi-machine"].(statLapiMachine) + mParser := ms["parsers"].(statParser) + mBucket := ms["scenarios"].(statBucket) mStash := ms["stash"].(statStash) mWhitelist := ms["whitelists"].(statWhitelist) @@ -219,11 +234,9 @@ func (ms metricStore) Fetch(url string) error { } } } - - return nil } -func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, formatType string, noUnit bool) error { +func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, outputFormat string, noUnit bool) error { // copy only the sections we want want := map[string]metricSection{} @@ -239,7 +252,7 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, want[section] = ms[section] } - switch formatType { + switch outputFormat { case "human": for _, section := range maptools.SortedKeys(want) { want[section].Table(out, wantColor, noUnit, showEmpty) @@ -250,14 
+263,8 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, return fmt.Errorf("failed to marshal metrics: %w", err) } out.Write(x) - case "raw": - x, err := yaml.Marshal(want) - if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) - } - out.Write(x) default: - return fmt.Errorf("unknown format type %s", formatType) + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } return nil diff --git a/cmd/crowdsec-cli/metrics/table.go b/cmd/crowdsec-cli/climetrics/table.go similarity index 94% rename from cmd/crowdsec-cli/metrics/table.go rename to cmd/crowdsec-cli/climetrics/table.go index f51e905ba71..c446f2267e9 100644 --- a/cmd/crowdsec-cli/metrics/table.go +++ b/cmd/crowdsec-cli/climetrics/table.go @@ -1,4 +1,4 @@ -package metrics +package climetrics import ( "errors" @@ -110,12 +110,7 @@ func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []st for _, sl := range keys { if v, ok := astats[sl]; ok && v != 0 { - numberToShow := strconv.Itoa(v) - if !noUnit { - numberToShow = formatNumber(v) - } - - row = append(row, numberToShow) + row = append(row, formatNumber(int64(v), !noUnit)) } else { row = append(row, "-") } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index bd9d8988132..d4046414030 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,7 +14,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -252,7 +252,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) - cmd.AddCommand(metrics.NewCLI(cli.cfg).NewCommand()) + 
cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 1b33ef38ada..324be2710fd 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/metrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -78,7 +78,7 @@ func stripAnsiString(str string) string { return reStripAnsi.ReplaceAllString(str, "") } -func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { +func (cli *cliSupport) dumpMetrics(ctx context.Context, db *database.Client, zw *zip.Writer) error { log.Info("Collecting prometheus metrics") cfg := cli.cfg() @@ -89,9 +89,9 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, zw *zip.Writer) error { humanMetrics := new(bytes.Buffer) - ms := metrics.NewMetricStore() + ms := climetrics.NewMetricStore() - if err := ms.Fetch(cfg.Cscli.PrometheusUrl); err != nil { + if err := ms.Fetch(ctx, cfg.Cscli.PrometheusUrl, db); err != nil { return err } @@ -493,7 +493,7 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { skipCAPI = true } - if err = cli.dumpMetrics(ctx, zipWriter); err != nil { + if err = cli.dumpMetrics(ctx, db, zipWriter); err != nil { log.Warn(err) } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 5b436830192..c1ce25bddef 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -255,7 +255,7 @@ var ( {Name: "generated_by", Type: field.TypeString}, {Name: "collected_at", Type: 
field.TypeTime}, {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, - {Name: "payload", Type: field.TypeString}, + {Name: "payload", Type: field.TypeString, Size: 2147483647}, } // MetricsTable holds the schema information for the "metrics" table. MetricsTable = &schema.Table{ diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go index 9de3f21f464..b47da78bdf3 100644 --- a/pkg/database/ent/schema/metric.go +++ b/pkg/database/ent/schema/metric.go @@ -28,7 +28,7 @@ func (Metric) Fields() []ent.Field { Nillable(). Optional(). Comment("When the metrics are sent to the console"), - field.String("payload"). + field.Text("payload"). Immutable(). Comment("The actual metrics (item0)"), } diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index 8bf30812cff..e260e667524 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -23,9 +23,9 @@ teardown() { #---------- @test "cscli metrics (crowdsec not running)" { - rune -1 cscli metrics - # crowdsec is down - assert_stderr --partial 'failed to fetch metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' + rune -0 cscli metrics + # crowdsec is down, we won't get an error because some metrics come from the db instead + assert_stderr --partial 'while fetching metrics: executing GET request for URL \"http://127.0.0.1:6060/metrics\" failed: Get \"http://127.0.0.1:6060/metrics\": dial tcp 127.0.0.1:6060: connect: connection refused' } @test "cscli metrics (bad configuration)" { @@ -72,10 +72,6 @@ teardown() { rune -0 jq 'keys' <(output) assert_output --partial '"alerts",' assert_output --partial '"parsers",' - - rune -0 cscli metrics -o raw - assert_output --partial 'alerts: {}' - assert_output --partial 'parsers: {}' } @test "cscli metrics list" { @@ -85,10 +81,6 @@ teardown() { rune -0 cscli metrics list -o json rune -0 jq -c '.[] | 
[.type,.title]' <(output) assert_line '["acquisition","Acquisition Metrics"]' - - rune -0 cscli metrics list -o raw - assert_line "- type: acquisition" - assert_line " title: Acquisition Metrics" } @test "cscli metrics show" { @@ -108,8 +100,4 @@ teardown() { rune -0 cscli metrics show lapi -o json rune -0 jq -c '.lapi."/v1/watchers/login" | keys' <(output) assert_json '["POST"]' - - rune -0 cscli metrics show lapi -o raw - assert_line 'lapi:' - assert_line ' /v1/watchers/login:' } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats new file mode 100644 index 00000000000..778452644dd --- /dev/null +++ b/test/bats/08_metrics_bouncer.bats @@ -0,0 +1,327 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start + skip "require the usage_metrics endpoint on apiserver" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli metrics show bouncers" { + # there are no bouncers, so no metrics yet + rune -0 cscli metrics show bouncers + refute_output +} + +@test "rc usage metrics (empty payload)" { + # a registered bouncer can send metrics for the lapi and console + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial 'error: 400' + assert_json '{message: "Missing remediation component data"}' +} + +@test "rc usage metrics (bad payload)" { + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + log_processors: [] + EOT + ) + + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial 
"error: 422" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + remediation_components.0.utc_startup_timestamp in body is required + EOT + + # validation, like timestamp format + + payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = "2021-09-01T00:00:00Z"' <<<"$payload") + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 400" + assert_json '{message: "json: cannot unmarshal string into Go struct field AllMetrics.remediation_components of type int64"}' + + payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = 1707399316' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + refute_output +} + +@test "rc usage metrics (good payload)" { + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707399316 + log_processors: [] + EOT + ) + + # bouncers have feature flags too + + payload=$(yq -o j ' + .remediation_components[0].feature_flags = ["huey", "dewey", "louie"] | + .remediation_components[0].os = {"name": "Multics", "version": "MR12.5"} + ' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli bouncer inspect testbouncer -o json + rune -0 yq -o j '[.os,.featureflags]' <(output) + assert_json '["Multics/MR12.5",["huey","dewey","louie"]]' + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707399316, "window_size_seconds":600}, + "items":[ + {"name": "foo", "unit": "pound", "value": 3.1415926}, + {"name": "foo", "unit": "pound", "value": 2.7182818}, + {"name": "foo", "unit": "dogyear", "value": 2.7182818} + ] + } + ] + ' <<<"$payload") + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + # aggregation is ok -- we are truncating, not 
rounding, because the float is mandated by swagger. + # but without labels the origin string is empty + assert_json '{bouncers:{testbouncer:{"": {"foo": {"dogyear": 2, "pound": 5}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +--------+-----------------+ + | Origin | foo | + | | dogyear | pound | + +--------+---------+-------+ + | Total | 2 | 5 | + +--------+---------+-------+ + EOT + + # some more realistic values, at least for the labels + # we don't use the same now_timestamp or the payload will be silently discarded + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707399916, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 51936, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 1, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, + {"name": "dropped", "unit": "byte", "value": 3800, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, + {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "dropped", "unit": "byte", "value": 1034, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_cruzit_web_attacks"}}, + {"name": "dropped", "unit": "byte", "value": 3847, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "dropped", "unit": "byte", "value": 380, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, + {"name": "dropped", "unit": "packet", "value": 100, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, + {"name": "dropped", "unit": "packet", "value": 10, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "dropped", "unit": "packet", "value": 23, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_cruzit_web_attacks"}}, + {"name": "dropped", "unit": "packet", "value": 58, "labels": {"ip_type": "ipv4", "origin": 
"lists:firehol_voipbl"}}, + {"name": "dropped", "unit": "packet", "value": 0, "labels": {"ip_type": "ipv4", "origin": "lists:anotherlist"}}, + {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "lists:anotherlist"}}, + {"name": "dropped", "unit": "packet", "value": 0, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + assert_json '{ + "bouncers": { + "testbouncer": { + "": { + "foo": { + "dogyear": 2, + "pound": 5 + } + }, + "CAPI": { + "dropped": { + "byte": 3800, + "packet": 100 + } + }, + "cscli": { + "active_decisions": { + "ip": 1 + }, + "dropped": { + "byte": 380, + "packet": 10 + } + }, + "lists:firehol_cruzit_web_attacks": { + "dropped": { + "byte": 1034, + "packet": 23 + } + }, + "lists:firehol_voipbl": { + "active_decisions": { + "ip": 51936 + }, + "dropped": { + "byte": 3847, + "packet": 58 + }, + }, + "lists:anotherlist": { + "dropped": { + "byte": 0, + "packet": 0 + } + } + } + } + }' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +----------------------------------+------------------+-------------------+-----------------+ + | Origin | active_decisions | dropped | foo | + | | IPs | bytes | packets | dogyear | pound | + +----------------------------------+------------------+---------+---------+---------+-------+ + | CAPI (community blocklist) | - | 3.80k | 100 | - | - | + | cscli | 1 | 380 | 10 | - | - | + | lists:anotherlist | - | 0 | 0 | - | - | + | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | + | lists:firehol_voipbl | 51.94k | 3.85k | 58 | - | - | + +----------------------------------+------------------+---------+---------+---------+-------+ + | Total | 51.94k | 9.06k | 191 | 2 | 5 | + 
+----------------------------------+------------------+---------+---------+---------+-------+ + EOT + + # TODO: multiple item lists + +} + +@test "rc usage metrics (multiple bouncers)" { + # multiple bouncers have separate totals and can have different types of metrics and units -> different columns + + API_KEY=$(cscli bouncers add bouncer1 -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707369316 + metrics: + - meta: + utc_now_timestamp: 1707399316 + window_size_seconds: 600 + items: + - name: dropped + unit: byte + value: 1000 + labels: + origin: CAPI + - name: dropped + unit: byte + value: 800 + labels: + origin: lists:somelist + - name: processed + unit: byte + value: 12340 + - name: processed + unit: packet + value: 100 + EOT + ) + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + API_KEY=$(cscli bouncers add bouncer2 -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707379316 + metrics: + - meta: + utc_now_timestamp: 1707389316 + window_size_seconds: 600 + items: + - name: dropped + unit: byte + value: 1500 + labels: + origin: lists:somelist + - name: dropped + unit: byte + value: 2000 + labels: + origin: CAPI + - name: dropped + unit: packet + value: 20 + labels: + origin: lists:somelist + EOT + ) + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers:{bouncer1:{"":{processed:{byte:12340,packet:100}},CAPI:{dropped:{byte:1000}},"lists:somelist":{dropped:{byte:800}}},bouncer2:{"lists:somelist":{dropped:{byte:1500,packet:20}},CAPI:{dropped:{byte:2000}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: + +----------------------------+---------+-----------------------+ + | Origin | dropped | processed | + | | bytes | 
bytes | packets | + +----------------------------+---------+-----------+-----------+ + | CAPI (community blocklist) | 1.00k | - | - | + | lists:somelist | 800 | - | - | + +----------------------------+---------+-----------+-----------+ + | Total | 1.80k | 12.34k | 100 | + +----------------------------+---------+-----------+-----------+ + + Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: + +----------------------------+-------------------+ + | Origin | dropped | + | | bytes | packets | + +----------------------------+---------+---------+ + | CAPI (community blocklist) | 2.00k | - | + | lists:somelist | 1.50k | 20 | + +----------------------------+---------+---------+ + | Total | 3.50k | 20 | + +----------------------------+---------+---------+ + EOT +} diff --git a/test/bats/08_metrics_machines.bats b/test/bats/08_metrics_machines.bats new file mode 100644 index 00000000000..e63078124a9 --- /dev/null +++ b/test/bats/08_metrics_machines.bats @@ -0,0 +1,101 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + ./instance-crowdsec start + skip "require the usage_metrics endpoint on apiserver" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "lp usage metrics (empty payload)" { + # a registered log processor can send metrics for the lapi and console + TOKEN=$(lp-get-token) + export TOKEN + + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + rune -22 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial 'error: 400' + assert_json '{message: "Missing log processor data"}' +} + +@test "lp usage metrics (bad payload)" { + TOKEN=$(lp-get-token) + export TOKEN + + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: + - version: "v1.0" + EOT + ) + + 
rune -22 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 422" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + log_processors.0.utc_startup_timestamp in body is required + log_processors.0.datasources in body is required + log_processors.0.hub_items in body is required + EOT +} + +@test "lp usage metrics (full payload)" { + TOKEN=$(lp-get-token) + export TOKEN + + # base payload without any measurement + + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: + - version: "v1.0" + utc_startup_timestamp: 1707399316 + hub_items: {} + feature_flags: + - marshmallows + os: + name: CentOS + version: "8" + metrics: + - name: logs_parsed + value: 5000 + unit: count + labels: {} + items: [] + meta: + window_size_seconds: 600 + utc_now_timestamp: 1707485349 + console_options: + - share_context + datasources: + syslog: 1 + file: 4 + EOT + ) + + rune -0 curl-with-token '/v1/usage-metrics' -X POST --data "$payload" + refute_output +} From 91463836ff4fcd19ba8525fe1c6aa992a9180057 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 15 Jul 2024 10:51:45 +0200 Subject: [PATCH 219/581] tests: increase delta for flaky float comparison (#3122) * tests: increase delta for flaky float comparison * remove leading 0 from non-octal number --- pkg/exprhelpers/exprlib_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 33025d4a992..b9ba1d01191 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -1361,7 +1361,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { }, } - delta := 0.0001 + delta := 0.001 for _, test := range tests { program, err := expr.Compile(test.code, GetExprOptions(test.env)...) 
@@ -1392,12 +1392,12 @@ func TestParseUnixTime(t *testing.T) { { name: "ParseUnix() test: valid value with milli", value: "1672239773.3590894", - expected: time.Date(2022, 12, 28, 15, 02, 53, 0, time.UTC), + expected: time.Date(2022, 12, 28, 15, 2, 53, 0, time.UTC), }, { name: "ParseUnix() test: valid value without milli", value: "1672239773", - expected: time.Date(2022, 12, 28, 15, 02, 53, 0, time.UTC), + expected: time.Date(2022, 12, 28, 15, 2, 53, 0, time.UTC), }, { name: "ParseUnix() test: invalid input", From 5390b8ea4b121bc8949b885d1582bce9306a0c94 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 15 Jul 2024 11:05:40 +0200 Subject: [PATCH 220/581] remove warning "maxopenconns is 0, default to 100" (#3129) * remove warning "maxopenconns is 0, default to 100" also don't store as pointer since value 0 is not useful * lint --- pkg/csconfig/api_test.go | 2 +- pkg/csconfig/config.go | 2 +- pkg/csconfig/database.go | 7 ++++--- pkg/csconfig/database_test.go | 11 ++++++----- pkg/database/database.go | 19 ++++++++----------- 5 files changed, 20 insertions(+), 21 deletions(-) diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 079936253a1..51a4c5ad602 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -191,7 +191,7 @@ func TestLoadAPIServer(t *testing.T) { DbConfig: &DatabaseCfg{ DbPath: "./testdata/test.db", Type: "sqlite", - MaxOpenConns: ptr.Of(DEFAULT_MAX_OPEN_CONNS), + MaxOpenConns: DEFAULT_MAX_OPEN_CONNS, UseWal: ptr.Of(true), // autodetected DecisionBulkSize: defaultDecisionBulkSize, }, diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index ed71af4029a..3bbdf607187 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -158,7 +158,7 @@ func NewDefaultConfig() *Config { dbConfig := DatabaseCfg{ Type: "sqlite", DbPath: DefaultDataPath("crowdsec.db"), - MaxOpenConns: ptr.Of(DEFAULT_MAX_OPEN_CONNS), + MaxOpenConns: DEFAULT_MAX_OPEN_CONNS, } globalCfg := Config{ 
diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index d30cc20a938..a24eb9e13c3 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -33,7 +33,7 @@ type DatabaseCfg struct { Type string `yaml:"type"` Flush *FlushDBCfg `yaml:"flush"` LogLevel *log.Level `yaml:"log_level"` - MaxOpenConns *int `yaml:"max_open_conns,omitempty"` + MaxOpenConns int `yaml:"max_open_conns,omitempty"` UseWal *bool `yaml:"use_wal,omitempty"` DecisionBulkSize int `yaml:"decision_bulk_size,omitempty"` } @@ -68,8 +68,8 @@ func (c *Config) LoadDBConfig(inCli bool) error { c.API.Server.DbConfig = c.DbConfig } - if c.DbConfig.MaxOpenConns == nil { - c.DbConfig.MaxOpenConns = ptr.Of(DEFAULT_MAX_OPEN_CONNS) + if c.DbConfig.MaxOpenConns == 0 { + c.DbConfig.MaxOpenConns = DEFAULT_MAX_OPEN_CONNS } if !inCli && c.DbConfig.Type == "sqlite" { @@ -134,6 +134,7 @@ func (d *DatabaseCfg) ConnectionString() string { } else { connString = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=True", d.User, d.Password, d.Host, d.Port, d.DbName) } + if d.Sslmode != "" { connString = fmt.Sprintf("%s&tls=%s", connString, d.Sslmode) } diff --git a/pkg/csconfig/database_test.go b/pkg/csconfig/database_test.go index 954b1c47fd7..4a1ef807f97 100644 --- a/pkg/csconfig/database_test.go +++ b/pkg/csconfig/database_test.go @@ -22,7 +22,7 @@ func TestLoadDBConfig(t *testing.T) { DbConfig: &DatabaseCfg{ Type: "sqlite", DbPath: "./testdata/test.db", - MaxOpenConns: ptr.Of(10), + MaxOpenConns: 10, }, Cscli: &CscliCfg{}, API: &APICfg{ @@ -30,10 +30,10 @@ func TestLoadDBConfig(t *testing.T) { }, }, expected: &DatabaseCfg{ - Type: "sqlite", - DbPath: "./testdata/test.db", - MaxOpenConns: ptr.Of(10), - UseWal: ptr.Of(true), + Type: "sqlite", + DbPath: "./testdata/test.db", + MaxOpenConns: 10, + UseWal: ptr.Of(true), DecisionBulkSize: defaultDecisionBulkSize, }, }, @@ -49,6 +49,7 @@ func TestLoadDBConfig(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadDBConfig(false) 
cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } diff --git a/pkg/database/database.go b/pkg/database/database.go index 6f392c46d21..e513459199f 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -14,8 +14,6 @@ import ( _ "github.com/mattn/go-sqlite3" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -37,12 +35,11 @@ func getEntDriver(dbtype string, dbdialect string, dsn string, config *csconfig. return nil, err } - if config.MaxOpenConns == nil { - log.Warningf("MaxOpenConns is 0, defaulting to %d", csconfig.DEFAULT_MAX_OPEN_CONNS) - config.MaxOpenConns = ptr.Of(csconfig.DEFAULT_MAX_OPEN_CONNS) + if config.MaxOpenConns == 0 { + config.MaxOpenConns = csconfig.DEFAULT_MAX_OPEN_CONNS } - db.SetMaxOpenConns(*config.MaxOpenConns) + db.SetMaxOpenConns(config.MaxOpenConns) drv := entsql.OpenDB(dbdialect, db) return drv, nil @@ -74,7 +71,7 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro if config.Type == "sqlite" { /*if it's the first startup, we want to touch and chmod file*/ - if _, err := os.Stat(config.DbPath); os.IsNotExist(err) { + if _, err = os.Stat(config.DbPath); os.IsNotExist(err) { f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0o600) if err != nil { return nil, fmt.Errorf("failed to create SQLite database file %q: %w", config.DbPath, err) @@ -85,14 +82,14 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro } } // Always try to set permissions to simplify a bit the code for windows (as the permissions set by OpenFile will be garbage) - if err := setFilePerm(config.DbPath, 0o640); err != nil { - return nil, fmt.Errorf("unable to set perms on %s: %v", config.DbPath, err) + if err = setFilePerm(config.DbPath, 0o640); err != nil { + return nil, 
fmt.Errorf("unable to set perms on %s: %w", config.DbPath, err) } } drv, err := getEntDriver(typ, dia, config.ConnectionString(), config) if err != nil { - return nil, fmt.Errorf("failed opening connection to %s: %v", config.Type, err) + return nil, fmt.Errorf("failed opening connection to %s: %w", config.Type, err) } client = ent.NewClient(ent.Driver(drv), entOpt) @@ -104,7 +101,7 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro } if err = client.Schema.Create(ctx); err != nil { - return nil, fmt.Errorf("failed creating schema resources: %v", err) + return nil, fmt.Errorf("failed creating schema resources: %w", err) } return &Client{ From c4da2775cdef20f395d158ecb942b18a65f94bad Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 15 Jul 2024 13:26:59 +0200 Subject: [PATCH 221/581] usage metrics: validate maxLength for some elements (#3131) Co-authored-by: Sebastien Blot --- pkg/models/base_metrics.go | 5 +++++ pkg/models/localapi_swagger.yaml | 7 +++++++ pkg/models/metrics_detail_item.go | 10 ++++++++++ pkg/models/metrics_labels.go | 15 +++++++++++++++ pkg/models/o_sversion.go | 10 ++++++++++ 5 files changed, 47 insertions(+) diff --git a/pkg/models/base_metrics.go b/pkg/models/base_metrics.go index 154d9004afe..94691ea233e 100644 --- a/pkg/models/base_metrics.go +++ b/pkg/models/base_metrics.go @@ -35,6 +35,7 @@ type BaseMetrics struct { // version of the remediation component // Required: true + // Max Length: 255 Version *string `json:"version"` } @@ -124,6 +125,10 @@ func (m *BaseMetrics) validateVersion(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("version", "body", *m.Version, 255); err != nil { + return err + } + return nil } diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index d726f452a16..ba14880e7c5 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -1095,6 +1095,7 @@ definitions: 
version: type: string description: version of the remediation component + maxLength: 255 os: $ref: '#/definitions/OSversion' metrics: @@ -1107,6 +1108,7 @@ definitions: items: type: string description: feature flags (expected to be empty for remediation components) + maxLength: 255 utc_startup_timestamp: type: integer description: UTC timestamp of the startup of the software @@ -1120,9 +1122,11 @@ definitions: name: type: string description: name of the OS + maxLength: 255 version: type: string description: version of the OS + maxLength: 255 required: - name - version @@ -1146,12 +1150,14 @@ definitions: name: type: string description: name of the metric + maxLength: 255 value: type: number description: value of the metric unit: type: string description: unit of the metric + maxLength: 255 labels: $ref: '#/definitions/MetricsLabels' description: labels of the metric @@ -1178,6 +1184,7 @@ definitions: additionalProperties: type: string description: label of the metric + maxLength: 255 ConsoleOptions: title: ConsoleOptions type: array diff --git a/pkg/models/metrics_detail_item.go b/pkg/models/metrics_detail_item.go index 889f7e263d2..bb237884fcf 100644 --- a/pkg/models/metrics_detail_item.go +++ b/pkg/models/metrics_detail_item.go @@ -24,10 +24,12 @@ type MetricsDetailItem struct { // name of the metric // Required: true + // Max Length: 255 Name *string `json:"name"` // unit of the metric // Required: true + // Max Length: 255 Unit *string `json:"unit"` // value of the metric @@ -86,6 +88,10 @@ func (m *MetricsDetailItem) validateName(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("name", "body", *m.Name, 255); err != nil { + return err + } + return nil } @@ -95,6 +101,10 @@ func (m *MetricsDetailItem) validateUnit(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("unit", "body", *m.Unit, 255); err != nil { + return err + } + return nil } diff --git a/pkg/models/metrics_labels.go 
b/pkg/models/metrics_labels.go index d807a88bc8d..176a15cce24 100644 --- a/pkg/models/metrics_labels.go +++ b/pkg/models/metrics_labels.go @@ -8,7 +8,9 @@ package models import ( "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" ) // MetricsLabels MetricsLabels @@ -18,6 +20,19 @@ type MetricsLabels map[string]string // Validate validates this metrics labels func (m MetricsLabels) Validate(formats strfmt.Registry) error { + var res []error + + for k := range m { + + if err := validate.MaxLength(k, "body", m[k], 255); err != nil { + return err + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } return nil } diff --git a/pkg/models/o_sversion.go b/pkg/models/o_sversion.go index eb670409c90..8f1f43ea9cc 100644 --- a/pkg/models/o_sversion.go +++ b/pkg/models/o_sversion.go @@ -21,10 +21,12 @@ type OSversion struct { // name of the OS // Required: true + // Max Length: 255 Name *string `json:"name"` // version of the OS // Required: true + // Max Length: 255 Version *string `json:"version"` } @@ -52,6 +54,10 @@ func (m *OSversion) validateName(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("name", "body", *m.Name, 255); err != nil { + return err + } + return nil } @@ -61,6 +67,10 @@ func (m *OSversion) validateVersion(formats strfmt.Registry) error { return err } + if err := validate.MaxLength("version", "body", *m.Version, 255); err != nil { + return err + } + return nil } From 84c214a6f0984dbe7eb6fe2384241b305ce04438 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 15 Jul 2024 12:31:01 +0100 Subject: [PATCH 222/581] enhance: add crowdsec user agent to cti do request func (#3130) --- pkg/cticlient/client.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index b95d1237619..b817121e222 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -8,6 +8,7 @@ import ( "net/http" 
"strings" + "github.com/crowdsecurity/crowdsec/pkg/cwversion" log "github.com/sirupsen/logrus" ) @@ -43,7 +44,10 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map if err != nil { return nil, err } + req.Header.Set("X-Api-Key", c.apiKey) + req.Header.Set("User-Agent", cwversion.UserAgent()) + resp, err := c.httpClient.Do(req) if err != nil { return nil, err From c4bfdf19914a88671663f8caae5a5ea849c1b3a6 Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 16 Jul 2024 10:08:00 +0200 Subject: [PATCH 223/581] Store alert remediations status in DB (#3115) --- cmd/crowdsec-cli/alerts.go | 1 + cmd/crowdsec-cli/decisions.go | 7 +-- pkg/apiserver/controllers/v1/alerts.go | 1 + pkg/database/alerts.go | 6 +-- pkg/database/ent/alert.go | 13 ++++- pkg/database/ent/alert/alert.go | 8 +++ pkg/database/ent/alert/where.go | 25 +++++++++ pkg/database/ent/alert_create.go | 18 +++++++ pkg/database/ent/alert_update.go | 6 +++ pkg/database/ent/migrate/schema.go | 3 +- pkg/database/ent/mutation.go | 75 +++++++++++++++++++++++++- pkg/database/ent/schema/alert.go | 1 + 12 files changed, 155 insertions(+), 9 deletions(-) diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/alerts.go index 0bb310739d9..37f9ab435c7 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/alerts.go @@ -120,6 +120,7 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro - Date : {{.CreatedAt}} - Machine : {{.MachineID}} - Simulation : {{.Simulated}} + - Remediation : {{.Remediation}} - Reason : {{.Scenario}} - Events Count : {{.EventsCount}} - Scope:Value : {{.Source.Scope}}{{if .Source.Value}}:{{.Source.Value}}{{end}} diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/decisions.go index 92a0de72e58..d485c90254f 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/decisions.go @@ -374,9 +374,10 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a Scope: &addScope, Value: &addValue, }, - 
StartAt: &startAt, - StopAt: &stopAt, - CreatedAt: createdAt, + StartAt: &startAt, + StopAt: &stopAt, + CreatedAt: createdAt, + Remediation: true, } alerts = append(alerts, &alert) diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index c8cd54203bc..82dc51d6879 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -43,6 +43,7 @@ func FormatOneAlert(alert *ent.Alert) *models.Alert { Capacity: &alert.Capacity, Leakspeed: &alert.LeakSpeed, Simulated: &alert.Simulated, + Remediation: alert.Remediation, UUID: alert.UUID, Source: &models.Source{ Scope: &alert.SourceScope, diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 3563adba68c..0f6d87fb1b6 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -241,7 +241,8 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in SetLeakSpeed(*alertItem.Leakspeed). SetSimulated(*alertItem.Simulated). SetScenarioVersion(*alertItem.ScenarioVersion). - SetScenarioHash(*alertItem.ScenarioHash) + SetScenarioHash(*alertItem.ScenarioHash). + SetRemediation(true) // it's from CAPI, we always have decisions alertRef, err := alertB.Save(c.CTX) if err != nil { @@ -554,7 +555,6 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ if len(metaItem.Value) > 4095 { c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) - value = value[:4095] } @@ -618,6 +618,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ SetSimulated(*alertItem.Simulated). SetScenarioVersion(*alertItem.ScenarioVersion). SetScenarioHash(*alertItem.ScenarioHash). + SetRemediation(alertItem.Remediation). SetUUID(alertItem.UUID). AddEvents(events...). AddMetas(metas...) 
@@ -677,7 +678,6 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ } } } - return ret, nil } diff --git a/pkg/database/ent/alert.go b/pkg/database/ent/alert.go index 8bfe0badc09..eb0e1cb7612 100644 --- a/pkg/database/ent/alert.go +++ b/pkg/database/ent/alert.go @@ -64,6 +64,8 @@ type Alert struct { Simulated bool `json:"simulated,omitempty"` // UUID holds the value of the "uuid" field. UUID string `json:"uuid,omitempty"` + // Remediation holds the value of the "remediation" field. + Remediation bool `json:"remediation,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the AlertQuery when eager-loading is set. Edges AlertEdges `json:"edges"` @@ -129,7 +131,7 @@ func (*Alert) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case alert.FieldSimulated: + case alert.FieldSimulated, alert.FieldRemediation: values[i] = new(sql.NullBool) case alert.FieldSourceLatitude, alert.FieldSourceLongitude: values[i] = new(sql.NullFloat64) @@ -300,6 +302,12 @@ func (a *Alert) assignValues(columns []string, values []any) error { } else if value.Valid { a.UUID = value.String } + case alert.FieldRemediation: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field remediation", values[i]) + } else if value.Valid { + a.Remediation = value.Bool + } case alert.ForeignKeys[0]: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for edge-field machine_alerts", value) @@ -431,6 +439,9 @@ func (a *Alert) String() string { builder.WriteString(", ") builder.WriteString("uuid=") builder.WriteString(a.UUID) + builder.WriteString(", ") + builder.WriteString("remediation=") + builder.WriteString(fmt.Sprintf("%v", a.Remediation)) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/alert/alert.go 
b/pkg/database/ent/alert/alert.go index 16e0b019e14..62aade98e87 100644 --- a/pkg/database/ent/alert/alert.go +++ b/pkg/database/ent/alert/alert.go @@ -60,6 +60,8 @@ const ( FieldSimulated = "simulated" // FieldUUID holds the string denoting the uuid field in the database. FieldUUID = "uuid" + // FieldRemediation holds the string denoting the remediation field in the database. + FieldRemediation = "remediation" // EdgeOwner holds the string denoting the owner edge name in mutations. EdgeOwner = "owner" // EdgeDecisions holds the string denoting the decisions edge name in mutations. @@ -126,6 +128,7 @@ var Columns = []string{ FieldScenarioHash, FieldSimulated, FieldUUID, + FieldRemediation, } // ForeignKeys holds the SQL foreign-keys that are owned by the "alerts" @@ -293,6 +296,11 @@ func ByUUID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldUUID, opts...).ToFunc() } +// ByRemediation orders the results by the remediation field. +func ByRemediation(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRemediation, opts...).ToFunc() +} + // ByOwnerField orders the results by owner field. func ByOwnerField(field string, opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/pkg/database/ent/alert/where.go b/pkg/database/ent/alert/where.go index c109b78704b..da6080fffb9 100644 --- a/pkg/database/ent/alert/where.go +++ b/pkg/database/ent/alert/where.go @@ -170,6 +170,11 @@ func UUID(v string) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldUUID, v)) } +// Remediation applies equality check predicate on the "remediation" field. It's identical to RemediationEQ. +func Remediation(v bool) predicate.Alert { + return predicate.Alert(sql.FieldEQ(FieldRemediation, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. 
func CreatedAtEQ(v time.Time) predicate.Alert { return predicate.Alert(sql.FieldEQ(FieldCreatedAt, v)) @@ -1600,6 +1605,26 @@ func UUIDContainsFold(v string) predicate.Alert { return predicate.Alert(sql.FieldContainsFold(FieldUUID, v)) } +// RemediationEQ applies the EQ predicate on the "remediation" field. +func RemediationEQ(v bool) predicate.Alert { + return predicate.Alert(sql.FieldEQ(FieldRemediation, v)) +} + +// RemediationNEQ applies the NEQ predicate on the "remediation" field. +func RemediationNEQ(v bool) predicate.Alert { + return predicate.Alert(sql.FieldNEQ(FieldRemediation, v)) +} + +// RemediationIsNil applies the IsNil predicate on the "remediation" field. +func RemediationIsNil() predicate.Alert { + return predicate.Alert(sql.FieldIsNull(FieldRemediation)) +} + +// RemediationNotNil applies the NotNil predicate on the "remediation" field. +func RemediationNotNil() predicate.Alert { + return predicate.Alert(sql.FieldNotNull(FieldRemediation)) +} + // HasOwner applies the HasEdge predicate on the "owner" edge. func HasOwner() predicate.Alert { return predicate.Alert(func(s *sql.Selector) { diff --git a/pkg/database/ent/alert_create.go b/pkg/database/ent/alert_create.go index 45a6e40b64f..753183a9eb9 100644 --- a/pkg/database/ent/alert_create.go +++ b/pkg/database/ent/alert_create.go @@ -338,6 +338,20 @@ func (ac *AlertCreate) SetNillableUUID(s *string) *AlertCreate { return ac } +// SetRemediation sets the "remediation" field. +func (ac *AlertCreate) SetRemediation(b bool) *AlertCreate { + ac.mutation.SetRemediation(b) + return ac +} + +// SetNillableRemediation sets the "remediation" field if the given value is not nil. +func (ac *AlertCreate) SetNillableRemediation(b *bool) *AlertCreate { + if b != nil { + ac.SetRemediation(*b) + } + return ac +} + // SetOwnerID sets the "owner" edge to the Machine entity by ID. 
func (ac *AlertCreate) SetOwnerID(id int) *AlertCreate { ac.mutation.SetOwnerID(id) @@ -603,6 +617,10 @@ func (ac *AlertCreate) createSpec() (*Alert, *sqlgraph.CreateSpec) { _spec.SetField(alert.FieldUUID, field.TypeString, value) _node.UUID = value } + if value, ok := ac.mutation.Remediation(); ok { + _spec.SetField(alert.FieldRemediation, field.TypeBool, value) + _node.Remediation = value + } if nodes := ac.mutation.OwnerIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/pkg/database/ent/alert_update.go b/pkg/database/ent/alert_update.go index 48ce221ac82..5f0e01ac09f 100644 --- a/pkg/database/ent/alert_update.go +++ b/pkg/database/ent/alert_update.go @@ -281,6 +281,9 @@ func (au *AlertUpdate) sqlSave(ctx context.Context) (n int, err error) { if au.mutation.UUIDCleared() { _spec.ClearField(alert.FieldUUID, field.TypeString) } + if au.mutation.RemediationCleared() { + _spec.ClearField(alert.FieldRemediation, field.TypeBool) + } if au.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -744,6 +747,9 @@ func (auo *AlertUpdateOne) sqlSave(ctx context.Context) (_node *Alert, err error if auo.mutation.UUIDCleared() { _spec.ClearField(alert.FieldUUID, field.TypeString) } + if auo.mutation.RemediationCleared() { + _spec.ClearField(alert.FieldRemediation, field.TypeBool) + } if auo.mutation.OwnerCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index c1ce25bddef..60bf72a486b 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -34,6 +34,7 @@ var ( {Name: "scenario_hash", Type: field.TypeString, Nullable: true}, {Name: "simulated", Type: field.TypeBool, Default: false}, {Name: "uuid", Type: field.TypeString, Nullable: true}, + {Name: "remediation", Type: field.TypeBool, Nullable: true}, {Name: "machine_alerts", Type: field.TypeInt, Nullable: true}, } // AlertsTable holds the schema 
information for the "alerts" table. @@ -44,7 +45,7 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "alerts_machines_alerts", - Columns: []*schema.Column{AlertsColumns[24]}, + Columns: []*schema.Column{AlertsColumns[25]}, RefColumns: []*schema.Column{MachinesColumns[0]}, OnDelete: schema.SetNull, }, diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 573e0b5daa9..5b70457c512 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -77,6 +77,7 @@ type AlertMutation struct { scenarioHash *string simulated *bool uuid *string + remediation *bool clearedFields map[string]struct{} owner *int clearedowner bool @@ -1351,6 +1352,55 @@ func (m *AlertMutation) ResetUUID() { delete(m.clearedFields, alert.FieldUUID) } +// SetRemediation sets the "remediation" field. +func (m *AlertMutation) SetRemediation(b bool) { + m.remediation = &b +} + +// Remediation returns the value of the "remediation" field in the mutation. +func (m *AlertMutation) Remediation() (r bool, exists bool) { + v := m.remediation + if v == nil { + return + } + return *v, true +} + +// OldRemediation returns the old "remediation" field's value of the Alert entity. +// If the Alert object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AlertMutation) OldRemediation(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRemediation is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRemediation requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRemediation: %w", err) + } + return oldValue.Remediation, nil +} + +// ClearRemediation clears the value of the "remediation" field. 
+func (m *AlertMutation) ClearRemediation() { + m.remediation = nil + m.clearedFields[alert.FieldRemediation] = struct{}{} +} + +// RemediationCleared returns if the "remediation" field was cleared in this mutation. +func (m *AlertMutation) RemediationCleared() bool { + _, ok := m.clearedFields[alert.FieldRemediation] + return ok +} + +// ResetRemediation resets all changes to the "remediation" field. +func (m *AlertMutation) ResetRemediation() { + m.remediation = nil + delete(m.clearedFields, alert.FieldRemediation) +} + // SetOwnerID sets the "owner" edge to the Machine entity by id. func (m *AlertMutation) SetOwnerID(id int) { m.owner = &id @@ -1586,7 +1636,7 @@ func (m *AlertMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *AlertMutation) Fields() []string { - fields := make([]string, 0, 23) + fields := make([]string, 0, 24) if m.created_at != nil { fields = append(fields, alert.FieldCreatedAt) } @@ -1656,6 +1706,9 @@ func (m *AlertMutation) Fields() []string { if m.uuid != nil { fields = append(fields, alert.FieldUUID) } + if m.remediation != nil { + fields = append(fields, alert.FieldRemediation) + } return fields } @@ -1710,6 +1763,8 @@ func (m *AlertMutation) Field(name string) (ent.Value, bool) { return m.Simulated() case alert.FieldUUID: return m.UUID() + case alert.FieldRemediation: + return m.Remediation() } return nil, false } @@ -1765,6 +1820,8 @@ func (m *AlertMutation) OldField(ctx context.Context, name string) (ent.Value, e return m.OldSimulated(ctx) case alert.FieldUUID: return m.OldUUID(ctx) + case alert.FieldRemediation: + return m.OldRemediation(ctx) } return nil, fmt.Errorf("unknown Alert field %s", name) } @@ -1935,6 +1992,13 @@ func (m *AlertMutation) SetField(name string, value ent.Value) error { } m.SetUUID(v) return nil + case alert.FieldRemediation: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + 
m.SetRemediation(v) + return nil } return fmt.Errorf("unknown Alert field %s", name) } @@ -2073,6 +2137,9 @@ func (m *AlertMutation) ClearedFields() []string { if m.FieldCleared(alert.FieldUUID) { fields = append(fields, alert.FieldUUID) } + if m.FieldCleared(alert.FieldRemediation) { + fields = append(fields, alert.FieldRemediation) + } return fields } @@ -2144,6 +2211,9 @@ func (m *AlertMutation) ClearField(name string) error { case alert.FieldUUID: m.ClearUUID() return nil + case alert.FieldRemediation: + m.ClearRemediation() + return nil } return fmt.Errorf("unknown Alert nullable field %s", name) } @@ -2221,6 +2291,9 @@ func (m *AlertMutation) ResetField(name string) error { case alert.FieldUUID: m.ResetUUID() return nil + case alert.FieldRemediation: + m.ResetRemediation() + return nil } return fmt.Errorf("unknown Alert field %s", name) } diff --git a/pkg/database/ent/schema/alert.go b/pkg/database/ent/schema/alert.go index 343979e3db7..87ace24aa84 100644 --- a/pkg/database/ent/schema/alert.go +++ b/pkg/database/ent/schema/alert.go @@ -52,6 +52,7 @@ func (Alert) Fields() []ent.Field { field.String("scenarioHash").Optional().Immutable(), field.Bool("simulated").Default(false).Immutable(), field.String("uuid").Optional().Immutable(), // this uuid is mostly here to ensure that CAPI/PAPI has a unique id for each alert + field.Bool("remediation").Optional().Immutable(), } } From 189fb9ca1b594f07f834e92af6f970230b075863 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 17 Jul 2024 09:50:31 +0200 Subject: [PATCH 224/581] deps: use go-cs-lib 0.13 (#3136) --- go.mod | 2 +- go.sum | 4 ++-- pkg/cwversion/version.go | 17 +++-------------- 3 files changed, 6 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index 1bc63a470df..6a2146efc5f 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.11 + github.com/crowdsecurity/go-cs-lib v0.0.13 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index ba4e6267bb9..faca9797341 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.11 h1:ygUOKrkMLaJ2wjC020LgtY6XDkToNFK4NmYlhpkk5ko= -github.com/crowdsecurity/go-cs-lib v0.0.11/go.mod h1:8FMKNGsh3hMZi2SEv6P15PURhEJnZV431XjzzBSuf0k= +github.com/crowdsecurity/go-cs-lib v0.0.13 h1:asmtjIEPOibUK8eaYQCIR7XIBU/EX5vyAp1EbKFQJtY= +github.com/crowdsecurity/go-cs-lib v0.0.13/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 01509833c1c..28d5c2a621c 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -10,7 +10,7 @@ import ( ) var ( - Codename string // = "SoumSoum" + Codename string // = "SoumSoum" Libre2 = "WebAssembly" ) @@ -21,19 +21,8 @@ const ( Constraint_acquis = ">= 1.0, < 2.0" ) -func versionWithTag() string { - // if the version number already contains the tag, don't duplicate it - ret := version.Version - - if !strings.HasSuffix(ret, 
version.Tag) && !strings.HasSuffix(ret, "g"+version.Tag+"-dirty") { - ret += "-" + version.Tag - } - - return ret -} - func FullString() string { - ret := fmt.Sprintf("version: %s\n", versionWithTag()) + ret := fmt.Sprintf("version: %s\n", version.String()) ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) @@ -49,7 +38,7 @@ func FullString() string { } func UserAgent() string { - return "crowdsec/" + versionWithTag() + "-" + version.System + return "crowdsec/" + version.String() + "-" + version.System } // VersionStrip remove the tag from the version string, used to match with a hub branch From 8d96ddd48eec924dfd377d54869442a98d7be1f9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 17 Jul 2024 12:30:52 +0200 Subject: [PATCH 225/581] refact cscli metrics: fix lines between tables, skip wrapper api (#3137) * fix empty line between metrics tables * refact metrics tables: use go-pretty api directly * lint --- cmd/crowdsec-cli/bouncers.go | 4 +- cmd/crowdsec-cli/climetrics/list.go | 17 ++++--- cmd/crowdsec-cli/climetrics/statacquis.go | 13 +++-- cmd/crowdsec-cli/climetrics/statalert.go | 18 +++---- .../climetrics/statappsecengine.go | 13 +++-- cmd/crowdsec-cli/climetrics/statappsecrule.go | 14 +++--- cmd/crowdsec-cli/climetrics/statbouncer.go | 43 +++++++++------- cmd/crowdsec-cli/climetrics/statbucket.go | 13 +++-- cmd/crowdsec-cli/climetrics/statdecision.go | 18 +++---- cmd/crowdsec-cli/climetrics/statlapi.go | 29 ++++------- .../climetrics/statlapibouncer.go | 13 +++-- .../climetrics/statlapidecision.go | 17 +++---- .../climetrics/statlapimachine.go | 13 +++-- cmd/crowdsec-cli/climetrics/statparser.go | 13 +++-- cmd/crowdsec-cli/climetrics/statstash.go | 18 +++---- cmd/crowdsec-cli/climetrics/statwhitelist.go | 13 +++-- cmd/crowdsec-cli/climetrics/table.go | 23 ++++----- cmd/crowdsec-cli/cstable/cstable.go | 12 ----- 
cmd/crowdsec-cli/machines.go | 6 +-- cmd/crowdsec-cli/utils_table.go | 50 +++++++++---------- 20 files changed, 164 insertions(+), 196 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index f7237b8216c..8057cba98c9 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -142,7 +142,7 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } // bouncerInfo contains only the data we want for inspect/list @@ -475,7 +475,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index 074f0603555..d3afbef0669 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -3,12 +3,12 @@ package climetrics import ( "encoding/json" "fmt" + "io" "github.com/fatih/color" - "github.com/spf13/cobra" - "github.com/jedib0t/go-pretty/v6/table" "github.com/jedib0t/go-pretty/v6/text" + "github.com/spf13/cobra" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -43,27 +43,28 @@ func (cli *cliMetrics) list() error { t.AppendHeader(table.Row{"Type", "Title", "Description"}) t.SetColumnConfigs([]table.ColumnConfig{ { - Name: "Type", + Name: "Type", AlignHeader: text.AlignCenter, }, { - Name: "Title", + Name: "Title", AlignHeader: text.AlignCenter, }, { - Name: "Description", - AlignHeader: text.AlignCenter, - WidthMax: 60, + Name: "Description", + AlignHeader: text.AlignCenter, + WidthMax: 60, WidthMaxEnforcer: text.WrapSoft, }, }) + t.Style().Options.SeparateRows = true for _, metric := range allMetrics { 
t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/climetrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go index a18b8a2e2a2..827dcf036c3 100644 --- a/cmd/crowdsec-cli/climetrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -28,10 +28,8 @@ func (s statAcquis) Process(source, metric string, val int) { } func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Source", "Lines read", "Lines parsed", "Lines unparsed", "Lines poured to bucket", "Lines whitelisted"}) keys := []string{"reads", "parsed", "unparsed", "pour", "whitelisted"} @@ -39,7 +37,8 @@ func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go index 65009fa322d..e48dd6c924f 100644 --- a/cmd/crowdsec-cli/climetrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -4,7 
+4,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -21,25 +21,25 @@ func (s statAlert) Process(reason string, val int) { } func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Reason", "Count") - t.SetAlignment(text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Reason", "Count"}) numRows := 0 + // TODO: sort keys for scenario, hits := range s { - t.AddRow( + t.AppendRow(table.Row{ scenario, strconv.Itoa(hits), - ) + }) numRows++ } if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go index e4e0048daaf..4a249e11687 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -25,10 +25,8 @@ func (s statAppsecEngine) Process(appsecEngine, metric string, val int) { } func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Appsec Engine", "Processed", "Blocked") - t.SetAlignment(text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Appsec Engine", "Processed", "Blocked"}) keys := []string{"processed", "blocked"} @@ -36,7 +34,8 @@ func (s statAppsecEngine) Table(out 
io.Writer, wantColor string, noUnit bool, sh log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go index 9647a111b77..2f859d70cfb 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -30,19 +30,19 @@ func (s statAppsecRule) Process(appsecEngine, appsecRule string, metric string, } func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + // TODO: sort keys for appsecEngine, appsecEngineRulesStats := range s { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Rule ID", "Triggered") - t.SetAlignment(text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Rule ID", "Triggered"}) keys := []string{"triggered"} if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { - cstable.RenderTitle(out, fmt.Sprintf("\nAppsec '%s' Rules Metrics:", appsecEngine)) - t.Render() + io.WriteString(out, fmt.Sprintf("Appsec '%s' Rules Metrics:\n", appsecEngine)) + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } } diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 0105e345313..1a803cefbd2 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ 
b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -34,8 +34,8 @@ type statBouncer struct { oldestTS map[string]*time.Time // we keep de-normalized metrics so we can iterate // over them multiple times and keep the aggregation code simple - rawMetrics []bouncerMetricItem - aggregated map[string]map[string]map[string]map[string]int64 + rawMetrics []bouncerMetricItem + aggregated map[string]map[string]map[string]map[string]int64 aggregatedAllOrigin map[string]map[string]map[string]int64 } @@ -57,6 +57,7 @@ func (s *statBouncer) Description() (string, string) { func warnOnce(warningsLogged map[string]bool, msg string) { if _, ok := warningsLogged[msg]; !ok { log.Warningf(msg) + warningsLogged[msg] = true } } @@ -200,6 +201,7 @@ func (s *statBouncer) aggregate() { // bouncerTable displays a table of metrics for a single bouncer func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor string, noUnit bool) { columns := make(map[string]map[string]bool) + for _, item := range s.rawMetrics { if item.bouncerName != bouncerName { continue @@ -208,6 +210,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor if _, ok := columns[item.name]; !ok { columns[item.name] = make(map[string]bool) } + columns[item.name][item.unit] = true } @@ -225,16 +228,16 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor colNum := 1 colCfg := []table.ColumnConfig{{ - Number:colNum, - AlignHeader: - text.AlignLeft, - Align: text.AlignLeft, + Number: colNum, + AlignHeader: text.AlignLeft, + Align: text.AlignLeft, AlignFooter: text.AlignRight, }} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { colNum += 1 + header1 = append(header1, name) // we don't add "s" to random words @@ -244,11 +247,11 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor header2 = append(header2, unit) colCfg = append(colCfg, table.ColumnConfig{ - Number: 
colNum, + Number: colNum, AlignHeader: text.AlignCenter, - Align: text.AlignRight, - AlignFooter: text.AlignRight}, - ) + Align: text.AlignRight, + AlignFooter: text.AlignRight, + }) } } @@ -277,16 +280,20 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor } row := table.Row{origin} + for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { valStr := "-" + val, ok := metrics[name][unit] if ok { valStr = formatNumber(val, !noUnit) } + row = append(row, valStr) } } + t.AppendRow(row) numRows += 1 @@ -299,6 +306,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor } footer := table.Row{"Total"} + for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { footer = append(footer, formatNumber(totals[name][unit], !noUnit)) @@ -309,16 +317,19 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor title, _ := s.Description() title = fmt.Sprintf("%s (%s)", title, bouncerName) + if s.oldestTS != nil { // if we change this to .Local() beware of tests title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } + title += ":" // don't use SetTitle() because it draws the title inside table box - // TODO: newline position wrt other stat tables - cstable.RenderTitle(out, title) - fmt.Fprintln(out, t.Render()) + io.WriteString(out, title+"\n") + io.WriteString(out, t.Render() + "\n") + // empty line between tables + io.WriteString(out, "\n") } // Table displays a table of metrics for each bouncer @@ -328,13 +339,7 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool bouncerNames[item.bouncerName] = true } - nl := false for _, bouncerName := range maptools.SortedKeys(bouncerNames) { - if nl { - // empty line between tables - fmt.Fprintln(out) - } s.bouncerTable(out, bouncerName, wantColor, noUnit) - nl = true } } diff --git 
a/cmd/crowdsec-cli/climetrics/statbucket.go b/cmd/crowdsec-cli/climetrics/statbucket.go index 836fa0ed1ab..507d9f3a476 100644 --- a/cmd/crowdsec-cli/climetrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -26,10 +26,8 @@ func (s statBucket) Process(bucket, metric string, val int) { } func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Scenario", "Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) keys := []string{"curr_count", "overflow", "instantiation", "pour", "underflow"} @@ -37,7 +35,8 @@ func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go index 485644a55ba..145665cfba2 100644 --- a/cmd/crowdsec-cli/climetrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -4,7 +4,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -30,22 +30,21 @@ func (s statDecision) Process(reason, origin, 
action string, val int) { } func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Reason", "Origin", "Action", "Count") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Reason", "Origin", "Action", "Count"}) numRows := 0 + // TODO: sort by reason, origin, action for reason, origins := range s { for origin, actions := range origins { for action, hits := range actions { - t.AddRow( + t.AppendRow(table.Row{ reason, origin, action, strconv.Itoa(hits), - ) + }) numRows++ } @@ -54,7 +53,8 @@ func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go index 7d8831aad74..45b384708bf 100644 --- a/cmd/crowdsec-cli/climetrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -2,10 +2,9 @@ package climetrics import ( "io" - "sort" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -28,10 +27,8 @@ func (s statLapi) Process(route, method string, val int) { } func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Route", "Method", "Hits"}) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ numRows := 0 @@ -39,21 +36,12 @@ func (s statLapi) 
Table(out io.Writer, wantColor string, noUnit bool, showEmpty for _, alabel := range maptools.SortedKeys(s) { astats := s[alabel] - subKeys := []string{} - for skey := range astats { - subKeys = append(subKeys, skey) - } - - sort.Strings(subKeys) - - for _, sl := range subKeys { - row := []string{ + for _, sl := range maptools.SortedKeys(astats) { + t.AppendRow(table.Row{ alabel, sl, strconv.Itoa(astats[sl]), - } - - t.AddRow(row...) + }) numRows++ } @@ -61,7 +49,8 @@ func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go index 3ee35adfe9a..828ccb33413 100644 --- a/cmd/crowdsec-cli/climetrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -28,16 +28,15 @@ func (s statLapiBouncer) Process(bouncer, route, method string, val int) { } func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Bouncer", "Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Bouncer", "Route", "Method", "Hits"}) numRows := lapiMetricsToTable(t, s) if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git 
a/cmd/crowdsec-cli/climetrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go index 5f4d2c07764..ffc999555c1 100644 --- a/cmd/crowdsec-cli/climetrics/statlapidecision.go +++ b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -4,7 +4,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -40,26 +40,25 @@ func (s statLapiDecision) Process(bouncer, fam string, val int) { } func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Bouncer", "Empty answers", "Non-empty answers") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Bouncer", "Empty answers", "Non-empty answers"}) numRows := 0 for bouncer, hits := range s { - t.AddRow( + t.AppendRow(table.Row{ bouncer, strconv.Itoa(hits.Empty), strconv.Itoa(hits.NonEmpty), - ) + }) numRows++ } if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go index 2f81ccb5751..09abe2dd44b 100644 --- a/cmd/crowdsec-cli/climetrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -28,16 +28,15 @@ func (s statLapiMachine) Process(machine, route, method string, val int) { } func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - 
t.SetRowLines(false) - t.SetHeaders("Machine", "Route", "Method", "Hits") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Machine", "Route", "Method", "Hits"}) numRows := lapiMetricsToTable(t, s) if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go index 58ce2248648..0b3512052b9 100644 --- a/cmd/crowdsec-cli/climetrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -27,10 +27,8 @@ func (s statParser) Process(parser, metric string, val int) { } func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) keys := []string{"hits", "parsed", "unparsed"} @@ -38,7 +36,8 @@ func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go index 
9de3469bea1..5938ac05fc8 100644 --- a/cmd/crowdsec-cli/climetrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -4,7 +4,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -32,10 +32,8 @@ func (s statStash) Process(name, mtype string, val int) { } func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Name", "Type", "Items") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Name", "Type", "Items"}) // unfortunately, we can't reuse metricsToTable as the structure is too different :/ numRows := 0 @@ -43,19 +41,19 @@ func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty for _, alabel := range maptools.SortedKeys(s) { astats := s[alabel] - row := []string{ + t.AppendRow(table.Row{ alabel, astats.Type, strconv.Itoa(astats.Count), - } - t.AddRow(row...) 
+ }) numRows++ } if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go index 6848452458b..ccb7e52153b 100644 --- a/cmd/crowdsec-cli/climetrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -3,7 +3,7 @@ package climetrics import ( "io" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" @@ -29,16 +29,15 @@ func (s statWhitelist) Process(whitelist, reason, metric string, val int) { } func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { - t := cstable.New(out, wantColor) - t.SetRowLines(false) - t.SetHeaders("Whitelist", "Reason", "Hits", "Whitelisted") - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Whitelist", "Reason", "Hits", "Whitelisted"}) if numRows, err := wlMetricsToTable(t, s, noUnit); err != nil { log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - cstable.RenderTitle(out, "\n"+title+":") - t.Render() + io.WriteString(out, title + ":\n") + io.WriteString(out, t.Render() + "\n") + io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/table.go b/cmd/crowdsec-cli/climetrics/table.go index c446f2267e9..af13edce2f5 100644 --- a/cmd/crowdsec-cli/climetrics/table.go +++ b/cmd/crowdsec-cli/climetrics/table.go @@ -5,17 +5,16 @@ import ( "sort" "strconv" + "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/maptools" - - 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) // ErrNilTable means a nil pointer was passed instead of a table instance. This is a programming error. var ErrNilTable = errors.New("nil table") -func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int) int { +func lapiMetricsToTable(t table.Writer, stats map[string]map[string]map[string]int) int { // stats: machine -> route -> method -> count // sort keys to keep consistent order when printing machineKeys := []string{} @@ -32,7 +31,7 @@ func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string machineRow := stats[machine] for routeName, route := range machineRow { for methodName, count := range route { - row := []string{ + row := table.Row{ machine, routeName, methodName, @@ -43,7 +42,7 @@ func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string row = append(row, "-") } - t.AddRow(row...) + t.AppendRow(row) numRows++ } @@ -53,7 +52,7 @@ func lapiMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string return numRows } -func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { +func wlMetricsToTable(t table.Writer, stats map[string]map[string]map[string]int, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -62,7 +61,7 @@ func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]i for _, name := range maptools.SortedKeys(stats) { for _, reason := range maptools.SortedKeys(stats[name]) { - row := []string{ + row := table.Row{ name, reason, "-", @@ -82,7 +81,7 @@ func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]i } } - t.AddRow(row...) 
+ t.AppendRow(row) numRows++ } @@ -91,7 +90,7 @@ func wlMetricsToTable(t *cstable.Table, stats map[string]map[string]map[string]i return numRows, nil } -func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { +func metricsToTable(t table.Writer, stats map[string]map[string]int, keys []string, noUnit bool) (int, error) { if t == nil { return 0, ErrNilTable } @@ -104,9 +103,7 @@ func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []st continue } - row := []string{ - alabel, - } + row := table.Row{alabel} for _, sl := range keys { if v, ok := astats[sl]; ok && v != 0 { @@ -116,7 +113,7 @@ func metricsToTable(t *cstable.Table, stats map[string]map[string]int, keys []st } } - t.AddRow(row...) + t.AppendRow(row) numRows++ } diff --git a/cmd/crowdsec-cli/cstable/cstable.go b/cmd/crowdsec-cli/cstable/cstable.go index f7ddb604d84..85ba491f4e8 100644 --- a/cmd/crowdsec-cli/cstable/cstable.go +++ b/cmd/crowdsec-cli/cstable/cstable.go @@ -13,18 +13,6 @@ import ( isatty "github.com/mattn/go-isatty" ) -func RenderTitle(out io.Writer, title string) { - if out == nil { - panic("renderTableTitle: out is nil") - } - - if title == "" { - return - } - - fmt.Fprintln(out, title) -} - func shouldWeColorize(wantColor string) bool { switch wantColor { case "yes": diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 3489550b4fd..e50a17f02dc 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -184,7 +184,7 @@ func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } } @@ -206,7 +206,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, 
getOSNameAndVersion(m), m.AuthType, hb}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } // machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. @@ -658,7 +658,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Collections", coll.Name}) } - fmt.Fprintln(out, t.Render()) + io.WriteString(out, t.Render() + "\n") } func (cli *cliMachines) inspect(machine *ent.Machine) error { diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/utils_table.go index c0043e47ee3..6df16cd85f5 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/utils_table.go @@ -5,7 +5,7 @@ import ( "io" "strconv" - "github.com/jedib0t/go-pretty/v6/text" + "github.com/jedib0t/go-pretty/v6/table" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -13,31 +13,29 @@ import ( ) func listHubItemTable(out io.Writer, wantColor string, title string, items []*cwhub.Item) { - t := cstable.NewLight(out, wantColor) - t.SetHeaders("Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path") - t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft, text.AlignLeft) + t := cstable.NewLight(out, wantColor).Writer + t.AppendHeader(table.Row{"Name", fmt.Sprintf("%v Status", emoji.Package), "Version", "Local Path"}) for _, item := range items { status := fmt.Sprintf("%v %s", item.State.Emoji(), item.State.Text()) - t.AddRow(item.Name, status, item.State.LocalVersion, item.State.LocalPath) + t.AppendRow(table.Row{item.Name, status, item.State.LocalVersion, item.State.LocalPath}) } - cstable.RenderTitle(out, title) - t.Render() + io.WriteString(out, title+"\n") + io.WriteString(out, t.Render() + "\n") } func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { - t := 
cstable.NewLight(out, wantColor) - t.SetHeaders("Inband Hits", "Outband Hits") + t := cstable.NewLight(out, wantColor).Writer + t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) - t.AddRow( + t.AppendRow(table.Row{ strconv.Itoa(metrics["inband_hits"]), strconv.Itoa(metrics["outband_hits"]), - ) + }) - cstable.RenderTitle(out, fmt.Sprintf("\n - (AppSec Rule) %s:", itemName)) - t.Render() + io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) + io.WriteString(out, t.Render() + "\n") } func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -45,43 +43,43 @@ func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metr return } - t := cstable.New(out, wantColor) - t.SetHeaders("Current Count", "Overflows", "Instantiated", "Poured", "Expired") + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) - t.AddRow( + t.AppendRow(table.Row{ strconv.Itoa(metrics["curr_count"]), strconv.Itoa(metrics["overflow"]), strconv.Itoa(metrics["instantiation"]), strconv.Itoa(metrics["pour"]), strconv.Itoa(metrics["underflow"]), - ) + }) - cstable.RenderTitle(out, fmt.Sprintf("\n - (Scenario) %s:", itemName)) - t.Render() + io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) + io.WriteString(out, t.Render() + "\n") } func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { - t := cstable.New(out, wantColor) - t.SetHeaders("Parsers", "Hits", "Parsed", "Unparsed") + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) // don't show table if no hits showTable := false for source, stats := range metrics { if stats["hits"] > 0 { - t.AddRow( + t.AppendRow(table.Row{ source, strconv.Itoa(stats["hits"]), strconv.Itoa(stats["parsed"]), strconv.Itoa(stats["unparsed"]), - ) + }) showTable = true } } if 
showTable { - cstable.RenderTitle(out, fmt.Sprintf("\n - (Parser) %s:", itemName)) - t.Render() + io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) + io.WriteString(out, t.Render() + "\n") } } From aabb5cca890f32c0d4fabf8c630fa3a1872c8f71 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 17 Jul 2024 12:48:07 +0200 Subject: [PATCH 226/581] CI: use go 1.22.5 (#3128) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index e537aee8d1b..be6fc0b8a73 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 659102ced56..3fcc5a6a209 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 1d12a371430..76342a47603 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - 
name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index e72973e1b1f..87e60b071d5 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index d8011b16247..9860a171233 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index ed3f7b4a4c2..982ecedbb25 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 5c2ef0e0b0d..4580061e7d2 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 7f192a85b8f..01844ddddc7 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index a5ce1b37df3..72211cb5180 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.4" + go-version: "1.22.5" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index 45443e971ac..7976c61e256 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.4-alpine3.20 AS build +FROM golang:1.22.5-alpine3.20 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 6fc5a15f766..56836163c96 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.4-bookworm AS build +FROM golang:1.22.5-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 77015d18ef3..d73637fff6b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.4' + version: '1.22.5' - pwsh: | choco install -y make From 443ec37e47942faf23822842cee8458f5e3486aa Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 18 Jul 2024 09:39:57 +0200 Subject: [PATCH 227/581] lint: dockerfiles (#3138) --- Dockerfile | 8 ++++---- Dockerfile.debian | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7976c61e256..b9569065137 100644 --- a/Dockerfile +++ b/Dockerfile @@ -11,7 +11,7 @@ ENV BUILD_VERSION=${BUILD_VERSION} # wizard.sh requires GNU coreutils RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold coreutils pkgconfig && \ - wget https://github.com/google/re2/archive/refs/tags/${RE2_VERSION}.tar.gz && \ + wget -q https://github.com/google/re2/archive/refs/tags/${RE2_VERSION}.tar.gz && \ tar -xzf ${RE2_VERSION}.tar.gz && \ cd re2-${RE2_VERSION} && \ make install && \ @@ -32,7 +32,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ # In case we need to remove agents here.. # cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM alpine:latest as slim +FROM alpine:latest AS slim RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata bash rsync && \ mkdir -p /staging/etc/crowdsec && \ @@ -47,9 +47,9 @@ COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/conf COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml -ENTRYPOINT /bin/bash /docker_start.sh +ENTRYPOINT ["/bin/bash", "/docker_start.sh"] -FROM slim as full +FROM slim AS full # Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp # The files are here for reference, as users will need to mount a new version to be actually able to use notifications diff --git a/Dockerfile.debian b/Dockerfile.debian index 56836163c96..a256cfa9ab3 100644 --- 
a/Dockerfile.debian +++ b/Dockerfile.debian @@ -37,7 +37,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ # In case we need to remove agents here.. # cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM debian:bookworm-slim as slim +FROM debian:bookworm-slim AS slim ENV DEBIAN_FRONTEND=noninteractive ENV DEBCONF_NOWARNINGS="yes" @@ -63,9 +63,9 @@ COPY --from=build /go/src/crowdsec/docker/config.yaml /staging/etc/crowdsec/conf RUN yq -n '.url="http://0.0.0.0:8080"' | install -m 0600 /dev/stdin /staging/etc/crowdsec/local_api_credentials.yaml && \ yq eval -i ".plugin_config.group = \"nogroup\"" /staging/etc/crowdsec/config.yaml -ENTRYPOINT /bin/bash docker_start.sh +ENTRYPOINT ["/bin/bash", "docker_start.sh"] -FROM slim as plugins +FROM slim AS plugins # Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp # The files are here for reference, as users will need to mount a new version to be actually able to use notifications @@ -79,10 +79,10 @@ COPY --from=build \ COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins -FROM slim as geoip +FROM slim AS geoip COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec -FROM plugins as full +FROM plugins AS full COPY --from=build /var/lib/crowdsec /staging/var/lib/crowdsec From 8f1abc300d6315ec3785cf64e5fb8a914e4c4126 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 18 Jul 2024 09:58:45 +0200 Subject: [PATCH 228/581] docker: make sure the sqlite db is present before changing GID (#3140) --- docker/docker_start.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/docker_start.sh b/docker/docker_start.sh index a3e9226bced..4db421e7c87 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -333,6 +333,8 @@ fi # crowdsec sqlite database permissions if [ "$GID" != "" ]; then if istrue "$(conf_get '.db_config.type == 
"sqlite"')"; then + # force the creation of the db file(s) + cscli machines inspect create-db --error >/dev/null 2>&1 || : # don't fail if the db is not there yet if chown -f ":$GID" "$(conf_get '.db_config.db_path')" 2>/dev/null; then echo "sqlite database permissions updated" From 35f97d4855943fdaab5c0528a2c5d94f16b758cf Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 18 Jul 2024 11:13:18 +0200 Subject: [PATCH 229/581] bats: curl helpers to mock log processors and bouncers (#3141) --- test/bats/10_bouncers.bats | 18 +++++++++++++ test/lib/db/instance-mysql | 2 +- test/lib/setup_file.sh | 55 +++++++++++++++++++++++++++++++++----- 3 files changed, 68 insertions(+), 7 deletions(-) diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index b6efbd06650..a89c9f9dd65 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -42,6 +42,24 @@ teardown() { assert_json '[]' } +@test "bouncer api-key auth" { + rune -0 cscli bouncers add ciTestBouncer --key "goodkey" + + # connect with good credentials + rune -0 curl-tcp "/v1/decisions" -sS --fail-with-body -H "X-Api-Key: goodkey" + assert_output null + + # connect with bad credentials + rune -22 curl-tcp "/v1/decisions" -sS --fail-with-body -H "X-Api-Key: badkey" + assert_stderr --partial 'error: 403' + assert_json '{message:"access forbidden"}' + + # connect with no credentials + rune -22 curl-tcp "/v1/decisions" -sS --fail-with-body + assert_stderr --partial 'error: 403' + assert_json '{message:"access forbidden"}' +} + @test "bouncers delete has autocompletion" { rune -0 cscli bouncers add foo1 rune -0 cscli bouncers add foo2 diff --git a/test/lib/db/instance-mysql b/test/lib/db/instance-mysql index 6b40c84acba..df38f09761f 100755 --- a/test/lib/db/instance-mysql +++ b/test/lib/db/instance-mysql @@ -21,7 +21,7 @@ about() { check_requirements() { if ! 
command -v mysql >/dev/null; then - die "missing required program 'mysql' as a mysql client (package mariadb-client-core-10.6 on debian like system)" + die "missing required program 'mysql' as a mysql client (package mariadb-client on debian like system)" fi } diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 65c600d1769..7cbced01ef1 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -282,18 +282,61 @@ rune() { } export -f rune -# call the lapi through unix socket with an API_KEY (authenticates as a bouncer) -# after $1, pass throught extra arguments to curl -curl-with-key() { +# call the lapi through unix socket +# the path (and query string) must be the first parameter, the others will be passed to curl +curl-socket() { [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } local path=$1 shift - [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } local socket socket=$(config_get '.api.server.listen_socket') [[ -z "$socket" ]] && { fail "${FUNCNAME[0]}: missing .api.server.listen_socket"; } - # curl needs a fake hostname when using a unix socket - curl -sS --fail-with-body -H "X-Api-Key: $API_KEY" --unix-socket "$socket" "http://lapi$path" "$@" + curl --unix-socket "$socket" "http://lapi$path" "$@" +} +export -f curl-socket + +# call the lapi through tcp +# the path (and query string) must be the first parameter, the others will be passed to curl +curl-tcp() { + [[ -z "$1" ]] && { fail "${FUNCNAME[0]}: missing path"; } + local path=$1 + shift + local cred + cred=$(config_get .api.client.credentials_path) + local base_url + base_url="$(yq '.url' < "$cred")" + curl "$base_url$path" "$@" +} +export -f curl-tcp + +# call the lapi through unix socket with an API_KEY (authenticates as a bouncer) +# after $1, pass throught extra arguments to curl +curl-with-key() { + [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } + curl-tcp "$@" -sS --fail-with-body -H "X-Api-Key: $API_KEY" } export -f curl-with-key 
+ +# call the lapi through unix socket with a TOKEN (authenticates as a machine) +# after $1, pass throught extra arguments to curl +curl-with-token() { + [[ -z "$TOKEN" ]] && { fail "${FUNCNAME[0]}: missing TOKEN"; } + # curl needs a fake hostname when using a unix socket + curl-tcp "$@" -sS --fail-with-body -H "Authorization: Bearer $TOKEN" +} +export -f curl-with-token + +# as a log processor, connect to lapi and get a token +lp-get-token() { + local cred + cred=$(config_get .api.client.credentials_path) + local resp + resp=$(yq -oj -I0 '{"machine_id":.login,"password":.password}' < "$cred" | curl-socket '/v1/watchers/login' -s -X POST --data-binary @-) + if [[ "$(yq -e '.code' <<<"$resp")" != 200 ]]; then + echo "login_lp: failed to login" >&3 + return 1 + fi + echo "$resp" | yq -r '.token' +} +export -f lp-get-token From 5b4f924d5f2855813a1205fee740d677d7129885 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Jul 2024 10:54:39 +0200 Subject: [PATCH 230/581] allow .index.json to embed item content (#3145) --- pkg/cwhub/item.go | 1 + pkg/cwhub/itemupgrade.go | 50 ++++++++++++++++++++++++++++++---- pkg/cwhub/testdata/index1.json | 8 +----- pkg/cwhub/testdata/index2.json | 9 +----- test/bats/20_hub.bats | 2 +- test/lib/setup_file.sh | 2 +- 6 files changed, 49 insertions(+), 23 deletions(-) diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 4249a20e134..32d1acf94ff 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -109,6 +109,7 @@ type Item struct { Name string `json:"name,omitempty" yaml:"name,omitempty"` // usually "author/name" FileName string `json:"file_name,omitempty" yaml:"file_name,omitempty"` // eg. 
apache2-logs.yaml Description string `json:"description,omitempty" yaml:"description,omitempty"` + Content string `json:"content,omitempty" yaml:"-"` Author string `json:"author,omitempty" yaml:"author,omitempty"` References []string `json:"references,omitempty" yaml:"references,omitempty"` diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index d74544ddaed..1aebb3caf29 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -4,9 +4,13 @@ package cwhub import ( "context" + "crypto" + "encoding/base64" + "encoding/hex" "errors" "fmt" "os" + "path/filepath" "github.com/sirupsen/logrus" @@ -110,16 +114,50 @@ func (i *Item) downloadLatest(ctx context.Context, overwrite bool, updateOnly bo // FetchContentTo downloads the last version of the item's YAML file to the specified path. func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { + wantHash := i.latestHash() + if wantHash == "" { + return false, "", errors.New("latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again") + } + + // Use the embedded content if available + if i.Content != "" { + // the content was historically base64 encoded + content, err := base64.StdEncoding.DecodeString(i.Content) + if err != nil { + content = []byte(i.Content) + } + + dir := filepath.Dir(destPath) + + if err := os.MkdirAll(dir, 0o755); err != nil { + return false, "", fmt.Errorf("while creating %s: %w", dir, err) + } + + // check sha256 + hash := crypto.SHA256.New() + if _, err := hash.Write(content); err != nil { + return false, "", fmt.Errorf("while hashing %s: %w", i.Name, err) + } + + gotHash := hex.EncodeToString(hash.Sum(nil)) + if gotHash != wantHash { + return false, "", fmt.Errorf("hash mismatch: expected %s, got %s. 
The index file is invalid, please run 'cscli hub update' and try again", wantHash, gotHash) + } + + if err := os.WriteFile(destPath, content, 0o600); err != nil { + return false, "", fmt.Errorf("while writing %s: %w", destPath, err) + } + + i.hub.logger.Debugf("Wrote %s content from .index.json to %s", i.Name, destPath) + + return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil + } + url, err := i.hub.remote.urlTo(i.RemotePath) if err != nil { return false, "", fmt.Errorf("failed to build request: %w", err) } - wantHash := i.latestHash() - if wantHash == "" { - return false, "", errors.New("latest hash missing from index") - } - d := downloader. New(). WithHTTPClient(hubClient). @@ -167,7 +205,7 @@ func (i *Item) download(ctx context.Context, overwrite bool) (bool, error) { downloaded, _, err := i.FetchContentTo(ctx, finalPath) if err != nil { - return false, fmt.Errorf("while downloading %s: %w", i.Name, err) + return false, err } if downloaded { diff --git a/pkg/cwhub/testdata/index1.json b/pkg/cwhub/testdata/index1.json index a7e6ef6153b..59548bda379 100644 --- a/pkg/cwhub/testdata/index1.json +++ b/pkg/cwhub/testdata/index1.json @@ -10,7 +10,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "foobar collection : foobar", "author": "crowdsecurity", "labels": null, @@ -34,7 +33,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "test_collection : foobar", "author": "crowdsecurity", "labels": null, @@ -52,7 +50,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "foobar collection : foobar", "author": "crowdsecurity", "labels": null, @@ -73,7 +70,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "A foobar parser", "author": "crowdsecurity", "labels": null @@ -89,7 +85,6 @@ } }, 
"long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "A foobar parser", "author": "crowdsecurity", "labels": null @@ -107,7 +102,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "a foobar scenario", "author": "crowdsecurity", "labels": { @@ -118,4 +112,4 @@ } } } -} \ No newline at end of file +} diff --git a/pkg/cwhub/testdata/index2.json b/pkg/cwhub/testdata/index2.json index 7f97ebf2308..41c4ccba83a 100644 --- a/pkg/cwhub/testdata/index2.json +++ b/pkg/cwhub/testdata/index2.json @@ -10,7 +10,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "foobar collection : foobar", "author": "crowdsecurity", "labels": null, @@ -38,7 +37,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "test_collection : foobar", "author": "crowdsecurity", "labels": null, @@ -57,7 +55,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "foobar collection : foobar", "author": "crowdsecurity", "labels": null, @@ -78,7 +75,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "A foobar parser", "author": "crowdsecurity", "labels": null @@ -94,7 +90,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "A foobar parser", "author": "crowdsecurity", "labels": null @@ -112,7 +107,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "a foobar scenario", "author": "crowdsecurity", "labels": { @@ -132,7 +126,6 @@ } }, "long_description": "bG9uZyBkZXNjcmlwdGlvbgo=", - "content": "bG9uZyBkZXNjcmlwdGlvbgo=", "description": "a foobar scenario", "author": "crowdsecurity", "labels": { @@ -143,4 +136,4 @@ } } } -} \ No newline at 
end of file +} diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index 0d9f29b2418..b8fa1e9efca 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -76,7 +76,7 @@ teardown() { assert_stderr --partial "invalid hub item appsec-rules:crowdsecurity/vpatch-laravel-debug-mode: latest version missing from index" rune -1 cscli appsec-rules install crowdsecurity/vpatch-laravel-debug-mode --force - assert_stderr --partial "error while installing 'crowdsecurity/vpatch-laravel-debug-mode': while downloading crowdsecurity/vpatch-laravel-debug-mode: latest hash missing from index" + assert_stderr --partial "error while installing 'crowdsecurity/vpatch-laravel-debug-mode': latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again" } @test "missing reference in hub index" { diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 7cbced01ef1..39a084596e2 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -265,7 +265,7 @@ hub_strip_index() { local INDEX INDEX=$(config_get .config_paths.index_path) local hub_min - hub_min=$(jq <"$INDEX" 'del(..|.content?) | del(..|.long_description?) | del(..|.deprecated?) | del (..|.labels?)') + hub_min=$(jq <"$INDEX" 'del(..|.long_description?) | del(..|.deprecated?) 
| del (..|.labels?)') echo "$hub_min" >"$INDEX" } export -f hub_strip_index From 30c0d8997d24e7b8323ee54ea1541cae10ddc108 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:12:27 +0200 Subject: [PATCH 231/581] CI: update action versions (#3143) --- .github/workflows/docker-tests.yml | 4 ++-- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-docker.yml | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index d3ae4f90d79..228a0829984 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -35,10 +35,10 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 with: - config: .github/buildkit.toml + buildkitd-config: .github/buildkit.toml - name: "Build image" - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
file: ./Dockerfile${{ matrix.flavor == 'debian' && '.debian' || '' }} diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 4580061e7d2..cfea128acf7 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -55,7 +55,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: version: v1.59 args: --issues-exit-code=1 --timeout 10m diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 01844ddddc7..ab45bd88717 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -156,7 +156,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: version: v1.59 args: --issues-exit-code=1 --timeout 10m diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml index 005db0cc9d1..11b4401c6da 100644 --- a/.github/workflows/publish-docker.yml +++ b/.github/workflows/publish-docker.yml @@ -47,7 +47,7 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 with: - config: .github/buildkit.toml + buildkitd-config: .github/buildkit.toml - name: Login to DockerHub uses: docker/login-action@v3 @@ -93,7 +93,7 @@ jobs: - name: Build and push image (slim) if: ${{ inputs.slim }} - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . file: ./Dockerfile${{ inputs.debian && '.debian' || '' }} @@ -109,7 +109,7 @@ jobs: BUILD_VERSION=${{ inputs.crowdsec_version }} - name: Build and push image (full) - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
file: ./Dockerfile${{ inputs.debian && '.debian' || '' }} From a3d7900b5f625163bf1b2e4a3dada969cefab4fe Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 22 Jul 2024 12:14:46 +0200 Subject: [PATCH 232/581] update expr (#3144) --- .github/workflows/go-tests-windows.yml | 2 +- Makefile | 7 +- cmd/crowdsec-cli/config_show.go | 2 +- go.mod | 1 + go.sum | 2 + pkg/acquisition/acquisition.go | 4 +- pkg/alertcontext/alertcontext.go | 4 +- pkg/appsec/appsec.go | 4 +- pkg/csprofiles/csprofiles.go | 4 +- pkg/exprhelpers/debugger.go | 144 +++++++++---------------- pkg/exprhelpers/debugger_test.go | 4 +- pkg/exprhelpers/exprlib_test.go | 2 +- pkg/exprhelpers/helpers.go | 2 +- pkg/exprhelpers/jsonextract_test.go | 2 +- pkg/hubtest/parser_assert.go | 2 +- pkg/hubtest/scenario_assert.go | 2 +- pkg/leakybucket/bayesian.go | 4 +- pkg/leakybucket/conditional.go | 4 +- pkg/leakybucket/manager_load.go | 4 +- pkg/leakybucket/overflow_filter.go | 4 +- pkg/leakybucket/reset_filter.go | 4 +- pkg/leakybucket/uniq.go | 4 +- pkg/parser/grok_pattern.go | 2 +- pkg/parser/node.go | 5 +- pkg/parser/whitelist.go | 4 +- pkg/setup/detect.go | 2 +- pkg/types/event.go | 2 +- 27 files changed, 93 insertions(+), 134 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index cfea128acf7..9d8051821c8 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -43,7 +43,7 @@ jobs: - name: Run tests run: | go install github.com/kyoh86/richgo@v0.3.10 - go test -coverprofile coverage.out -covermode=atomic ./... > out.txt + go test -tags expr_debug -coverprofile coverage.out -covermode=atomic ./... > out.txt if(!$?) 
{ cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter; Exit 1 } cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter diff --git a/Makefile b/Makefile index 97eb7ba0eea..207b5d610f0 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,8 @@ ifneq (,$(DOCKER_BUILD)) LD_OPTS_VARS += -X 'github.com/crowdsecurity/go-cs-lib/version.System=docker' endif -GO_TAGS := netgo,osusergo,sqlite_omit_load_extension +#expr_debug tag is required to enable the debug mode in expr +GO_TAGS := netgo,osusergo,sqlite_omit_load_extension,expr_debug # this will be used by Go in the make target, some distributions require it export PKG_CONFIG_PATH:=/usr/local/lib/pkgconfig:$(PKG_CONFIG_PATH) @@ -220,11 +221,11 @@ testenv: .PHONY: test test: testenv ## Run unit tests with localstack - $(GOTEST) $(LD_OPTS) ./... + $(GOTEST) --tags=$(GO_TAGS) $(LD_OPTS) ./... .PHONY: go-acc go-acc: testenv ## Run unit tests with localstack + coverage - go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models -- $(LD_OPTS) + go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models --tags $(GO_TAGS) -- $(LD_OPTS) check_docker: @if ! 
docker info > /dev/null 2>&1; then \ diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index c7138c98e33..e411f5a322b 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -6,7 +6,7 @@ import ( "os" "text/template" - "github.com/antonmedv/expr" + "github.com/expr-lang/expr" "github.com/sanity-io/litter" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" diff --git a/go.mod b/go.mod index 6a2146efc5f..f36bbcd996d 100644 --- a/go.mod +++ b/go.mod @@ -111,6 +111,7 @@ require ( github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/expr-lang/expr v1.16.9 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logr/logr v1.2.4 // indirect diff --git a/go.sum b/go.sum index faca9797341..d4cc2651f0f 100644 --- a/go.sum +++ b/go.sum @@ -127,6 +127,8 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI= +github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 5e4a663eb9e..634557021f1 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -7,8 +7,8 @@ import ( "os" "strings" - 
"github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/google/uuid" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 9946d694363..c502def32cd 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -6,8 +6,8 @@ import ( "slices" "strconv" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index e43a2ad6710..7c61f2a8dfd 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -6,8 +6,8 @@ import ( "os" "regexp" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index 42509eaceae..52cda1ed2e1 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/csconfig" diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index a2bd489acfa..711aa491078 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -5,8 +5,9 @@ import ( "strconv" "strings" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/file" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" ) @@ -106,62 +107,30 @@ func (o *OpOutput) String() string { return ret + "" } -func (erp ExprRuntimeDebug) 
extractCode(ip int, program *vm.Program, parts []string) string { +func (erp ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string { + locations := program.Locations() + src := string(program.Source()) - //log.Tracef("# extracting code for ip %d [%s]", ip, parts[1]) - if program.Locations[ip].Line == 0 { //it seems line is zero when it's not actual code (ie. op push at the beginning) - log.Tracef("zero location ?") - return "" - } - startLine := program.Locations[ip].Line - startColumn := program.Locations[ip].Column - lines := strings.Split(program.Source.Content(), "\n") - - endCol := 0 - endLine := 0 - - for i := ip + 1; i < len(program.Locations); i++ { - if program.Locations[i].Line > startLine || (program.Locations[i].Line == startLine && program.Locations[i].Column > startColumn) { - //we didn't had values yet and it's superior to current one, take it - if endLine == 0 && endCol == 0 { - endLine = program.Locations[i].Line - endCol = program.Locations[i].Column - } - //however, we are looking for the closest upper one - if program.Locations[i].Line < endLine || (program.Locations[i].Line == endLine && program.Locations[i].Column < endCol) { - endLine = program.Locations[i].Line - endCol = program.Locations[i].Column - } + currentInstruction := locations[ip] - } - } - //maybe it was the last instruction ? 
- if endCol == 0 && endLine == 0 { - endLine = len(lines) - endCol = len(lines[endLine-1]) - } - code_snippet := "" - startLine -= 1 //line count starts at 1 - endLine -= 1 - - for i := startLine; i <= endLine; i++ { - if i == startLine { - if startLine != endLine { - code_snippet += lines[i][startColumn:] - continue + var closest *file.Location + + for i := ip + 1; i < len(locations); i++ { + if locations[i].From > currentInstruction.From { + if closest == nil || locations[i].From < closest.From { + closest = &locations[i] } - code_snippet += lines[i][startColumn:endCol] - break - } - if i == endLine { - code_snippet += lines[i][:endCol] - break } - code_snippet += lines[i] } - log.Tracef("#code extract for ip %d [%s] -> '%s'", ip, parts[1], code_snippet) - return cleanTextForDebug(code_snippet) + var end int + if closest == nil { + end = len(src) + } else { + end = closest.From + } + + return cleanTextForDebug(src[locations[ip].From:end]) } func autoQuote(v any) string { @@ -189,7 +158,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part prevIdxOut = IdxOut - 1 currentDepth = outputs[prevIdxOut].CodeDepth if outputs[prevIdxOut].Func && !outputs[prevIdxOut].Finalized { - stack := vm.Stack() + stack := vm.Stack num_items := 1 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { outputs[prevIdxOut].FuncResults = append(outputs[prevIdxOut].FuncResults, autoQuote(stack[i])) @@ -197,7 +166,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } outputs[prevIdxOut].Finalized = true } else if (outputs[prevIdxOut].Comparison || outputs[prevIdxOut].Condition) && !outputs[prevIdxOut].Finalized { - stack := vm.Stack() + stack := vm.Stack outputs[prevIdxOut].StrConditionResult = fmt.Sprintf("%+v", stack) if val, ok := stack[0].(bool); ok { outputs[prevIdxOut].ConditionResult = new(bool) @@ -207,10 +176,10 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } } - 
erp.Logger.Tracef("[STEP %d:%s] (stack:%+v) (parts:%+v) {depth:%d}", ip, parts[1], vm.Stack(), parts, currentDepth) + erp.Logger.Tracef("[STEP %d:%s] (stack:%+v) (parts:%+v) {depth:%d}", ip, parts[1], vm.Stack, parts, currentDepth) out := OpOutput{} out.CodeDepth = currentDepth - out.Code = erp.extractCode(ip, program, parts) + out.Code = erp.extractCode(ip, program) switch parts[1] { case "OpBegin": @@ -221,8 +190,8 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.CodeDepth -= IndentStep out.BlockEnd = true //OpEnd can carry value, if it's any/all/count etc. - if len(vm.Stack()) > 0 { - out.StrConditionResult = fmt.Sprintf("%v", vm.Stack()) + if len(vm.Stack) > 0 { + out.StrConditionResult = fmt.Sprintf("%v", vm.Stack) } outputs = append(outputs, out) case "OpNot": @@ -241,7 +210,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.StrConditionResult = "false" outputs = append(outputs, out) case "OpJumpIfTrue": //OR - stack := vm.Stack() + stack := vm.Stack out.JumpIf = true out.IfTrue = true out.StrConditionResult = fmt.Sprintf("%v", stack[0]) @@ -252,7 +221,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } outputs = append(outputs, out) case "OpJumpIfFalse": //AND - stack := vm.Stack() + stack := vm.Stack out.JumpIf = true out.IfFalse = true out.StrConditionResult = fmt.Sprintf("%v", stack[0]) @@ -264,7 +233,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpCall1": //Op for function calls out.Func = true out.FuncName = parts[3] - stack := vm.Stack() + stack := vm.Stack num_items := 1 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) @@ -274,7 +243,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpCall2": //Op for function calls out.Func = true out.FuncName = parts[3] - stack := vm.Stack() + stack := vm.Stack 
num_items := 2 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) @@ -284,7 +253,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpCall3": //Op for function calls out.Func = true out.FuncName = parts[3] - stack := vm.Stack() + stack := vm.Stack num_items := 3 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) @@ -297,7 +266,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpCallN": //Op for function calls with more than 3 args out.Func = true out.FuncName = parts[1] - stack := vm.Stack() + stack := vm.Stack //for OpCallN, we get the number of args if len(program.Arguments) >= ip { @@ -310,19 +279,19 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part } } } else { //let's blindly take the items on stack - for _, val := range vm.Stack() { + for _, val := range vm.Stack { out.Args = append(out.Args, autoQuote(val)) } } outputs = append(outputs, out) case "OpEqualString", "OpEqual", "OpEqualInt": //comparisons - stack := vm.Stack() + stack := vm.Stack out.Comparison = true out.Left = autoQuote(stack[0]) out.Right = autoQuote(stack[1]) outputs = append(outputs, out) case "OpIn": //in operator - stack := vm.Stack() + stack := vm.Stack out.Condition = true out.ConditionIn = true //seems that we tend to receive stack[1] as a map. @@ -332,7 +301,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.Args = append(out.Args, autoQuote(stack[1])) outputs = append(outputs, out) case "OpContains": //kind OpIn , but reverse - stack := vm.Stack() + stack := vm.Stack out.Condition = true out.ConditionContains = true //seems that we tend to receive stack[1] as a map. 
@@ -347,7 +316,10 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part func (erp ExprRuntimeDebug) ipSeek(ip int) []string { for i := range len(erp.Lines) { - parts := strings.Split(erp.Lines[i], "\t") + parts := strings.Fields(erp.Lines[i]) + if len(parts) == 0 { + continue + } if parts[0] == strconv.Itoa(ip) { return parts } @@ -371,7 +343,7 @@ func cleanTextForDebug(text string) string { } func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry, ret any) { - logger.Debugf("dbg(result=%v): %s", ret, cleanTextForDebug(program.Source.Content())) + logger.Debugf("dbg(result=%v): %s", ret, cleanTextForDebug(string(program.Source()))) for _, output := range outputs { logger.Debugf("%s", output.String()) } @@ -383,57 +355,41 @@ func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]Op erp := ExprRuntimeDebug{ Logger: logger, } - debugErr := make(chan error) - var buf strings.Builder vm := vm.Debug() - done := false - program.Opcodes(&buf) - lines := strings.Split(buf.String(), "\n") + opcodes := program.Disassemble() + lines := strings.Split(opcodes, "\n") erp.Lines = lines go func() { + //We must never return until the execution of the program is done var err error erp.Logger.Tracef("[START] ip 0") ops := erp.ipSeek(0) if ops == nil { - debugErr <- fmt.Errorf("failed getting ops for ip 0") - return + log.Warningf("error while debugging expr: failed getting ops for ip 0") } if outputs, err = erp.ipDebug(0, vm, program, ops, outputs); err != nil { - debugErr <- fmt.Errorf("error while debugging at ip 0") + log.Warningf("error while debugging expr: error while debugging at ip 0") } vm.Step() for ip := range vm.Position() { ops := erp.ipSeek(ip) - if ops == nil { //we reached the end of the program, we shouldn't throw an error + if ops == nil { erp.Logger.Tracef("[DONE] ip %d", ip) - debugErr <- nil - return + break } if outputs, err = erp.ipDebug(ip, vm, program, ops, outputs); err != nil { - 
debugErr <- fmt.Errorf("error while debugging at ip %d", ip) - return - } - if done { - debugErr <- nil - return + log.Warningf("error while debugging expr: error while debugging at ip %d", ip) } vm.Step() } - debugErr <- nil }() var return_error error ret, err := vm.Run(program, env) - done = true //if the expr runtime failed, we don't need to wait for the debug to finish if err != nil { return_error = err - } else { - err = <-debugErr - if err != nil { - log.Warningf("error while debugging expr: %s", err) - } } //the overall result of expression is the result of last op ? if len(outputs) > 0 { diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 5f239885539..efdcbc1a769 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/antonmedv/expr" "github.com/davecgh/go-spew/spew" + "github.com/expr-lang/expr" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -52,6 +52,7 @@ type teststruct struct { Foo string } +// You need to add the tag expr_debug when running the tests func TestBaseDbg(t *testing.T) { defaultEnv := map[string]interface{}{ "queue": &types.Queue{}, @@ -265,7 +266,6 @@ func TestBaseDbg(t *testing.T) { {Code: "Upper(base_string)", CodeDepth: 0, Func: true, FuncName: "Upper", Args: []string{"\"hello world\""}, FuncResults: []string{"\"HELLO WORLD\""}, ConditionResult: (*bool)(nil), Finalized: true}, {Code: "Upper('/someotherurl?account-name=admin&account-status=1&ow=cmd') )", CodeDepth: 0, Func: true, FuncName: "Upper", Args: []string{"\"/someotherurl?account-name=admin&account...\""}, FuncResults: []string{"\"/SOMEOTHERURL?ACCOUNT-NAME=ADMIN&ACCOUNT...\""}, ConditionResult: (*bool)(nil), Finalized: true}, {Code: "contains Upper('/someotherurl?account-name=admin&account-status=1&ow=cmd') )", CodeDepth: 0, Args: []string{"\"HELLO WORLD\"", "\"/SOMEOTHERURL?ACCOUNT-NAME=ADMIN&ACCOUNT...\""}, Condition: 
true, ConditionContains: true, StrConditionResult: "[false]", ConditionResult: boolPtr(false), Finalized: true}, - {Code: "and", CodeDepth: 0, JumpIf: true, IfFalse: true, StrConditionResult: "false", ConditionResult: boolPtr(false), Finalized: false}, {Code: "and", CodeDepth: 0, JumpIf: true, IfFalse: true, StrConditionResult: "false", ConditionResult: boolPtr(false), Finalized: true}, }, }, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index b9ba1d01191..f2eb208ebfa 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - "github.com/antonmedv/expr" + "github.com/expr-lang/expr" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 43be5d82de0..b4324f8bbab 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -15,11 +15,11 @@ import ( "strings" "time" - "github.com/antonmedv/expr" "github.com/bluele/gcache" "github.com/c-robinson/iplib" "github.com/cespare/xxhash/v2" "github.com/davecgh/go-spew/spew" + "github.com/expr-lang/expr" "github.com/oschwald/geoip2-golang" "github.com/oschwald/maxminddb-golang" "github.com/prometheus/client_golang/prometheus" diff --git a/pkg/exprhelpers/jsonextract_test.go b/pkg/exprhelpers/jsonextract_test.go index 80de1619580..5845c3ae66b 100644 --- a/pkg/exprhelpers/jsonextract_test.go +++ b/pkg/exprhelpers/jsonextract_test.go @@ -3,7 +3,7 @@ package exprhelpers import ( "testing" - "github.com/antonmedv/expr" + "github.com/expr-lang/expr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index d79d26fb9d0..be4fdbdb5e6 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -7,7 +7,7 @@ import ( "os" "strings" - "github.com/antonmedv/expr" + 
"github.com/expr-lang/expr" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" diff --git a/pkg/hubtest/scenario_assert.go b/pkg/hubtest/scenario_assert.go index bb004daad49..f32abf9e110 100644 --- a/pkg/hubtest/scenario_assert.go +++ b/pkg/hubtest/scenario_assert.go @@ -9,7 +9,7 @@ import ( "sort" "strings" - "github.com/antonmedv/expr" + "github.com/expr-lang/expr" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" diff --git a/pkg/leakybucket/bayesian.go b/pkg/leakybucket/bayesian.go index e56eb097ba4..357d51f597b 100644 --- a/pkg/leakybucket/bayesian.go +++ b/pkg/leakybucket/bayesian.go @@ -3,8 +3,8 @@ package leakybucket import ( "fmt" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/leakybucket/conditional.go b/pkg/leakybucket/conditional.go index 0e4076c305c..a203a639743 100644 --- a/pkg/leakybucket/conditional.go +++ b/pkg/leakybucket/conditional.go @@ -4,8 +4,8 @@ import ( "fmt" "sync" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 3e47f1fc16f..ca2e4d17d99 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -11,9 +11,9 @@ import ( "sync" "time" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" "github.com/davecgh/go-spew/spew" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/goombaio/namegenerator" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go index 8ec701a3400..01dd491ed41 100644 --- a/pkg/leakybucket/overflow_filter.go +++ 
b/pkg/leakybucket/overflow_filter.go @@ -3,8 +3,8 @@ package leakybucket import ( "fmt" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/leakybucket/reset_filter.go b/pkg/leakybucket/reset_filter.go index 6e61510fcc7..452ccc085b1 100644 --- a/pkg/leakybucket/reset_filter.go +++ b/pkg/leakybucket/reset_filter.go @@ -3,8 +3,8 @@ package leakybucket import ( "sync" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index 197246c91a0..0cc0583390b 100644 --- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -3,8 +3,8 @@ package leakybucket import ( "sync" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" diff --git a/pkg/parser/grok_pattern.go b/pkg/parser/grok_pattern.go index 5b3204a4201..9c781d47aa6 100644 --- a/pkg/parser/grok_pattern.go +++ b/pkg/parser/grok_pattern.go @@ -3,7 +3,7 @@ package parser import ( "time" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr/vm" "github.com/crowdsecurity/grokky" ) diff --git a/pkg/parser/node.go b/pkg/parser/node.go index bb57995a129..26046ae4fd6 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -6,9 +6,9 @@ import ( "strings" "time" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" "github.com/davecgh/go-spew/spew" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v2" @@ -202,7 
+202,6 @@ func (n *Node) processWhitelist(cachedExprEnv map[string]interface{}, p *types.E return isWhitelisted, nil } - func (n *Node) processGrok(p *types.Event, cachedExprEnv map[string]any) (bool, bool, error) { // Process grok if present, should be exclusive with nodes :) clog := n.Logger diff --git a/pkg/parser/whitelist.go b/pkg/parser/whitelist.go index fd1c2a0546a..e7b93a8d7da 100644 --- a/pkg/parser/whitelist.go +++ b/pkg/parser/whitelist.go @@ -4,8 +4,8 @@ import ( "fmt" "net" - "github.com/antonmedv/expr" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr" + "github.com/expr-lang/expr/vm" "github.com/prometheus/client_golang/prometheus" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 5deff904e19..55af951bf89 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -10,8 +10,8 @@ import ( "sort" "github.com/Masterminds/semver/v3" - "github.com/antonmedv/expr" "github.com/blackfireio/osinfo" + "github.com/expr-lang/expr" "github.com/shirou/gopsutil/v3/process" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" diff --git a/pkg/types/event.go b/pkg/types/event.go index 7d8ef5825a2..76a447bdc8c 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -4,7 +4,7 @@ import ( "net" "time" - "github.com/antonmedv/expr/vm" + "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/models" From 7b1794ff896eb679775e67cdb8b150b42d30a69a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 22 Jul 2024 12:59:24 +0200 Subject: [PATCH 233/581] CI: update test dependencies (#3146) --- docker/test/Pipfile.lock | 213 +++++++++++++++++++-------------------- 1 file changed, 106 insertions(+), 107 deletions(-) diff --git a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock index 75437876b72..2cb587b6b88 100644 --- a/docker/test/Pipfile.lock +++ b/docker/test/Pipfile.lock @@ -18,11 +18,11 @@ "default": { 
"certifi": { "hashes": [ - "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1", - "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474" + "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", + "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90" ], "markers": "python_version >= '3.6'", - "version": "==2023.11.17" + "version": "==2024.7.4" }, "cffi": { "hashes": [ @@ -180,65 +180,60 @@ }, "cryptography": { "hashes": [ - "sha256:087887e55e0b9c8724cf05361357875adb5c20dec27e5816b653492980d20380", - "sha256:09a77e5b2e8ca732a19a90c5bca2d124621a1edb5438c5daa2d2738bfeb02589", - "sha256:130c0f77022b2b9c99d8cebcdd834d81705f61c68e91ddd614ce74c657f8b3ea", - "sha256:141e2aa5ba100d3788c0ad7919b288f89d1fe015878b9659b307c9ef867d3a65", - "sha256:28cb2c41f131a5758d6ba6a0504150d644054fd9f3203a1e8e8d7ac3aea7f73a", - "sha256:2f9f14185962e6a04ab32d1abe34eae8a9001569ee4edb64d2304bf0d65c53f3", - "sha256:320948ab49883557a256eab46149df79435a22d2fefd6a66fe6946f1b9d9d008", - "sha256:36d4b7c4be6411f58f60d9ce555a73df8406d484ba12a63549c88bd64f7967f1", - "sha256:3b15c678f27d66d247132cbf13df2f75255627bcc9b6a570f7d2fd08e8c081d2", - "sha256:3dbd37e14ce795b4af61b89b037d4bc157f2cb23e676fa16932185a04dfbf635", - "sha256:4383b47f45b14459cab66048d384614019965ba6c1a1a141f11b5a551cace1b2", - "sha256:44c95c0e96b3cb628e8452ec060413a49002a247b2b9938989e23a2c8291fc90", - "sha256:4b063d3413f853e056161eb0c7724822a9740ad3caa24b8424d776cebf98e7ee", - "sha256:52ed9ebf8ac602385126c9a2fe951db36f2cb0c2538d22971487f89d0de4065a", - "sha256:55d1580e2d7e17f45d19d3b12098e352f3a37fe86d380bf45846ef257054b242", - "sha256:5ef9bc3d046ce83c4bbf4c25e1e0547b9c441c01d30922d812e887dc5f125c12", - "sha256:5fa82a26f92871eca593b53359c12ad7949772462f887c35edaf36f87953c0e2", - "sha256:61321672b3ac7aade25c40449ccedbc6db72c7f5f0fdf34def5e2f8b51ca530d", - "sha256:701171f825dcab90969596ce2af253143b93b08f1a716d4b2a9d2db5084ef7be", - 
"sha256:841ec8af7a8491ac76ec5a9522226e287187a3107e12b7d686ad354bb78facee", - "sha256:8a06641fb07d4e8f6c7dda4fc3f8871d327803ab6542e33831c7ccfdcb4d0ad6", - "sha256:8e88bb9eafbf6a4014d55fb222e7360eef53e613215085e65a13290577394529", - "sha256:a00aee5d1b6c20620161984f8ab2ab69134466c51f58c052c11b076715e72929", - "sha256:a047682d324ba56e61b7ea7c7299d51e61fd3bca7dad2ccc39b72bd0118d60a1", - "sha256:a7ef8dd0bf2e1d0a27042b231a3baac6883cdd5557036f5e8df7139255feaac6", - "sha256:ad28cff53f60d99a928dfcf1e861e0b2ceb2bc1f08a074fdd601b314e1cc9e0a", - "sha256:b9097a208875fc7bbeb1286d0125d90bdfed961f61f214d3f5be62cd4ed8a446", - "sha256:b97fe7d7991c25e6a31e5d5e795986b18fbbb3107b873d5f3ae6dc9a103278e9", - "sha256:e0ec52ba3c7f1b7d813cd52649a5b3ef1fc0d433219dc8c93827c57eab6cf888", - "sha256:ea2c3ffb662fec8bbbfce5602e2c159ff097a4631d96235fcf0fb00e59e3ece4", - "sha256:fa3dec4ba8fb6e662770b74f62f1a0c7d4e37e25b58b2bf2c1be4c95372b4a33", - "sha256:fbeb725c9dc799a574518109336acccaf1303c30d45c075c665c0793c2f79a7f" + "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709", + "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069", + "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2", + "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b", + "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e", + "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70", + "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778", + "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22", + "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895", + "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf", + "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431", + "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f", + "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947", 
+ "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74", + "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc", + "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66", + "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66", + "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf", + "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f", + "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5", + "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e", + "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f", + "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55", + "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1", + "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47", + "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5", + "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0" ], "markers": "python_version >= '3.7'", - "version": "==42.0.2" + "version": "==43.0.0" }, "docker": { "hashes": [ - "sha256:12ba681f2777a0ad28ffbcc846a69c31b4dfd9752b47eb425a274ee269c5e14b", - "sha256:323736fb92cd9418fc5e7133bc953e11a9da04f4483f828b527db553f1e7e5a3" + "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", + "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0" ], "markers": "python_version >= '3.8'", - "version": "==7.0.0" + "version": "==7.1.0" }, "execnet": { "hashes": [ - "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41", - "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af" + "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", + "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3" ], - "markers": "python_version >= '3.7'", - "version": "==2.0.2" + "markers": 
"python_version >= '3.8'", + "version": "==2.1.1" }, "idna": { "hashes": [ - "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca", - "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f" + "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", + "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" ], "markers": "python_version >= '3.5'", - "version": "==3.6" + "version": "==3.7" }, "iniconfig": { "hashes": [ @@ -250,56 +245,58 @@ }, "packaging": { "hashes": [ - "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", - "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" + "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" ], - "markers": "python_version >= '3.7'", - "version": "==23.2" + "markers": "python_version >= '3.8'", + "version": "==24.1" }, "pluggy": { "hashes": [ - "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", - "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" + "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", + "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" ], "markers": "python_version >= '3.8'", - "version": "==1.4.0" + "version": "==1.5.0" }, "psutil": { "hashes": [ - "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d", - "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73", - "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8", - "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2", - "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e", - "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36", - "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7", - 
"sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c", - "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee", - "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421", - "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf", - "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81", - "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0", - "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631", - "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4", - "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8" + "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35", + "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0", + "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c", + "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1", + "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3", + "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c", + "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd", + "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3", + "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0", + "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2", + "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6", + "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d", + "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c", + "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0", + "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132", + "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14", + "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0" 
], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==5.9.8" + "version": "==6.0.0" }, "pycparser": { "hashes": [ - "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9", - "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206" + "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", + "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc" ], - "version": "==2.21" + "markers": "python_version >= '3.8'", + "version": "==2.22" }, "pytest": { "hashes": [ - "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", - "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" + "sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6", + "sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c" ], "markers": "python_version >= '3.8'", - "version": "==8.0.0" + "version": "==8.3.1" }, "pytest-cs": { "git": "https://github.com/crowdsecurity/pytest-cs.git", @@ -327,6 +324,7 @@ "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" ], "index": "pypi", + "markers": "python_version >= '3.7'", "version": "==3.5.0" }, "python-dotenv": { @@ -396,11 +394,11 @@ }, "requests": { "hashes": [ - "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", - "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1" + "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", + "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" ], - "markers": "python_version >= '3.7'", - "version": "==2.31.0" + "markers": "python_version >= '3.8'", + "version": "==2.32.3" }, "trustme": { "hashes": [ @@ -412,11 +410,11 @@ }, "urllib3": { "hashes": [ - "sha256:051d961ad0c62a94e50ecf1af379c3aba230c66c710493493560c0c223c49f20", - "sha256:ce3711610ddce217e6d113a2732fafad960a03fd0318c91faa79481e35c11224" + 
"sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", + "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" ], "markers": "python_version >= '3.8'", - "version": "==2.2.0" + "version": "==2.2.2" } }, "develop": { @@ -482,15 +480,16 @@ "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726" ], "index": "pypi", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==0.13.13" }, "ipython": { "hashes": [ - "sha256:1050a3ab8473488d7eee163796b02e511d0735cf43a04ba2a8348bd0f2eaf8a5", - "sha256:48fbc236fbe0e138b88773fa0437751f14c3645fb483f1d4c5dee58b37e5ce73" + "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c", + "sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff" ], "markers": "python_version >= '3.11'", - "version": "==8.21.0" + "version": "==8.26.0" }, "jedi": { "hashes": [ @@ -502,35 +501,35 @@ }, "matplotlib-inline": { "hashes": [ - "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311", - "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304" + "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", + "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca" ], - "markers": "python_version >= '3.5'", - "version": "==0.1.6" + "markers": "python_version >= '3.8'", + "version": "==0.1.7" }, "parso": { "hashes": [ - "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0", - "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75" + "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", + "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d" ], "markers": "python_version >= '3.6'", - "version": "==0.8.3" + "version": "==0.8.4" }, "pexpect": { "hashes": [ "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", 
"sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f" ], - "markers": "sys_platform != 'win32'", + "markers": "sys_platform != 'win32' and sys_platform != 'emscripten'", "version": "==4.9.0" }, "prompt-toolkit": { "hashes": [ - "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d", - "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6" + "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", + "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360" ], "markers": "python_full_version >= '3.7.0'", - "version": "==3.0.43" + "version": "==3.0.47" }, "ptyprocess": { "hashes": [ @@ -541,18 +540,18 @@ }, "pure-eval": { "hashes": [ - "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350", - "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3" + "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", + "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42" ], - "version": "==0.2.2" + "version": "==0.2.3" }, "pygments": { "hashes": [ - "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c", - "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367" + "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", + "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a" ], - "markers": "python_version >= '3.7'", - "version": "==2.17.2" + "markers": "python_version >= '3.8'", + "version": "==2.18.0" }, "six": { "hashes": [ @@ -571,11 +570,11 @@ }, "traitlets": { "hashes": [ - "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74", - "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e" + "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", + "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f" ], "markers": "python_version >= '3.8'", - "version": "==5.14.1" 
+ "version": "==5.14.3" }, "wcwidth": { "hashes": [ From 61d19cff84e1f9261143feb06736ee02adb1c8ee Mon Sep 17 00:00:00 2001 From: marco Date: Mon, 22 Jul 2024 14:58:43 +0200 Subject: [PATCH 234/581] update table test --- test/bats/08_metrics_bouncer.bats | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 778452644dd..84a55dc88c1 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -216,7 +216,7 @@ teardown() { | | IPs | bytes | packets | dogyear | pound | +----------------------------------+------------------+---------+---------+---------+-------+ | CAPI (community blocklist) | - | 3.80k | 100 | - | - | - | cscli | 1 | 380 | 10 | - | - | + | cscli (manual decisions) | 1 | 380 | 10 | - | - | | lists:anotherlist | - | 0 | 0 | - | - | | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | | lists:firehol_voipbl | 51.94k | 3.85k | 58 | - | - | From 64e4ecde905a1dc0b4188cc0a6b2d26c739678e9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:29:18 +0200 Subject: [PATCH 235/581] LAPI: detailed metrics endpoint (#2858) --- cmd/crowdsec-cli/climetrics/statbouncer.go | 351 +++++++++++------ cmd/crowdsec/crowdsec.go | 13 + cmd/crowdsec/lpmetrics.go | 182 +++++++++ cmd/crowdsec/main.go | 27 +- cmd/crowdsec/serve.go | 11 + go.mod | 3 +- go.sum | 2 - pkg/apiclient/client.go | 3 + pkg/apiclient/client_test.go | 2 +- pkg/apiclient/resperr.go | 12 +- pkg/apiclient/usagemetrics.go | 29 ++ pkg/apiserver/apic.go | 72 ++-- pkg/apiserver/apic_metrics.go | 212 ++++++++++ pkg/apiserver/apiserver.go | 7 +- pkg/apiserver/controllers/controller.go | 26 +- pkg/apiserver/controllers/v1/errors.go | 30 ++ pkg/apiserver/controllers/v1/errors_test.go | 57 +++ pkg/apiserver/controllers/v1/usagemetrics.go | 204 ++++++++++ pkg/apiserver/usage_metrics_test.go | 384 +++++++++++++++++++ 
pkg/csconfig/crowdsec_service_test.go | 19 +- pkg/csconfig/database.go | 13 +- pkg/database/bouncers.go | 22 ++ pkg/database/ent/metric.go | 16 +- pkg/database/ent/metric/metric.go | 12 +- pkg/database/ent/metric/where.go | 54 +-- pkg/database/ent/metric_create.go | 16 +- pkg/database/ent/migrate/schema.go | 9 +- pkg/database/ent/mutation.go | 52 +-- pkg/database/ent/schema/metric.go | 13 +- pkg/database/flush.go | 44 ++- pkg/database/machines.go | 51 ++- pkg/database/metrics.go | 73 ++++ test/bats/08_metrics_bouncer.bats | 209 +++++++++- test/bats/08_metrics_machines.bats | 1 - test/bats/11_bouncers_tls.bats | 29 ++ 35 files changed, 1958 insertions(+), 302 deletions(-) create mode 100644 cmd/crowdsec/lpmetrics.go create mode 100644 pkg/apiclient/usagemetrics.go create mode 100644 pkg/apiserver/controllers/v1/errors_test.go create mode 100644 pkg/apiserver/controllers/v1/usagemetrics.go create mode 100644 pkg/apiserver/usage_metrics_test.go create mode 100644 pkg/database/metrics.go diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 1a803cefbd2..7d80e902961 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -5,6 +5,8 @@ import ( "encoding/json" "fmt" "io" + "sort" + "strings" "time" "github.com/jedib0t/go-pretty/v6/table" @@ -15,12 +17,15 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/models" ) -// un-aggregated data, de-normalized. +// bouncerMetricItem represents unaggregated, denormalized metric data. +// Possibly not unique if a bouncer sent the same data multiple times. 
type bouncerMetricItem struct { + collectedAt time.Time bouncerName string ipType string origin string @@ -29,14 +34,82 @@ type bouncerMetricItem struct { value float64 } +// aggregationOverTime is the first level of aggregation: we aggregate +// over time, then over ip type, then over origin. we only sum values +// for non-gauge metrics, and take the last value for gauge metrics. +type aggregationOverTime map[string]map[string]map[string]map[string]map[string]int64 + +func (a aggregationOverTime) add(bouncerName, origin, name, unit, ipType string, value float64, isGauge bool) { + if _, ok := a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin]; !ok { + a[bouncerName][origin] = make(map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name]; !ok { + a[bouncerName][origin][name] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name][unit]; !ok { + a[bouncerName][origin][name][unit] = make(map[string]int64) + } + + if isGauge { + a[bouncerName][origin][name][unit][ipType] = int64(value) + } else { + a[bouncerName][origin][name][unit][ipType] += int64(value) + } +} + +// aggregationOverIPType is the second level of aggregation: data is summed +// regardless of the metrics type (gauge or not). 
This is used to display +// table rows, they won't differentiate ipv4 and ipv6 +type aggregationOverIPType map[string]map[string]map[string]map[string]int64 + +func (a aggregationOverIPType) add(bouncerName, origin, name, unit string, value int64) { + if _, ok := a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin]; !ok { + a[bouncerName][origin] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][origin][name]; !ok { + a[bouncerName][origin][name] = make(map[string]int64) + } + + a[bouncerName][origin][name][unit] += value +} + +// aggregationOverOrigin is the third level of aggregation: these are +// the totals at the end of the table. Metrics without an origin will +// be added to the totals but not displayed in the rows, only in the footer. +type aggregationOverOrigin map[string]map[string]map[string]int64 + +func (a aggregationOverOrigin) add(bouncerName, name, unit string, value int64) { + if _, ok := a[bouncerName]; !ok { + a[bouncerName] = make(map[string]map[string]int64) + } + + if _, ok := a[bouncerName][name]; !ok { + a[bouncerName][name] = make(map[string]int64) + } + + a[bouncerName][name][unit] += value +} + type statBouncer struct { // oldest collection timestamp for each bouncer - oldestTS map[string]*time.Time - // we keep de-normalized metrics so we can iterate - // over them multiple times and keep the aggregation code simple - rawMetrics []bouncerMetricItem - aggregated map[string]map[string]map[string]map[string]int64 - aggregatedAllOrigin map[string]map[string]map[string]int64 + oldestTS map[string]time.Time + // aggregate over ip type: always sum + // [bouncer][origin][name][unit]value + aggOverIPType aggregationOverIPType + // aggregate over origin: always sum + // [bouncer][name][unit]value + aggOverOrigin aggregationOverOrigin } var knownPlurals = map[string]string{ @@ -46,15 +119,15 @@ var knownPlurals = map[string]string{ } func (s 
*statBouncer) MarshalJSON() ([]byte, error) { - return json.Marshal(s.aggregated) + return json.Marshal(s.aggOverIPType) } -func (s *statBouncer) Description() (string, string) { +func (*statBouncer) Description() (string, string) { return "Bouncer Metrics", `Network traffic blocked by bouncers.` } -func warnOnce(warningsLogged map[string]bool, msg string) { +func logWarningOnce(warningsLogged map[string]bool, msg string) { if _, ok := warningsLogged[msg]; !ok { log.Warningf(msg) @@ -62,67 +135,58 @@ func warnOnce(warningsLogged map[string]bool, msg string) { } } -func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { - if db == nil { - return nil - } - - // query all bouncer metrics that have not been flushed - - metrics, err := db.Ent.Metric.Query(). - Where( - metric.GeneratedTypeEQ(metric.GeneratedTypeRC), - ).All(ctx) - if err != nil { - return fmt.Errorf("unable to fetch metrics: %w", err) - } - - s.oldestTS = make(map[string]*time.Time) +// extractRawMetrics converts metrics from the database to a de-normalized, de-duplicated slice +// it returns the slice and the oldest timestamp for each bouncer +func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricItem, map[string]time.Time) { + oldestTS := make(map[string]time.Time) // don't spam the user with the same warnings warningsLogged := make(map[string]bool) + // store raw metrics, de-duplicated in case some were sent multiple times + uniqueRaw := make(map[bouncerMetricItem]struct{}) + for _, met := range metrics { bouncerName := met.GeneratedBy - collectedAt := met.CollectedAt - if s.oldestTS[bouncerName] == nil || collectedAt.Before(*s.oldestTS[bouncerName]) { - s.oldestTS[bouncerName] = &collectedAt - } - - type bouncerMetrics struct { + var payload struct { Metrics []models.DetailedMetrics `json:"metrics"` } - payload := bouncerMetrics{} - - err := json.Unmarshal([]byte(met.Payload), &payload) - if err != nil { + if err := json.Unmarshal([]byte(met.Payload), 
&payload); err != nil { log.Warningf("while parsing metrics for %s: %s", bouncerName, err) continue } for _, m := range payload.Metrics { - for _, item := range m.Items { - labels := item.Labels + // fields like timestamp, name, etc. are mandatory but we got pointers, so we check anyway + if m.Meta.UtcNowTimestamp == nil { + logWarningOnce(warningsLogged, "missing 'utc_now_timestamp' field in metrics reported by "+bouncerName) + continue + } + + collectedAt := time.Unix(*m.Meta.UtcNowTimestamp, 0).UTC() - // these are mandatory but we got pointers, so... + if oldestTS[bouncerName].IsZero() || collectedAt.Before(oldestTS[bouncerName]) { + oldestTS[bouncerName] = collectedAt + } + for _, item := range m.Items { valid := true if item.Name == nil { - warnOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) // no continue - keep checking the rest valid = false } if item.Unit == nil { - warnOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) valid = false } if item.Value == nil { - warnOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + logWarningOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) valid = false } @@ -130,94 +194,152 @@ func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { continue } - name := *item.Name - unit := *item.Unit - value := *item.Value - rawMetric := bouncerMetricItem{ + collectedAt: collectedAt, bouncerName: bouncerName, - ipType: labels["ip_type"], - origin: labels["origin"], - name: name, - unit: unit, - value: value, + ipType: item.Labels["ip_type"], + origin: item.Labels["origin"], + name: *item.Name, + unit: *item.Unit, + value: *item.Value, } - s.rawMetrics = append(s.rawMetrics, rawMetric) + uniqueRaw[rawMetric] 
= struct{}{} } } } - s.aggregate() + // extract raw metric structs + keys := make([]bouncerMetricItem, 0, len(uniqueRaw)) + for key := range uniqueRaw { + keys = append(keys, key) + } - return nil + // order them by timestamp + sort.Slice(keys, func(i, j int) bool { + return keys[i].collectedAt.Before(keys[j].collectedAt) + }) + + return keys, oldestTS } -func (s *statBouncer) aggregate() { - // [bouncer][origin][name][unit]value - if s.aggregated == nil { - s.aggregated = make(map[string]map[string]map[string]map[string]int64) +func (s *statBouncer) Fetch(ctx context.Context, db *database.Client) error { + if db == nil { + return nil } - if s.aggregatedAllOrigin == nil { - s.aggregatedAllOrigin = make(map[string]map[string]map[string]int64) + // query all bouncer metrics that have not been flushed + + metrics, err := db.Ent.Metric.Query(). + Where(metric.GeneratedTypeEQ(metric.GeneratedTypeRC)). + All(ctx) + if err != nil { + return fmt.Errorf("unable to fetch metrics: %w", err) } - for _, raw := range s.rawMetrics { - if _, ok := s.aggregated[raw.bouncerName]; !ok { - s.aggregated[raw.bouncerName] = make(map[string]map[string]map[string]int64) - } + // de-normalize, de-duplicate metrics and keep the oldest timestamp for each bouncer - if _, ok := s.aggregated[raw.bouncerName][raw.origin]; !ok { - s.aggregated[raw.bouncerName][raw.origin] = make(map[string]map[string]int64) - } + rawMetrics, oldestTS := s.extractRawMetrics(metrics) - if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name] = make(map[string]int64) - } + s.oldestTS = oldestTS + aggOverTime := s.newAggregationOverTime(rawMetrics) + s.aggOverIPType = s.newAggregationOverIPType(aggOverTime) + s.aggOverOrigin = s.newAggregationOverOrigin(s.aggOverIPType) - if _, ok := s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit]; !ok { - s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] = 0 - } + return nil +} - 
s.aggregated[raw.bouncerName][raw.origin][raw.name][raw.unit] += int64(raw.value) +// return true if the metric is a gauge and should not be aggregated +func (*statBouncer) isGauge(name string) bool { + return name == "active_decisions" || strings.HasSuffix(name, "_gauge") +} - if _, ok := s.aggregatedAllOrigin[raw.bouncerName]; !ok { - s.aggregatedAllOrigin[raw.bouncerName] = make(map[string]map[string]int64) - } +// formatMetricName returns the metric name to display in the table header +func (*statBouncer) formatMetricName(name string) string { + return strings.TrimSuffix(name, "_gauge") +} - if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name]; !ok { - s.aggregatedAllOrigin[raw.bouncerName][raw.name] = make(map[string]int64) - } +// formatMetricOrigin returns the origin to display in the table rows +// (for example, some users don't know what capi is) +func (*statBouncer) formatMetricOrigin(origin string) string { + switch origin { + case "CAPI": + return origin + " (community blocklist)" + case "cscli": + return origin + " (manual decisions)" + case "crowdsec": + return origin + " (security engine)" + default: + return origin + } +} + +func (s *statBouncer) newAggregationOverTime(rawMetrics []bouncerMetricItem) aggregationOverTime { + ret := aggregationOverTime{} - if _, ok := s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit]; !ok { - s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] = 0 + for _, raw := range rawMetrics { + ret.add(raw.bouncerName, raw.origin, raw.name, raw.unit, raw.ipType, raw.value, s.isGauge(raw.name)) + } + + return ret +} + +func (*statBouncer) newAggregationOverIPType(aggMetrics aggregationOverTime) aggregationOverIPType { + ret := aggregationOverIPType{} + + for bouncerName := range aggMetrics { + for origin := range aggMetrics[bouncerName] { + for name := range aggMetrics[bouncerName][origin] { + for unit := range aggMetrics[bouncerName][origin][name] { + for ipType := range 
aggMetrics[bouncerName][origin][name][unit] { + value := aggMetrics[bouncerName][origin][name][unit][ipType] + ret.add(bouncerName, origin, name, unit, value) + } + } + } } + } - s.aggregatedAllOrigin[raw.bouncerName][raw.name][raw.unit] += int64(raw.value) + return ret +} + +func (*statBouncer) newAggregationOverOrigin(aggMetrics aggregationOverIPType) aggregationOverOrigin { + ret := aggregationOverOrigin{} + + for bouncerName := range aggMetrics { + for origin := range aggMetrics[bouncerName] { + for name := range aggMetrics[bouncerName][origin] { + for unit := range aggMetrics[bouncerName][origin][name] { + val := aggMetrics[bouncerName][origin][name][unit] + ret.add(bouncerName, name, unit, val) + } + } + } } + + return ret } // bouncerTable displays a table of metrics for a single bouncer func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor string, noUnit bool) { - columns := make(map[string]map[string]bool) + columns := make(map[string]map[string]struct{}) - for _, item := range s.rawMetrics { - if item.bouncerName != bouncerName { - continue - } + bouncerData, ok := s.aggOverOrigin[bouncerName] + if !ok { + // no metrics for this bouncer, skip. how did we get here ? + // anyway we can't honor the "showEmpty" flag in this case, + // we don't even have the table headers + return + } + + for metricName, units := range bouncerData { // build a map of the metric names and units, to display dynamic columns - if _, ok := columns[item.name]; !ok { - columns[item.name] = make(map[string]bool) + columns[metricName] = make(map[string]struct{}) + for unit := range units { + columns[metricName][unit] = struct{}{} } - - columns[item.name][item.unit] = true } - // no metrics for this bouncer, skip. how did we get here ? 
- // anyway we can't honor the "showEmpty" flag in this case, - // we don't heven have the table headers - if len(columns) == 0 { return } @@ -238,11 +360,11 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor for _, unit := range maptools.SortedKeys(columns[name]) { colNum += 1 - header1 = append(header1, name) + header1 = append(header1, s.formatMetricName(name)) // we don't add "s" to random words - if knownPlurals[unit] != "" { - unit = knownPlurals[unit] + if plural, ok := knownPlurals[unit]; ok { + unit = plural } header2 = append(header2, unit) @@ -264,7 +386,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor // sort all the ranges for stable output - for _, origin := range maptools.SortedKeys(s.aggregated[bouncerName]) { + for _, origin := range maptools.SortedKeys(s.aggOverIPType[bouncerName]) { if origin == "" { // if the metric has no origin (i.e. processed bytes/packets) // we don't display it in the table body but it still gets aggreagted @@ -272,21 +394,15 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor continue } - metrics := s.aggregated[bouncerName][origin] - - // some users don't know what capi is - if origin == "CAPI" { - origin += " (community blocklist)" - } + metrics := s.aggOverIPType[bouncerName][origin] - row := table.Row{origin} + row := table.Row{s.formatMetricOrigin(origin)} for _, name := range maptools.SortedKeys(columns) { for _, unit := range maptools.SortedKeys(columns[name]) { valStr := "-" - val, ok := metrics[name][unit] - if ok { + if val, ok := metrics[name][unit]; ok { valStr = formatNumber(val, !noUnit) } @@ -299,7 +415,7 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor numRows += 1 } - totals := s.aggregatedAllOrigin[bouncerName] + totals := s.aggOverOrigin[bouncerName] if numRows == 0 { t.Style().Options.SeparateFooter = false @@ -319,27 +435,20 @@ func (s *statBouncer) bouncerTable(out 
io.Writer, bouncerName string, wantColor title = fmt.Sprintf("%s (%s)", title, bouncerName) if s.oldestTS != nil { - // if we change this to .Local() beware of tests + // if you change this to .Local() beware of tests title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } - title += ":" - // don't use SetTitle() because it draws the title inside table box - io.WriteString(out, title+"\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") // empty line between tables io.WriteString(out, "\n") } // Table displays a table of metrics for each bouncer func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool) { - bouncerNames := make(map[string]bool) - for _, item := range s.rawMetrics { - bouncerNames[item.bouncerName] = true - } - - for _, bouncerName := range maptools.SortedKeys(bouncerNames) { + for _, bouncerName := range maptools.SortedKeys(s.aggOverOrigin) { s.bouncerTable(out, bouncerName, wantColor, noUnit) } } diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 2be8a84fec0..5aafc6b0dfe 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -140,6 +140,19 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H }) outputWg.Wait() + mp := NewMetricsProvider( + apiClient, + lpMetricsDefaultInterval, + log.WithField("service", "lpmetrics"), + []string{}, + datasources, + hub, + ) + + lpMetricsTomb.Go(func() error { + return mp.Run(context.Background(), &lpMetricsTomb) + }) + if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { aggregated := false if cConfig.Prometheus.Level == configuration.CFG_METRICS_AGGREGATE { diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go new file mode 100644 index 00000000000..0fd27054071 --- /dev/null +++ b/cmd/crowdsec/lpmetrics.go @@ -0,0 +1,182 @@ +package main + +import ( + "context" + "errors" + "net/http" + "time" + + 
"github.com/sirupsen/logrus" + + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/fflag" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +const lpMetricsDefaultInterval = 30 * time.Minute + +// MetricsProvider collects metrics from the LP and sends them to the LAPI +type MetricsProvider struct { + apic *apiclient.ApiClient + interval time.Duration + static staticMetrics + logger *logrus.Entry +} + +type staticMetrics struct { + osName string + osVersion string + startupTS int64 + featureFlags []string + consoleOptions []string + datasourceMap map[string]int64 + hubState models.HubItems +} + +func getHubState(hub *cwhub.Hub) models.HubItems { + ret := models.HubItems{} + + for _, itemType := range cwhub.ItemTypes { + ret[itemType] = []models.HubItem{} + items, _ := hub.GetInstalledItemsByType(itemType) + cwhub.SortItemSlice(items) + + for _, item := range items { + status := "official" + if item.State.IsLocal() { + status = "custom" + } + if item.State.Tainted { + status = "tainted" + } + ret[itemType] = append(ret[itemType], models.HubItem{ + Name: item.Name, + Status: status, + Version: item.Version, + }) + } + } + + return ret +} + +// newStaticMetrics is called when the process starts, or reloads the configuration +func newStaticMetrics(consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) staticMetrics { + datasourceMap := map[string]int64{} + + for _, ds := range datasources { + datasourceMap[ds.GetName()] += 1 + } + + osName, osVersion := version.DetectOS() + + return staticMetrics{ + osName: osName, + osVersion: osVersion, + startupTS: time.Now().UTC().Unix(), + featureFlags: fflag.Crowdsec.GetEnabledFeatures(), + consoleOptions: 
consoleOptions, + datasourceMap: datasourceMap, + hubState: getHubState(hub), + } +} + +func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logger *logrus.Entry, + consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { + return &MetricsProvider{ + apic: apic, + interval: interval, + logger: logger, + static: newStaticMetrics(consoleOptions, datasources, hub), + } +} + +func (m *MetricsProvider) metricsPayload() *models.AllMetrics { + os := &models.OSversion{ + Name: ptr.Of(m.static.osName), + Version: ptr.Of(m.static.osVersion), + } + + base := models.BaseMetrics{ + UtcStartupTimestamp: ptr.Of(m.static.startupTS), + Os: os, + Version: ptr.Of(version.String()), + FeatureFlags: m.static.featureFlags, + Metrics: make([]*models.DetailedMetrics, 0), + } + + met := &models.LogProcessorsMetrics{ + BaseMetrics: base, + Datasources: m.static.datasourceMap, + HubItems: m.static.hubState, + } + + met.Metrics = append(met.Metrics, &models.DetailedMetrics{ + Meta: &models.MetricsMeta{ + UtcNowTimestamp: ptr.Of(time.Now().Unix()), + WindowSizeSeconds: ptr.Of(int64(m.interval.Seconds())), + }, + Items: make([]*models.MetricsDetailItem, 0), + }) + + return &models.AllMetrics{ + LogProcessors: []*models.LogProcessorsMetrics{met}, + } +} + +func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { + defer trace.CatchPanic("crowdsec/MetricsProvider.Run") + + if m.interval == time.Duration(0) { + return nil + } + + met := m.metricsPayload() + + ticker := time.NewTicker(1) // Send on start + + for { + select { + case <-ticker.C: + ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + _, resp, err := m.apic.UsageMetrics.Add(ctxTime, met) + switch { + case errors.Is(err, context.DeadlineExceeded): + m.logger.Warnf("timeout sending lp metrics") + ticker.Reset(m.interval) + continue + case err != nil && resp != nil && resp.Response.StatusCode == http.StatusNotFound: + 
m.logger.Warnf("metrics endpoint not found, older LAPI?") + ticker.Reset(m.interval) + continue + case err != nil: + m.logger.Warnf("failed to send lp metrics: %s", err) + ticker.Reset(m.interval) + continue + } + + if resp.Response.StatusCode != http.StatusCreated { + m.logger.Warnf("failed to send lp metrics: %s", resp.Response.Status) + ticker.Reset(m.interval) + continue + } + + ticker.Reset(m.interval) + + m.logger.Tracef("lp usage metrics sent") + case <-myTomb.Dying(): + ticker.Stop() + return nil + } + } +} diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 26e39eb069c..18416e044e7 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -29,28 +29,29 @@ import ( ) var ( - /*tombs for the parser, buckets and outputs.*/ - acquisTomb tomb.Tomb - parsersTomb tomb.Tomb - bucketsTomb tomb.Tomb - outputsTomb tomb.Tomb - apiTomb tomb.Tomb - crowdsecTomb tomb.Tomb - pluginTomb tomb.Tomb + // tombs for the parser, buckets and outputs. + acquisTomb tomb.Tomb + parsersTomb tomb.Tomb + bucketsTomb tomb.Tomb + outputsTomb tomb.Tomb + apiTomb tomb.Tomb + crowdsecTomb tomb.Tomb + pluginTomb tomb.Tomb + lpMetricsTomb tomb.Tomb flags *Flags - /*the state of acquisition*/ + // the state of acquisition dataSources []acquisition.DataSource - /*the state of the buckets*/ + // the state of the buckets holders []leakybucket.BucketFactory buckets *leakybucket.Buckets inputLineChan chan types.Event inputEventChan chan types.Event outputEventChan chan types.Event // the buckets init returns its own chan that is used for multiplexing - /*settings*/ - lastProcessedItem time.Time /*keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them.*/ + // settings + lastProcessedItem time.Time // keep track of last item timestamp in time-machine. it is used to GC buckets when we dump them. 
pluginBroker csplugin.PluginBroker ) @@ -307,7 +308,7 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if cConfig.API != nil && cConfig.API.Server != nil { cConfig.API.Server.OnlineClient = nil } - /*if the api is disabled as well, just read file and exit, don't daemonize*/ + // if the api is disabled as well, just read file and exit, don't daemonize if cConfig.DisableAPI { cConfig.Common.Daemonize = false } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 5fb7b86f181..f1a658e9512 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -60,6 +60,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { apiTomb = tomb.Tomb{} crowdsecTomb = tomb.Tomb{} pluginTomb = tomb.Tomb{} + lpMetricsTomb = tomb.Tomb{} cConfig, err := LoadConfig(flags.ConfigFile, flags.DisableAgent, flags.DisableAPI, false) if err != nil { @@ -179,6 +180,15 @@ func ShutdownCrowdsecRoutines() error { log.Warningf("Outputs didn't finish in time, some events may have not been flushed") } + lpMetricsTomb.Kill(nil) + + if err := lpMetricsTomb.Wait(); err != nil { + log.Warningf("Metrics returned error : %s", err) + reterr = err + } + + log.Debugf("metrics are done") + // He's dead, Jim. 
crowdsecTomb.Kill(nil) @@ -322,6 +332,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { apiTomb = tomb.Tomb{} crowdsecTomb = tomb.Tomb{} pluginTomb = tomb.Tomb{} + lpMetricsTomb = tomb.Tomb{} ctx := context.TODO() diff --git a/go.mod b/go.mod index f36bbcd996d..af9d7550b94 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 - github.com/antonmedv/expr v1.15.3 github.com/appleboy/gin-jwt/v2 v2.9.2 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 @@ -33,6 +32,7 @@ require ( github.com/dghubble/sling v1.4.2 github.com/docker/docker v24.0.9+incompatible github.com/docker/go-connections v0.4.0 + github.com/expr-lang/expr v1.16.9 github.com/fatih/color v1.16.0 github.com/fsnotify/fsnotify v1.7.0 github.com/gin-gonic/gin v1.9.1 @@ -111,7 +111,6 @@ require ( github.com/creack/pty v1.1.18 // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/expr-lang/expr v1.16.9 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logr/logr v1.2.4 // indirect diff --git a/go.sum b/go.sum index d4cc2651f0f..282f10d6367 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/alexliesenfeld/health v0.8.0 h1:lCV0i+ZJPTbqP7LfKG7p3qZBl5VhelwUFCIVWl77fgk= github.com/alexliesenfeld/health v0.8.0/go.mod h1:TfNP0f+9WQVWMQRzvMUjlws4ceXKEL3WR+6Hp95HUFc= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI= -github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apparentlymart/go-textseg/v13 v13.0.0 
h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/appleboy/gin-jwt/v2 v2.9.2 h1:GeS3lm9mb9HMmj7+GNjYUtpp3V1DAQ1TkUFa5poiZ7Y= diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index b702829efd3..3abd42cf009 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -39,6 +39,7 @@ type ApiClient struct { Metrics *MetricsService Signal *SignalService HeartBeat *HeartBeatService + UsageMetrics *UsageMetricsService } func (a *ApiClient) GetClient() *http.Client { @@ -108,6 +109,7 @@ func NewClient(config *Config) (*ApiClient, error) { c.Signal = (*SignalService)(&c.common) c.DecisionDelete = (*DecisionDeleteService)(&c.common) c.HeartBeat = (*HeartBeatService)(&c.common) + c.UsageMetrics = (*UsageMetricsService)(&c.common) return c, nil } @@ -144,6 +146,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt c.Signal = (*SignalService)(&c.common) c.DecisionDelete = (*DecisionDeleteService)(&c.common) c.HeartBeat = (*HeartBeatService)(&c.common) + c.UsageMetrics = (*UsageMetricsService)(&c.common) return c, nil } diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index 2adba170584..dd09811924f 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -348,5 +348,5 @@ func TestNewClientBadAnswer(t *testing.T) { URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) - cstest.RequireErrorContains(t, err, "invalid body: invalid character 'b' looking for beginning of value") + cstest.RequireErrorContains(t, err, "invalid body: bad") } diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index ff954a73609..e8f12ee9f4e 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -34,12 +34,18 @@ func CheckResponse(r *http.Response) error { data, err := io.ReadAll(r.Body) if err != nil || len(data) == 0 { - ret.Message = ptr.Of(fmt.Sprintf("http code 
%d, no error message", r.StatusCode)) + ret.Message = ptr.Of(fmt.Sprintf("http code %d, no response body", r.StatusCode)) return ret } - if err := json.Unmarshal(data, ret); err != nil { - return fmt.Errorf("http code %d, invalid body: %w", r.StatusCode, err) + switch r.StatusCode { + case http.StatusUnprocessableEntity: + ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid request: %s", r.StatusCode, string(data))) + default: + if err := json.Unmarshal(data, ret); err != nil { + ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid body: %s", r.StatusCode, string(data))) + return ret + } } return ret diff --git a/pkg/apiclient/usagemetrics.go b/pkg/apiclient/usagemetrics.go new file mode 100644 index 00000000000..1d822bb5c1e --- /dev/null +++ b/pkg/apiclient/usagemetrics.go @@ -0,0 +1,29 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type UsageMetricsService service + +func (s *UsageMetricsService) Add(ctx context.Context, metrics *models.AllMetrics) (interface{}, *Response, error) { + u := fmt.Sprintf("%s/usage-metrics", s.client.URLPrefix) + + req, err := s.client.NewRequest(http.MethodPost, u, &metrics) + if err != nil { + return nil, nil, err + } + + var response interface{} + + resp, err := s.client.Do(ctx, req, &response) + if err != nil { + return nil, resp, err + } + + return &response, resp, nil +} diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 68dc94367e2..284d0acdabf 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -35,26 +35,30 @@ import ( const ( // delta values must be smaller than the interval - pullIntervalDefault = time.Hour * 2 - pullIntervalDelta = 5 * time.Minute - pushIntervalDefault = time.Second * 10 - pushIntervalDelta = time.Second * 7 - metricsIntervalDefault = time.Minute * 30 - metricsIntervalDelta = time.Minute * 15 + pullIntervalDefault = time.Hour * 2 + pullIntervalDelta = 5 * time.Minute + pushIntervalDefault = 
time.Second * 10 + pushIntervalDelta = time.Second * 7 + metricsIntervalDefault = time.Minute * 30 + metricsIntervalDelta = time.Minute * 15 + usageMetricsInterval = time.Minute * 30 + usageMetricsIntervalFirst = time.Minute * 15 ) type apic struct { // when changing the intervals in tests, always set *First too // or they can be negative - pullInterval time.Duration - pullIntervalFirst time.Duration - pushInterval time.Duration - pushIntervalFirst time.Duration - metricsInterval time.Duration - metricsIntervalFirst time.Duration - dbClient *database.Client - apiClient *apiclient.ApiClient - AlertsAddChan chan []*models.Alert + pullInterval time.Duration + pullIntervalFirst time.Duration + pushInterval time.Duration + pushIntervalFirst time.Duration + metricsInterval time.Duration + metricsIntervalFirst time.Duration + usageMetricsInterval time.Duration + usageMetricsIntervalFirst time.Duration + dbClient *database.Client + apiClient *apiclient.ApiClient + AlertsAddChan chan []*models.Alert mu sync.Mutex pushTomb tomb.Tomb @@ -175,24 +179,26 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con var err error ret := &apic{ - AlertsAddChan: make(chan []*models.Alert), - dbClient: dbClient, - mu: sync.Mutex{}, - startup: true, - credentials: config.Credentials, - pullTomb: tomb.Tomb{}, - pushTomb: tomb.Tomb{}, - metricsTomb: tomb.Tomb{}, - scenarioList: make([]string, 0), - consoleConfig: consoleConfig, - pullInterval: pullIntervalDefault, - pullIntervalFirst: randomDuration(pullIntervalDefault, pullIntervalDelta), - pushInterval: pushIntervalDefault, - pushIntervalFirst: randomDuration(pushIntervalDefault, pushIntervalDelta), - metricsInterval: metricsIntervalDefault, - metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), - isPulling: make(chan bool, 1), - whitelists: apicWhitelist, + AlertsAddChan: make(chan []*models.Alert), + dbClient: dbClient, + mu: sync.Mutex{}, + startup: true, + credentials: 
config.Credentials, + pullTomb: tomb.Tomb{}, + pushTomb: tomb.Tomb{}, + metricsTomb: tomb.Tomb{}, + scenarioList: make([]string, 0), + consoleConfig: consoleConfig, + pullInterval: pullIntervalDefault, + pullIntervalFirst: randomDuration(pullIntervalDefault, pullIntervalDelta), + pushInterval: pushIntervalDefault, + pushIntervalFirst: randomDuration(pushIntervalDefault, pushIntervalDelta), + metricsInterval: metricsIntervalDefault, + metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), + usageMetricsInterval: usageMetricsInterval, + usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalFirst), + isPulling: make(chan bool, 1), + whitelists: apicWhitelist, } password := strfmt.Password(config.Credentials.Password) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index b8e23629e1e..54640afc2d0 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -2,7 +2,10 @@ package apiserver import ( "context" + "encoding/json" + "net/http" "slices" + "strings" "time" log "github.com/sirupsen/logrus" @@ -11,9 +14,170 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/fflag" "github.com/crowdsecurity/crowdsec/pkg/models" ) +type dbPayload struct { + Metrics []*models.DetailedMetrics `json:"metrics"` +} + +func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { + allMetrics := &models.AllMetrics{} + metricsIds := make([]int, 0) + + lps, err := a.dbClient.ListMachines() + if err != nil { + return nil, nil, err + } + + bouncers, err := a.dbClient.ListBouncers() + if err != nil { + return nil, nil, err + } + + for _, bouncer := range bouncers { + dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(bouncer.Name) + if err != nil { + log.Errorf("unable to get bouncer usage metrics: %s", err) + continue + } + + 
rcMetrics := models.RemediationComponentsMetrics{} + + rcMetrics.Os = &models.OSversion{ + Name: ptr.Of(bouncer.Osname), + Version: ptr.Of(bouncer.Osversion), + } + rcMetrics.Type = bouncer.Type + rcMetrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") + rcMetrics.Version = ptr.Of(bouncer.Version) + rcMetrics.Name = bouncer.Name + rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() + + rcMetrics.Metrics = make([]*models.DetailedMetrics, 0) + + // Might seem weird, but we duplicate the bouncers if we have multiple unsent metrics + for _, dbMetric := range dbMetrics { + dbPayload := &dbPayload{} + // Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically + metricsIds = append(metricsIds, dbMetric.ID) + + err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) + if err != nil { + log.Errorf("unable to unmarshal bouncer metric (%s)", err) + continue + } + + rcMetrics.Metrics = append(rcMetrics.Metrics, dbPayload.Metrics...) + } + + allMetrics.RemediationComponents = append(allMetrics.RemediationComponents, &rcMetrics) + } + + for _, lp := range lps { + dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(lp.MachineId) + if err != nil { + log.Errorf("unable to get LP usage metrics: %s", err) + continue + } + + lpMetrics := models.LogProcessorsMetrics{} + + lpMetrics.Os = &models.OSversion{ + Name: ptr.Of(lp.Osname), + Version: ptr.Of(lp.Osversion), + } + lpMetrics.FeatureFlags = strings.Split(lp.Featureflags, ",") + lpMetrics.Version = ptr.Of(lp.Version) + lpMetrics.Name = lp.MachineId + lpMetrics.LastPush = lp.LastPush.UTC().Unix() + lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() + + lpMetrics.Datasources = lp.Datasources + + if lp.Hubstate != nil { + // must carry over the hub state even if nothing is installed + hubItems := models.HubItems{} + for itemType, items := range lp.Hubstate { + hubItems[itemType] = []models.HubItem{} + for _, item := range items { + hubItems[itemType] = 
append(hubItems[itemType], models.HubItem{ + Name: item.Name, + Status: item.Status, + Version: item.Version, + }) + } + + lpMetrics.HubItems = hubItems + } + } else { + lpMetrics.HubItems = models.HubItems{} + } + + lpMetrics.Metrics = make([]*models.DetailedMetrics, 0) + + for _, dbMetric := range dbMetrics { + dbPayload := &dbPayload{} + // Append no matter what, if we cannot unmarshal, there's no way we'll be able to fix it automatically + metricsIds = append(metricsIds, dbMetric.ID) + + err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) + if err != nil { + log.Errorf("unable to unmarshal log processor metric (%s)", err) + continue + } + + lpMetrics.Metrics = append(lpMetrics.Metrics, dbPayload.Metrics...) + } + + allMetrics.LogProcessors = append(allMetrics.LogProcessors, &lpMetrics) + } + + // FIXME: all of this should only be done once on startup/reload + consoleOptions := strings.Join(csconfig.GetConfig().API.Server.ConsoleConfig.EnabledOptions(), ",") + allMetrics.Lapi = &models.LapiMetrics{ + ConsoleOptions: models.ConsoleOptions{ + consoleOptions, + }, + } + + osName, osVersion := version.DetectOS() + + allMetrics.Lapi.Os = &models.OSversion{ + Name: ptr.Of(osName), + Version: ptr.Of(osVersion), + } + allMetrics.Lapi.Version = ptr.Of(version.String()) + allMetrics.Lapi.FeatureFlags = fflag.Crowdsec.GetEnabledFeatures() + + allMetrics.Lapi.Metrics = make([]*models.DetailedMetrics, 0) + + allMetrics.Lapi.Metrics = append(allMetrics.Lapi.Metrics, &models.DetailedMetrics{ + Meta: &models.MetricsMeta{ + UtcNowTimestamp: ptr.Of(time.Now().UTC().Unix()), + WindowSizeSeconds: ptr.Of(int64(a.metricsInterval.Seconds())), + }, + Items: make([]*models.MetricsDetailItem, 0), + }) + + // Force an actual slice to avoid non existing fields in the json + if allMetrics.RemediationComponents == nil { + allMetrics.RemediationComponents = make([]*models.RemediationComponentsMetrics, 0) + } + + if allMetrics.LogProcessors == nil { + allMetrics.LogProcessors = 
make([]*models.LogProcessorsMetrics, 0) + } + + return allMetrics, metricsIds, nil +} + +func (a *apic) MarkUsageMetricsAsSent(ids []int) error { + return a.dbClient.MarkUsageMetricsAsSent(ids) +} + func (a *apic) GetMetrics() (*models.Metrics, error) { machines, err := a.dbClient.ListMachines() if err != nil { @@ -160,3 +324,51 @@ func (a *apic) SendMetrics(stop chan (bool)) { } } } + +func (a *apic) SendUsageMetrics() { + defer trace.CatchPanic("lapi/usageMetricsToAPIC") + + firstRun := true + + ticker := time.NewTicker(a.usageMetricsIntervalFirst) + + for { + select { + case <-a.metricsTomb.Dying(): + // The normal metrics routine also kills push/pull tombs, does that make sense ? + ticker.Stop() + return + case <-ticker.C: + if firstRun { + firstRun = false + + ticker.Reset(a.usageMetricsInterval) + } + + metrics, metricsId, err := a.GetUsageMetrics() + if err != nil { + log.Errorf("unable to get usage metrics: %s", err) + continue + } + + _, resp, err := a.apiClient.UsageMetrics.Add(context.Background(), metrics) + if err != nil { + log.Errorf("unable to send usage metrics: %s", err) + + if resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { + // In case of 422, mark the metrics as sent anyway, the API did not like what we sent, + // and it's unlikely we'll be able to fix it + continue + } + } + + err = a.MarkUsageMetricsAsSent(metricsId) + if err != nil { + log.Errorf("unable to mark usage metrics as sent: %s", err) + continue + } + + log.Infof("Sent %d usage metrics", len(metricsId)) + } + } +} diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index c6074801d7e..bd0b5d39bf4 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -367,6 +367,11 @@ func (s *APIServer) Run(apiReady chan bool) error { s.apic.SendMetrics(make(chan bool)) return nil }) + + s.apic.metricsTomb.Go(func() error { + s.apic.SendUsageMetrics() + return nil + }) } 
s.httpServerTomb.Go(func() error { @@ -375,7 +380,7 @@ func (s *APIServer) Run(apiReady chan bool) error { if err := s.httpServerTomb.Wait(); err != nil { return fmt.Errorf("local API server stopped with error: %w", err) - } + } return nil } diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 51f359244be..8175f431384 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -4,12 +4,13 @@ import ( "context" "net" "net/http" + "strings" "github.com/alexliesenfeld/health" "github.com/gin-gonic/gin" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -59,6 +60,23 @@ func serveHealth() http.HandlerFunc { return health.NewHandler(checker) } +func eitherAuthMiddleware(jwtMiddleware gin.HandlerFunc, apiKeyMiddleware gin.HandlerFunc) gin.HandlerFunc { + return func(c *gin.Context) { + switch { + case c.GetHeader("X-Api-Key") != "": + apiKeyMiddleware(c) + case c.GetHeader("Authorization") != "": + jwtMiddleware(c) + // uh no auth header. is this TLS with mutual authentication? 
+ case strings.HasPrefix(c.Request.UserAgent(), "crowdsec/"): + // guess log processors by sniffing user-agent + jwtMiddleware(c) + default: + apiKeyMiddleware(c) + } + } +} + func (c *Controller) NewV1() error { var err error @@ -117,6 +135,12 @@ func (c *Controller) NewV1() error { apiKeyAuth.HEAD("/decisions/stream", c.HandlerV1.StreamDecision) } + eitherAuth := groupV1.Group("") + eitherAuth.Use(eitherAuthMiddleware(c.HandlerV1.Middlewares.JWT.Middleware.MiddlewareFunc(), c.HandlerV1.Middlewares.APIKey.MiddlewareFunc())) + { + eitherAuth.POST("/usage-metrics", c.HandlerV1.UsageMetrics) + } + return nil } diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index 9004528e1b1..d661de44b0e 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -3,6 +3,7 @@ package v1 import ( "errors" "net/http" + "strings" "github.com/gin-gonic/gin" @@ -37,3 +38,32 @@ func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { return } } + +// collapseRepeatedPrefix collapses repeated occurrences of a given prefix in the text +func collapseRepeatedPrefix(text string, prefix string) string { + count := 0 + for strings.HasPrefix(text, prefix) { + count++ + text = strings.TrimPrefix(text, prefix) + } + + if count > 0 { + return prefix + text + } + + return text +} + +// RepeatedPrefixError wraps an error and removes the repeating prefix from its message +type RepeatedPrefixError struct { + OriginalError error + Prefix string +} + +func (e RepeatedPrefixError) Error() string { + return collapseRepeatedPrefix(e.OriginalError.Error(), e.Prefix) +} + +func (e RepeatedPrefixError) Unwrap() error { + return e.OriginalError +} diff --git a/pkg/apiserver/controllers/v1/errors_test.go b/pkg/apiserver/controllers/v1/errors_test.go new file mode 100644 index 00000000000..89c561f83bd --- /dev/null +++ b/pkg/apiserver/controllers/v1/errors_test.go @@ -0,0 +1,57 @@ +package v1 + +import ( + "errors" + 
"testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCollapseRepeatedPrefix(t *testing.T) { + tests := []struct { + input string + prefix string + want string + }{ + { + input: "aaabbbcccaaa", + prefix: "aaa", + want: "aaabbbcccaaa", + }, { + input: "hellohellohello world", + prefix: "hello", + want: "hello world", + }, { + input: "ababababxyz", + prefix: "ab", + want: "abxyz", + }, { + input: "xyzxyzxyzxyzxyz", + prefix: "xyz", + want: "xyz", + }, { + input: "123123123456", + prefix: "456", + want: "123123123456", + }, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + assert.Equal(t, tt.want, collapseRepeatedPrefix(tt.input, tt.prefix)) + }) + } +} + +func TestRepeatedPrefixError(t *testing.T) { + originalErr := errors.New("hellohellohello world") + wrappedErr := RepeatedPrefixError{OriginalError: originalErr, Prefix: "hello"} + + want := "hello world" + + assert.Equal(t, want, wrappedErr.Error()) + + assert.Equal(t, originalErr, errors.Unwrap(wrappedErr)) + require.ErrorIs(t, wrappedErr, originalErr) +} diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go new file mode 100644 index 00000000000..74f27bb6cf4 --- /dev/null +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -0,0 +1,204 @@ +package v1 + +import ( + "encoding/json" + "errors" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/ptr" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +// updateBaseMetrics updates the base metrics for a machine or bouncer +func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { + switch { + case 
machineID != "": + c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems, datasources) + case bouncer != nil: + c.DBClient.BouncerUpdateBaseMetrics(bouncer.Name, bouncer.Type, baseMetrics) + default: + return errors.New("no machineID or bouncerName set") + } + + return nil +} + +// UsageMetrics receives metrics from log processors and remediation components +func (c *Controller) UsageMetrics(gctx *gin.Context) { + var input models.AllMetrics + + logger := log.WithField("func", "UsageMetrics") + + // parse the payload + + if err := gctx.ShouldBindJSON(&input); err != nil { + logger.Errorf("Failed to bind json: %s", err) + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + + return + } + + if err := input.Validate(strfmt.Default); err != nil { + // work around a nuisance in the generated code + cleanErr := RepeatedPrefixError{ + OriginalError: err, + Prefix: "validation failure list:\n", + } + logger.Errorf("Failed to validate usage metrics: %s", cleanErr) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": cleanErr.Error()}) + + return + } + + var ( + generatedType metric.GeneratedType + generatedBy string + ) + + bouncer, _ := getBouncerFromContext(gctx) + if bouncer != nil { + logger.Tracef("Received usage metris for bouncer: %s", bouncer.Name) + + generatedType = metric.GeneratedTypeRC + generatedBy = bouncer.Name + } + + machineID, _ := getMachineIDFromContext(gctx) + if machineID != "" { + logger.Tracef("Received usage metrics for log processor: %s", machineID) + + generatedType = metric.GeneratedTypeLP + generatedBy = machineID + } + + if generatedBy == "" { + // how did we get here? 
+ logger.Error("No machineID or bouncer in request context after authentication") + gctx.JSON(http.StatusInternalServerError, gin.H{"message": "No machineID or bouncer in request context after authentication"}) + + return + } + + if machineID != "" && bouncer != nil { + logger.Errorf("Payload has both machineID and bouncer") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has both LP and RC data"}) + + return + } + + var ( + payload map[string]any + baseMetrics models.BaseMetrics + hubItems models.HubItems + datasources map[string]int64 + ) + + switch len(input.LogProcessors) { + case 0: + if machineID != "" { + logger.Errorf("Missing log processor data") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Missing log processor data"}) + + return + } + case 1: + // the final slice can't have more than one item, + // guaranteed by the swagger schema + item0 := input.LogProcessors[0] + + err := item0.Validate(strfmt.Default) + if err != nil { + logger.Errorf("Failed to validate log processor data: %s", err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) + + return + } + + payload = map[string]any{ + "metrics": item0.Metrics, + } + baseMetrics = item0.BaseMetrics + hubItems = item0.HubItems + datasources = item0.Datasources + default: + logger.Errorf("Payload has more than one log processor") + // this is not checked in the swagger schema + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has more than one log processor"}) + + return + } + + switch len(input.RemediationComponents) { + case 0: + if bouncer != nil { + logger.Errorf("Missing remediation component data") + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Missing remediation component data"}) + + return + } + case 1: + item0 := input.RemediationComponents[0] + + err := item0.Validate(strfmt.Default) + if err != nil { + logger.Errorf("Failed to validate remediation component data: %s", err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": 
err.Error()}) + + return + } + + payload = map[string]any{ + "type": item0.Type, + "metrics": item0.Metrics, + } + baseMetrics = item0.BaseMetrics + default: + gctx.JSON(http.StatusBadRequest, gin.H{"message": "Payload has more than one remediation component"}) + return + } + + if baseMetrics.Os == nil { + baseMetrics.Os = &models.OSversion{ + Name: ptr.Of(""), + Version: ptr.Of(""), + } + } + + err := c.updateBaseMetrics(machineID, bouncer, baseMetrics, hubItems, datasources) + if err != nil { + logger.Errorf("Failed to update base metrics: %s", err) + c.HandleDBErrors(gctx, err) + + return + } + + jsonPayload, err := json.Marshal(payload) + if err != nil { + logger.Errorf("Failed to marshal usage metrics: %s", err) + c.HandleDBErrors(gctx, err) + + return + } + + receivedAt := time.Now().UTC() + + if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { + logger.Error(err) + c.HandleDBErrors(gctx, err) + + return + } + + // if CreateMetrics() returned nil, the metric was already there, we're good + // and don't split hair about 201 vs 200/204 + + gctx.Status(http.StatusCreated) +} diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go new file mode 100644 index 00000000000..41dd0ccdc2c --- /dev/null +++ b/pkg/apiserver/usage_metrics_test.go @@ -0,0 +1,384 @@ +package apiserver + +import ( + "context" + "net/http" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +func TestLPMetrics(t *testing.T) { + tests := []struct { + name string + body string + expectedStatusCode int + expectedResponse string + expectedMetricsCount int + expectedOSName string + expectedOSVersion string + expectedFeatureFlags string + authType string + }{ + { + name: "empty metrics for LP", + body: `{ + }`, + expectedStatusCode: 400, + expectedResponse: "Missing log processor 
data", + authType: PASSWORD, + }, + { + name: "basic metrics with empty dynamic metrics for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: PASSWORD, + }, + { + name: "basic metrics with dynamic metrics for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [{"meta":{"utc_now_timestamp":42, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }, {"meta":{"utc_now_timestamp":43, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: PASSWORD, + }, + { + name: "wrong auth type for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 400, + expectedResponse: "Missing remediation component data", + authType: APIKEY, + }, + { + name: "missing OS field for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedResponse: "", + expectedMetricsCount: 1, + 
expectedFeatureFlags: "a,b,c", + authType: PASSWORD, + }, + { + name: "missing datasources for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "hub_items": {} + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "log_processors.0.datasources in body is required", + authType: PASSWORD, + }, + { + name: "missing feature flags for LP", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedOSName: "foo", + expectedOSVersion: "42", + authType: PASSWORD, + }, + { + name: "missing OS name", + body: ` +{ + "log_processors": [ + { + "version": "1.42", + "os": {"version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"], + "datasources": {"file": 42}, + "hub_items": {} + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "log_processors.0.os.name in body is required", + authType: PASSWORD, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lapi := SetupLAPITest(t) + + dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + if err != nil { + t.Fatalf("unable to create database client: %s", err) + } + + w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + + assert.Equal(t, tt.expectedStatusCode, w.Code) + assert.Contains(t, w.Body.String(), tt.expectedResponse) + + machine, _ := dbClient.QueryMachineByID("test") + metrics, _ := dbClient.GetLPUsageMetricsByMachineID("test") + + assert.Len(t, metrics, tt.expectedMetricsCount) + assert.Equal(t, tt.expectedOSName, machine.Osname) + assert.Equal(t, tt.expectedOSVersion, machine.Osversion) + assert.Equal(t, 
tt.expectedFeatureFlags, machine.Featureflags) + + if len(metrics) > 0 { + assert.Equal(t, "test", metrics[0].GeneratedBy) + assert.Equal(t, metric.GeneratedType("LP"), metrics[0].GeneratedType) + } + }) + } +} + +func TestRCMetrics(t *testing.T) { + tests := []struct { + name string + body string + expectedStatusCode int + expectedResponse string + expectedMetricsCount int + expectedOSName string + expectedOSVersion string + expectedFeatureFlags string + authType string + }{ + { + name: "empty metrics for RC", + body: `{ + }`, + expectedStatusCode: 400, + expectedResponse: "Missing remediation component data", + authType: APIKEY, + }, + { + name: "basic metrics with empty dynamic metrics for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "basic metrics with dynamic metrics for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [{"meta":{"utc_now_timestamp":42, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }, {"meta":{"utc_now_timestamp":43, "window_size_seconds": 42}, "items": [{"name": "foo", "value": 42, "unit": "bla"}] }], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedResponse: "", + expectedOSName: "foo", + expectedOSVersion: "42", + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "wrong auth type for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": 
["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 400, + expectedResponse: "Missing log processor data", + authType: PASSWORD, + }, + { + name: "missing OS field for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 201, + expectedResponse: "", + expectedMetricsCount: 1, + expectedFeatureFlags: "a,b,c", + authType: APIKEY, + }, + { + name: "missing feature flags for RC", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"name":"foo", "version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [] + } + ] +}`, + expectedStatusCode: 201, + expectedMetricsCount: 1, + expectedOSName: "foo", + expectedOSVersion: "42", + authType: APIKEY, + }, + { + name: "missing OS name", + body: ` +{ + "remediation_components": [ + { + "version": "1.42", + "os": {"version": "42"}, + "utc_startup_timestamp": 42, + "metrics": [], + "feature_flags": ["a", "b", "c"] + } + ] +}`, + expectedStatusCode: 422, + expectedResponse: "remediation_components.0.os.name in body is required", + authType: APIKEY, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lapi := SetupLAPITest(t) + + dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + if err != nil { + t.Fatalf("unable to create database client: %s", err) + } + + w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + + assert.Equal(t, tt.expectedStatusCode, w.Code) + assert.Contains(t, w.Body.String(), tt.expectedResponse) + + bouncer, _ := dbClient.SelectBouncerByName("test") + metrics, _ := dbClient.GetBouncerUsageMetricsByName("test") + + assert.Len(t, metrics, tt.expectedMetricsCount) + assert.Equal(t, tt.expectedOSName, bouncer.Osname) + assert.Equal(t, tt.expectedOSVersion, bouncer.Osversion) + assert.Equal(t, tt.expectedFeatureFlags, bouncer.Featureflags) + + if 
len(metrics) > 0 { + assert.Equal(t, "test", metrics[0].GeneratedBy) + assert.Equal(t, metric.GeneratedType("RC"), metrics[0].GeneratedType) + } + }) + } +} diff --git a/pkg/csconfig/crowdsec_service_test.go b/pkg/csconfig/crowdsec_service_test.go index 2f41beaf55e..7570b63011e 100644 --- a/pkg/csconfig/crowdsec_service_test.go +++ b/pkg/csconfig/crowdsec_service_test.go @@ -61,9 +61,9 @@ func TestLoadCrowdsec(t *testing.T) { AcquisitionFiles: []string{acquisFullPath}, SimulationFilePath: "./testdata/simulation.yaml", // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), }, @@ -100,9 +100,9 @@ func TestLoadCrowdsec(t *testing.T) { ConsoleContextValueLength: 0, AcquisitionFiles: []string{acquisFullPath, acquisInDirFullPath}, // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationFilePath: "./testdata/simulation.yaml", SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), @@ -139,9 +139,9 @@ func TestLoadCrowdsec(t *testing.T) { AcquisitionFiles: []string{}, SimulationFilePath: "", // context is loaded in pkg/alertcontext -// ContextToSend: map[string][]string{ -// "source_ip": {"evt.Parsed.source_ip"}, -// }, + // ContextToSend: map[string][]string{ + // "source_ip": {"evt.Parsed.source_ip"}, + // }, SimulationConfig: &SimulationConfig{ Simulation: ptr.Of(false), }, @@ -184,6 +184,7 @@ func TestLoadCrowdsec(t *testing.T) { t.Run(tc.name, func(t *testing.T) { err := tc.input.LoadCrowdsec() cstest.RequireErrorContains(t, err, tc.expectedErr) + if tc.expectedErr != "" { return } diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 
a24eb9e13c3..4ca582cf576 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -50,9 +50,10 @@ type AuthGCCfg struct { type FlushDBCfg struct { MaxItems *int `yaml:"max_items,omitempty"` // We could unmarshal as time.Duration, but alert filters right now are a map of strings - MaxAge *string `yaml:"max_age,omitempty"` - BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` - AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` + MaxAge *string `yaml:"max_age,omitempty"` + BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` + AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` + MetricsMaxAge *time.Duration `yaml:"metrics_max_age,omitempty"` } func (c *Config) LoadDBConfig(inCli bool) error { @@ -80,9 +81,9 @@ func (c *Config) LoadDBConfig(inCli bool) error { case err != nil: log.Warnf("unable to determine if database is on network filesystem: %s", err) log.Warning( - "You are using sqlite without WAL, this can have a performance impact. " + - "If you do not store the database in a network share, set db_config.use_wal to true. " + - "Set explicitly to false to disable this warning.") + "You are using sqlite without WAL, this can have a performance impact. " + + "If you do not store the database in a network share, set db_config.use_wal to true. 
" + + "Set explicitly to false to disable this warning.") case isNetwork: log.Debugf("database is on network filesystem (%s), setting useWal to false", fsType) c.DbConfig.UseWal = ptr.Of(false) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 03a3227301d..ff750e63c59 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -2,14 +2,36 @@ package database import ( "fmt" + "strings" "time" "github.com/pkg/errors" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" + "github.com/crowdsecurity/crowdsec/pkg/models" ) +func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { + os := baseMetrics.Os + features := strings.Join(baseMetrics.FeatureFlags, ",") + + _, err := c.Ent.Bouncer. + Update(). + Where(bouncer.NameEQ(bouncerName)). + SetNillableVersion(baseMetrics.Version). + SetOsname(*os.Name). + SetOsversion(*os.Version). + SetFeatureflags(features). + SetType(bouncerType). + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update base bouncer metrics in database: %w", err) + } + + return nil +} + func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(c.CTX) if err != nil { diff --git a/pkg/database/ent/metric.go b/pkg/database/ent/metric.go index 236d54da25d..47f3b4df4e5 100644 --- a/pkg/database/ent/metric.go +++ b/pkg/database/ent/metric.go @@ -22,8 +22,8 @@ type Metric struct { // Source of the metrics: machine id, bouncer name... // It must come from the auth middleware. 
GeneratedBy string `json:"generated_by,omitempty"` - // When the metrics are collected/calculated at the source - CollectedAt time.Time `json:"collected_at,omitempty"` + // When the metrics are received by LAPI + ReceivedAt time.Time `json:"received_at,omitempty"` // When the metrics are sent to the console PushedAt *time.Time `json:"pushed_at,omitempty"` // The actual metrics (item0) @@ -40,7 +40,7 @@ func (*Metric) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullInt64) case metric.FieldGeneratedType, metric.FieldGeneratedBy, metric.FieldPayload: values[i] = new(sql.NullString) - case metric.FieldCollectedAt, metric.FieldPushedAt: + case metric.FieldReceivedAt, metric.FieldPushedAt: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -75,11 +75,11 @@ func (m *Metric) assignValues(columns []string, values []any) error { } else if value.Valid { m.GeneratedBy = value.String } - case metric.FieldCollectedAt: + case metric.FieldReceivedAt: if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field collected_at", values[i]) + return fmt.Errorf("unexpected type %T for field received_at", values[i]) } else if value.Valid { - m.CollectedAt = value.Time + m.ReceivedAt = value.Time } case metric.FieldPushedAt: if value, ok := values[i].(*sql.NullTime); !ok { @@ -136,8 +136,8 @@ func (m *Metric) String() string { builder.WriteString("generated_by=") builder.WriteString(m.GeneratedBy) builder.WriteString(", ") - builder.WriteString("collected_at=") - builder.WriteString(m.CollectedAt.Format(time.ANSIC)) + builder.WriteString("received_at=") + builder.WriteString(m.ReceivedAt.Format(time.ANSIC)) builder.WriteString(", ") if v := m.PushedAt; v != nil { builder.WriteString("pushed_at=") diff --git a/pkg/database/ent/metric/metric.go b/pkg/database/ent/metric/metric.go index 879f1006d64..78e88982220 100644 --- a/pkg/database/ent/metric/metric.go +++ b/pkg/database/ent/metric/metric.go @@ -17,8 
+17,8 @@ const ( FieldGeneratedType = "generated_type" // FieldGeneratedBy holds the string denoting the generated_by field in the database. FieldGeneratedBy = "generated_by" - // FieldCollectedAt holds the string denoting the collected_at field in the database. - FieldCollectedAt = "collected_at" + // FieldReceivedAt holds the string denoting the received_at field in the database. + FieldReceivedAt = "received_at" // FieldPushedAt holds the string denoting the pushed_at field in the database. FieldPushedAt = "pushed_at" // FieldPayload holds the string denoting the payload field in the database. @@ -32,7 +32,7 @@ var Columns = []string{ FieldID, FieldGeneratedType, FieldGeneratedBy, - FieldCollectedAt, + FieldReceivedAt, FieldPushedAt, FieldPayload, } @@ -88,9 +88,9 @@ func ByGeneratedBy(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldGeneratedBy, opts...).ToFunc() } -// ByCollectedAt orders the results by the collected_at field. -func ByCollectedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldCollectedAt, opts...).ToFunc() +// ByReceivedAt orders the results by the received_at field. +func ByReceivedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldReceivedAt, opts...).ToFunc() } // ByPushedAt orders the results by the pushed_at field. diff --git a/pkg/database/ent/metric/where.go b/pkg/database/ent/metric/where.go index e49f80f3411..72bd9d93cd7 100644 --- a/pkg/database/ent/metric/where.go +++ b/pkg/database/ent/metric/where.go @@ -59,9 +59,9 @@ func GeneratedBy(v string) predicate.Metric { return predicate.Metric(sql.FieldEQ(FieldGeneratedBy, v)) } -// CollectedAt applies equality check predicate on the "collected_at" field. It's identical to CollectedAtEQ. -func CollectedAt(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +// ReceivedAt applies equality check predicate on the "received_at" field. It's identical to ReceivedAtEQ. 
+func ReceivedAt(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldReceivedAt, v)) } // PushedAt applies equality check predicate on the "pushed_at" field. It's identical to PushedAtEQ. @@ -159,44 +159,44 @@ func GeneratedByContainsFold(v string) predicate.Metric { return predicate.Metric(sql.FieldContainsFold(FieldGeneratedBy, v)) } -// CollectedAtEQ applies the EQ predicate on the "collected_at" field. -func CollectedAtEQ(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldEQ(FieldCollectedAt, v)) +// ReceivedAtEQ applies the EQ predicate on the "received_at" field. +func ReceivedAtEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldEQ(FieldReceivedAt, v)) } -// CollectedAtNEQ applies the NEQ predicate on the "collected_at" field. -func CollectedAtNEQ(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldNEQ(FieldCollectedAt, v)) +// ReceivedAtNEQ applies the NEQ predicate on the "received_at" field. +func ReceivedAtNEQ(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNEQ(FieldReceivedAt, v)) } -// CollectedAtIn applies the In predicate on the "collected_at" field. -func CollectedAtIn(vs ...time.Time) predicate.Metric { - return predicate.Metric(sql.FieldIn(FieldCollectedAt, vs...)) +// ReceivedAtIn applies the In predicate on the "received_at" field. +func ReceivedAtIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldIn(FieldReceivedAt, vs...)) } -// CollectedAtNotIn applies the NotIn predicate on the "collected_at" field. -func CollectedAtNotIn(vs ...time.Time) predicate.Metric { - return predicate.Metric(sql.FieldNotIn(FieldCollectedAt, vs...)) +// ReceivedAtNotIn applies the NotIn predicate on the "received_at" field. +func ReceivedAtNotIn(vs ...time.Time) predicate.Metric { + return predicate.Metric(sql.FieldNotIn(FieldReceivedAt, vs...)) } -// CollectedAtGT applies the GT predicate on the "collected_at" field. 
-func CollectedAtGT(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldGT(FieldCollectedAt, v)) +// ReceivedAtGT applies the GT predicate on the "received_at" field. +func ReceivedAtGT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGT(FieldReceivedAt, v)) } -// CollectedAtGTE applies the GTE predicate on the "collected_at" field. -func CollectedAtGTE(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldGTE(FieldCollectedAt, v)) +// ReceivedAtGTE applies the GTE predicate on the "received_at" field. +func ReceivedAtGTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldGTE(FieldReceivedAt, v)) } -// CollectedAtLT applies the LT predicate on the "collected_at" field. -func CollectedAtLT(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldLT(FieldCollectedAt, v)) +// ReceivedAtLT applies the LT predicate on the "received_at" field. +func ReceivedAtLT(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLT(FieldReceivedAt, v)) } -// CollectedAtLTE applies the LTE predicate on the "collected_at" field. -func CollectedAtLTE(v time.Time) predicate.Metric { - return predicate.Metric(sql.FieldLTE(FieldCollectedAt, v)) +// ReceivedAtLTE applies the LTE predicate on the "received_at" field. +func ReceivedAtLTE(v time.Time) predicate.Metric { + return predicate.Metric(sql.FieldLTE(FieldReceivedAt, v)) } // PushedAtEQ applies the EQ predicate on the "pushed_at" field. diff --git a/pkg/database/ent/metric_create.go b/pkg/database/ent/metric_create.go index 8fa656db427..973cddd41d0 100644 --- a/pkg/database/ent/metric_create.go +++ b/pkg/database/ent/metric_create.go @@ -32,9 +32,9 @@ func (mc *MetricCreate) SetGeneratedBy(s string) *MetricCreate { return mc } -// SetCollectedAt sets the "collected_at" field. -func (mc *MetricCreate) SetCollectedAt(t time.Time) *MetricCreate { - mc.mutation.SetCollectedAt(t) +// SetReceivedAt sets the "received_at" field. 
+func (mc *MetricCreate) SetReceivedAt(t time.Time) *MetricCreate { + mc.mutation.SetReceivedAt(t) return mc } @@ -103,8 +103,8 @@ func (mc *MetricCreate) check() error { if _, ok := mc.mutation.GeneratedBy(); !ok { return &ValidationError{Name: "generated_by", err: errors.New(`ent: missing required field "Metric.generated_by"`)} } - if _, ok := mc.mutation.CollectedAt(); !ok { - return &ValidationError{Name: "collected_at", err: errors.New(`ent: missing required field "Metric.collected_at"`)} + if _, ok := mc.mutation.ReceivedAt(); !ok { + return &ValidationError{Name: "received_at", err: errors.New(`ent: missing required field "Metric.received_at"`)} } if _, ok := mc.mutation.Payload(); !ok { return &ValidationError{Name: "payload", err: errors.New(`ent: missing required field "Metric.payload"`)} @@ -143,9 +143,9 @@ func (mc *MetricCreate) createSpec() (*Metric, *sqlgraph.CreateSpec) { _spec.SetField(metric.FieldGeneratedBy, field.TypeString, value) _node.GeneratedBy = value } - if value, ok := mc.mutation.CollectedAt(); ok { - _spec.SetField(metric.FieldCollectedAt, field.TypeTime, value) - _node.CollectedAt = value + if value, ok := mc.mutation.ReceivedAt(); ok { + _spec.SetField(metric.FieldReceivedAt, field.TypeTime, value) + _node.ReceivedAt = value } if value, ok := mc.mutation.PushedAt(); ok { _spec.SetField(metric.FieldPushedAt, field.TypeTime, value) diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 60bf72a486b..986f5bc8c67 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -254,7 +254,7 @@ var ( {Name: "id", Type: field.TypeInt, Increment: true}, {Name: "generated_type", Type: field.TypeEnum, Enums: []string{"LP", "RC"}}, {Name: "generated_by", Type: field.TypeString}, - {Name: "collected_at", Type: field.TypeTime}, + {Name: "received_at", Type: field.TypeTime}, {Name: "pushed_at", Type: field.TypeTime, Nullable: true}, {Name: "payload", Type: field.TypeString, Size: 
2147483647}, } @@ -263,13 +263,6 @@ var ( Name: "metrics", Columns: MetricsColumns, PrimaryKey: []*schema.Column{MetricsColumns[0]}, - Indexes: []*schema.Index{ - { - Name: "metric_generated_type_generated_by_collected_at", - Unique: true, - Columns: []*schema.Column{MetricsColumns[1], MetricsColumns[2], MetricsColumns[3]}, - }, - }, } // Tables holds all the tables in the schema. Tables = []*schema.Table{ diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 5b70457c512..5c6596f3db4 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -8640,7 +8640,7 @@ type MetricMutation struct { id *int generated_type *metric.GeneratedType generated_by *string - collected_at *time.Time + received_at *time.Time pushed_at *time.Time payload *string clearedFields map[string]struct{} @@ -8819,40 +8819,40 @@ func (m *MetricMutation) ResetGeneratedBy() { m.generated_by = nil } -// SetCollectedAt sets the "collected_at" field. -func (m *MetricMutation) SetCollectedAt(t time.Time) { - m.collected_at = &t +// SetReceivedAt sets the "received_at" field. +func (m *MetricMutation) SetReceivedAt(t time.Time) { + m.received_at = &t } -// CollectedAt returns the value of the "collected_at" field in the mutation. -func (m *MetricMutation) CollectedAt() (r time.Time, exists bool) { - v := m.collected_at +// ReceivedAt returns the value of the "received_at" field in the mutation. +func (m *MetricMutation) ReceivedAt() (r time.Time, exists bool) { + v := m.received_at if v == nil { return } return *v, true } -// OldCollectedAt returns the old "collected_at" field's value of the Metric entity. +// OldReceivedAt returns the old "received_at" field's value of the Metric entity. // If the Metric object wasn't provided to the builder, the object is fetched from the database. // An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *MetricMutation) OldCollectedAt(ctx context.Context) (v time.Time, err error) { +func (m *MetricMutation) OldReceivedAt(ctx context.Context) (v time.Time, err error) { if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCollectedAt is only allowed on UpdateOne operations") + return v, errors.New("OldReceivedAt is only allowed on UpdateOne operations") } if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCollectedAt requires an ID field in the mutation") + return v, errors.New("OldReceivedAt requires an ID field in the mutation") } oldValue, err := m.oldValue(ctx) if err != nil { - return v, fmt.Errorf("querying old value for OldCollectedAt: %w", err) + return v, fmt.Errorf("querying old value for OldReceivedAt: %w", err) } - return oldValue.CollectedAt, nil + return oldValue.ReceivedAt, nil } -// ResetCollectedAt resets all changes to the "collected_at" field. -func (m *MetricMutation) ResetCollectedAt() { - m.collected_at = nil +// ResetReceivedAt resets all changes to the "received_at" field. +func (m *MetricMutation) ResetReceivedAt() { + m.received_at = nil } // SetPushedAt sets the "pushed_at" field. 
@@ -8981,8 +8981,8 @@ func (m *MetricMutation) Fields() []string { if m.generated_by != nil { fields = append(fields, metric.FieldGeneratedBy) } - if m.collected_at != nil { - fields = append(fields, metric.FieldCollectedAt) + if m.received_at != nil { + fields = append(fields, metric.FieldReceivedAt) } if m.pushed_at != nil { fields = append(fields, metric.FieldPushedAt) @@ -9002,8 +9002,8 @@ func (m *MetricMutation) Field(name string) (ent.Value, bool) { return m.GeneratedType() case metric.FieldGeneratedBy: return m.GeneratedBy() - case metric.FieldCollectedAt: - return m.CollectedAt() + case metric.FieldReceivedAt: + return m.ReceivedAt() case metric.FieldPushedAt: return m.PushedAt() case metric.FieldPayload: @@ -9021,8 +9021,8 @@ func (m *MetricMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldGeneratedType(ctx) case metric.FieldGeneratedBy: return m.OldGeneratedBy(ctx) - case metric.FieldCollectedAt: - return m.OldCollectedAt(ctx) + case metric.FieldReceivedAt: + return m.OldReceivedAt(ctx) case metric.FieldPushedAt: return m.OldPushedAt(ctx) case metric.FieldPayload: @@ -9050,12 +9050,12 @@ func (m *MetricMutation) SetField(name string, value ent.Value) error { } m.SetGeneratedBy(v) return nil - case metric.FieldCollectedAt: + case metric.FieldReceivedAt: v, ok := value.(time.Time) if !ok { return fmt.Errorf("unexpected type %T for field %s", value, name) } - m.SetCollectedAt(v) + m.SetReceivedAt(v) return nil case metric.FieldPushedAt: v, ok := value.(time.Time) @@ -9135,8 +9135,8 @@ func (m *MetricMutation) ResetField(name string) error { case metric.FieldGeneratedBy: m.ResetGeneratedBy() return nil - case metric.FieldCollectedAt: - m.ResetCollectedAt() + case metric.FieldReceivedAt: + m.ResetReceivedAt() return nil case metric.FieldPushedAt: m.ResetPushedAt() diff --git a/pkg/database/ent/schema/metric.go b/pkg/database/ent/schema/metric.go index b47da78bdf3..319c67b7aa7 100644 --- a/pkg/database/ent/schema/metric.go +++ 
b/pkg/database/ent/schema/metric.go @@ -3,7 +3,6 @@ package schema import ( "entgo.io/ent" "entgo.io/ent/schema/field" - "entgo.io/ent/schema/index" ) // Metric is actually a set of metrics collected by a device @@ -21,9 +20,9 @@ func (Metric) Fields() []ent.Field { field.String("generated_by"). Immutable(). Comment("Source of the metrics: machine id, bouncer name...\nIt must come from the auth middleware."), - field.Time("collected_at"). + field.Time("received_at"). Immutable(). - Comment("When the metrics are collected/calculated at the source"), + Comment("When the metrics are received by LAPI"), field.Time("pushed_at"). Nillable(). Optional(). @@ -33,11 +32,3 @@ func (Metric) Fields() []ent.Field { Comment("The actual metrics (item0)"), } } - -func (Metric) Indexes() []ent.Index { - return []ent.Index{ - // Don't store the same metrics multiple times. - index.Fields("generated_type", "generated_by", "collected_at"). - Unique(), - } -} diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 5a1f0bea5bf..5d53d10c942 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -8,15 +8,24 @@ import ( "github.com/go-co-op/gocron" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" "github.com/crowdsecurity/crowdsec/pkg/types" ) +const ( + // how long to keep metrics in the local database + defaultMetricsMaxAge = 7 * 24 * time.Hour + flushInterval = 1 * time.Minute +) + func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { maxItems := 0 maxAge := "" @@ -91,17 +100,46 @@ func (c 
*Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched } } - baJob, err := scheduler.Every(1).Minute().Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) + baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) } baJob.SingletonMode() + + metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, config.MetricsMaxAge) + if err != nil { + return nil, fmt.Errorf("while starting flushMetrics scheduler: %w", err) + } + + metricsJob.SingletonMode() + scheduler.StartAsync() return scheduler, nil } +// flushMetrics deletes metrics older than maxAge, regardless if they have been pushed to CAPI or not +func (c *Client) flushMetrics(maxAge *time.Duration) { + if maxAge == nil { + maxAge = ptr.Of(defaultMetricsMaxAge) + } + + c.Log.Debugf("flushing metrics older than %s", maxAge) + + deleted, err := c.Ent.Metric.Delete().Where( + metric.ReceivedAtLTE(time.Now().UTC().Add(-*maxAge)), + ).Exec(c.CTX) + if err != nil { + c.Log.Errorf("while flushing metrics: %s", err) + return + } + + if deleted > 0 { + c.Log.Debugf("flushed %d metrics snapshots", deleted) + } +} + func (c *Client) FlushOrphans() { /* While it has only been linked to some very corner-case bug : https://github.com/crowdsecurity/crowdsec/issues/778 */ /* We want to take care of orphaned events for which the parent alert/decision has been deleted */ @@ -117,7 +155,6 @@ func (c *Client) FlushOrphans() { eventsCount, err = c.Ent.Decision.Delete().Where( decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(c.CTX) - if err != nil { c.Log.Warningf("error while deleting orphan decisions: %s", err) return @@ -138,7 +175,6 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { ).Where( bouncer.AuthTypeEQ(authType), ).Exec(c.CTX) - if err != nil { c.Log.Errorf("while 
auto-deleting expired bouncers (%s): %s", authType, err) return @@ -159,7 +195,6 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { machine.Not(machine.HasAlerts()), machine.AuthTypeEQ(authType), ).Exec(c.CTX) - if err != nil { c.Log.Errorf("while auto-deleting expired machines (%s): %s", authType, err) return @@ -253,7 +288,6 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { if maxid > 0 { // This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX) - if err != nil { c.Log.Errorf("FlushAlerts: Could not delete alerts: %s", err) return fmt.Errorf("could not delete alerts: %w", err) diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 18fd32fdd84..21349b8b687 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -2,6 +2,7 @@ package database import ( "fmt" + "strings" "time" "github.com/go-openapi/strfmt" @@ -10,6 +11,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/schema" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -18,6 +21,48 @@ const ( CapiListsMachineID = types.ListOrigin ) +func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { + os := baseMetrics.Os + features := strings.Join(baseMetrics.FeatureFlags, ",") + + var heartbeat time.Time + + if baseMetrics.Metrics == nil || len(baseMetrics.Metrics) == 0 { + heartbeat = time.Now().UTC() + } else { + heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) + } + + hubState := map[string][]schema.ItemState{} + for itemType, items := range hubItems { + hubState[itemType] = []schema.ItemState{} + for _, 
item := range items { + hubState[itemType] = append(hubState[itemType], schema.ItemState{ + Name: item.Name, + Status: item.Status, + Version: item.Version, + }) + } + } + + _, err := c.Ent.Machine. + Update(). + Where(machine.MachineIdEQ(machineID)). + SetNillableVersion(baseMetrics.Version). + SetOsname(*os.Name). + SetOsversion(*os.Version). + SetFeatureflags(features). + SetLastHeartbeat(heartbeat). + SetHubstate(hubState). + SetDatasources(datasources). + Save(c.CTX) + if err != nil { + return fmt.Errorf("unable to update base machine metrics in database: %w", err) + } + + return nil +} + func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost) if err != nil { @@ -158,7 +203,7 @@ func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { SetScenarios(scenarios). Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine in database: %s", err) + return fmt.Errorf("unable to update machine in database: %w", err) } return nil @@ -169,7 +214,7 @@ func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { SetIpAddress(ipAddr). Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine IP in database: %s", err) + return fmt.Errorf("unable to update machine IP in database: %w", err) } return nil @@ -180,7 +225,7 @@ func (c *Client) UpdateMachineVersion(ipAddr string, ID int) error { SetVersion(ipAddr). 
Save(c.CTX) if err != nil { - return fmt.Errorf("unable to update machine version in database: %s", err) + return fmt.Errorf("unable to update machine version in database: %w", err) } return nil diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go new file mode 100644 index 00000000000..3bc5e7b5d32 --- /dev/null +++ b/pkg/database/metrics.go @@ -0,0 +1,73 @@ +package database + +import ( + "fmt" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" +) + +func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) (*ent.Metric, error) { + metric, err := c.Ent.Metric. + Create(). + SetGeneratedType(generatedType). + SetGeneratedBy(generatedBy). + SetReceivedAt(receivedAt). + SetPayload(payload). + Save(c.CTX) + if err != nil { + c.Log.Warningf("CreateMetric: %s", err) + return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, receivedAt, InsertFail) + } + + return metric, nil +} + +func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, error) { + metrics, err := c.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeLP), + metric.GeneratedByEQ(machineId), + metric.PushedAtIsNil(), + ). + // XXX: do we need to sort? + Order(ent.Desc(metric.FieldReceivedAt)). + All(c.CTX) + if err != nil { + c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) + return nil, fmt.Errorf("getting LP usage metrics by origin %s: %w", machineId, err) + } + + return metrics, nil +} + +func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric, error) { + metrics, err := c.Ent.Metric.Query(). + Where( + metric.GeneratedTypeEQ(metric.GeneratedTypeRC), + metric.GeneratedByEQ(bouncerName), + metric.PushedAtIsNil(), + ). + Order(ent.Desc(metric.FieldReceivedAt)). 
+ All(c.CTX) + if err != nil { + c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) + return nil, fmt.Errorf("getting bouncer usage metrics by name %s: %w", bouncerName, err) + } + + return metrics, nil +} + +func (c *Client) MarkUsageMetricsAsSent(ids []int) error { + _, err := c.Ent.Metric.Update(). + Where(metric.IDIn(ids...)). + SetPushedAt(time.Now().UTC()). + Save(c.CTX) + if err != nil { + c.Log.Warningf("MarkUsageMetricsAsSent: %s", err) + return fmt.Errorf("marking usage metrics as sent: %w", err) + } + + return nil +} diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 84a55dc88c1..1851ed0ac14 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -15,7 +15,6 @@ setup() { load "../lib/setup.sh" ./instance-data load ./instance-crowdsec start - skip "require the usage_metrics endpoint on apiserver" } teardown() { @@ -75,6 +74,18 @@ teardown() { payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = 1707399316' <<<"$payload") rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" refute_output + + payload=$(yq -o j '.remediation_components[0].metrics = [{"meta": {}}]' <<<"$payload") + rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + assert_stderr --partial "error: 422" + rune -0 jq -r '.message' <(output) + assert_output - <<-EOT + validation failure list: + remediation_components.0.metrics.0.items in body is required + validation failure list: + remediation_components.0.metrics.0.meta.utc_now_timestamp in body is required + remediation_components.0.metrics.0.meta.window_size_seconds in body is required + EOT } @test "rc usage metrics (good payload)" { @@ -116,7 +127,7 @@ teardown() { rune -0 cscli metrics show bouncers -o json # aggregation is ok -- we are truncating, not rounding, because the float is mandated by swagger. 
# but without labels the origin string is empty - assert_json '{bouncers:{testbouncer:{"": {"foo": {"dogyear": 2, "pound": 5}}}}}' + assert_json '{bouncers:{testbouncer:{"": {foo: {dogyear: 2, pound: 5}}}}}' rune -0 cscli metrics show bouncers assert_output - <<-EOT @@ -137,7 +148,7 @@ teardown() { { "meta": {"utc_now_timestamp": 1707399916, "window_size_seconds":600}, "items":[ - {"name": "active_decisions", "unit": "ip", "value": 51936, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 500, "labels": {"ip_type": "ipv4", "origin": "lists:firehol_voipbl"}}, {"name": "active_decisions", "unit": "ip", "value": 1, "labels": {"ip_type": "ipv6", "origin": "cscli"}}, {"name": "dropped", "unit": "byte", "value": 3800, "labels": {"ip_type": "ipv4", "origin": "CAPI"}}, {"name": "dropped", "unit": "byte", "value": 0, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, @@ -191,7 +202,7 @@ teardown() { }, "lists:firehol_voipbl": { "active_decisions": { - "ip": 51936 + "ip": 500 }, "dropped": { "byte": 3847, @@ -219,14 +230,198 @@ teardown() { | cscli (manual decisions) | 1 | 380 | 10 | - | - | | lists:anotherlist | - | 0 | 0 | - | - | | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | - | lists:firehol_voipbl | 51.94k | 3.85k | 58 | - | - | + | lists:firehol_voipbl | 500 | 3.85k | 58 | - | - | + +----------------------------------+------------------+---------+---------+---------+-------+ + | Total | 501 | 9.06k | 191 | 2 | 5 | + +----------------------------------+------------------+---------+---------+---------+-------+ + EOT + + # active_decisions is actually a gauge: values should not be aggregated, keep only the latest one + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 250, "labels": {"ip_type": "ipv4", "origin": 
"lists:firehol_voipbl"}}, + {"name": "active_decisions", "unit": "ip", "value": 10, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + rune -0 cscli metrics show bouncers -o json + assert_json '{ + "bouncers": { + "testbouncer": { + "": { + "foo": { + "dogyear": 2, + "pound": 5 + } + }, + "CAPI": { + "dropped": { + "byte": 3800, + "packet": 100 + } + }, + "cscli": { + "active_decisions": { + "ip": 10 + }, + "dropped": { + "byte": 380, + "packet": 10 + } + }, + "lists:firehol_cruzit_web_attacks": { + "dropped": { + "byte": 1034, + "packet": 23 + } + }, + "lists:firehol_voipbl": { + "active_decisions": { + "ip": 250 + }, + "dropped": { + "byte": 3847, + "packet": 58 + }, + }, + "lists:anotherlist": { + "dropped": { + "byte": 0, + "packet": 0 + } + } + } + } + }' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +----------------------------------+------------------+-------------------+-----------------+ + | Origin | active_decisions | dropped | foo | + | | IPs | bytes | packets | dogyear | pound | + +----------------------------------+------------------+---------+---------+---------+-------+ + | CAPI (community blocklist) | - | 3.80k | 100 | - | - | + | cscli (manual decisions) | 10 | 380 | 10 | - | - | + | lists:anotherlist | - | 0 | 0 | - | - | + | lists:firehol_cruzit_web_attacks | - | 1.03k | 23 | - | - | + | lists:firehol_voipbl | 250 | 3.85k | 58 | - | - | +----------------------------------+------------------+---------+---------+---------+-------+ - | Total | 51.94k | 9.06k | 191 | 2 | 5 | + | Total | 260 | 9.06k | 191 | 2 | 5 | +----------------------------------+------------------+---------+---------+---------+-------+ EOT +} - # TODO: multiple item lists +@test "rc usage metrics (unknown metrics)" { + # 
new metrics are introduced in a new bouncer version, unknown by this version of cscli: some are gauges, some are not + + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707369316 + log_processors: [] + EOT + ) + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707460000, "window_size_seconds":600}, + "items":[ + {"name": "ima_gauge", "unit": "second", "value": 30, "labels": {"origin": "cscli"}}, + {"name": "notagauge", "unit": "inch", "value": 15, "labels": {"origin": "cscli"}} + ] + }, { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "ima_gauge", "unit": "second", "value": 20, "labels": {"origin": "cscli"}}, + {"name": "notagauge", "unit": "inch", "value": 10, "labels": {"origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers: {testbouncer: {cscli: {ima_gauge: {second: 30}, notagauge: {inch: 25}}}}}' + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +--------------------------+--------+-----------+ + | Origin | ima | notagauge | + | | second | inch | + +--------------------------+--------+-----------+ + | cscli (manual decisions) | 30 | 25 | + +--------------------------+--------+-----------+ + | Total | 30 | 25 | + +--------------------------+--------+-----------+ + EOT +} + +@test "rc usage metrics (ipv4/ipv6)" { + # gauge metrics are not aggregated over time, but they are over ip type + + API_KEY=$(cscli bouncers add testbouncer -o raw) + export API_KEY + + payload=$(yq -o j <<-EOT + remediation_components: + - version: "v1.0" + utc_startup_timestamp: 1707369316 
+ log_processors: [] + EOT + ) + + payload=$(yq -o j ' + .remediation_components[0].metrics = [ + { + "meta": {"utc_now_timestamp": 1707460000, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 200, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "active_decisions", "unit": "ip", "value": 30, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + }, { + "meta": {"utc_now_timestamp": 1707450000, "window_size_seconds":600}, + "items":[ + {"name": "active_decisions", "unit": "ip", "value": 400, "labels": {"ip_type": "ipv4", "origin": "cscli"}}, + {"name": "active_decisions", "unit": "ip", "value": 50, "labels": {"ip_type": "ipv6", "origin": "cscli"}} + ] + } + ] | + .remediation_components[0].type = "crowdsec-firewall-bouncer" + ' <<<"$payload") + + rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" + + rune -0 cscli metrics show bouncers -o json + assert_json '{bouncers: {testbouncer: {cscli: {active_decisions: {ip: 230}}}}}' + + rune -0 cscli metrics show bouncers + assert_output - <<-EOT + Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +--------------------------+------------------+ + | Origin | active_decisions | + | | IPs | + +--------------------------+------------------+ + | cscli (manual decisions) | 230 | + +--------------------------+------------------+ + | Total | 230 | + +--------------------------+------------------+ + EOT } @test "rc usage metrics (multiple bouncers)" { diff --git a/test/bats/08_metrics_machines.bats b/test/bats/08_metrics_machines.bats index e63078124a9..3b73839e753 100644 --- a/test/bats/08_metrics_machines.bats +++ b/test/bats/08_metrics_machines.bats @@ -15,7 +15,6 @@ setup() { load "../lib/setup.sh" ./instance-data load ./instance-crowdsec start - skip "require the usage_metrics endpoint on apiserver" } teardown() { diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 849b3a5b35c..554308ae962 100644 --- 
a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -162,6 +162,35 @@ teardown() { rune cscli bouncers delete localhost@127.0.0.1 } +@test "a bouncer authenticated with TLS can send metrics" { + payload=$(yq -o j <<-EOT + remediation_components: [] + log_processors: [] + EOT + ) + + # with mutual authentication there is no api key, so it's detected as RC if user agent != crowdsec + + rune -22 curl --fail-with-body -sS \ + --cert "$tmpdir/leaf.pem" \ + --key "$tmpdir/leaf-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + https://localhost:8080/v1/usage-metrics -X POST --data "$payload" + assert_stderr --partial 'error: 400' + assert_json '{message: "Missing remediation component data"}' + + rune -22 curl --fail-with-body -sS \ + --cert "$tmpdir/leaf.pem" \ + --key "$tmpdir/leaf-key.pem" \ + --cacert "$tmpdir/bundle.pem" \ + --user-agent "crowdsec/someversion" \ + https://localhost:8080/v1/usage-metrics -X POST --data "$payload" + assert_stderr --partial 'error: 401' + assert_json '{code:401, message: "cookie token is empty"}' + + rune cscli bouncers delete localhost@127.0.0.1 +} + @test "simulate a bouncer request with an invalid cert" { rune -77 curl --fail-with-body -sS \ --cert "$tmpdir/leaf_invalid.pem" \ From 24bd8bb92cbb045cc4259bbc709a6145e1ee352b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Jul 2024 10:50:19 +0200 Subject: [PATCH 236/581] perf: retrieve unsorted metrics (#3148) --- pkg/database/metrics.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 3bc5e7b5d32..7626c39f6f1 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -31,8 +31,6 @@ func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, metric.GeneratedByEQ(machineId), metric.PushedAtIsNil(), ). - // XXX: do we need to sort? - Order(ent.Desc(metric.FieldReceivedAt)). 
All(c.CTX) if err != nil { c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) @@ -49,7 +47,6 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric metric.GeneratedByEQ(bouncerName), metric.PushedAtIsNil(), ). - Order(ent.Desc(metric.FieldReceivedAt)). All(c.CTX) if err != nil { c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) From a7ec842bce150f0f9738cac5f59d603df7cf90fb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Jul 2024 11:00:38 +0200 Subject: [PATCH 237/581] docker: symlink all data files to the staging area (#3120) --- docker/docker_start.sh | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 4db421e7c87..0ae8841e029 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -213,15 +213,16 @@ if [ -n "$CERT_FILE" ] || [ -n "$KEY_FILE" ] ; then export LAPI_KEY_FILE=${LAPI_KEY_FILE:-$KEY_FILE} fi -# Check and prestage databases -for geodb in GeoLite2-ASN.mmdb GeoLite2-City.mmdb; do - # We keep the pre-populated geoip databases in /staging instead of /var, - # because if the data directory is bind-mounted from the host, it will be - # empty and the files will be out of reach, requiring a runtime download. - # We link to them to save about 80Mb compared to cp/mv. - if [ ! -e "/var/lib/crowdsec/data/$geodb" ] && [ -e "/staging/var/lib/crowdsec/data/$geodb" ]; then - mkdir -p /var/lib/crowdsec/data - ln -s "/staging/var/lib/crowdsec/data/$geodb" /var/lib/crowdsec/data/ +# Link the preloaded data files when the data dir is mounted (common case) +# The symlinks can be overridden by hub upgrade +for target in "/staging/var/lib/crowdsec/data"/*; do + fname="$(basename "$target")" + # skip the db and wal files + if [[ $fname == crowdsec.db* ]]; then + continue + fi + if [ ! 
-e "/var/lib/crowdsec/data/$fname" ]; then + ln -s "$target" "/var/lib/crowdsec/data/$fname" fi done From 36d15fedce1540bf957d6dedcb5193783720ce21 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Wed, 24 Jul 2024 12:20:23 +0200 Subject: [PATCH 238/581] Use the new hub api url (#3132) * new hapi url * mock url in tests has to be updated --------- Co-authored-by: marco --- pkg/csconfig/cscli.go | 20 +++++++-------- pkg/cwhub/cwhub_test.go | 18 ++++++------- pkg/cwhub/doc.go | 48 +++++++++++++++++------------------ pkg/cwhub/hub_test.go | 2 +- pkg/cwhub/itemupgrade_test.go | 4 +-- 5 files changed, 45 insertions(+), 47 deletions(-) diff --git a/pkg/csconfig/cscli.go b/pkg/csconfig/cscli.go index 7fff03864ef..9393156c0ed 100644 --- a/pkg/csconfig/cscli.go +++ b/pkg/csconfig/cscli.go @@ -6,18 +6,18 @@ import ( /*cscli specific config, such as hub directory*/ type CscliCfg struct { - Output string `yaml:"output,omitempty"` - Color string `yaml:"color,omitempty"` - HubBranch string `yaml:"hub_branch"` - HubURLTemplate string `yaml:"__hub_url_template__,omitempty"` - SimulationConfig *SimulationConfig `yaml:"-"` - DbConfig *DatabaseCfg `yaml:"-"` - - SimulationFilePath string `yaml:"-"` - PrometheusUrl string `yaml:"prometheus_uri"` + Output string `yaml:"output,omitempty"` + Color string `yaml:"color,omitempty"` + HubBranch string `yaml:"hub_branch"` + HubURLTemplate string `yaml:"__hub_url_template__,omitempty"` + SimulationConfig *SimulationConfig `yaml:"-"` + DbConfig *DatabaseCfg `yaml:"-"` + + SimulationFilePath string `yaml:"-"` + PrometheusUrl string `yaml:"prometheus_uri"` } -const defaultHubURLTemplate = "https://hub-cdn.crowdsec.net/%s/%s" +const defaultHubURLTemplate = "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s" func (c *Config) loadCSCLI() error { if c.Cscli == nil { diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index d11ed2b9a95..a4641483622 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -16,7 +16,7 @@ import ( 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -const mockURLTemplate = "https://hub-cdn.crowdsec.net/%s/%s" +const mockURLTemplate = "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s" /* To test : @@ -143,18 +143,18 @@ func fileToStringX(path string) string { func setResponseByPath() { responseByPath = map[string]string{ - "/master/parsers/s01-parse/crowdsecurity/foobar_parser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), - "/master/parsers/s01-parse/crowdsecurity/foobar_subparser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), - "/master/collections/crowdsecurity/test_collection.yaml": fileToStringX("./testdata/collection_v1.yaml"), - "/master/.index.json": fileToStringX("./testdata/index1.json"), - "/master/scenarios/crowdsecurity/foobar_scenario.yaml": `filter: true + "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_parser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), + "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_subparser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), + "/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml": fileToStringX("./testdata/collection_v1.yaml"), + "/crowdsecurity/master/.index.json": fileToStringX("./testdata/index1.json"), + "/crowdsecurity/master/scenarios/crowdsecurity/foobar_scenario.yaml": `filter: true name: crowdsecurity/foobar_scenario`, - "/master/scenarios/crowdsecurity/barfoo_scenario.yaml": `filter: true + "/crowdsecurity/master/scenarios/crowdsecurity/barfoo_scenario.yaml": `filter: true name: crowdsecurity/foobar_scenario`, - "/master/collections/crowdsecurity/foobar_subcollection.yaml": ` + "/crowdsecurity/master/collections/crowdsecurity/foobar_subcollection.yaml": ` blah: blalala qwe: jejwejejw`, - "/master/collections/crowdsecurity/foobar.yaml": ` + "/crowdsecurity/master/collections/crowdsecurity/foobar.yaml": ` blah: blalala qwe: jejwejejw`, } diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index 8cbf77ba00f..89d8de0fa8b 100644 
--- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -2,10 +2,10 @@ // // # Definitions // -// - A hub ITEM is a file that defines a parser, a scenario, a collection... in the case of a collection, it has dependencies on other hub items. -// - The hub INDEX is a JSON file that contains a tree of available hub items. -// - A REMOTE HUB is an HTTP server that hosts the hub index and the hub items. It can serve from several branches, usually linked to the CrowdSec version. -// - A LOCAL HUB is a directory that contains a copy of the hub index and the downloaded hub items. +// - A hub ITEM is a file that defines a parser, a scenario, a collection... in the case of a collection, it has dependencies on other hub items. +// - The hub INDEX is a JSON file that contains a tree of available hub items. +// - A REMOTE HUB is an HTTP server that hosts the hub index and the hub items. It can serve from several branches, usually linked to the CrowdSec version. +// - A LOCAL HUB is a directory that contains a copy of the hub index and the downloaded hub items. // // Once downloaded, hub items can be installed by linking to them from the configuration directory. 
// If an item is present in the configuration directory but it's not a link to the local hub, it is @@ -17,15 +17,15 @@ // // For the local hub (HubDir = /etc/crowdsec/hub): // -// - /etc/crowdsec/hub/.index.json -// - /etc/crowdsec/hub/parsers/{stage}/{author}/{parser-name}.yaml -// - /etc/crowdsec/hub/scenarios/{author}/{scenario-name}.yaml +// - /etc/crowdsec/hub/.index.json +// - /etc/crowdsec/hub/parsers/{stage}/{author}/{parser-name}.yaml +// - /etc/crowdsec/hub/scenarios/{author}/{scenario-name}.yaml // // For the configuration directory (InstallDir = /etc/crowdsec): // -// - /etc/crowdsec/parsers/{stage}/{parser-name.yaml} -> /etc/crowdsec/hub/parsers/{stage}/{author}/{parser-name}.yaml -// - /etc/crowdsec/scenarios/{scenario-name.yaml} -> /etc/crowdsec/hub/scenarios/{author}/{scenario-name}.yaml -// - /etc/crowdsec/scenarios/local-scenario.yaml +// - /etc/crowdsec/parsers/{stage}/{parser-name.yaml} -> /etc/crowdsec/hub/parsers/{stage}/{author}/{parser-name}.yaml +// - /etc/crowdsec/scenarios/{scenario-name.yaml} -> /etc/crowdsec/hub/scenarios/{author}/{scenario-name}.yaml +// - /etc/crowdsec/scenarios/local-scenario.yaml // // Note that installed items are not grouped by author, this may change in the future if we want to // support items with the same name from different authors. @@ -35,11 +35,10 @@ // Additionally, an item can reference a DATA SET that is installed in a different location than // the item itself. These files are stored in the data directory (InstallDataDir = /var/lib/crowdsec/data). 
// -// - /var/lib/crowdsec/data/http_path_traversal.txt -// - /var/lib/crowdsec/data/jira_cve_2021-26086.txt -// - /var/lib/crowdsec/data/log4j2_cve_2021_44228.txt -// - /var/lib/crowdsec/data/sensitive_data.txt -// +// - /var/lib/crowdsec/data/http_path_traversal.txt +// - /var/lib/crowdsec/data/jira_cve_2021-26086.txt +// - /var/lib/crowdsec/data/log4j2_cve_2021_44228.txt +// - /var/lib/crowdsec/data/sensitive_data.txt // // # Using the package // @@ -87,13 +86,13 @@ // // You can also install items if they have already been downloaded: // -// // install a parser -// force := false -// downloadOnly := false -// err := parser.Install(force, downloadOnly) -// if err != nil { -// return fmt.Errorf("unable to install parser: %w", err) -// } +// // install a parser +// force := false +// downloadOnly := false +// err := parser.Install(force, downloadOnly) +// if err != nil { +// return fmt.Errorf("unable to install parser: %w", err) +// } // // As soon as you try to install an item that is not downloaded or is not up-to-date (meaning its computed hash // does not correspond to the latest version available in the index), a download will be attempted and you'll @@ -101,8 +100,8 @@ // // To provide the remote hub configuration, use the second parameter of NewHub(): // -// remoteHub := cwhub.RemoteHubCfg{ -// URLTemplate: "https://hub-cdn.crowdsec.net/%s/%s", +// remoteHub := cwhub.RemoteHubCfg{ +// URLTemplate: "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s", // Branch: "master", // IndexPath: ".index.json", // } @@ -124,5 +123,4 @@ // // Note that the command will fail if the hub has already been synced. If you want to do it (ex. after a configuration // change the application is notified with SIGHUP) you have to instantiate a new hub object and dispose of the old one. 
-// package cwhub diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 3d4ae5793b3..1c2c9ccceca 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -65,7 +65,7 @@ func TestUpdateIndex(t *testing.T) { fmt.Println("Test 'bad domain'") hub.remote = &RemoteHubCfg{ - URLTemplate: "https://baddomain/%s/%s", + URLTemplate: "https://baddomain/crowdsecurity/%s/%s", Branch: "master", IndexPath: ".index.json", } diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index 5d302db3345..5f9e4d1944e 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -218,6 +218,6 @@ func assertCollectionDepsInstalled(t *testing.T, hub *Hub, collection string) { } func pushUpdateToCollectionInHub() { - responseByPath["/master/.index.json"] = fileToStringX("./testdata/index2.json") - responseByPath["/master/collections/crowdsecurity/test_collection.yaml"] = fileToStringX("./testdata/collection_v2.yaml") + responseByPath["/crowdsecurity/master/.index.json"] = fileToStringX("./testdata/index2.json") + responseByPath["/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml"] = fileToStringX("./testdata/collection_v2.yaml") } From 20067a85a0df105f921d81c5c26ca6ef1296fb94 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 24 Jul 2024 16:29:38 +0200 Subject: [PATCH 239/581] cscli/hub: don't return error if some file can't be recognized (#3150) In k8s there can be extra directories while mounting config maps, which leads to a failure while parsing the hub state. The PR changes these kind of errors to warnings. 
--- pkg/cwhub/sync.go | 6 ++++-- test/bats/20_hub_items.bats | 7 +++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index fd5d6b81220..38bb376ae3b 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -1,6 +1,7 @@ package cwhub import ( + "errors" "fmt" "os" "path/filepath" @@ -99,7 +100,7 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo if ret.ftype != PARSERS && ret.ftype != POSTOVERFLOWS { if !slices.Contains(ItemTypes, ret.stage) { - return nil, fmt.Errorf("unknown configuration type for file '%s'", path) + return nil, errors.New("unknown configuration type") } ret.ftype = ret.stage @@ -196,7 +197,8 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { info, err := h.getItemFileInfo(path, h.logger) if err != nil { - return err + h.logger.Warningf("Ignoring file %s: %s", path, err) + return nil } // non symlinks are local user files or hub files diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 1846b3e424c..214d07d927f 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -193,3 +193,10 @@ teardown() { rune -0 jq -c '.tainted' <(output) assert_output 'false' } + +@test "skip files if we can't guess their type" { + rune -0 mkdir -p "$CONFIG_DIR/scenarios/foo" + rune -0 touch "$CONFIG_DIR/scenarios/foo/bar.yaml" + rune -0 cscli hub list + assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/foo/bar.yaml: unknown configuration type" +} From 6f5d75c5f13229e603fbcdc725c51665fabc2c92 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 29 Jul 2024 14:13:06 +0200 Subject: [PATCH 240/581] cscli metrics: explicit message "no bouncer metrics found" (#3155) --- cmd/crowdsec-cli/climetrics/statbouncer.go | 9 ++++++++- test/bats/08_metrics_bouncer.bats | 11 ++++++++--- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git 
a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 7d80e902961..62e68b6bc41 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -447,8 +447,15 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor } // Table displays a table of metrics for each bouncer -func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, _ bool) { +func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEmpty bool) { + found := false + for _, bouncerName := range maptools.SortedKeys(s.aggOverOrigin) { s.bouncerTable(out, bouncerName, wantColor, noUnit) + found = true + } + + if !found && showEmpty { + io.WriteString(out, "No bouncer metrics found.\n\n") } } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 1851ed0ac14..c4dfebbab1d 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -23,10 +23,15 @@ teardown() { #---------- -@test "cscli metrics show bouncers" { - # there are no bouncers, so no metrics yet +@test "cscli metrics show bouncers (empty)" { + # this message is given only if we ask explicitly for bouncers + notfound="No bouncer metrics found." + rune -0 cscli metrics show bouncers - refute_output + assert_output "$notfound" + + rune -0 cscli metrics list + refute_output "$notfound" } @test "rc usage metrics (empty payload)" { From 136dba61d97021181ab574dcea90d816735d686d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 Aug 2024 10:55:04 +0200 Subject: [PATCH 241/581] reduce log verbosity, minor CI fixes, lint (#3157) * pkg/cwhub: redundant log messages * CI: fixture output and elapsed time * CI: preload only essential hub items * report full version (including -rc2 etc.) 
with cscli hub update --debug * lint --- cmd/crowdsec-cli/require/branch.go | 6 +- .../modules/cloudwatch/cloudwatch.go | 31 +++++----- pkg/acquisition/modules/docker/docker.go | 17 ++--- .../modules/journalctl/journalctl.go | 11 ++-- pkg/acquisition/modules/kafka/kafka.go | 2 +- .../modules/kubernetesaudit/k8s_audit.go | 11 ++-- .../syslog/internal/parser/rfc3164/parse.go | 28 ++++----- .../syslog/internal/parser/rfc5424/parse.go | 62 +++++++++---------- pkg/acquisition/modules/syslog/syslog.go | 5 +- pkg/appsec/appsec.go | 3 +- pkg/appsec/appsec_rule/appsec_rule.go | 7 ++- pkg/csplugin/notifier.go | 4 +- pkg/csplugin/utils.go | 4 +- pkg/cwhub/iteminstall.go | 1 - pkg/cwhub/itemupgrade.go | 2 +- pkg/exprhelpers/helpers.go | 5 +- test/bin/preload-hub-items | 11 +++- test/lib/config/config-global | 4 +- test/lib/config/config-local | 5 +- 19 files changed, 119 insertions(+), 100 deletions(-) diff --git a/cmd/crowdsec-cli/require/branch.go b/cmd/crowdsec-cli/require/branch.go index 503cb6d2326..09acc0fef8a 100644 --- a/cmd/crowdsec-cli/require/branch.go +++ b/cmd/crowdsec-cli/require/branch.go @@ -12,6 +12,8 @@ import ( log "github.com/sirupsen/logrus" "golang.org/x/mod/semver" + "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) @@ -74,13 +76,13 @@ func chooseBranch(ctx context.Context, cfg *csconfig.Config) string { } if csVersion == latest { - log.Debugf("Latest crowdsec version (%s), using hub branch 'master'", csVersion) + log.Debugf("Latest crowdsec version (%s), using hub branch 'master'", version.String()) return "master" } // if current version is greater than the latest we are in pre-release if semver.Compare(csVersion, latest) == 1 { - log.Debugf("Your current crowdsec version seems to be a pre-release (%s), using hub branch 'master'", csVersion) + log.Debugf("Your current crowdsec version seems to be a pre-release (%s), using hub branch 'master'", 
version.String()) return "master" } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 1859bbf0f84..1a78ae6fa7a 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -2,6 +2,7 @@ package cloudwatchacquisition import ( "context" + "errors" "fmt" "net/url" "os" @@ -111,7 +112,7 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { } if len(cw.Config.GroupName) == 0 { - return fmt.Errorf("group_name is mandatory for CloudwatchSource") + return errors.New("group_name is mandatory for CloudwatchSource") } if cw.Config.Mode == "" { @@ -189,7 +190,7 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr } else { if cw.Config.AwsRegion == nil { cw.logger.Errorf("aws_region is not specified, specify it or aws_config_dir") - return fmt.Errorf("aws_region is not specified, specify it or aws_config_dir") + return errors.New("aws_region is not specified, specify it or aws_config_dir") } os.Setenv("AWS_REGION", *cw.Config.AwsRegion) } @@ -228,7 +229,7 @@ func (cw *CloudwatchSource) newClient() error { } if sess == nil { - return fmt.Errorf("failed to create aws session") + return errors.New("failed to create aws session") } if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { cw.logger.Debugf("[testing] overloading endpoint with %s", v) @@ -237,7 +238,7 @@ func (cw *CloudwatchSource) newClient() error { cw.cwClient = cloudwatchlogs.New(sess) } if cw.cwClient == nil { - return fmt.Errorf("failed to create cloudwatch client") + return errors.New("failed to create cloudwatch client") } return nil } @@ -516,7 +517,7 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan } case <-cfg.t.Dying(): cfg.logger.Infof("logstream tail stopping") - return fmt.Errorf("killed") + return errors.New("killed") } } } @@ -527,11 +528,11 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, 
labels map[string]string, dsn = strings.TrimPrefix(dsn, cw.GetName()+"://") args := strings.Split(dsn, "?") if len(args) != 2 { - return fmt.Errorf("query is mandatory (at least start_date and end_date or backlog)") + return errors.New("query is mandatory (at least start_date and end_date or backlog)") } frags := strings.Split(args[0], ":") if len(frags) != 2 { - return fmt.Errorf("cloudwatch path must contain group and stream : /my/group/name:stream/name") + return errors.New("cloudwatch path must contain group and stream : /my/group/name:stream/name") } cw.Config.GroupName = frags[0] cw.Config.StreamName = &frags[1] @@ -547,7 +548,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, switch k { case "log_level": if len(v) != 1 { - return fmt.Errorf("expected zero or one value for 'log_level'") + return errors.New("expected zero or one value for 'log_level'") } lvl, err := log.ParseLevel(v[0]) if err != nil { @@ -557,14 +558,14 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, case "profile": if len(v) != 1 { - return fmt.Errorf("expected zero or one value for 'profile'") + return errors.New("expected zero or one value for 'profile'") } awsprof := v[0] cw.Config.AwsProfile = &awsprof cw.logger.Debugf("profile set to '%s'", *cw.Config.AwsProfile) case "start_date": if len(v) != 1 { - return fmt.Errorf("expected zero or one argument for 'start_date'") + return errors.New("expected zero or one argument for 'start_date'") } //let's reuse our parser helper so that a ton of date formats are supported strdate, startDate := parser.GenDateParse(v[0]) @@ -572,7 +573,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, cw.Config.StartTime = &startDate case "end_date": if len(v) != 1 { - return fmt.Errorf("expected zero or one argument for 'end_date'") + return errors.New("expected zero or one argument for 'end_date'") } //let's reuse our parser helper so that a ton of date 
formats are supported strdate, endDate := parser.GenDateParse(v[0]) @@ -580,7 +581,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, cw.Config.EndTime = &endDate case "backlog": if len(v) != 1 { - return fmt.Errorf("expected zero or one argument for 'backlog'") + return errors.New("expected zero or one argument for 'backlog'") } //let's reuse our parser helper so that a ton of date formats are supported duration, err := time.ParseDuration(v[0]) @@ -605,10 +606,10 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, } if cw.Config.StreamName == nil || cw.Config.GroupName == "" { - return fmt.Errorf("missing stream or group name") + return errors.New("missing stream or group name") } if cw.Config.StartTime == nil || cw.Config.EndTime == nil { - return fmt.Errorf("start_date and end_date or backlog are mandatory in one-shot mode") + return errors.New("start_date and end_date or backlog are mandatory in one-shot mode") } cw.Config.Mode = configuration.CAT_MODE @@ -699,7 +700,7 @@ func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig) l := types.Line{} evt := types.Event{} if log.Message == nil { - return evt, fmt.Errorf("nil message") + return evt, errors.New("nil message") } msg := *log.Message if cfg.PrependCloudwatchTimestamp != nil && *cfg.PrependCloudwatchTimestamp { diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 857d7e7af78..9a6e13feee4 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -3,6 +3,7 @@ package dockeracquisition import ( "bufio" "context" + "errors" "fmt" "net/url" "regexp" @@ -88,11 +89,11 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { } if len(d.Config.ContainerName) == 0 && len(d.Config.ContainerID) == 0 && len(d.Config.ContainerIDRegexp) == 0 && len(d.Config.ContainerNameRegexp) == 0 && !d.Config.UseContainerLabels { - return 
fmt.Errorf("no containers names or containers ID configuration provided") + return errors.New("no containers names or containers ID configuration provided") } if d.Config.UseContainerLabels && (len(d.Config.ContainerName) > 0 || len(d.Config.ContainerID) > 0 || len(d.Config.ContainerIDRegexp) > 0 || len(d.Config.ContainerNameRegexp) > 0) { - return fmt.Errorf("use_container_labels and container_name, container_id, container_id_regexp, container_name_regexp are mutually exclusive") + return errors.New("use_container_labels and container_name, container_id, container_id_regexp, container_name_regexp are mutually exclusive") } d.CheckIntervalDuration, err = time.ParseDuration(d.Config.CheckInterval) @@ -225,7 +226,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg switch k { case "log_level": if len(v) != 1 { - return fmt.Errorf("only one 'log_level' parameters is required, not many") + return errors.New("only one 'log_level' parameters is required, not many") } lvl, err := log.ParseLevel(v[0]) if err != nil { @@ -234,17 +235,17 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg d.logger.Logger.SetLevel(lvl) case "until": if len(v) != 1 { - return fmt.Errorf("only one 'until' parameters is required, not many") + return errors.New("only one 'until' parameters is required, not many") } d.containerLogsOptions.Until = v[0] case "since": if len(v) != 1 { - return fmt.Errorf("only one 'since' parameters is required, not many") + return errors.New("only one 'since' parameters is required, not many") } d.containerLogsOptions.Since = v[0] case "follow_stdout": if len(v) != 1 { - return fmt.Errorf("only one 'follow_stdout' parameters is required, not many") + return errors.New("only one 'follow_stdout' parameters is required, not many") } followStdout, err := strconv.ParseBool(v[0]) if err != nil { @@ -254,7 +255,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg 
d.containerLogsOptions.ShowStdout = followStdout case "follow_stderr": if len(v) != 1 { - return fmt.Errorf("only one 'follow_stderr' parameters is required, not many") + return errors.New("only one 'follow_stderr' parameters is required, not many") } followStdErr, err := strconv.ParseBool(v[0]) if err != nil { @@ -264,7 +265,7 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg d.containerLogsOptions.ShowStderr = followStdErr case "docker_host": if len(v) != 1 { - return fmt.Errorf("only one 'docker_host' parameters is required, not many") + return errors.New("only one 'docker_host' parameters is required, not many") } if err := client.WithHost(v[0])(dockerClient); err != nil { return err diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index e8bb5a3edd5..762dfe9ba12 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -3,6 +3,7 @@ package journalctlacquisition import ( "bufio" "context" + "errors" "fmt" "net/url" "os/exec" @@ -98,7 +99,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err if stdoutscanner == nil { cancel() cmd.Wait() - return fmt.Errorf("failed to create stdout scanner") + return errors.New("failed to create stdout scanner") } stderrScanner := bufio.NewScanner(stderr) @@ -106,7 +107,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err if stderrScanner == nil { cancel() cmd.Wait() - return fmt.Errorf("failed to create stderr scanner") + return errors.New("failed to create stderr scanner") } t.Go(func() error { return readLine(stdoutscanner, stdoutChan, errChan) @@ -189,7 +190,7 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { } if len(j.config.Filters) == 0 { - return fmt.Errorf("journalctl_filter is required") + return errors.New("journalctl_filter is required") } j.args = append(args, 
j.config.Filters...) j.src = fmt.Sprintf("journalctl-%s", strings.Join(j.config.Filters, ".")) @@ -223,7 +224,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, qs := strings.TrimPrefix(dsn, "journalctl://") if len(qs) == 0 { - return fmt.Errorf("empty journalctl:// DSN") + return errors.New("empty journalctl:// DSN") } params, err := url.ParseQuery(qs) @@ -236,7 +237,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, j.config.Filters = append(j.config.Filters, value...) case "log_level": if len(value) != 1 { - return fmt.Errorf("expected zero or one value for 'log_level'") + return errors.New("expected zero or one value for 'log_level'") } lvl, err := log.ParseLevel(value[0]) if err != nil { diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index f64bb1df306..ca0a7556fca 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -278,7 +278,7 @@ func (kc *KafkaConfiguration) NewReader(dialer *kafka.Dialer, logger *log.Entry) ErrorLogger: kafka.LoggerFunc(logger.Errorf), } if kc.GroupID != "" && kc.Partition != 0 { - return &kafka.Reader{}, fmt.Errorf("cannot specify both group_id and partition") + return &kafka.Reader{}, errors.New("cannot specify both group_id and partition") } if kc.GroupID != "" { rConf.GroupID = kc.GroupID diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 7d27f9e0390..e48a074b764 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -3,6 +3,7 @@ package kubernetesauditacquisition import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -73,15 +74,15 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { ka.config = k8sConfig if ka.config.ListenAddr == "" { - return fmt.Errorf("listen_addr cannot be empty") + 
return errors.New("listen_addr cannot be empty") } if ka.config.ListenPort == 0 { - return fmt.Errorf("listen_port cannot be empty") + return errors.New("listen_port cannot be empty") } if ka.config.WebhookPath == "" { - return fmt.Errorf("webhook_path cannot be empty") + return errors.New("webhook_path cannot be empty") } if ka.config.WebhookPath[0] != '/' { @@ -119,7 +120,7 @@ func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, Met } func (ka *KubernetesAuditSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { - return fmt.Errorf("k8s-audit datasource does not support command-line acquisition") + return errors.New("k8s-audit datasource does not support command-line acquisition") } func (ka *KubernetesAuditSource) GetMode() string { @@ -131,7 +132,7 @@ func (ka *KubernetesAuditSource) GetName() string { } func (ka *KubernetesAuditSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - return fmt.Errorf("k8s-audit datasource does not support one-shot acquisition") + return errors.New("k8s-audit datasource does not support one-shot acquisition") } func (ka *KubernetesAuditSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go index 3b59a806b8b..66d842ed519 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go @@ -1,7 +1,7 @@ package rfc3164 import ( - "fmt" + "errors" "time" "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/utils" @@ -52,7 +52,7 @@ func (r *RFC3164) parsePRI() error { pri := 0 if r.buf[r.position] != '<' { - return fmt.Errorf("PRI must start with '<'") + return errors.New("PRI must start with '<'") } r.position++ @@ -64,18 +64,18 @@ func (r *RFC3164) parsePRI() error { break } if 
c < '0' || c > '9' { - return fmt.Errorf("PRI must be a number") + return errors.New("PRI must be a number") } pri = pri*10 + int(c-'0') r.position++ } if pri > 999 { - return fmt.Errorf("PRI must be up to 3 characters long") + return errors.New("PRI must be up to 3 characters long") } if r.position == r.len && r.buf[r.position-1] != '>' { - return fmt.Errorf("PRI must end with '>'") + return errors.New("PRI must end with '>'") } r.PRI = pri @@ -98,7 +98,7 @@ func (r *RFC3164) parseTimestamp() error { } } if !validTs { - return fmt.Errorf("timestamp is not valid") + return errors.New("timestamp is not valid") } if r.useCurrentYear { if r.Timestamp.Year() == 0 { @@ -122,11 +122,11 @@ func (r *RFC3164) parseHostname() error { } if r.strictHostname { if !utils.IsValidHostnameOrIP(string(hostname)) { - return fmt.Errorf("hostname is not valid") + return errors.New("hostname is not valid") } } if len(hostname) == 0 { - return fmt.Errorf("hostname is empty") + return errors.New("hostname is empty") } r.Hostname = string(hostname) return nil @@ -147,7 +147,7 @@ func (r *RFC3164) parseTag() error { r.position++ } if len(tag) == 0 { - return fmt.Errorf("tag is empty") + return errors.New("tag is empty") } r.Tag = string(tag) @@ -167,7 +167,7 @@ func (r *RFC3164) parseTag() error { break } if c < '0' || c > '9' { - return fmt.Errorf("pid inside tag must be a number") + return errors.New("pid inside tag must be a number") } tmpPid = append(tmpPid, c) r.position++ @@ -175,7 +175,7 @@ func (r *RFC3164) parseTag() error { } if hasPid && !pidEnd { - return fmt.Errorf("pid inside tag must be closed with ']'") + return errors.New("pid inside tag must be closed with ']'") } if hasPid { @@ -191,7 +191,7 @@ func (r *RFC3164) parseMessage() error { } if r.position == r.len { - return fmt.Errorf("message is empty") + return errors.New("message is empty") } c := r.buf[r.position] @@ -202,7 +202,7 @@ func (r *RFC3164) parseMessage() error { for { if r.position >= r.len { - return 
fmt.Errorf("message is empty") + return errors.New("message is empty") } c := r.buf[r.position] if c != ' ' { @@ -219,7 +219,7 @@ func (r *RFC3164) parseMessage() error { func (r *RFC3164) Parse(message []byte) error { r.len = len(message) if r.len == 0 { - return fmt.Errorf("message is empty") + return errors.New("message is empty") } r.buf = message diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go index 8b71a77e2e3..639e91e1224 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go @@ -1,7 +1,7 @@ package rfc5424 import ( - "fmt" + "errors" "time" "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog/internal/parser/utils" @@ -52,7 +52,7 @@ func (r *RFC5424) parsePRI() error { pri := 0 if r.buf[r.position] != '<' { - return fmt.Errorf("PRI must start with '<'") + return errors.New("PRI must start with '<'") } r.position++ @@ -64,18 +64,18 @@ func (r *RFC5424) parsePRI() error { break } if c < '0' || c > '9' { - return fmt.Errorf("PRI must be a number") + return errors.New("PRI must be a number") } pri = pri*10 + int(c-'0') r.position++ } if pri > 999 { - return fmt.Errorf("PRI must be up to 3 characters long") + return errors.New("PRI must be up to 3 characters long") } if r.position == r.len && r.buf[r.position-1] != '>' { - return fmt.Errorf("PRI must end with '>'") + return errors.New("PRI must end with '>'") } r.PRI = pri @@ -84,11 +84,11 @@ func (r *RFC5424) parsePRI() error { func (r *RFC5424) parseVersion() error { if r.buf[r.position] != '1' { - return fmt.Errorf("version must be 1") + return errors.New("version must be 1") } r.position += 2 if r.position >= r.len { - return fmt.Errorf("version must be followed by a space") + return errors.New("version must be followed by a space") } return nil } @@ -113,17 +113,17 @@ func (r *RFC5424) parseTimestamp() error { 
} if len(timestamp) == 0 { - return fmt.Errorf("timestamp is empty") + return errors.New("timestamp is empty") } if r.position == r.len { - return fmt.Errorf("EOL after timestamp") + return errors.New("EOL after timestamp") } date, err := time.Parse(VALID_TIMESTAMP, string(timestamp)) if err != nil { - return fmt.Errorf("timestamp is not valid") + return errors.New("timestamp is not valid") } r.Timestamp = date @@ -131,7 +131,7 @@ func (r *RFC5424) parseTimestamp() error { r.position++ if r.position >= r.len { - return fmt.Errorf("EOL after timestamp") + return errors.New("EOL after timestamp") } return nil @@ -156,11 +156,11 @@ func (r *RFC5424) parseHostname() error { } if r.strictHostname { if !utils.IsValidHostnameOrIP(string(hostname)) { - return fmt.Errorf("hostname is not valid") + return errors.New("hostname is not valid") } } if len(hostname) == 0 { - return fmt.Errorf("hostname is empty") + return errors.New("hostname is empty") } r.Hostname = string(hostname) return nil @@ -185,11 +185,11 @@ func (r *RFC5424) parseAppName() error { } if len(appname) == 0 { - return fmt.Errorf("appname is empty") + return errors.New("appname is empty") } if len(appname) > 48 { - return fmt.Errorf("appname is too long") + return errors.New("appname is too long") } r.Tag = string(appname) @@ -215,11 +215,11 @@ func (r *RFC5424) parseProcID() error { } if len(procid) == 0 { - return fmt.Errorf("procid is empty") + return errors.New("procid is empty") } if len(procid) > 128 { - return fmt.Errorf("procid is too long") + return errors.New("procid is too long") } r.PID = string(procid) @@ -245,11 +245,11 @@ func (r *RFC5424) parseMsgID() error { } if len(msgid) == 0 { - return fmt.Errorf("msgid is empty") + return errors.New("msgid is empty") } if len(msgid) > 32 { - return fmt.Errorf("msgid is too long") + return errors.New("msgid is too long") } r.MsgID = string(msgid) @@ -263,7 +263,7 @@ func (r *RFC5424) parseStructuredData() error { return nil } if r.buf[r.position] != '[' 
{ - return fmt.Errorf("structured data must start with '[' or be '-'") + return errors.New("structured data must start with '[' or be '-'") } prev := byte(0) for r.position < r.len { @@ -281,14 +281,14 @@ func (r *RFC5424) parseStructuredData() error { } r.position++ if !done { - return fmt.Errorf("structured data must end with ']'") + return errors.New("structured data must end with ']'") } return nil } func (r *RFC5424) parseMessage() error { if r.position == r.len { - return fmt.Errorf("message is empty") + return errors.New("message is empty") } message := []byte{} @@ -305,7 +305,7 @@ func (r *RFC5424) parseMessage() error { func (r *RFC5424) Parse(message []byte) error { r.len = len(message) if r.len == 0 { - return fmt.Errorf("syslog line is empty") + return errors.New("syslog line is empty") } r.buf = message @@ -315,7 +315,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after PRI") + return errors.New("EOL after PRI") } err = r.parseVersion() @@ -324,7 +324,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after Version") + return errors.New("EOL after Version") } err = r.parseTimestamp() @@ -333,7 +333,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after Timestamp") + return errors.New("EOL after Timestamp") } err = r.parseHostname() @@ -342,7 +342,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after hostname") + return errors.New("EOL after hostname") } err = r.parseAppName() @@ -351,7 +351,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after appname") + return errors.New("EOL after appname") } err = r.parseProcID() @@ -360,7 +360,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after ProcID") + return errors.New("EOL 
after ProcID") } err = r.parseMsgID() @@ -369,7 +369,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after MSGID") + return errors.New("EOL after MSGID") } err = r.parseStructuredData() @@ -378,7 +378,7 @@ func (r *RFC5424) Parse(message []byte) error { } if r.position >= r.len { - return fmt.Errorf("EOL after SD") + return errors.New("EOL after SD") } err = r.parseMessage() diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 47940c3294c..06c32e62f77 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "errors" "fmt" "net" "strings" @@ -79,11 +80,11 @@ func (s *SyslogSource) GetAggregMetrics() []prometheus.Collector { } func (s *SyslogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { - return fmt.Errorf("syslog datasource does not support one shot acquisition") + return errors.New("syslog datasource does not support one shot acquisition") } func (s *SyslogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - return fmt.Errorf("syslog datasource does not support one shot acquisition") + return errors.New("syslog datasource does not support one shot acquisition") } func validatePort(port int) bool { diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 7c61f2a8dfd..96f977b4738 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -1,6 +1,7 @@ package appsec import ( + "errors" "fmt" "net/http" "os" @@ -164,7 +165,7 @@ func (wc *AppsecConfig) LoadByPath(file string) error { } if wc.Name == "" { - return fmt.Errorf("name cannot be empty") + return errors.New("name cannot be empty") } if wc.LogLevel == nil { lvl := wc.Logger.Logger.GetLevel() diff --git a/pkg/appsec/appsec_rule/appsec_rule.go b/pkg/appsec/appsec_rule/appsec_rule.go index 289405ef161..136d8b11cb7 100644 --- 
a/pkg/appsec/appsec_rule/appsec_rule.go +++ b/pkg/appsec/appsec_rule/appsec_rule.go @@ -1,6 +1,7 @@ package appsec_rule import ( + "errors" "fmt" ) @@ -48,15 +49,15 @@ type CustomRule struct { func (v *CustomRule) Convert(ruleType string, appsecRuleName string) (string, []uint32, error) { if v.Zones == nil && v.And == nil && v.Or == nil { - return "", nil, fmt.Errorf("no zones defined") + return "", nil, errors.New("no zones defined") } if v.Match.Type == "" && v.And == nil && v.Or == nil { - return "", nil, fmt.Errorf("no match type defined") + return "", nil, errors.New("no match type defined") } if v.Match.Value == "" && v.And == nil && v.Or == nil { - return "", nil, fmt.Errorf("no match value defined") + return "", nil, errors.New("no match value defined") } switch ruleType { diff --git a/pkg/csplugin/notifier.go b/pkg/csplugin/notifier.go index a4f5bbc0ed8..2b5d57fbcff 100644 --- a/pkg/csplugin/notifier.go +++ b/pkg/csplugin/notifier.go @@ -2,7 +2,7 @@ package csplugin import ( "context" - "fmt" + "errors" plugin "github.com/hashicorp/go-plugin" "google.golang.org/grpc" @@ -35,7 +35,7 @@ func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notific return &protobufs.Empty{}, err case <-ctx.Done(): - return &protobufs.Empty{}, fmt.Errorf("timeout exceeded") + return &protobufs.Empty{}, errors.New("timeout exceeded") } } diff --git a/pkg/csplugin/utils.go b/pkg/csplugin/utils.go index 216a079d457..2e7f0c80528 100644 --- a/pkg/csplugin/utils.go +++ b/pkg/csplugin/utils.go @@ -51,7 +51,7 @@ func getUID(username string) (uint32, error) { return 0, err } if uid < 0 || uid > math.MaxInt32 { - return 0, fmt.Errorf("out of bound uid") + return 0, errors.New("out of bound uid") } return uint32(uid), nil } @@ -66,7 +66,7 @@ func getGID(groupname string) (uint32, error) { return 0, err } if gid < 0 || gid > math.MaxInt32 { - return 0, fmt.Errorf("out of bound gid") + return 0, errors.New("out of bound gid") } return uint32(gid), nil } diff --git 
a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go index 6a16ad0a65f..7fe8ab59228 100644 --- a/pkg/cwhub/iteminstall.go +++ b/pkg/cwhub/iteminstall.go @@ -55,7 +55,6 @@ func (i *Item) Install(ctx context.Context, force bool, downloadOnly bool) error } if downloadOnly && downloaded { - i.hub.logger.Infof("Downloaded %s", i.Name) return nil } diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 1aebb3caf29..441948c9168 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -171,7 +171,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin downloaded, err := d.Download(ctx, url) if err != nil { - return false, "", fmt.Errorf("while downloading %s to %s: %w", i.Name, url, err) + return false, "", err } return downloaded, url, nil diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index b4324f8bbab..17ce468f623 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -3,6 +3,7 @@ package exprhelpers import ( "bufio" "encoding/base64" + "errors" "fmt" "math" "net" @@ -772,7 +773,7 @@ func ParseKV(params ...any) (any, error) { matches := keyValuePattern.FindAllStringSubmatch(blob, -1) if matches == nil { log.Errorf("could not find any key/value pair in line") - return nil, fmt.Errorf("invalid input format") + return nil, errors.New("invalid input format") } if _, ok := target[prefix]; !ok { target[prefix] = make(map[string]string) @@ -780,7 +781,7 @@ func ParseKV(params ...any) (any, error) { _, ok := target[prefix].(map[string]string) if !ok { log.Errorf("ParseKV: target is not a map[string]string") - return nil, fmt.Errorf("target is not a map[string]string") + return nil, errors.New("target is not a map[string]string") } } for _, match := range matches { diff --git a/test/bin/preload-hub-items b/test/bin/preload-hub-items index 319544d843d..79e20efbea2 100755 --- a/test/bin/preload-hub-items +++ b/test/bin/preload-hub-items @@ -11,6 +11,8 @@ THIS_DIR=$(CDPATH= cd 
-- "$(dirname -- "$0")" && pwd) echo "Pre-downloading Hub content..." +start=$(date +%s%N) + types=$("$CSCLI" hub types -o raw) for itemtype in $types; do @@ -19,9 +21,12 @@ for itemtype in $types; do #shellcheck disable=SC2086 "$CSCLI" "$itemtype" install \ $ALL_ITEMS \ - --download-only \ - --error + --download-only fi done -echo " done." \ No newline at end of file +elapsed=$((($(date +%s%N) - start)/1000000)) +# bash only does integer arithmetic, we could use bc or have some fun with sed +elapsed=$(echo "$elapsed" | sed -e 's/...$/.&/;t' -e 's/.$/.0&/') + +echo " done in $elapsed secs." diff --git a/test/lib/config/config-global b/test/lib/config/config-global index f77fb3e27bc..9b2b71c1dd1 100755 --- a/test/lib/config/config-global +++ b/test/lib/config/config-global @@ -70,7 +70,9 @@ make_init_data() { ./instance-db config-yaml ./instance-db setup - ./bin/preload-hub-items + # preload some content and data files + "$CSCLI" collections install crowdsecurity/linux --download-only + # sub-items did not respect --download-only ./bin/remove-all-hub-items # when installed packages are always using sqlite, so no need to regenerate diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 76bd4c3fbce..f80c26bc652 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -116,7 +116,10 @@ make_init_data() { "$CSCLI" --warning hub update - ./bin/preload-hub-items + # preload some content and data files + "$CSCLI" collections install crowdsecurity/linux --download-only + # sub-items did not respect --download-only + ./bin/remove-all-hub-items # force TCP, the default would be unix socket "$CSCLI" --warning machines add githubciXXXXXXXXXXXXXXXXXXXXXXXX --url http://127.0.0.1:8080 --auto --force From 6bd4096a3eea0df42e3d82f69a77d06ceef705e1 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Mon, 5 Aug 2024 12:04:07 +0200 Subject: [PATCH 242/581] fix #1742 : Allow QueryCAPIStatus to return as well enrollment status 
(#3159) * fix #1742 : Allow QueryCAPIStatus to return as well enrollment status --------- Co-authored-by: marco --- cmd/crowdsec-cli/capi.go | 72 +++++++++++++++++++++++++++---------- cmd/crowdsec-cli/support.go | 10 ++++-- 2 files changed, 62 insertions(+), 20 deletions(-) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 6933d355071..1888aa3545a 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -148,28 +148,53 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { return cmd } -// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct -func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { +// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. +func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { + apiURL, err := url.Parse(credURL) if err != nil { - return fmt.Errorf("parsing api url: %w", err) + return false, false, fmt.Errorf("parsing api url: %w", err) } scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) if err != nil { - return fmt.Errorf("failed to get scenarios: %w", err) + return false, false, fmt.Errorf("failed to get scenarios: %w", err) } if len(scenarios) == 0 { - return errors.New("no scenarios installed, abort") + return false, false, errors.New("no scenarios installed, abort") } - client, err := apiclient.NewDefaultClient(apiURL, - CAPIURLPrefix, - cwversion.UserAgent(), - nil) + passwd := strfmt.Password(password) + + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: login, + Password: passwd, + Scenarios: scenarios, + UserAgent: cwversion.UserAgent(), + URL: apiURL, + //I don't believe papi is neede to check enrollement + //PapiURL: papiURL, + VersionPrefix: "v3", + UpdateScenario: func() ([]string, error) { + l_scenarios, err := 
hub.GetInstalledNamesByType(cwhub.SCENARIOS) + if err != nil { + return nil, err + } + appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) + if err != nil { + return nil, err + } + ret := make([]string, 0, len(l_scenarios)+len(appsecRules)) + ret = append(ret, l_scenarios...) + ret = append(ret, appsecRules...) + + return ret, nil + }, + }) + if err != nil { - return fmt.Errorf("init default client: %w", err) + return false, false, fmt.Errorf("new client api: %w", err) } pw := strfmt.Password(password) @@ -180,12 +205,18 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Scenarios: scenarios, } - _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) + authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { - return err + return false, false, err } - return nil + client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token + + if client.IsEnrolled() { + return true, true, nil + } + return true, false, nil + } func (cli *cliCapi) status() error { @@ -205,12 +236,17 @@ func (cli *cliCapi) status() error { log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) - if err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { - return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) - } - - log.Info("You can successfully interact with Central API (CAPI)") + auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + if err != nil { + return fmt.Errorf("CAPI: failed to authenticate to Central API (CAPI): %s", err) + } + if auth { + log.Info("You can successfully interact with Central API (CAPI)") + } + if enrolled { + log.Info("Your instance is enrolled in the console") + } return nil } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 
324be2710fd..ef14f90df17 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -260,11 +260,17 @@ func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) - if err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + if err != nil { return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) } + if auth { + fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + } - fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + if enrolled { + fmt.Fprintln(out, "Your instance is enrolled in the console") + } cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), out) From 1bc3b0870b8b9ca20c5027750fef1a6599a28bc7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 7 Aug 2024 12:45:54 +0200 Subject: [PATCH 243/581] command "cscli doc --target /path/to/dir" (#3169) * command "cscli doc --target /path/to/dir" * typos and improved messages * CI: remove obsolete parameters for golangi-lint action * lint --- .github/workflows/go-tests-windows.yml | 4 ---- .github/workflows/go-tests.yml | 4 ---- .golangci.yml | 6 +----- cmd/crowdsec-cli/dashboard.go | 11 +++++----- cmd/crowdsec-cli/doc.go | 20 ++++++++++++++----- .../wineventlog/wineventlog_windows.go | 4 ++-- pkg/csplugin/utils_windows.go | 5 +++-- pkg/cwhub/iteminstall.go | 4 ++-- test/bats/01_cscli.bats | 14 +++++++++---- test/bats/20_hub_collections.bats | 2 +- test/bats/20_hub_parsers.bats | 2 +- test/bats/20_hub_postoverflows.bats | 2 +- test/bats/20_hub_scenarios.bats | 2 +- 13 files changed, 43 insertions(+), 37 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 9d8051821c8..b99291f06b7 100644 --- 
a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -60,7 +60,3 @@ jobs: version: v1.59 args: --issues-exit-code=1 --timeout 10m only-new-issues: false - # the cache is already managed above, enabling it here - # gives errors when extracting - skip-pkg-cache: true - skip-build-cache: true diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index ab45bd88717..a7c34adfe50 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -161,7 +161,3 @@ jobs: version: v1.59 args: --issues-exit-code=1 --timeout 10m only-new-issues: false - # the cache is already managed above, enabling it here - # gives errors when extracting - skip-pkg-cache: true - skip-build-cache: true diff --git a/.golangci.yml b/.golangci.yml index 2ac39793731..fb1dab623c1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -271,7 +271,7 @@ linters: # - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) - - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and reports occations, where the check for the returned error can be omitted. + - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and reports occasions, where the check for the returned error can be omitted. - exhaustive # check exhaustiveness of enum switch statements - gci # Gci control golang package import order and make it always deterministic. 
- godot # Check if comments end in a period @@ -387,10 +387,6 @@ issues: - perfsprint text: "fmt.Sprintf can be replaced .*" - - linters: - - perfsprint - text: "fmt.Errorf can be replaced with errors.New" - # # Will fix, easy but some neurons required # diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index beff06d478a..96a2fa38cb7 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -3,6 +3,7 @@ package main import ( + "errors" "fmt" "math" "os" @@ -277,7 +278,7 @@ cscli dashboard remove --force return fmt.Errorf("unable to ask to force: %s", err) } if !answer { - return fmt.Errorf("user stated no to continue") + return errors.New("user stated no to continue") } } if metabase.IsContainerExist(metabaseContainerID) { @@ -289,7 +290,7 @@ cscli dashboard remove --force if err == nil { // if group exist, remove it groupDelCmd, err := exec.LookPath("groupdel") if err != nil { - return fmt.Errorf("unable to find 'groupdel' command, can't continue") + return errors.New("unable to find 'groupdel' command, can't continue") } groupDel := &exec.Cmd{Path: groupDelCmd, Args: []string{groupDelCmd, crowdsecGroup}} @@ -366,7 +367,7 @@ func checkSystemMemory(forceYes *bool) error { } if !answer { - return fmt.Errorf("user stated no to continue") + return errors.New("user stated no to continue") } return nil @@ -399,7 +400,7 @@ func disclaimer(forceYes *bool) error { } if !answer { - return fmt.Errorf("user stated no to responsibilities") + return errors.New("user stated no to responsibilities") } return nil @@ -435,7 +436,7 @@ func checkGroups(forceYes *bool) (*user.Group, error) { groupAddCmd, err := exec.LookPath("groupadd") if err != nil { - return dockerGroup, fmt.Errorf("unable to find 'groupadd' command, can't continue") + return dockerGroup, errors.New("unable to find 'groupadd' command, can't continue") } groupAdd := &exec.Cmd{Path: groupAddCmd, Args: []string{groupAddCmd, crowdsecGroup}} diff --git 
a/cmd/crowdsec-cli/doc.go b/cmd/crowdsec-cli/doc.go index db1e642115e..f68d535db03 100644 --- a/cmd/crowdsec-cli/doc.go +++ b/cmd/crowdsec-cli/doc.go @@ -16,20 +16,30 @@ func NewCLIDoc() *cliDoc { } func (cli cliDoc) NewCommand(rootCmd *cobra.Command) *cobra.Command { + var target string + + const defaultTarget = "./doc" + cmd := &cobra.Command{ Use: "doc", - Short: "Generate the documentation in `./doc/`. Directory must exist.", - Args: cobra.ExactArgs(0), + Short: "Generate the documentation related to cscli commands. Target directory must exist.", + Args: cobra.NoArgs, Hidden: true, DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - if err := doc.GenMarkdownTreeCustom(rootCmd, "./doc/", cli.filePrepender, cli.linkHandler); err != nil { - return fmt.Errorf("failed to generate cobra doc: %w", err) + RunE: func(_ *cobra.Command, args []string) error { + if err := doc.GenMarkdownTreeCustom(rootCmd, target, cli.filePrepender, cli.linkHandler); err != nil { + return fmt.Errorf("failed to generate cscli documentation: %w", err) } + + fmt.Println("Documentation generated in", target) + return nil }, } + flags := cmd.Flags() + flags.StringVar(&target, "target", defaultTarget, "The target directory where the documentation will be generated") + return cmd } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 8adbf1e53c5..c6b10b7c38c 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -246,11 +246,11 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { } if w.config.EventChannel != "" && w.config.XPathQuery != "" { - return fmt.Errorf("event_channel and xpath_query are mutually exclusive") + return errors.New("event_channel and xpath_query are mutually exclusive") } if w.config.EventChannel == "" && w.config.XPathQuery == "" { - return 
fmt.Errorf("event_channel or xpath_query must be set") + return errors.New("event_channel or xpath_query must be set") } w.config.Mode = configuration.TAIL_MODE diff --git a/pkg/csplugin/utils_windows.go b/pkg/csplugin/utils_windows.go index 2870a2addb5..8d4956ceeeb 100644 --- a/pkg/csplugin/utils_windows.go +++ b/pkg/csplugin/utils_windows.go @@ -3,6 +3,7 @@ package csplugin import ( + "errors" "fmt" "os" "os/exec" @@ -77,14 +78,14 @@ func CheckPerms(path string) error { return fmt.Errorf("while getting owner security info: %w", err) } if !sd.IsValid() { - return fmt.Errorf("security descriptor is invalid") + return errors.New("security descriptor is invalid") } owner, _, err := sd.Owner() if err != nil { return fmt.Errorf("while getting owner: %w", err) } if !owner.IsValid() { - return fmt.Errorf("owner is invalid") + return errors.New("owner is invalid") } if !owner.Equals(systemSid) && !owner.Equals(currentUserSid) && !owner.Equals(adminSid) { diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go index 7fe8ab59228..912897d0d7e 100644 --- a/pkg/cwhub/iteminstall.go +++ b/pkg/cwhub/iteminstall.go @@ -9,11 +9,11 @@ import ( func (i *Item) enable() error { if i.State.Installed { if i.State.Tainted { - return fmt.Errorf("%s is tainted, won't enable unless --force", i.Name) + return fmt.Errorf("%s is tainted, won't overwrite unless --force", i.Name) } if i.State.IsLocal() { - return fmt.Errorf("%s is local, won't enable", i.Name) + return fmt.Errorf("%s is local, won't overwrite", i.Name) } // if it's a collection, check sub-items even if the collection file itself is up-to-date diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 792274cc4f4..27cfe53212b 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -366,16 +366,14 @@ teardown() { } @test "cscli doc" { - # generating documentation requires a directory named "doc" - cd "$BATS_TEST_TMPDIR" rune -1 cscli doc refute_output - assert_stderr --regexp 'failed to generate 
cobra doc: open doc/.*: no such file or directory' + assert_stderr --regexp 'failed to generate cscli documentation: open doc/.*: no such file or directory' mkdir -p doc rune -0 cscli doc - refute_output + assert_output "Documentation generated in ./doc" refute_stderr assert_file_exists "doc/cscli.md" assert_file_not_exist "doc/cscli_setup.md" @@ -385,6 +383,14 @@ teardown() { export CROWDSEC_FEATURE_CSCLI_SETUP="true" rune -0 cscli doc assert_file_exists "doc/cscli_setup.md" + + # specify a target directory + mkdir -p "$BATS_TEST_TMPDIR/doc2" + rune -0 cscli doc --target "$BATS_TEST_TMPDIR/doc2" + assert_output "Documentation generated in $BATS_TEST_TMPDIR/doc2" + refute_stderr + assert_file_exists "$BATS_TEST_TMPDIR/doc2/cscli_setup.md" + } @test "feature.yaml for subcommands" { diff --git a/test/bats/20_hub_collections.bats b/test/bats/20_hub_collections.bats index 1381fe8e55a..6822339ae40 100644 --- a/test/bats/20_hub_collections.bats +++ b/test/bats/20_hub_collections.bats @@ -177,7 +177,7 @@ teardown() { echo "dirty" >"$CONFIG_DIR/collections/sshd.yaml" rune -1 cscli collections install crowdsecurity/sshd - assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is tainted, won't enable unless --force" + assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is tainted, won't overwrite unless --force" rune -0 cscli collections install crowdsecurity/sshd --force assert_stderr --partial "Enabled crowdsecurity/sshd" diff --git a/test/bats/20_hub_parsers.bats b/test/bats/20_hub_parsers.bats index 214463f9cfc..791b1a2177f 100644 --- a/test/bats/20_hub_parsers.bats +++ b/test/bats/20_hub_parsers.bats @@ -177,7 +177,7 @@ teardown() { echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" rune -1 cscli parsers install crowdsecurity/whitelists - assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while 
enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't enable unless --force" + assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't overwrite unless --force" rune -0 cscli parsers install crowdsecurity/whitelists --force assert_stderr --partial "Enabled crowdsecurity/whitelists" diff --git a/test/bats/20_hub_postoverflows.bats b/test/bats/20_hub_postoverflows.bats index 5123966a44e..37337b08caa 100644 --- a/test/bats/20_hub_postoverflows.bats +++ b/test/bats/20_hub_postoverflows.bats @@ -177,7 +177,7 @@ teardown() { echo "dirty" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" rune -1 cscli postoverflows install crowdsecurity/rdns - assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't enable unless --force" + assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't overwrite unless --force" rune -0 cscli postoverflows install crowdsecurity/rdns --force assert_stderr --partial "Enabled crowdsecurity/rdns" diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats index 4e4b28e7703..3ab3d944c93 100644 --- a/test/bats/20_hub_scenarios.bats +++ b/test/bats/20_hub_scenarios.bats @@ -178,7 +178,7 @@ teardown() { echo "dirty" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" rune -1 cscli scenarios install crowdsecurity/ssh-bf - assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't enable unless --force" + assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't overwrite unless --force" rune -0 cscli scenarios install crowdsecurity/ssh-bf --force assert_stderr --partial "Enabled 
crowdsecurity/ssh-bf" From 3532e872d350d224e1fd247cfbfe6116620604d5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:46:39 +0200 Subject: [PATCH 244/581] metrics: avoid nil deref with inactive bouncers or malformed response (#3170) * metrics: avoid nil deref with inactive bouncers * log message from API even it if cannot be parsed * fix unit test --- pkg/apiclient/client_test.go | 2 +- pkg/apiclient/resperr.go | 19 +++++++++++++----- pkg/apiserver/apic_metrics.go | 31 +++++++++++++++++++++--------- pkg/apiserver/apic_metrics_test.go | 3 +++ pkg/apiserver/apic_test.go | 14 +++++++++++++- pkg/apiserver/apiserver_test.go | 19 +++++++++--------- 6 files changed, 63 insertions(+), 25 deletions(-) diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index dd09811924f..bd83e512afc 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -348,5 +348,5 @@ func TestNewClientBadAnswer(t *testing.T) { URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) - cstest.RequireErrorContains(t, err, "invalid body: bad") + cstest.RequireErrorContains(t, err, "API error: http code 401, response: bad") } diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index e8f12ee9f4e..00689147332 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -16,12 +16,18 @@ type ErrorResponse struct { } func (e *ErrorResponse) Error() string { - err := fmt.Sprintf("API error: %s", *e.Message) + message := ptr.OrEmpty(e.Message) + errors := "" + if len(e.Errors) > 0 { - err += fmt.Sprintf(" (%s)", e.Errors) + errors = fmt.Sprintf(" (%s)", e.Errors) + } + + if message == "" && errors == "" { + errors = "(no errors)" } - return err + return fmt.Sprintf("API error: %s%s", message, errors) } // CheckResponse verifies the API response and builds an appropriate Go error if necessary. 
@@ -42,8 +48,11 @@ func CheckResponse(r *http.Response) error { case http.StatusUnprocessableEntity: ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid request: %s", r.StatusCode, string(data))) default: - if err := json.Unmarshal(data, ret); err != nil { - ret.Message = ptr.Of(fmt.Sprintf("http code %d, invalid body: %s", r.StatusCode, string(data))) + // try to unmarshal and if there are no 'message' or 'errors' fields, display the body as is, + // the API is following a different convention + err := json.Unmarshal(data, ret) + if err != nil || (ret.Message == nil && len(ret.Errors) == 0) { + ret.Message = ptr.Of(fmt.Sprintf("http code %d, response: %s", r.StatusCode, string(data))) return ret } } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 54640afc2d0..176984f1ad6 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -54,7 +54,11 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { rcMetrics.FeatureFlags = strings.Split(bouncer.Featureflags, ",") rcMetrics.Version = ptr.Of(bouncer.Version) rcMetrics.Name = bouncer.Name - rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() + + rcMetrics.LastPull = 0 + if bouncer.LastPull != nil { + rcMetrics.LastPull = bouncer.LastPull.UTC().Unix() + } rcMetrics.Metrics = make([]*models.DetailedMetrics, 0) @@ -92,14 +96,19 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { lpMetrics.FeatureFlags = strings.Split(lp.Featureflags, ",") lpMetrics.Version = ptr.Of(lp.Version) lpMetrics.Name = lp.MachineId - lpMetrics.LastPush = lp.LastPush.UTC().Unix() - lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() + lpMetrics.LastPush = 0 + if lp.LastPush != nil { + lpMetrics.LastPush = lp.LastPush.UTC().Unix() + } + + lpMetrics.LastUpdate = lp.UpdatedAt.UTC().Unix() lpMetrics.Datasources = lp.Datasources + hubItems := models.HubItems{} + if lp.Hubstate != nil { // must carry over the hub state even if nothing is installed - hubItems := 
models.HubItems{} for itemType, items := range lp.Hubstate { hubItems[itemType] = []models.HubItem{} for _, item := range items { @@ -109,13 +118,11 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { Version: item.Version, }) } - - lpMetrics.HubItems = hubItems } - } else { - lpMetrics.HubItems = models.HubItems{} } + lpMetrics.HubItems = hubItems + lpMetrics.Metrics = make([]*models.DetailedMetrics, 0) for _, dbMetric := range dbMetrics { @@ -203,11 +210,16 @@ func (a *apic) GetMetrics() (*models.Metrics, error) { bouncersInfo := make([]*models.MetricsBouncerInfo, len(bouncers)) for i, bouncer := range bouncers { + lastPull := "" + if bouncer.LastPull != nil { + lastPull = bouncer.LastPull.Format(time.RFC3339) + } + bouncersInfo[i] = &models.MetricsBouncerInfo{ Version: bouncer.Version, CustomName: bouncer.Name, Name: bouncer.Type, - LastPull: bouncer.LastPull.Format(time.RFC3339), + LastPull: lastPull, } } @@ -330,6 +342,7 @@ func (a *apic) SendUsageMetrics() { firstRun := true + log.Debugf("Start sending usage metrics to CrowdSec Central API (interval: %s once, then %s)", a.usageMetricsIntervalFirst, a.usageMetricsInterval) ticker := time.NewTicker(a.usageMetricsIntervalFirst) for { diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 15ad63b391b..d1e48ac90a3 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -84,8 +84,11 @@ func TestAPICSendMetrics(t *testing.T) { tc.setUp(api) stop := make(chan bool) + httpmock.ZeroCallCounters() + go api.SendMetrics(stop) + time.Sleep(tc.duration) stop <- true diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index a3aa956ed98..546a236251f 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -1113,12 +1113,16 @@ func TestAPICPush(t *testing.T) { require.NoError(t, err) api.apiClient = apic + httpmock.RegisterResponder("POST", "http://api.crowdsec.net/api/signals", 
httpmock.NewBytesResponder(200, []byte{})) + go func() { api.AlertsAddChan <- tc.alerts + time.Sleep(time.Second) api.Shutdown() }() + err = api.Push() require.NoError(t, err) assert.Equal(t, tc.expectedCalls, httpmock.GetTotalCallCount()) @@ -1161,7 +1165,9 @@ func TestAPICPull(t *testing.T) { url, err := url.ParseRequestURI("http://api.crowdsec.net/") require.NoError(t, err) httpmock.Activate() + defer httpmock.DeactivateAndReset() + apic, err := apiclient.NewDefaultClient( url, "/api", @@ -1169,7 +1175,9 @@ func TestAPICPull(t *testing.T) { nil, ) require.NoError(t, err) + api.apiClient = apic + httpmock.RegisterNoResponder(httpmock.NewBytesResponder(200, jsonMarshalX( modelscapi.GetDecisionsStreamResponse{ New: modelscapi.GetDecisionsStreamResponseNew{ @@ -1187,14 +1195,18 @@ func TestAPICPull(t *testing.T) { }, ))) tc.setUp() + var buf bytes.Buffer + go func() { logrus.SetOutput(&buf) + if err := api.Pull(); err != nil { panic(err) } }() - //Slightly long because the CI runner for windows are slow, and this can lead to random failure + + // Slightly long because the CI runner for windows are slow, and this can lead to random failure time.Sleep(time.Millisecond * 500) logrus.SetOutput(os.Stderr) assert.Contains(t, buf.String(), tc.logContains) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 26531a592da..b3f619f39c1 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -29,15 +29,16 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var testMachineID = "test" -var testPassword = strfmt.Password("test") -var MachineTest = models.WatcherAuthRequest{ - MachineID: &testMachineID, - Password: &testPassword, -} - -var UserAgent = fmt.Sprintf("crowdsec-test/%s", version.Version) -var emptyBody = strings.NewReader("") +var ( + testMachineID = "test" + testPassword = strfmt.Password("test") + MachineTest = models.WatcherAuthRequest{ + MachineID: &testMachineID, + Password: &testPassword, + } + 
UserAgent = fmt.Sprintf("crowdsec-test/%s", version.Version) + emptyBody = strings.NewReader("") +) func LoadTestConfig(t *testing.T) csconfig.Config { config := csconfig.Config{} From 52995b8fa456a03b0761ce9aa2dcbcc2427e62ae Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 Aug 2024 10:15:00 +0200 Subject: [PATCH 245/581] prevent nil deref when loading cti config (#3176) * correct variable names * prevent nil deref when loading cti config --- pkg/apiserver/apic.go | 4 ++-- pkg/csconfig/api.go | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 284d0acdabf..5b850cbff0d 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -42,7 +42,7 @@ const ( metricsIntervalDefault = time.Minute * 30 metricsIntervalDelta = time.Minute * 15 usageMetricsInterval = time.Minute * 30 - usageMetricsIntervalFirst = time.Minute * 15 + usageMetricsIntervalDelta = time.Minute * 15 ) type apic struct { @@ -196,7 +196,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con metricsInterval: metricsIntervalDefault, metricsIntervalFirst: randomDuration(metricsIntervalDefault, metricsIntervalDelta), usageMetricsInterval: usageMetricsInterval, - usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalFirst), + usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalDelta), isPulling: make(chan bool, 1), whitelists: apicWhitelist, } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index fb29111ecd5..a23df957282 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -61,7 +61,7 @@ type CTICfg struct { func (a *CTICfg) Load() error { if a.Key == nil { - *a.Enabled = false + a.Enabled = ptr.Of(false) } if a.Key != nil && *a.Key == "" { @@ -69,8 +69,7 @@ func (a *CTICfg) Load() error { } if a.Enabled == nil { - a.Enabled = new(bool) - *a.Enabled = true + a.Enabled = ptr.Of(true) 
} if a.CacheTimeout == nil { From 71a253aea6a8cf8b25ed724f51a88a7859f8c807 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 12 Aug 2024 09:34:45 +0100 Subject: [PATCH 246/581] Remove useragent set by RC (#3167) Fix #3166 --- pkg/appsec/request.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index d0e3632abda..ccd7a9f9cc8 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -331,8 +331,9 @@ func NewParsedRequestFromRequest(r *http.Request, logger *log.Entry) (ParsedRequ originalHTTPRequest.Header.Set("User-Agent", userAgent) r.Header.Set("User-Agent", userAgent) //Override the UA in the original request, as this is what will be used by the waf engine } else { - //If we don't have a forwarded UA, delete the one that was set by the bouncer + //If we don't have a forwarded UA, delete the one that was set by the remediation in both original and incoming originalHTTPRequest.Header.Del("User-Agent") + r.Header.Del("User-Agent") } parsedURL, err := url.Parse(clientURI) From 48e3f51954defbb3592fedb38e04529f94b3e409 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 13 Aug 2024 16:49:27 +0200 Subject: [PATCH 247/581] cscli: add option --ignore-missing to "bouncers delete", "machines delete" (#3177) * cscli: add option --ignore-missing to "bouncers delete", "machines delete" * lint --- cmd/crowdsec-cli/bouncers.go | 20 +++++++++++++++----- cmd/crowdsec-cli/machines.go | 17 ++++++++++++++--- pkg/database/bouncers.go | 10 +++++++++- pkg/database/machines.go | 22 +++++++++++++++------- test/bats/10_bouncers.bats | 8 ++++++++ test/bats/30_machines.bats | 8 ++++++++ 6 files changed, 69 insertions(+), 16 deletions(-) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index 8057cba98c9..d3edcea0db9 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -344,11 +344,15 @@ func (cli *cliBouncers) 
validBouncerID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli *cliBouncers) delete(bouncers []string) error { +func (cli *cliBouncers) delete(bouncers []string, ignoreMissing bool) error { for _, bouncerID := range bouncers { - err := cli.db.DeleteBouncer(bouncerID) - if err != nil { - return fmt.Errorf("unable to delete bouncer '%s': %w", bouncerID, err) + if err := cli.db.DeleteBouncer(bouncerID); err != nil { + var notFoundErr *database.BouncerNotFoundError + if ignoreMissing && errors.As(err, ¬FoundErr) { + return nil + } + + return fmt.Errorf("unable to delete bouncer: %w", err) } log.Infof("bouncer '%s' deleted successfully", bouncerID) @@ -358,18 +362,24 @@ func (cli *cliBouncers) delete(bouncers []string) error { } func (cli *cliBouncers) newDeleteCmd() *cobra.Command { + var ignoreMissing bool + cmd := &cobra.Command{ Use: "delete MyBouncerName", Short: "delete bouncer(s) from the database", + Example: `cscli bouncers delete "bouncer1" "bouncer2"`, Args: cobra.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, ValidArgsFunction: cli.validBouncerID, RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args) + return cli.delete(args, ignoreMissing) }, } + flags := cmd.Flags() + flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more bouncers don't exist") + return cmd } diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index e50a17f02dc..dcdb1963b49 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -488,10 +488,16 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli *cliMachines) delete(machines []string) error { +func (cli *cliMachines) delete(machines []string, ignoreMissing bool) error { for _, machineID := range machines { if err := cli.db.DeleteWatcher(machineID); err != nil { - 
log.Errorf("unable to delete machine '%s': %s", machineID, err) + var notFoundErr *database.MachineNotFoundError + if ignoreMissing && errors.As(err, ¬FoundErr) { + return nil + } + + log.Errorf("unable to delete machine: %s", err) + return nil } @@ -502,6 +508,8 @@ func (cli *cliMachines) delete(machines []string) error { } func (cli *cliMachines) newDeleteCmd() *cobra.Command { + var ignoreMissing bool + cmd := &cobra.Command{ Use: "delete [machine_name]...", Short: "delete machine(s) by name", @@ -511,10 +519,13 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { DisableAutoGenTag: true, ValidArgsFunction: cli.validMachineID, RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args) + return cli.delete(args, ignoreMissing) }, } + flags := cmd.Flags() + flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more machines don't exist") + return cmd } diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index ff750e63c59..f79e9580afe 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -12,6 +12,14 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) +type BouncerNotFoundError struct { + BouncerName string +} + +func (e *BouncerNotFoundError) Error() string { + return fmt.Sprintf("'%s' does not exist", e.BouncerName) +} + func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -88,7 +96,7 @@ func (c *Client) DeleteBouncer(name string) error { } if nbDeleted == 0 { - return errors.New("bouncer doesn't exist") + return &BouncerNotFoundError{BouncerName: name} } return nil diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 21349b8b687..75b0ee5fdaa 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -21,6 +21,14 @@ const ( CapiListsMachineID = types.ListOrigin ) +type MachineNotFoundError 
struct { + MachineID string +} + +func (e *MachineNotFoundError) Error() string { + return fmt.Sprintf("'%s' does not exist", e.MachineID) +} + func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -168,7 +176,7 @@ func (c *Client) DeleteWatcher(name string) error { } if nbDeleted == 0 { - return errors.New("machine doesn't exist") + return &MachineNotFoundError{MachineID: name} } return nil @@ -197,8 +205,8 @@ func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { return nil } -func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { - _, err := c.Ent.Machine.UpdateOneID(ID). +func (c *Client) UpdateMachineScenarios(scenarios string, id int) error { + _, err := c.Ent.Machine.UpdateOneID(id). SetUpdatedAt(time.Now().UTC()). SetScenarios(scenarios). Save(c.CTX) @@ -209,8 +217,8 @@ func (c *Client) UpdateMachineScenarios(scenarios string, ID int) error { return nil } -func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { - _, err := c.Ent.Machine.UpdateOneID(ID). +func (c *Client) UpdateMachineIP(ipAddr string, id int) error { + _, err := c.Ent.Machine.UpdateOneID(id). SetIpAddress(ipAddr). Save(c.CTX) if err != nil { @@ -220,8 +228,8 @@ func (c *Client) UpdateMachineIP(ipAddr string, ID int) error { return nil } -func (c *Client) UpdateMachineVersion(ipAddr string, ID int) error { - _, err := c.Ent.Machine.UpdateOneID(ID). +func (c *Client) UpdateMachineVersion(ipAddr string, id int) error { + _, err := c.Ent.Machine.UpdateOneID(id). SetVersion(ipAddr). 
Save(c.CTX) if err != nil { diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index a89c9f9dd65..f99913dcee5 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -60,6 +60,14 @@ teardown() { assert_json '{message:"access forbidden"}' } +@test "delete non-existent bouncer" { + # this is a fatal error, which is not consistent with "machines delete" + rune -1 cscli bouncers delete something + assert_stderr --partial "unable to delete bouncer: 'something' does not exist" + rune -0 cscli bouncers delete something --ignore-missing + refute_stderr +} + @test "bouncers delete has autocompletion" { rune -0 cscli bouncers add foo1 rune -0 cscli bouncers add foo2 diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index f8b63fb3173..d4cce67d0b0 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -62,6 +62,14 @@ teardown() { assert_output 1 } +@test "delete non-existent machine" { + # this is not a fatal error, won't halt a script with -e + rune -0 cscli machines delete something + assert_stderr --partial "unable to delete machine: 'something' does not exist" + rune -0 cscli machines delete something --ignore-missing + refute_stderr +} + @test "machines [delete|inspect] has autocompletion" { rune -0 cscli machines add -a -f /dev/null foo1 rune -0 cscli machines add -a -f /dev/null foo2 From 64d2ea0ddb49369a323471f1e4984c502ebe135f Mon Sep 17 00:00:00 2001 From: Alexander Mnich <56564725+a-mnich@users.noreply.github.com> Date: Fri, 16 Aug 2024 21:48:14 +0200 Subject: [PATCH 248/581] add missing ApiKeyAuthorizer to decisions swagger endpoint (#3178) --- pkg/models/localapi_swagger.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index ba14880e7c5..9edfd12b82f 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -160,6 +160,8 @@ paths: description: "400 response" schema: $ref: 
"#/definitions/ErrorResponse" + security: + - APIKeyAuthorizer: [] head: description: Returns information about existing decisions summary: GetDecisions From 2fbc214538615268cf9122f7d9e449b9f96986bc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 19 Aug 2024 09:54:58 +0200 Subject: [PATCH 249/581] CI: use go 1.22.6 (#3171) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index be6fc0b8a73..d1cfb026cdc 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 3fcc5a6a209..e0b6068e44b 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 76342a47603..0d2012708a0 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: "Install bats dependencies" env: diff --git 
a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 87e60b071d5..3c0ae4785ad 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 9860a171233..03cdb4bd871 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 982ecedbb25..42b52490ea8 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index b99291f06b7..5a463bab99c 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index a7c34adfe50..58b8dc61a0d 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 72211cb5180..2f809a29a9b 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.5" + go-version: "1.22.6" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index b9569065137..d30004e4683 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.5-alpine3.20 AS build +FROM golang:1.22.6-alpine3.20 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index a256cfa9ab3..4fda8c26599 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.5-bookworm AS build +FROM golang:1.22.6-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index d73637fff6b..0ceb9e5cffc 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.5' + version: '1.22.6' - pwsh: | choco install -y make From 534fb14f7b91d3b098c4ccd214653e5ac058edb4 Mon Sep 17 
00:00:00 2001 From: blotus Date: Mon, 19 Aug 2024 10:07:06 +0200 Subject: [PATCH 250/581] hide geoip related warnings (#3179) --- pkg/parser/enrich_geoip.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 58732129af8..1756927bc4b 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -25,7 +25,7 @@ func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string } if r == nil { - plog.Warnf("No range found for ip '%s'", field) + plog.Debugf("No range found for ip '%s'", field) return nil, nil } @@ -49,10 +49,15 @@ func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, r, err := exprhelpers.GeoIPASNEnrich(field) if err != nil { - plog.Errorf("Unable to enrich ip '%s'", field) + plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr } + if r == nil { + plog.Debugf("No ASN found for ip '%s'", field) + return nil, nil + } + record, ok := r.(*geoip2.ASN) if !ok { @@ -78,10 +83,15 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string r, err := exprhelpers.GeoIPEnrich(field) if err != nil { - plog.Errorf("Unable to enrich ip '%s'", field) + plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr } + if r == nil { + plog.Debugf("No city found for ip '%s'", field) + return nil, nil + } + record, ok := r.(*geoip2.City) if !ok { From 6b9b19b8c1380617bcec2cf36099d80fc5270c79 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 19 Aug 2024 14:09:17 +0200 Subject: [PATCH 251/581] CI: use golangci-lint 1.60 (#3180) * use golangci-lint 1.60 * lint (gosimple/S1009) -> len(nil) == 0 * nerf govet for now --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 1 + cmd/notification-email/main.go | 2 +- pkg/acquisition/modules/appsec/appsec_runner.go | 2 +- 
pkg/apiserver/middlewares/v1/ocsp.go | 2 +- pkg/cwhub/hub.go | 2 +- pkg/database/machines.go | 2 +- 8 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 5a463bab99c..6e623b26e09 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -57,6 +57,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.59 + version: v1.60 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 58b8dc61a0d..dfdbd8ab0e9 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -158,6 +158,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.59 + version: v1.60 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.golangci.yml b/.golangci.yml index fb1dab623c1..f12d7086b62 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,6 +17,7 @@ linters-settings: disable: - reflectvaluecompare - fieldalignment + - printf maintidx: # raise this after refactoring diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 3b535ae7ffa..2707b7fe1af 100644 --- a/cmd/notification-email/main.go +++ b/cmd/notification-email/main.go @@ -81,7 +81,7 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) ( return nil, errors.New("SMTP host is not set") } - if d.ReceiverEmails == nil || len(d.ReceiverEmails) == 0 { + if len(d.ReceiverEmails) == 0 { return nil, errors.New("receiver emails are not set") } diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index ed49d6a7b41..de34b62d704 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -167,7 +167,7 @@ func (r *AppsecRunner) 
processRequest(tx appsec.ExtendedTransaction, request *ap return nil } - if request.Body != nil && len(request.Body) > 0 { + if len(request.Body) > 0 { in, _, err = request.Tx.WriteRequestBody(request.Body) if err != nil { r.logger.Errorf("unable to write request body : %s", err) diff --git a/pkg/apiserver/middlewares/v1/ocsp.go b/pkg/apiserver/middlewares/v1/ocsp.go index 24557bfda7b..0b6406ad0e7 100644 --- a/pkg/apiserver/middlewares/v1/ocsp.go +++ b/pkg/apiserver/middlewares/v1/ocsp.go @@ -70,7 +70,7 @@ func (oc *OCSPChecker) query(server string, cert *x509.Certificate, issuer *x509 // It returns a boolean indicating if the certificate is revoked and a boolean indicating // if the OCSP check was successful and could be cached. func (oc *OCSPChecker) isRevokedBy(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { - if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { + if len(cert.OCSPServer) == 0 { oc.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") return false, true } diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 1293d6fa235..20a628a493f 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -153,7 +153,7 @@ func (h *Hub) ItemStats() []string { // Update downloads the latest version of the index and writes it to disk if it changed. It cannot be called after Load() // unless the hub is completely empty. func (h *Hub) Update(ctx context.Context) error { - if h.pathIndex != nil && len(h.pathIndex) > 0 { + if len(h.pathIndex) > 0 { // if this happens, it's a bug. 
return errors.New("cannot update hub after items have been loaded") } diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 75b0ee5fdaa..3c8cbabbfa7 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -35,7 +35,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B var heartbeat time.Time - if baseMetrics.Metrics == nil || len(baseMetrics.Metrics) == 0 { + if len(baseMetrics.Metrics) == 0 { heartbeat = time.Now().UTC() } else { heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) From 0e567a9a1867f17f007f24366edaf1536acb5e91 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 20 Aug 2024 09:48:36 +0200 Subject: [PATCH 252/581] CI: go - don't pin minor version (always use latest) (#3181) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index d1cfb026cdc..473af9b1312 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index e0b6068e44b..211d856bc34 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + 
go-version: "1.22" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index 0d2012708a0..aec707f0c03 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index 3c0ae4785ad..c1ae9077310 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 03cdb4bd871..a37aa43e2d0 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 42b52490ea8..2715c6590c3 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 6e623b26e09..e70d6e352f1 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index dfdbd8ab0e9..3709c695231 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: Create localstack streams run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 2f809a29a9b..eeefb801719 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.22" - name: Build the binaries run: | diff --git a/Dockerfile b/Dockerfile index d30004e4683..d4c8978c9ae 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.6-alpine3.20 AS build +FROM golang:1.22-alpine3.20 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 4fda8c26599..73a44e8a4b8 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.6-bookworm AS build +FROM golang:1.22-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0ceb9e5cffc..6051ca67393 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.6' + version: '1.22' - pwsh: | choco install -y make From e7b54c68c549608c48bceae1d4f29486cbb2ab49 Mon Sep 17 00:00:00 2001 
From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 20 Aug 2024 15:47:28 +0200 Subject: [PATCH 253/581] refact (nestif): reduce complexity of pkg/leakybucket (#3139) * refact (nestif): extract functions in pkg/leakybucket/overflows.go * refact (nestif): extract functions in pkg/leakybucket/manager_load.go * lint --- .golangci.yml | 2 +- pkg/leakybucket/manager_load.go | 183 +++++++++++++++++++------------- pkg/leakybucket/overflows.go | 103 ++++++++++-------- 3 files changed, 171 insertions(+), 117 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index f12d7086b62..6da59142691 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -28,7 +28,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 24 + min-complexity: 20 nlreturn: block-size: 5 diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index ca2e4d17d99..1d523759f2b 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -79,84 +79,125 @@ type BucketFactory struct { // we use one NameGenerator for all the future buckets var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) -func ValidateFactory(bucketFactory *BucketFactory) error { - if bucketFactory.Name == "" { - return errors.New("bucket must have name") +func validateLeakyType(bucketFactory *BucketFactory) error { + if bucketFactory.Capacity <= 0 { // capacity must be a positive int + return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) } - if bucketFactory.Description == "" { - return errors.New("description is mandatory") + if bucketFactory.LeakSpeed == "" { + return errors.New("leakspeed can't be empty for leaky") } - if bucketFactory.Type == "leaky" { - if bucketFactory.Capacity <= 0 { // capacity must be a positive int - return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) - } + if bucketFactory.leakspeed == 0 { + return fmt.Errorf("bad leakspeed for leaky '%s'", 
bucketFactory.LeakSpeed) + } - if bucketFactory.LeakSpeed == "" { - return errors.New("leakspeed can't be empty for leaky") - } + return nil +} - if bucketFactory.leakspeed == 0 { - return fmt.Errorf("bad leakspeed for leaky '%s'", bucketFactory.LeakSpeed) - } - } else if bucketFactory.Type == "counter" { - if bucketFactory.Duration == "" { - return errors.New("duration can't be empty for counter") - } +func validateCounterType(bucketFactory *BucketFactory) error { + if bucketFactory.Duration == "" { + return errors.New("duration can't be empty for counter") + } - if bucketFactory.duration == 0 { - return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) - } + if bucketFactory.duration == 0 { + return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) + } - if bucketFactory.Capacity != -1 { - return errors.New("counter bucket must have -1 capacity") - } - } else if bucketFactory.Type == "trigger" { - if bucketFactory.Capacity != 0 { - return errors.New("trigger bucket must have 0 capacity") - } - } else if bucketFactory.Type == "conditional" { - if bucketFactory.ConditionalOverflow == "" { - return errors.New("conditional bucket must have a condition") - } + if bucketFactory.Capacity != -1 { + return errors.New("counter bucket must have -1 capacity") + } - if bucketFactory.Capacity != -1 { - bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows") - } + return nil +} - if bucketFactory.LeakSpeed == "" { - return errors.New("leakspeed can't be empty for conditional bucket") - } +func validateTriggerType(bucketFactory *BucketFactory) error { + if bucketFactory.Capacity != 0 { + return errors.New("trigger bucket must have 0 capacity") + } - if bucketFactory.leakspeed == 0 { - return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed) - } - } else if bucketFactory.Type == "bayesian" { - if 
bucketFactory.BayesianConditions == nil { - return errors.New("bayesian bucket must have bayesian conditions") - } + return nil +} - if bucketFactory.BayesianPrior == 0 { - return errors.New("bayesian bucket must have a valid, non-zero prior") - } +func validateConditionalType(bucketFactory *BucketFactory) error { + if bucketFactory.ConditionalOverflow == "" { + return errors.New("conditional bucket must have a condition") + } - if bucketFactory.BayesianThreshold == 0 { - return errors.New("bayesian bucket must have a valid, non-zero threshold") - } + if bucketFactory.Capacity != -1 { + bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows") + } - if bucketFactory.BayesianPrior > 1 { - return errors.New("bayesian bucket must have a valid, non-zero prior") - } + if bucketFactory.LeakSpeed == "" { + return errors.New("leakspeed can't be empty for conditional bucket") + } - if bucketFactory.BayesianThreshold > 1 { - return errors.New("bayesian bucket must have a valid, non-zero threshold") - } + if bucketFactory.leakspeed == 0 { + return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed) + } - if bucketFactory.Capacity != -1 { - return errors.New("bayesian bucket must have capacity -1") + return nil +} + +func validateBayesianType(bucketFactory *BucketFactory) error { + if bucketFactory.BayesianConditions == nil { + return errors.New("bayesian bucket must have bayesian conditions") + } + + if bucketFactory.BayesianPrior == 0 { + return errors.New("bayesian bucket must have a valid, non-zero prior") + } + + if bucketFactory.BayesianThreshold == 0 { + return errors.New("bayesian bucket must have a valid, non-zero threshold") + } + + if bucketFactory.BayesianPrior > 1 { + return errors.New("bayesian bucket must have a valid, non-zero prior") + } + + if bucketFactory.BayesianThreshold > 1 { + return errors.New("bayesian bucket must have a valid, non-zero 
threshold") + } + + if bucketFactory.Capacity != -1 { + return errors.New("bayesian bucket must have capacity -1") + } + + return nil +} + +func ValidateFactory(bucketFactory *BucketFactory) error { + if bucketFactory.Name == "" { + return errors.New("bucket must have name") + } + + if bucketFactory.Description == "" { + return errors.New("description is mandatory") + } + + switch bucketFactory.Type { + case "leaky": + if err := validateLeakyType(bucketFactory); err != nil { + return err } - } else { + case "counter": + if err := validateCounterType(bucketFactory); err != nil { + return err + } + case "trigger": + if err := validateTriggerType(bucketFactory); err != nil { + return err + } + case "conditional": + if err := validateConditionalType(bucketFactory); err != nil { + return err + } + case "bayesian": + if err := validateBayesianType(bucketFactory); err != nil { + return err + } + default: return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) } @@ -230,8 +271,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = dec.Decode(&bucketFactory) if err != nil { if !errors.Is(err, io.EOF) { - log.Errorf("Bad yaml in %s : %v", f, err) - return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err) + log.Errorf("Bad yaml in %s: %v", f, err) + return nil, nil, fmt.Errorf("bad yaml in %s: %w", f, err) } log.Tracef("End of yaml file") @@ -282,8 +323,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = LoadBucket(&bucketFactory, tomb) if err != nil { - log.Errorf("Failed to load bucket %s : %v", bucketFactory.Name, err) - return nil, nil, fmt.Errorf("loading of %s failed : %v", bucketFactory.Name, err) + log.Errorf("Failed to load bucket %s: %v", bucketFactory.Name, err) + return nil, nil, fmt.Errorf("loading of %s failed: %w", bucketFactory.Name, err) } bucketFactory.orderEvent = orderEvent @@ -326,7 +367,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if 
bucketFactory.LeakSpeed != "" { if bucketFactory.leakspeed, err = time.ParseDuration(bucketFactory.LeakSpeed); err != nil { - return fmt.Errorf("bad leakspeed '%s' in %s : %v", bucketFactory.LeakSpeed, bucketFactory.Filename, err) + return fmt.Errorf("bad leakspeed '%s' in %s: %w", bucketFactory.LeakSpeed, bucketFactory.Filename, err) } } else { bucketFactory.leakspeed = time.Duration(0) @@ -334,7 +375,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Duration != "" { if bucketFactory.duration, err = time.ParseDuration(bucketFactory.Duration); err != nil { - return fmt.Errorf("invalid Duration '%s' in %s : %v", bucketFactory.Duration, bucketFactory.Filename, err) + return fmt.Errorf("invalid Duration '%s' in %s: %w", bucketFactory.Duration, bucketFactory.Filename, err) } } @@ -345,13 +386,13 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { - return fmt.Errorf("invalid filter '%s' in %s : %v", bucketFactory.Filter, bucketFactory.Filename, err) + return fmt.Errorf("invalid filter '%s' in %s: %w", bucketFactory.Filter, bucketFactory.Filename, err) } if bucketFactory.GroupBy != "" { bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { - return fmt.Errorf("invalid groupby '%s' in %s : %v", bucketFactory.GroupBy, bucketFactory.Filename, err) + return fmt.Errorf("invalid groupby '%s' in %s: %w", bucketFactory.GroupBy, bucketFactory.Filename, err) } } @@ -370,7 +411,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { case "bayesian": bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{}) default: - return fmt.Errorf("invalid type '%s' in %s : %v", bucketFactory.Type, bucketFactory.Filename, err) + return fmt.Errorf("invalid type '%s' in %s: %w", bucketFactory.Type, bucketFactory.Filename, err) } if bucketFactory.Distinct != "" { @@ -435,7 +476,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.output = false if err := ValidateFactory(bucketFactory); err != nil { - return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err) + return fmt.Errorf("invalid bucket from %s: %w", bucketFactory.Filename, err) } bucketFactory.tomb = tomb diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 3ee067177ef..e67698e8473 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -19,66 +19,77 @@ import ( // SourceFromEvent extracts and formats a valid models.Source object from an Event func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { - srcs := make(map[string]models.Source) /*if it's already an overflow, we have properly formatted sources. 
we can just twitch them to reflect the requested scope*/ if evt.Type == types.OVFLW { - for k, v := range evt.Overflow.Sources { - /*the scopes are already similar, nothing to do*/ - if leaky.scopeType.Scope == *v.Scope { - srcs[k] = v - continue - } + return overflowEventSources(evt, leaky) + } - /*The bucket requires a decision on scope Range */ - if leaky.scopeType.Scope == types.Range { - /*the original bucket was target IPs, check that we do have range*/ - if *v.Scope == types.Ip { - src := models.Source{} - src.AsName = v.AsName - src.AsNumber = v.AsNumber - src.Cn = v.Cn - src.Latitude = v.Latitude - src.Longitude = v.Longitude - src.Range = v.Range - src.Value = new(string) - src.Scope = new(string) - *src.Scope = leaky.scopeType.Scope - *src.Value = "" - - if v.Range != "" { - *src.Value = v.Range - } + return eventSources(evt, leaky) +} - if leaky.scopeType.RunTimeFilter != nil { - retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) - if err != nil { - return srcs, fmt.Errorf("while running scope filter: %w", err) - } +func overflowEventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { + srcs := make(map[string]models.Source) - value, ok := retValue.(string) - if !ok { - value = "" - } + for k, v := range evt.Overflow.Sources { + /*the scopes are already similar, nothing to do*/ + if leaky.scopeType.Scope == *v.Scope { + srcs[k] = v + continue + } - src.Value = &value + /*The bucket requires a decision on scope Range */ + if leaky.scopeType.Scope == types.Range { + /*the original bucket was target IPs, check that we do have range*/ + if *v.Scope == types.Ip { + src := models.Source{} + src.AsName = v.AsName + src.AsNumber = v.AsNumber + src.Cn = v.Cn + src.Latitude = v.Latitude + src.Longitude = v.Longitude + src.Range = v.Range + src.Value = new(string) + src.Scope = new(string) + *src.Scope = leaky.scopeType.Scope + *src.Value = "" + + if 
v.Range != "" { + *src.Value = v.Range + } + + if leaky.scopeType.RunTimeFilter != nil { + retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) + if err != nil { + return srcs, fmt.Errorf("while running scope filter: %w", err) } - if *src.Value != "" { - srcs[*src.Value] = src - } else { - log.Warningf("bucket %s requires scope Range, but none was provided. It seems that the %s wasn't enriched to include its range.", leaky.Name, *v.Value) + value, ok := retValue.(string) + if !ok { + value = "" } + + src.Value = &value + } + + if *src.Value != "" { + srcs[*src.Value] = src } else { - log.Warningf("bucket %s requires scope Range, but can't extrapolate from %s (%s)", - leaky.Name, *v.Scope, *v.Value) + log.Warningf("bucket %s requires scope Range, but none was provided. It seems that the %s wasn't enriched to include its range.", leaky.Name, *v.Value) } + } else { + log.Warningf("bucket %s requires scope Range, but can't extrapolate from %s (%s)", + leaky.Name, *v.Scope, *v.Value) } } - - return srcs, nil } + return srcs, nil +} + +func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { + srcs := make(map[string]models.Source) + src := models.Source{} switch leaky.scopeType.Scope { @@ -236,9 +247,10 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { // alertFormatSource iterates over the queue to collect sources func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Source, string, error) { - var sources = make(map[string]models.Source) var source_type string + sources := make(map[string]models.Source) + log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, leaky.scopeType.Scope, leaky.scopeType.Filter) for _, evt := range queue.Queue { @@ -299,6 +311,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { StopAt: &stopAt, Simulated: &leaky.Simulated, } + if 
leaky.BucketConfig == nil { return runtimeAlert, errors.New("leaky.BucketConfig is nil") } From 08fdfc4fb07be0d1b13d43b47d8a7185870df7e1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 20 Aug 2024 16:20:40 +0200 Subject: [PATCH 254/581] cscli refact: package 'cliconsole' (#3149) * cscli refact: package 'cliconsole' * dry * lint * lint --- .golangci.yml | 1 - cmd/crowdsec-cli/capi.go | 2 +- cmd/crowdsec-cli/{ => cliconsole}/console.go | 61 ++++++------------- .../{ => cliconsole}/console_table.go | 2 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 2 +- cmd/crowdsec-cli/itemcli.go | 12 ++-- cmd/crowdsec-cli/lapi.go | 2 +- cmd/crowdsec-cli/main.go | 4 +- cmd/crowdsec-cli/messages.go | 23 ------- cmd/crowdsec-cli/reload.go | 6 ++ cmd/crowdsec-cli/reload_freebsd.go | 4 ++ cmd/crowdsec-cli/reload_linux.go | 4 ++ cmd/crowdsec-cli/reload_windows.go | 3 + cmd/crowdsec-cli/simulation.go | 2 +- pkg/acquisition/modules/appsec/utils.go | 49 +++++++++------ pkg/acquisition/modules/file/file.go | 5 +- pkg/alertcontext/alertcontext.go | 10 +-- pkg/apiclient/auth_service_test.go | 2 +- pkg/longpollclient/client.go | 2 +- 19 files changed, 90 insertions(+), 106 deletions(-) rename cmd/crowdsec-cli/{ => cliconsole}/console.go (87%) rename cmd/crowdsec-cli/{ => cliconsole}/console_table.go (98%) delete mode 100644 cmd/crowdsec-cli/messages.go create mode 100644 cmd/crowdsec-cli/reload.go create mode 100644 cmd/crowdsec-cli/reload_freebsd.go create mode 100644 cmd/crowdsec-cli/reload_linux.go create mode 100644 cmd/crowdsec-cli/reload_windows.go diff --git a/.golangci.yml b/.golangci.yml index 6da59142691..2b216259770 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -17,7 +17,6 @@ linters-settings: disable: - reflectvaluecompare - fieldalignment - - printf maintidx: # raise this after refactoring diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 1888aa3545a..589b36adade 100644 --- a/cmd/crowdsec-cli/capi.go +++ 
b/cmd/crowdsec-cli/capi.go @@ -119,7 +119,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { fmt.Println(string(apiConfigDump)) } - log.Warning(ReloadMessage()) + log.Warning(reloadMessage) return nil } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/cliconsole/console.go similarity index 87% rename from cmd/crowdsec-cli/console.go rename to cmd/crowdsec-cli/cliconsole/console.go index 979c9f0ea60..666afbba07f 100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -1,4 +1,4 @@ -package main +package cliconsole import ( "context" @@ -28,13 +28,17 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter func() *csconfig.Config + type cliConsole struct { - cfg configGetter + cfg func() *csconfig.Config + reloadMessage string } -func NewCLIConsole(cfg configGetter) *cliConsole { +func New(cfg configGetter, reloadMessage string) *cliConsole { return &cliConsole{ - cfg: cfg, + cfg: cfg, + reloadMessage: reloadMessage, } } @@ -221,7 +225,7 @@ Enable given information push to the central API. 
Allows to empower the console` log.Infof("%v have been enabled", args) } - log.Infof(ReloadMessage()) + log.Info(cli.reloadMessage) return nil }, @@ -255,7 +259,7 @@ Disable given information push to the central API.`, log.Infof("%v have been disabled", args) } - log.Infof(ReloadMessage()) + log.Info(cli.reloadMessage) return nil }, @@ -348,13 +352,8 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { switch arg { case csconfig.CONSOLE_MANAGEMENT: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ConsoleManagement != nil { - if *consoleCfg.ConsoleManagement == wanted { - log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - } else { - log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - *consoleCfg.ConsoleManagement = wanted - } + if consoleCfg.ConsoleManagement != nil && *consoleCfg.ConsoleManagement == wanted { + log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) } else { log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) consoleCfg.ConsoleManagement = ptr.Of(wanted) @@ -386,52 +385,32 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { } case csconfig.SEND_CUSTOM_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareCustomScenarios != nil { - if *consoleCfg.ShareCustomScenarios == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - *consoleCfg.ShareCustomScenarios = wanted - } + if consoleCfg.ShareCustomScenarios != nil && *consoleCfg.ShareCustomScenarios == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) consoleCfg.ShareCustomScenarios = ptr.Of(wanted) } case csconfig.SEND_TAINTED_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if 
consoleCfg.ShareTaintedScenarios != nil { - if *consoleCfg.ShareTaintedScenarios == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - *consoleCfg.ShareTaintedScenarios = wanted - } + if consoleCfg.ShareTaintedScenarios != nil && *consoleCfg.ShareTaintedScenarios == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) consoleCfg.ShareTaintedScenarios = ptr.Of(wanted) } case csconfig.SEND_MANUAL_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareManualDecisions != nil { - if *consoleCfg.ShareManualDecisions == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - *consoleCfg.ShareManualDecisions = wanted - } + if consoleCfg.ShareManualDecisions != nil && *consoleCfg.ShareManualDecisions == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) consoleCfg.ShareManualDecisions = ptr.Of(wanted) } case csconfig.SEND_CONTEXT: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareContext != nil { - if *consoleCfg.ShareContext == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) - *consoleCfg.ShareContext = wanted - } + if consoleCfg.ShareContext != nil && *consoleCfg.ShareContext == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) consoleCfg.ShareContext = ptr.Of(wanted) diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/cliconsole/console_table.go similarity 
index 98% rename from cmd/crowdsec-cli/console_table.go rename to cmd/crowdsec-cli/cliconsole/console_table.go index 94976618573..8f17b97860a 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/cliconsole/console_table.go @@ -1,4 +1,4 @@ -package main +package cliconsole import ( "io" diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 62e68b6bc41..bc0da152d6d 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -129,7 +129,7 @@ func (*statBouncer) Description() (string, string) { func logWarningOnce(warningsLogged map[string]bool, msg string) { if _, ok := warningsLogged[msg]; !ok { - log.Warningf(msg) + log.Warning(msg) warningsLogged[msg] = true } diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 64c18ae89b1..3f789a14ded 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -78,7 +78,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool return errors.New(msg) } - log.Errorf(msg) + log.Error(msg) continue } @@ -92,7 +92,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool } } - log.Infof(ReloadMessage()) + log.Info(reloadMessage) return nil } @@ -175,7 +175,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } return nil @@ -217,7 +217,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } return nil @@ -283,7 +283,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b log.Infof("Updated %d %s", updated, cli.name) if updated > 0 { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } return 
nil @@ -314,7 +314,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b } if updated > 0 { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } return nil diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 0b8bc59dad5..df4f0a98188 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -161,7 +161,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e fmt.Printf("%s\n", string(apiConfigDump)) } - log.Warning(ReloadMessage()) + log.Warning(reloadMessage) return nil } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index d4046414030..da955923962 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,8 +14,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -262,7 +262,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLICapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) - cmd.AddCommand(NewCLIConsole(cli.cfg).NewCommand()) + cmd.AddCommand(cliconsole.New(cli.cfg, reloadMessage).NewCommand()) cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/messages.go b/cmd/crowdsec-cli/messages.go deleted file mode 100644 index 02f051601e4..00000000000 --- a/cmd/crowdsec-cli/messages.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "fmt" - "runtime" -) - -// ReloadMessage returns a description of the task required to reload -// the crowdsec configuration, according to the operating system. 
-func ReloadMessage() string { - var msg string - - switch runtime.GOOS { - case "windows": - msg = "Please restart the crowdsec service" - case "freebsd": - msg = `Run 'sudo service crowdsec reload'` - default: - msg = `Run 'sudo systemctl reload crowdsec'` - } - - return fmt.Sprintf("%s for the new configuration to be effective.", msg) -} diff --git a/cmd/crowdsec-cli/reload.go b/cmd/crowdsec-cli/reload.go new file mode 100644 index 00000000000..8dd59be8d05 --- /dev/null +++ b/cmd/crowdsec-cli/reload.go @@ -0,0 +1,6 @@ +//go:build !windows && !freebsd && !linux + +package main + +// generic message since we don't know the platform +const reloadMessage = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_freebsd.go b/cmd/crowdsec-cli/reload_freebsd.go new file mode 100644 index 00000000000..991d3ea6080 --- /dev/null +++ b/cmd/crowdsec-cli/reload_freebsd.go @@ -0,0 +1,4 @@ +package main + +// actually sudo is not that popular on freebsd, but this will do +const reloadMessage = "Run 'sudo service crowdsec reload' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_linux.go b/cmd/crowdsec-cli/reload_linux.go new file mode 100644 index 00000000000..a74adfbcdfd --- /dev/null +++ b/cmd/crowdsec-cli/reload_linux.go @@ -0,0 +1,4 @@ +package main + +// assume systemd, although gentoo and others may differ +const reloadMessage = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_windows.go b/cmd/crowdsec-cli/reload_windows.go new file mode 100644 index 00000000000..ec9a0b10741 --- /dev/null +++ b/cmd/crowdsec-cli/reload_windows.go @@ -0,0 +1,3 @@ +package main + +const reloadMessage = "Please restart the crowdsec service for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index f8d8a660b8c..12c9980d588 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -44,7 +44,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { if cmd.Name() != "status" { - log.Infof(ReloadMessage()) + log.Info(reloadMessage) } }, } diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 15de8046716..4fb1a979d14 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -40,14 +40,16 @@ func appendMeta(meta models.Meta, key string, value string) models.Meta { Key: key, Value: value, }) + return meta } func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { - //if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI + // if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI if !inEvt.Appsec.HasInBandMatches { return nil, nil } + evt := types.Event{} evt.Type = types.APPSEC evt.Process = true @@ -105,7 +107,6 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { evtRule.Meta = make(models.Meta, 0) for _, key := range appsecMetaKeys { - if tmpAppsecContext[key] == nil { tmpAppsecContext[key] = make([]string, 0) } @@ -113,18 +114,21 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { switch value := matched_rule[key].(type) { case string: evtRule.Meta = appendMeta(evtRule.Meta, key, value) + if value != "" && !slices.Contains(tmpAppsecContext[key], value) { tmpAppsecContext[key] = append(tmpAppsecContext[key], value) } case int: val := strconv.Itoa(value) evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { tmpAppsecContext[key] = append(tmpAppsecContext[key], val) } case []string: for _, v := range value { evtRule.Meta = 
appendMeta(evtRule.Meta, key, v) + if v != "" && !slices.Contains(tmpAppsecContext[key], v) { tmpAppsecContext[key] = append(tmpAppsecContext[key], v) } @@ -133,20 +137,21 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { for _, v := range value { val := strconv.Itoa(v) evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { tmpAppsecContext[key] = append(tmpAppsecContext[key], val) } - } default: val := fmt.Sprintf("%v", value) evtRule.Meta = appendMeta(evtRule.Meta, key, val) + if val != "" && !slices.Contains(tmpAppsecContext[key], val) { tmpAppsecContext[key] = append(tmpAppsecContext[key], val) } - } } + alert.Events = append(alert.Events, &evtRule) } @@ -159,7 +164,7 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { valueStr, err := alertcontext.TruncateContext(values, alertcontext.MaxContextValueLen) if err != nil { - log.Warningf(err.Error()) + log.Warning(err.Error()) } meta := models.MetaItems0{ @@ -185,15 +190,16 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { alert.StopAt = ptr.Of(time.Now().UTC().Format(time.RFC3339)) evt.Overflow.APIAlerts = []models.Alert{alert} evt.Overflow.Alert = &alert + return &evt, nil } func EventFromRequest(r *appsec.ParsedRequest, labels map[string]string) (types.Event, error) { evt := types.Event{} - //we might want to change this based on in-band vs out-of-band ? + // we might want to change this based on in-band vs out-of-band ? evt.Type = types.LOG evt.ExpectMode = types.LIVE - //def needs fixing + // def needs fixing evt.Stage = "s00-raw" evt.Parsed = map[string]string{ "source_ip": r.ClientIP, @@ -203,19 +209,19 @@ func EventFromRequest(r *appsec.ParsedRequest, labels map[string]string) (types. 
"req_uuid": r.Tx.ID(), "source": "crowdsec-appsec", "remediation_cmpt_ip": r.RemoteAddrNormalized, - //TBD: - //http_status - //user_agent + // TBD: + // http_status + // user_agent } evt.Line = types.Line{ Time: time.Now(), - //should we add some info like listen addr/port/path ? + // should we add some info like listen addr/port/path ? Labels: labels, Process: true, Module: "appsec", Src: "appsec", - Raw: "dummy-appsec-data", //we discard empty Line.Raw items :) + Raw: "dummy-appsec-data", // we discard empty Line.Raw items :) } evt.Appsec = types.AppsecEvent{} @@ -247,29 +253,29 @@ func LogAppsecEvent(evt *types.Event, logger *log.Entry) { "target_uri": req, }).Debugf("%s triggered non-blocking rules on %s (%d rules) [%v]", evt.Parsed["source_ip"], req, len(evt.Appsec.MatchedRules), evt.Appsec.GetRuleIDs()) } - } func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedRequest) error { - if evt == nil { - //an error was already emitted, let's not spam the logs + // an error was already emitted, let's not spam the logs return nil } if !req.Tx.IsInterrupted() { - //if the phase didn't generate an interruption, we don't have anything to add to the event + // if the phase didn't generate an interruption, we don't have anything to add to the event return nil } - //if one interruption was generated, event is good for processing :) + // if one interruption was generated, event is good for processing :) evt.Process = true if evt.Meta == nil { evt.Meta = map[string]string{} } + if evt.Parsed == nil { evt.Parsed = map[string]string{} } + if req.IsInBand { evt.Meta["appsec_interrupted"] = "true" evt.Meta["appsec_action"] = req.Tx.Interruption().Action @@ -290,9 +296,11 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR if variable.Key() != "" { key += "." 
+ variable.Key() } + if variable.Value() == "" { continue } + for _, collectionToKeep := range r.AppsecRuntime.CompiledVariablesTracking { match := collectionToKeep.MatchString(key) if match { @@ -303,6 +311,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR } } } + return true }) @@ -325,11 +334,12 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR ruleNameProm := fmt.Sprintf("%d", rule.Rule().ID()) if details, ok := appsec.AppsecRulesDetails[rule.Rule().ID()]; ok { - //Only set them for custom rules, not for rules written in seclang + // Only set them for custom rules, not for rules written in seclang name = details.Name version = details.Version hash = details.Hash ruleNameProm = details.Name + r.logger.Debugf("custom rule for event, setting name: %s, version: %s, hash: %s", name, version, hash) } else { name = fmt.Sprintf("native_rule:%d", rule.Rule().ID()) @@ -338,12 +348,15 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR AppsecRuleHits.With(prometheus.Labels{"rule_name": ruleNameProm, "type": kind, "source": req.RemoteAddrNormalized, "appsec_engine": req.AppsecEngine}).Inc() matchedZones := make([]string, 0) + for _, matchData := range rule.MatchedDatas() { zone := matchData.Variable().Name() + varName := matchData.Key() if varName != "" { zone += "." 
+ varName } + matchedZones = append(matchedZones, zone) } diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index c36672507db..34a7052f46f 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -385,7 +385,6 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er } filink, err := os.Lstat(file) - if err != nil { f.logger.Errorf("Could not lstat() new file %s, ignoring it : %s", file, err) continue @@ -578,7 +577,7 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai errMsg = fmt.Sprintf(errMsg+" : %s", err) } - logger.Warningf(errMsg) + logger.Warning(errMsg) return nil case line := <-tail.Lines: @@ -629,8 +628,8 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom var scanner *bufio.Scanner logger := f.logger.WithField("oneshot", filename) - fd, err := os.Open(filename) + fd, err := os.Open(filename) if err != nil { return fmt.Errorf("failed opening %s: %w", filename, err) } diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index c502def32cd..16ebc6d0ac2 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -32,7 +32,7 @@ func ValidateContextExpr(key string, expressions []string) error { for _, expression := range expressions { _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { - return fmt.Errorf("compilation of '%s' failed: %v", expression, err) + return fmt.Errorf("compilation of '%s' failed: %w", expression, err) } } @@ -74,7 +74,7 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { for _, value := range values { valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { - return fmt.Errorf("compilation of '%s' context value failed: %v", value, err) + return fmt.Errorf("compilation of '%s' context value failed: %w", value, err) } alertContext.ContextToSendCompiled[key] = append(alertContext.ContextToSendCompiled[key], valueCompiled) @@ -133,7 +133,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { output, err := expr.Run(value, map[string]interface{}{"evt": evt}) if err != nil { - errors = append(errors, fmt.Errorf("failed to get value for %s : %v", key, err)) + errors = append(errors, fmt.Errorf("failed to get value for %s: %w", key, err)) continue } @@ -143,7 +143,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { case int: val = strconv.Itoa(out) default: - errors = append(errors, fmt.Errorf("unexpected return type for %s : %T", key, output)) + errors = append(errors, fmt.Errorf("unexpected return type for %s: %T", key, output)) continue } @@ -161,7 +161,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { valueStr, err := TruncateContext(values, alertContext.ContextValueLen) if err != nil { - log.Warningf(err.Error()) + log.Warning(err.Error()) } meta := models.MetaItems0{ diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index 3e887149a98..6c9abc0edef 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -161,7 +161,7 @@ func TestWatcherAuth(t *testing.T) { bodyBytes, err := io.ReadAll(resp.Response.Body) require.NoError(t, err) - log.Printf(string(bodyBytes)) + log.Print(string(bodyBytes)) t.Fatalf("The AuthenticateWatcher function should have returned an error for the response code %d", errorCodeToTest) } diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 9fa3b4b3f9a..0603b7a5e80 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -95,7 +95,7 @@ func (c *LongPollClient) poll() error { logger.Errorf("failed to read response 
body: %s", err) return err } - logger.Errorf(string(bodyContent)) + logger.Error(string(bodyContent)) return errUnauthorized } return fmt.Errorf("unexpected status code: %d", resp.StatusCode) From 3d27e83bf5db10895bc7c87ecff93f13cef5d067 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 20 Aug 2024 17:36:07 +0200 Subject: [PATCH 255/581] pkg/cwhub: improve support for k8s config maps with custom items (#3154) * pkg/cwhub: improve support for k8s config maps as custom items - allow links to links - ignore hidden ..data directories, but allow links to their content * allow any number of subdirectories in /etc/crowdsec/{hubtype} * item name as subdir/file.yaml * improve func test * lint --- cmd/crowdsec-cli/capi.go | 7 +- pkg/cwhub/errors.go | 6 +- pkg/cwhub/relativepath.go | 28 +++++ pkg/cwhub/relativepath_test.go | 72 ++++++++++++ pkg/cwhub/sync.go | 198 ++++++++++++++++++++++----------- pkg/metabase/metabase.go | 4 +- test/bats/20_hub_items.bats | 92 ++++++++++++++- 7 files changed, 322 insertions(+), 85 deletions(-) create mode 100644 pkg/cwhub/relativepath.go create mode 100644 pkg/cwhub/relativepath_test.go diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index 589b36adade..ac921ea547f 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -21,11 +21,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const ( - CAPIBaseURL = "https://api.crowdsec.net/" - CAPIURLPrefix = "v3" -) - type cliCapi struct { cfg configGetter } @@ -78,7 +73,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { Password: password, UserAgent: cwversion.UserAgent(), URL: apiurl, - VersionPrefix: CAPIURLPrefix, + VersionPrefix: "v3", }, nil) if err != nil { return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) diff --git a/pkg/cwhub/errors.go b/pkg/cwhub/errors.go index f1e779b5476..b0be444fcba 100644 --- a/pkg/cwhub/errors.go +++ b/pkg/cwhub/errors.go @@ 
-5,10 +5,8 @@ import ( "fmt" ) -var ( - // ErrNilRemoteHub is returned when trying to download with a local-only configuration. - ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") -) +// ErrNilRemoteHub is returned when trying to download with a local-only configuration. +var ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") // IndexNotFoundError is returned when the remote hub index is not found. type IndexNotFoundError struct { diff --git a/pkg/cwhub/relativepath.go b/pkg/cwhub/relativepath.go new file mode 100644 index 00000000000..bcd4c576840 --- /dev/null +++ b/pkg/cwhub/relativepath.go @@ -0,0 +1,28 @@ +package cwhub + +import ( + "path/filepath" + "strings" +) + +// relativePathComponents returns the list of path components after baseDir. +// If path is not inside baseDir, it returns an empty slice. +func relativePathComponents(path string, baseDir string) []string { + absPath, err := filepath.Abs(path) + if err != nil { + return []string{} + } + + absBaseDir, err := filepath.Abs(baseDir) + if err != nil { + return []string{} + } + + // is path inside baseDir? + relPath, err := filepath.Rel(absBaseDir, absPath) + if err != nil || strings.HasPrefix(relPath, "..") || relPath == "." 
{ + return []string{} + } + + return strings.Split(relPath, string(filepath.Separator)) +} diff --git a/pkg/cwhub/relativepath_test.go b/pkg/cwhub/relativepath_test.go new file mode 100644 index 00000000000..11eba566064 --- /dev/null +++ b/pkg/cwhub/relativepath_test.go @@ -0,0 +1,72 @@ +package cwhub + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRelativePathComponents(t *testing.T) { + tests := []struct { + name string + path string + baseDir string + expected []string + }{ + { + name: "Path within baseDir", + path: "/home/user/project/src/file.go", + baseDir: "/home/user/project", + expected: []string{"src", "file.go"}, + }, + { + name: "Path is baseDir", + path: "/home/user/project", + baseDir: "/home/user/project", + expected: []string{}, + }, + { + name: "Path outside baseDir", + path: "/home/user/otherproject/src/file.go", + baseDir: "/home/user/project", + expected: []string{}, + }, + { + name: "Path is subdirectory of baseDir", + path: "/home/user/project/src/", + baseDir: "/home/user/project", + expected: []string{"src"}, + }, + { + name: "Relative paths", + path: "project/src/file.go", + baseDir: "project", + expected: []string{"src", "file.go"}, + }, + { + name: "BaseDir with trailing slash", + path: "/home/user/project/src/file.go", + baseDir: "/home/user/project/", + expected: []string{"src", "file.go"}, + }, + { + name: "Empty baseDir", + path: "/home/user/project/src/file.go", + baseDir: "", + expected: []string{}, + }, + { + name: "Empty path", + path: "", + baseDir: "/home/user/project", + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := relativePathComponents(tt.path, tt.baseDir) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 38bb376ae3b..81d41d55971 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -20,22 +20,49 @@ func isYAMLFileName(path string) bool { return 
strings.HasSuffix(path, ".yaml") || strings.HasSuffix(path, ".yml") } -// linkTarget returns the target of a symlink, or empty string if it's dangling. -func linkTarget(path string, logger *logrus.Logger) (string, error) { - hubpath, err := os.Readlink(path) - if err != nil { - return "", fmt.Errorf("unable to read symlink: %s", path) +// resolveSymlink returns the ultimate target path of a symlink +// returns error if the symlink is dangling or too many symlinks are followed +func resolveSymlink(path string) (string, error) { + const maxSymlinks = 10 // Prevent infinite loops + for i := 0; i < maxSymlinks; i++ { + fi, err := os.Lstat(path) + if err != nil { + return "", err // dangling link + } + + if fi.Mode()&os.ModeSymlink == 0 { + // found the target + return path, nil + } + + path, err = os.Readlink(path) + if err != nil { + return "", err + } + + // relative to the link's directory? + if !filepath.IsAbs(path) { + path = filepath.Join(filepath.Dir(path), path) + } } - logger.Tracef("symlink %s -> %s", path, hubpath) + return "", errors.New("too many levels of symbolic links") +} - _, err = os.Lstat(hubpath) - if os.IsNotExist(err) { - logger.Warningf("link target does not exist: %s -> %s", path, hubpath) - return "", nil +// isPathInside checks if a path is inside the given directory +// it can return false negatives if the filesystem is case insensitive +func isPathInside(path, dir string) (bool, error) { + absFilePath, err := filepath.Abs(path) + if err != nil { + return false, err + } + + absDir, err := filepath.Abs(dir) + if err != nil { + return false, err } - return hubpath, nil + return strings.HasPrefix(absFilePath, absDir), nil } // information used to create a new Item, from a file path. 
@@ -53,58 +80,76 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo hubDir := h.local.HubDir installDir := h.local.InstallDir - subs := strings.Split(path, string(os.PathSeparator)) + subsHub := relativePathComponents(path, hubDir) + subsInstall := relativePathComponents(path, installDir) - logger.Tracef("path:%s, hubdir:%s, installdir:%s", path, hubDir, installDir) - logger.Tracef("subs:%v", subs) - // we're in hub (~/.hub/hub/) - if strings.HasPrefix(path, hubDir) { + switch { + case len(subsHub) > 0: logger.Tracef("in hub dir") - // .../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml - // .../hub/scenarios/crowdsec/ssh_bf.yaml - // .../hub/profiles/crowdsec/linux.yaml - if len(subs) < 4 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml + // .../hub/scenarios/crowdsecurity/ssh_bf.yaml + // .../hub/profiles/crowdsecurity/linux.yaml + if len(subsHub) < 3 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) + } + + ftype := subsHub[0] + if !slices.Contains(ItemTypes, ftype) { + // this doesn't really happen anymore, because we only scan the {hubtype} directories + return nil, fmt.Errorf("unknown configuration type '%s'", ftype) + } + + stage := "" + fauthor := subsHub[1] + fname := subsHub[2] + + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subsHub[1] + fauthor = subsHub[2] + fname = subsHub[3] } ret = &itemFileInfo{ inhub: true, - fname: subs[len(subs)-1], - fauthor: subs[len(subs)-2], - stage: subs[len(subs)-3], - ftype: subs[len(subs)-4], + ftype: ftype, + stage: stage, + fauthor: fauthor, + fname: fname, } - } else if strings.HasPrefix(path, installDir) { // we're in install /etc/crowdsec//... 
+ + case len(subsInstall) > 0: logger.Tracef("in install dir") - if len(subs) < 3 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) - } // .../config/parser/stage/file.yaml // .../config/postoverflow/stage/file.yaml // .../config/scenarios/scenar.yaml // .../config/collections/linux.yaml //file is empty - ret = &itemFileInfo{ - inhub: false, - fname: subs[len(subs)-1], - stage: subs[len(subs)-2], - ftype: subs[len(subs)-3], - fauthor: "", + + if len(subsInstall) < 2 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsInstall)) } - } else { - return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) - } - logger.Tracef("stage:%s ftype:%s", ret.stage, ret.ftype) + // this can be in any number of subdirs, we join them to compose the item name + + ftype := subsInstall[0] + stage := "" + fname := strings.Join(subsInstall[1:], "/") - if ret.ftype != PARSERS && ret.ftype != POSTOVERFLOWS { - if !slices.Contains(ItemTypes, ret.stage) { - return nil, errors.New("unknown configuration type") + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subsInstall[1] + fname = strings.Join(subsInstall[2:], "/") } - ret.ftype = ret.stage - ret.stage = "" + ret = &itemFileInfo{ + inhub: false, + ftype: ftype, + stage: stage, + fauthor: "", + fname: fname, + } + default: + return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) } logger.Tracef("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", ret.fname, ret.fauthor, ret.stage, ret.ftype) @@ -176,8 +221,6 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { } func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { - hubpath := "" - if err != nil { h.logger.Debugf("while syncing hub dir: %s", err) // there is a path error, we ignore the file @@ -190,8 +233,26 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, 
err error) error { return err } + // permission errors, files removed while reading, etc. + if f == nil { + return nil + } + + if f.IsDir() { + // if a directory starts with a dot, we don't traverse it + // - single dot prefix is hidden by unix convention + // - double dot prefix is used by k8s to mount config maps + if strings.HasPrefix(f.Name(), ".") { + h.logger.Tracef("skipping hidden directory %s", path) + return filepath.SkipDir + } + + // keep traversing + return nil + } + // we only care about YAML files - if f == nil || f.IsDir() || !isYAMLFileName(f.Name()) { + if !isYAMLFileName(f.Name()) { return nil } @@ -201,35 +262,38 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { return nil } - // non symlinks are local user files or hub files - if f.Type()&os.ModeSymlink == 0 { - h.logger.Tracef("%s is not a symlink", path) - - if !info.inhub { - h.logger.Tracef("%s is a local file, skip", path) + // follow the link to see if it falls in the hub directory + // if it's not a link, target == path + target, err := resolveSymlink(path) + if err != nil { + // target does not exist, the user might have removed the file + // or switched to a hub branch without it; or symlink loop + h.logger.Warningf("Ignoring file %s: %s", path, err) + return nil + } - item, err := newLocalItem(h, path, info) - if err != nil { - return err - } + targetInHub, err := isPathInside(target, h.local.HubDir) + if err != nil { + h.logger.Warningf("Ignoring file %s: %s", path, err) + return nil + } - h.addItem(item) + // local (custom) item if the file or link target is not inside the hub dir + if !targetInHub { + h.logger.Tracef("%s is a local file, skip", path) - return nil - } - } else { - hubpath, err = linkTarget(path, h.logger) + item, err := newLocalItem(h, path, info) if err != nil { return err } - if hubpath == "" { - // target does not exist, the user might have removed the file - // or switched to a hub branch without it - return nil - } + h.addItem(item) + + 
return nil } + hubpath := target + // try to find which configuration item it is h.logger.Tracef("check [%s] of %s", info.fname, info.ftype) diff --git a/pkg/metabase/metabase.go b/pkg/metabase/metabase.go index 837bab796d5..324a05666a1 100644 --- a/pkg/metabase/metabase.go +++ b/pkg/metabase/metabase.go @@ -70,12 +70,12 @@ func (m *Metabase) Init(containerName string, image string) error { switch m.Config.Database.Type { case "mysql": - return fmt.Errorf("'mysql' is not supported yet for cscli dashboard") + return errors.New("'mysql' is not supported yet for cscli dashboard") //DBConnectionURI = fmt.Sprintf("MB_DB_CONNECTION_URI=mysql://%s:%d/%s?user=%s&password=%s&allowPublicKeyRetrieval=true", remoteDBAddr, m.Config.Database.Port, m.Config.Database.DbName, m.Config.Database.User, m.Config.Database.Password) case "sqlite": m.InternalDBURL = metabaseSQLiteDBURL case "postgresql", "postgres", "pgsql": - return fmt.Errorf("'postgresql' is not supported yet by cscli dashboard") + return errors.New("'postgresql' is not supported yet by cscli dashboard") default: return fmt.Errorf("database '%s' not supported", m.Config.Database.Type) } diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 214d07d927f..4b390c90ed4 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -176,7 +176,7 @@ teardown() { rune -0 mkdir -p "$CONFIG_DIR/collections" rune -0 ln -s /this/does/not/exist.yaml "$CONFIG_DIR/collections/foobar.yaml" rune -0 cscli hub list - assert_stderr --partial "link target does not exist: $CONFIG_DIR/collections/foobar.yaml -> /this/does/not/exist.yaml" + assert_stderr --partial "Ignoring file $CONFIG_DIR/collections/foobar.yaml: lstat /this/does/not/exist.yaml: no such file or directory" rune -0 cscli hub list -o json rune -0 jq '.collections' <(output) assert_json '[]' @@ -194,9 +194,89 @@ teardown() { assert_output 'false' } -@test "skip files if we can't guess their type" { - rune -0 mkdir -p 
"$CONFIG_DIR/scenarios/foo" - rune -0 touch "$CONFIG_DIR/scenarios/foo/bar.yaml" - rune -0 cscli hub list - assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/foo/bar.yaml: unknown configuration type" +@test "don't traverse hidden directories (starting with a dot)" { + rune -0 mkdir -p "$CONFIG_DIR/scenarios/.foo" + rune -0 touch "$CONFIG_DIR/scenarios/.foo/bar.yaml" + rune -0 cscli hub list --trace + assert_stderr --partial "skipping hidden directory $CONFIG_DIR/scenarios/.foo" +} + +@test "allow symlink to target inside a hidden directory" { + # k8s config maps use hidden directories and links when mounted + rune -0 mkdir -p "$CONFIG_DIR/scenarios/.foo" + + # ignored + rune -0 touch "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # real file + rune -0 touch "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 1 + + rune -0 rm "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # link to ignored is not ignored, and the name comes from the link + rune -0 ln -s "$CONFIG_DIR/scenarios/.foo/hidden.yaml" "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq -c '[.scenarios[].name] | sort' <(output) + assert_json '["myfoo.yaml"]' +} + +@test "item files can be links to links" { + rune -0 mkdir -p "$CONFIG_DIR"/scenarios/{.foo,.bar} + + rune -0 ln -s "$CONFIG_DIR/scenarios/.foo/hidden.yaml" "$CONFIG_DIR/scenarios/.bar/hidden.yaml" + + # link to a danling link + rune -0 ln -s "$CONFIG_DIR/scenarios/.bar/hidden.yaml" "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list + assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/myfoo.yaml: lstat $CONFIG_DIR/scenarios/.foo/hidden.yaml: no such file or directory" + rune -0 cscli scenarios list -o json 
+ rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # detect link loops + rune -0 ln -s "$CONFIG_DIR/scenarios/.bar/hidden.yaml" "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list + assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/myfoo.yaml: too many levels of symbolic links" + + rune -0 rm "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 touch "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 1 +} + +@test "item files can be in a subdirectory" { + rune -0 mkdir -p "$CONFIG_DIR/scenarios/sub/sub2/sub3" + rune -0 touch "$CONFIG_DIR/scenarios/sub/imlocal.yaml" + # subdir name is now part of the item name + rune -0 cscli scenarios inspect sub/imlocal.yaml -o json + rune -0 jq -e '[.tainted,.local==false,true]' <(output) + rune -0 rm "$CONFIG_DIR/scenarios/sub/imlocal.yaml" + + rune -0 ln -s "$HUB_DIR/scenarios/crowdsecurity/smb-bf.yaml" "$CONFIG_DIR/scenarios/sub/smb-bf.yaml" + rune -0 cscli scenarios inspect crowdsecurity/smb-bf -o json + rune -0 jq -e '[.tainted,.local==false,false]' <(output) + rune -0 rm "$CONFIG_DIR/scenarios/sub/smb-bf.yaml" + + rune -0 ln -s "$HUB_DIR/scenarios/crowdsecurity/smb-bf.yaml" "$CONFIG_DIR/scenarios/sub/sub2/sub3/smb-bf.yaml" + rune -0 cscli scenarios inspect crowdsecurity/smb-bf -o json + rune -0 jq -e '[.tainted,.local==false,false]' <(output) +} + +@test "same file name for local items in different subdirectories" { + rune -0 mkdir -p "$CONFIG_DIR"/scenarios/{foo,bar} + rune -0 touch "$CONFIG_DIR/scenarios/foo/local.yaml" + rune -0 touch "$CONFIG_DIR/scenarios/bar/local.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq -c '[.scenarios[].name] | sort' <(output) + assert_json '["bar/local.yaml","foo/local.yaml"]' } From 429418ffc6f1ef1bbae7c9f73c36abe4848c7151 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 21 Aug 2024 10:24:18 +0200 Subject: 
[PATCH 256/581] cscli refact: package 'cliexplain' (#3151) --- cmd/crowdsec-cli/{ => cliexplain}/explain.go | 19 ++++++---- cmd/crowdsec-cli/main.go | 39 ++++++++++---------- 2 files changed, 32 insertions(+), 26 deletions(-) rename cmd/crowdsec-cli/{ => cliexplain}/explain.go (93%) diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/cliexplain/explain.go similarity index 93% rename from cmd/crowdsec-cli/explain.go rename to cmd/crowdsec-cli/cliexplain/explain.go index c322cce47fe..182e34a12a5 100644 --- a/cmd/crowdsec-cli/explain.go +++ b/cmd/crowdsec-cli/cliexplain/explain.go @@ -1,4 +1,4 @@ -package main +package cliexplain import ( "bufio" @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) @@ -40,9 +41,12 @@ func getLineCountForFile(filepath string) (int, error) { return lc, nil } +type configGetter func() *csconfig.Config + type cliExplain struct { - cfg configGetter - flags struct { + cfg configGetter + configFilePath string + flags struct { logFile string dsn string logLine string @@ -56,9 +60,10 @@ type cliExplain struct { } } -func NewCLIExplain(cfg configGetter) *cliExplain { +func New(cfg configGetter, configFilePath string) *cliExplain { return &cliExplain{ - cfg: cfg, + cfg: cfg, + configFilePath: configFilePath, } } @@ -103,7 +108,7 @@ tail -n 5 myfile.log | cscli explain --type nginx -f - flags.StringVar(&cli.flags.crowdsec, "crowdsec", "crowdsec", "Path to crowdsec") flags.BoolVar(&cli.flags.noClean, "no-clean", false, "Don't clean runtime environment after tests") - cmd.MarkFlagRequired("type") + _ = cmd.MarkFlagRequired("type") cmd.MarkFlagsOneRequired("log", "file", "dsn") return cmd @@ -214,7 +219,7 @@ func (cli *cliExplain) run() error { return errors.New("no acquisition (--file or --dsn) provided, can't run cscli test") } - cmdArgs := []string{"-c", 
ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} + cmdArgs := []string{"-c", cli.configFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} if labels != "" { log.Debugf("adding labels %s", labels) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index da955923962..49140b160ad 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -152,14 +153,6 @@ func (cli *cliRoot) initialize() error { return nil } -// list of valid subcommands for the shell completion -var validArgs = []string{ - "alerts", "appsec-configs", "appsec-rules", "bouncers", "capi", "collections", - "completion", "config", "console", "contexts", "dashboard", "decisions", "explain", - "hub", "hubtest", "lapi", "machines", "metrics", "notifications", "parsers", - "postoverflows", "scenarios", "simulation", "support", "version", -} - func (cli *cliRoot) colorize(cmd *cobra.Command) { cc.Init(&cc.Config{ RootCmd: cmd, @@ -191,6 +184,14 @@ func (cli *cliRoot) NewCommand() (*cobra.Command, error) { return nil, fmt.Errorf("failed to set feature flags from env: %w", err) } + // list of valid subcommands for the shell completion + validArgs := []string{ + "alerts", "appsec-configs", "appsec-rules", "bouncers", "capi", "collections", + "completion", "config", "console", "contexts", "dashboard", "decisions", "explain", + "hub", "hubtest", "lapi", "machines", "metrics", "notifications", "parsers", + "postoverflows", "scenarios", "simulation", "support", "version", + } + cmd := &cobra.Command{ Use: "cscli", Short: "cscli allows you to manage crowdsec", @@ -238,16 +239,6 @@ It is meant 
to allow you to manage bans, parsers/scenarios/etc, api and generall return nil, err } - if len(os.Args) > 1 { - cobra.OnInitialize( - func() { - if err := cli.initialize(); err != nil { - log.Fatal(err) - } - }, - ) - } - cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) @@ -263,7 +254,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(cliconsole.New(cli.cfg, reloadMessage).NewCommand()) - cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) + cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) @@ -280,6 +271,16 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) } + if len(os.Args) > 1 { + cobra.OnInitialize( + func() { + if err := cli.initialize(); err != nil { + log.Fatal(err) + } + }, + ) + } + return cmd, nil } From 89aec7cf6b3e04afa823bda4aecb465cd357f946 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 22 Aug 2024 12:12:40 +0200 Subject: [PATCH 257/581] pkg/cwhub: simpler accessor methods (#3165) * pkg/cwhub: simpler accessor methods - prefer higher level GetItemsByType, GetInstalledByType over GetItemMap - always send both appsec-rules and scenarios to api - explicit parameter for (case insensitive) sorted list of items - shorter code - assume itemType parameter makes sense, don't error * lint (gofumpt) --- cmd/crowdsec-cli/capi.go | 35 +++------- cmd/crowdsec-cli/cliconsole/console.go | 22 ++----- cmd/crowdsec-cli/hub.go | 7 +- cmd/crowdsec-cli/item_suggest.go | 20 ++---- cmd/crowdsec-cli/itemcli.go | 18 ++--- cmd/crowdsec-cli/items.go | 15 
+++-- cmd/crowdsec-cli/lapi.go | 9 +-- cmd/crowdsec/lapiclient.go | 34 ++-------- cmd/crowdsec/lpmetrics.go | 8 +-- cmd/crowdsec/main.go | 6 +- pkg/alertcontext/config.go | 10 +-- pkg/appsec/appsec.go | 71 +++++++++----------- pkg/appsec/loader.go | 10 +-- pkg/cwhub/cwhub.go | 8 --- pkg/cwhub/doc.go | 2 +- pkg/cwhub/hub.go | 91 ++++++++++++-------------- pkg/hubtest/hubtest_item.go | 39 +++++------ pkg/parser/unix_parser.go | 35 +++++----- test/bats/04_capi.bats | 2 +- 19 files changed, 160 insertions(+), 282 deletions(-) diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/capi.go index ac921ea547f..64bca9f7caf 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/capi.go @@ -145,19 +145,15 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { // QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { - apiURL, err := url.Parse(credURL) if err != nil { return false, false, fmt.Errorf("parsing api url: %w", err) } - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return false, false, fmt.Errorf("failed to get scenarios: %w", err) - } + itemsForAPI := hub.GetInstalledListForAPI() - if len(scenarios) == 0 { - return false, false, errors.New("no scenarios installed, abort") + if len(itemsForAPI) == 0 { + return false, false, errors.New("no scenarios or appsec-rules installed, abort") } passwd := strfmt.Password(password) @@ -165,29 +161,16 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri client, err := apiclient.NewClient(&apiclient.Config{ MachineID: login, Password: passwd, - Scenarios: scenarios, + Scenarios: itemsForAPI, UserAgent: cwversion.UserAgent(), URL: apiURL, - //I don't believe papi is neede to check enrollement - //PapiURL: papiURL, + // I don't believe papi is neede to check 
enrollement + // PapiURL: papiURL, VersionPrefix: "v3", UpdateScenario: func() ([]string, error) { - l_scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, err - } - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, err - } - ret := make([]string, 0, len(l_scenarios)+len(appsecRules)) - ret = append(ret, l_scenarios...) - ret = append(ret, appsecRules...) - - return ret, nil + return itemsForAPI, nil }, }) - if err != nil { return false, false, fmt.Errorf("new client api: %w", err) } @@ -197,7 +180,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri t := models.WatcherAuthRequest{ MachineID: &login, Password: &pw, - Scenarios: scenarios, + Scenarios: itemsForAPI, } authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), t) @@ -211,7 +194,6 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return true, true, nil } return true, false, nil - } func (cli *cliCapi) status() error { @@ -232,7 +214,6 @@ func (cli *cliCapi) status() error { log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) - if err != nil { return fmt.Errorf("CAPI: failed to authenticate to Central API (CAPI): %s", err) } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 666afbba07f..158f33c9136 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -23,7 +23,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -78,20 +77,6 @@ func (cli *cliConsole) enroll(key 
string, name string, overwrite bool, tags []st return fmt.Errorf("could not parse CAPI URL: %w", err) } - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("failed to get installed scenarios: %w", err) - } - - if len(scenarios) == 0 { - scenarios = make([]string, 0) - } - enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} if len(opts) != 0 { @@ -129,10 +114,15 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st } } + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + c, _ := apiclient.NewClient(&apiclient.Config{ MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, - Scenarios: scenarios, + Scenarios: hub.GetInstalledListForAPI(), UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v3", diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index 7e00eb64b33..e6cba08940a 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -148,16 +148,11 @@ func (cli *cliHub) upgrade(ctx context.Context, force bool) error { } for _, itemType := range cwhub.ItemTypes { - items, err := hub.GetInstalledItemsByType(itemType) - if err != nil { - return err - } - updated := 0 log.Infof("Upgrading %s", itemType) - for _, item := range items { + for _, item := range hub.GetInstalledByType(itemType, true) { didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/item_suggest.go index 0ea656549ba..7d3e1e728ae 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ b/cmd/crowdsec-cli/item_suggest.go @@ -19,7 +19,7 @@ func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) str score := 100 nearest := "" - for _, item := range hub.GetItemMap(itemType) { + for _, item := range hub.GetItemsByType(itemType, false) { d 
:= levenshtein.Distance(itemName, item.Name, nil) if d < score { score = d @@ -44,7 +44,7 @@ func compAllItems(itemType string, args []string, toComplete string, cfg configG comp := make([]string, 0) - for _, item := range hub.GetItemMap(itemType) { + for _, item := range hub.GetItemsByType(itemType, false) { if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { comp = append(comp, item.Name) } @@ -61,22 +61,14 @@ func compInstalledItems(itemType string, args []string, toComplete string, cfg c return nil, cobra.ShellCompDirectiveDefault } - items, err := hub.GetInstalledNamesByType(itemType) - if err != nil { - cobra.CompDebugln(fmt.Sprintf("list installed %s err: %s", itemType, err), true) - return nil, cobra.ShellCompDirectiveDefault - } + items := hub.GetInstalledByType(itemType, true) comp := make([]string, 0) - if toComplete != "" { - for _, item := range items { - if strings.Contains(item, toComplete) { - comp = append(comp, item) - } + for _, item := range items { + if strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) } - } else { - comp = items } cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 3f789a14ded..85647a5f6e8 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -147,19 +147,14 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error } if all { - getter := hub.GetInstalledItemsByType + itemGetter := hub.GetInstalledByType if purge { - getter = hub.GetItemsByType - } - - items, err := getter(cli.name) - if err != nil { - return err + itemGetter = hub.GetItemsByType } removed := 0 - for _, item := range items { + for _, item := range itemGetter(cli.name, true) { didRemove, err := item.Remove(purge, force) if err != nil { return err @@ -262,14 +257,9 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b } if all { - items, err 
:= hub.GetInstalledItemsByType(cli.name) - if err != nil { - return err - } - updated := 0 - for _, item := range items { + for _, item := range hub.GetInstalledByType(cli.name, true) { didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/items.go index b0c03922166..5a4fee4d582 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/items.go @@ -17,7 +17,12 @@ import ( // selectItems returns a slice of items of a given type, selected by name and sorted by case-insensitive name func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { - itemNames := hub.GetNamesByType(itemType) + allItems := hub.GetItemsByType(itemType, true) + + itemNames := make([]string, len(allItems)) + for idx, item := range allItems { + itemNames[idx] = item.Name + } notExist := []string{} @@ -38,7 +43,7 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b installedOnly = false } - items := make([]*cwhub.Item, 0, len(itemNames)) + wantedItems := make([]*cwhub.Item, 0, len(itemNames)) for _, itemName := range itemNames { item := hub.GetItem(itemType, itemName) @@ -46,12 +51,10 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b continue } - items = append(items, item) + wantedItems = append(wantedItems, item) } - cwhub.SortItemSlice(items) - - return items, nil + return wantedItems, nil } func listItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index df4f0a98188..6e13dd94436 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -45,11 +45,6 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return fmt.Errorf("parsing api url: %w", err) } - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if 
err != nil { - return fmt.Errorf("failed to get scenarios: %w", err) - } - client, err := apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, cwversion.UserAgent(), @@ -60,10 +55,12 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri pw := strfmt.Password(password) + itemsForAPI := hub.GetInstalledListForAPI() + t := models.WatcherAuthRequest{ MachineID: &login, Password: &pw, - Scenarios: scenarios, + Scenarios: itemsForAPI, } _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index 6cc0fba9515..cbafb460042 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -16,20 +16,6 @@ import ( ) func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err) - } - - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, fmt.Errorf("loading list of installed hub appsec rules: %w", err) - } - - installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules)) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...) - apiURL, err := url.Parse(credentials.URL) if err != nil { return nil, fmt.Errorf("parsing api url ('%s'): %w", credentials.URL, err) @@ -42,28 +28,18 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. 
password := strfmt.Password(credentials.Password) + itemsForAPI := hub.GetInstalledListForAPI() + client, err := apiclient.NewClient(&apiclient.Config{ MachineID: credentials.Login, Password: password, - Scenarios: installedScenariosAndAppsecRules, + Scenarios: itemsForAPI, UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", UpdateScenario: func() ([]string, error) { - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, err - } - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, err - } - ret := make([]string, 0, len(scenarios)+len(appsecRules)) - ret = append(ret, scenarios...) - ret = append(ret, appsecRules...) - - return ret, nil + return itemsForAPI, nil }, }) if err != nil { @@ -73,7 +49,7 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ MachineID: &credentials.Login, Password: &password, - Scenarios: installedScenariosAndAppsecRules, + Scenarios: itemsForAPI, }) if err != nil { return nil, fmt.Errorf("authenticate watcher (%s): %w", credentials.Login, err) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 0fd27054071..24842851294 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -7,7 +7,6 @@ import ( "time" "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -46,10 +45,8 @@ func getHubState(hub *cwhub.Hub) models.HubItems { for _, itemType := range cwhub.ItemTypes { ret[itemType] = []models.HubItem{} - items, _ := hub.GetInstalledItemsByType(itemType) - cwhub.SortItemSlice(items) - for _, item := range items { + for _, item := range hub.GetInstalledByType(itemType, true) { status := "official" if item.State.IsLocal() { status = "custom" @@ -90,7 +87,8 @@ func newStaticMetrics(consoleOptions []string, datasources 
[]acquisition.DataSou } func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logger *logrus.Entry, - consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { + consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub, +) *MetricsProvider { return &MetricsProvider{ apic: apic, interval: interval, diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 18416e044e7..6d8ca24c335 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -91,10 +91,8 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { files []string ) - for _, hubScenarioItem := range hub.GetItemMap(cwhub.SCENARIOS) { - if hubScenarioItem.State.Installed { - files = append(files, hubScenarioItem.State.LocalPath) - } + for _, hubScenarioItem := range hub.GetInstalledByType(cwhub.SCENARIOS, false) { + files = append(files, hubScenarioItem.State.LocalPath) } buckets = leakybucket.NewBuckets() diff --git a/pkg/alertcontext/config.go b/pkg/alertcontext/config.go index 21d16db3972..da05c937b18 100644 --- a/pkg/alertcontext/config.go +++ b/pkg/alertcontext/config.go @@ -98,20 +98,14 @@ func addContextFromFile(toSend map[string][]string, filePath string) error { return nil } - // LoadConsoleContext loads the context from the hub (if provided) and the file console_context_path. 
func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error { c.Crowdsec.ContextToSend = make(map[string][]string, 0) if hub != nil { - items, err := hub.GetInstalledItemsByType(cwhub.CONTEXTS) - if err != nil { - return err - } - - for _, item := range items { + for _, item := range hub.GetInstalledByType(cwhub.CONTEXTS, true) { // context in item files goes under the key 'context' - if err = addContextFromItem(c.Crowdsec.ContextToSend, item); err != nil { + if err := addContextFromItem(c.Crowdsec.ContextToSend, item); err != nil { return err } } diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 96f977b4738..30784b23db0 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -40,7 +40,6 @@ const ( ) func (h *Hook) Build(hookStage int) error { - ctx := map[string]interface{}{} switch hookStage { case hookOnLoad: @@ -54,7 +53,7 @@ func (h *Hook) Build(hookStage int) error { } opts := exprhelpers.GetExprOptions(ctx) if h.Filter != "" { - program, err := expr.Compile(h.Filter, opts...) //FIXME: opts + program, err := expr.Compile(h.Filter, opts...) 
// FIXME: opts if err != nil { return fmt.Errorf("unable to compile filter %s : %w", h.Filter, err) } @@ -73,11 +72,11 @@ func (h *Hook) Build(hookStage int) error { type AppsecTempResponse struct { InBandInterrupt bool OutOfBandInterrupt bool - Action string //allow, deny, captcha, log - UserHTTPResponseCode int //The response code to send to the user - BouncerHTTPResponseCode int //The response code to send to the remediation component - SendEvent bool //do we send an internal event on rule match - SendAlert bool //do we send an alert on rule match + Action string // allow, deny, captcha, log + UserHTTPResponseCode int // The response code to send to the user + BouncerHTTPResponseCode int // The response code to send to the remediation component + SendEvent bool // do we send an internal event on rule match + SendAlert bool // do we send an alert on rule match } type AppsecSubEngineOpts struct { @@ -93,7 +92,7 @@ type AppsecRuntimeConfig struct { InBandRules []AppsecCollection DefaultRemediation string - RemediationByTag map[string]string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + RemediationByTag map[string]string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME RemediationById map[int]string CompiledOnLoad []Hook CompiledPreEval []Hook @@ -101,22 +100,22 @@ type AppsecRuntimeConfig struct { CompiledOnMatch []Hook CompiledVariablesTracking []*regexp.Regexp Config *AppsecConfig - //CorazaLogger debuglog.Logger + // CorazaLogger debuglog.Logger - //those are ephemeral, created/destroyed with every req - OutOfBandTx ExtendedTransaction //is it a good idea ? - InBandTx ExtendedTransaction //is it a good idea ? + // those are ephemeral, created/destroyed with every req + OutOfBandTx ExtendedTransaction // is it a good idea ? + InBandTx ExtendedTransaction // is it a good idea ? Response AppsecTempResponse - //should we store matched rules here ? + // should we store matched rules here ? 
Logger *log.Entry - //Set by on_load to ignore some rules on loading + // Set by on_load to ignore some rules on loading DisabledInBandRuleIds []int - DisabledInBandRulesTags []string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + DisabledInBandRulesTags []string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME DisabledOutOfBandRuleIds []int - DisabledOutOfBandRulesTags []string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + DisabledOutOfBandRulesTags []string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME } type AppsecConfig struct { @@ -125,10 +124,10 @@ type AppsecConfig struct { InBandRules []string `yaml:"inband_rules"` DefaultRemediation string `yaml:"default_remediation"` DefaultPassAction string `yaml:"default_pass_action"` - BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` //returned to the bouncer - BouncerPassedHTTPCode int `yaml:"passed_http_code"` //returned to the bouncer - UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` //returned to the user - UserPassedHTTPCode int `yaml:"user_passed_http_code"` //returned to the user + BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` // returned to the bouncer + BouncerPassedHTTPCode int `yaml:"passed_http_code"` // returned to the bouncer + UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` // returned to the user + UserPassedHTTPCode int `yaml:"user_passed_http_code"` // returned to the user OnLoad []Hook `yaml:"on_load"` PreEval []Hook `yaml:"pre_eval"` @@ -152,7 +151,6 @@ func (w *AppsecRuntimeConfig) ClearResponse() { } func (wc *AppsecConfig) LoadByPath(file string) error { - wc.Logger.Debugf("loading config %s", file) yamlFile, err := os.ReadFile(file) @@ -177,19 +175,13 @@ func (wc *AppsecConfig) LoadByPath(file string) error { } func (wc *AppsecConfig) Load(configName string) error { - appsecConfigs := hub.GetItemMap(cwhub.APPSEC_CONFIGS) + item := 
hub.GetItem(cwhub.APPSEC_CONFIGS, configName) - for _, hubAppsecConfigItem := range appsecConfigs { - if !hubAppsecConfigItem.State.Installed { - continue - } - if hubAppsecConfigItem.Name != configName { - continue - } - wc.Logger.Infof("loading %s", hubAppsecConfigItem.State.LocalPath) - err := wc.LoadByPath(hubAppsecConfigItem.State.LocalPath) + if item != nil && item.State.Installed { + wc.Logger.Infof("loading %s", item.State.LocalPath) + err := wc.LoadByPath(item.State.LocalPath) if err != nil { - return fmt.Errorf("unable to load appsec-config %s : %s", hubAppsecConfigItem.State.LocalPath, err) + return fmt.Errorf("unable to load appsec-config %s : %s", item.State.LocalPath, err) } return nil } @@ -224,10 +216,10 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { wc.DefaultRemediation = BanRemediation } - //set the defaults + // set the defaults switch wc.DefaultRemediation { case BanRemediation, CaptchaRemediation, AllowRemediation: - //those are the officially supported remediation(s) + // those are the officially supported remediation(s) default: wc.Logger.Warningf("default '%s' remediation of %s is none of [%s,%s,%s] ensure bouncer compatbility!", wc.DefaultRemediation, wc.Name, BanRemediation, CaptchaRemediation, AllowRemediation) } @@ -237,7 +229,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { ret.DefaultRemediation = wc.DefaultRemediation wc.Logger.Tracef("Loading config %+v", wc) - //load rules + // load rules for _, rule := range wc.OutOfBandRules { wc.Logger.Infof("loading outofband rule %s", rule) collections, err := LoadCollection(rule, wc.Logger.WithField("component", "appsec_collection_loader")) @@ -259,7 +251,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { wc.Logger.Infof("Loaded %d inband rules", len(ret.InBandRules)) - //load hooks + // load hooks for _, hook := range wc.OnLoad { if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { return nil, 
fmt.Errorf("invalid 'on_success' for on_load hook : %s", hook.OnSuccess) @@ -304,7 +296,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { ret.CompiledOnMatch = append(ret.CompiledOnMatch, hook) } - //variable tracking + // variable tracking for _, variable := range wc.VariablesTracking { compiledVariableRule, err := regexp.Compile(variable) if err != nil { @@ -460,7 +452,6 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error // here means there is no filter or the filter matched for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetPostEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) - if err != nil { w.Logger.Errorf("unable to apply appsec post_eval expr: %s", err) continue @@ -604,7 +595,7 @@ func (w *AppsecRuntimeConfig) SetActionByName(name string, action string) error } func (w *AppsecRuntimeConfig) SetAction(action string) error { - //log.Infof("setting to %s", action) + // log.Infof("setting to %s", action) w.Logger.Debugf("setting action to %s", action) w.Response.Action = action return nil @@ -628,7 +619,7 @@ func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logg if response.Action == AllowRemediation { resp.HTTPStatus = w.Config.UserPassedHTTPCode bouncerStatusCode = w.Config.BouncerPassedHTTPCode - } else { //ban, captcha and anything else + } else { // ban, captcha and anything else resp.HTTPStatus = response.UserHTTPResponseCode if resp.HTTPStatus == 0 { resp.HTTPStatus = w.Config.UserBlockedHTTPCode diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 56ec23e3671..9a3bfb6b668 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -9,19 +9,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this +var appsecRules = make(map[string]AppsecCollectionConfig) // FIXME: would probably be better to 
have a struct for this -var hub *cwhub.Hub //FIXME: this is a temporary hack to make the hub available in the package +var hub *cwhub.Hub // FIXME: this is a temporary hack to make the hub available in the package func LoadAppsecRules(hubInstance *cwhub.Hub) error { hub = hubInstance appsecRules = make(map[string]AppsecCollectionConfig) - for _, hubAppsecRuleItem := range hub.GetItemMap(cwhub.APPSEC_RULES) { - if !hubAppsecRuleItem.State.Installed { - continue - } - + for _, hubAppsecRuleItem := range hub.GetInstalledByType(cwhub.APPSEC_RULES, false) { content, err := os.ReadFile(hubAppsecRuleItem.State.LocalPath) if err != nil { log.Warnf("unable to read file %s : %s", hubAppsecRuleItem.State.LocalPath, err) diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 0a9cc443ce0..d8607e7e562 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -4,7 +4,6 @@ import ( "fmt" "net/http" "path/filepath" - "sort" "strings" "time" @@ -45,10 +44,3 @@ func safePath(dir, filePath string) (string, error) { return absFilePath, nil } - -// SortItemSlice sorts a slice of items by name, case insensitive. 
-func SortItemSlice(items []*Item) { - sort.Slice(items, func(i, j int) bool { - return strings.ToLower(items[i].Name) < strings.ToLower(items[j].Name) - }) -} diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index 89d8de0fa8b..f86b95c6454 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -74,7 +74,7 @@ // Now you can use the hub object to access the existing items: // // // list all the parsers -// for _, parser := range hub.GetItemMap(cwhub.PARSERS) { +// for _, parser := range hub.GetItemsByType(cwhub.PARSERS, false) { // fmt.Printf("parser: %s\n", parser.Name) // } // diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 20a628a493f..a4e81e2c3e2 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -8,11 +8,12 @@ import ( "io" "os" "path" - "slices" "strings" "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) @@ -117,13 +118,14 @@ func (h *Hub) ItemStats() []string { tainted := 0 for _, itemType := range ItemTypes { - if len(h.GetItemMap(itemType)) == 0 { + items := h.GetItemsByType(itemType, false) + if len(items) == 0 { continue } - loaded += fmt.Sprintf("%d %s, ", len(h.GetItemMap(itemType)), itemType) + loaded += fmt.Sprintf("%d %s, ", len(items), itemType) - for _, item := range h.GetItemMap(itemType) { + for _, item := range items { if item.State.IsLocal() { local++ } @@ -218,73 +220,62 @@ func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) { return i, nil } -// GetNamesByType returns a slice of (full) item names for a given type -// (eg. for collections: crowdsecurity/apache2 crowdsecurity/nginx). -func (h *Hub) GetNamesByType(itemType string) []string { - m := h.GetItemMap(itemType) - if m == nil { - return nil - } +// GetItemsByType returns a slice of all the items of a given type, installed or not, optionally sorted by case-insensitive name. +// A non-existent type will silently return an empty slice. 
+func (h *Hub) GetItemsByType(itemType string, sorted bool) []*Item { + items := h.items[itemType] - names := make([]string, 0, len(m)) - for k := range m { - names = append(names, k) - } + ret := make([]*Item, len(items)) - return names -} + if sorted { + for idx, name := range maptools.SortedKeysNoCase(items) { + ret[idx] = items[name] + } -// GetItemsByType returns a slice of all the items of a given type, installed or not. -func (h *Hub) GetItemsByType(itemType string) ([]*Item, error) { - if !slices.Contains(ItemTypes, itemType) { - return nil, fmt.Errorf("invalid item type %s", itemType) + return ret } - items := h.items[itemType] - - ret := make([]*Item, len(items)) - idx := 0 - for _, item := range items { ret[idx] = item - idx++ + idx += 1 } - return ret, nil + return ret } -// GetInstalledItemsByType returns a slice of the installed items of a given type. -func (h *Hub) GetInstalledItemsByType(itemType string) ([]*Item, error) { - if !slices.Contains(ItemTypes, itemType) { - return nil, fmt.Errorf("invalid item type %s", itemType) - } - - items := h.items[itemType] +// GetInstalledByType returns a slice of all the installed items of a given type, optionally sorted by case-insensitive name. +// A non-existent type will silently return an empty slice. +func (h *Hub) GetInstalledByType(itemType string, sorted bool) []*Item { + ret := make([]*Item, 0) - retItems := make([]*Item, 0) - - for _, item := range items { + for _, item := range h.GetItemsByType(itemType, sorted) { if item.State.Installed { - retItems = append(retItems, item) + ret = append(ret, item) } } - return retItems, nil + return ret } -// GetInstalledNamesByType returns the names of the installed items of a given type. -func (h *Hub) GetInstalledNamesByType(itemType string) ([]string, error) { - items, err := h.GetInstalledItemsByType(itemType) - if err != nil { - return nil, err - } +// GetInstalledListForAPI returns a slice of names of all the installed scenarios and appsec-rules. 
+// The returned list is sorted by type (scenarios first) and case-insensitive name. +func (h *Hub) GetInstalledListForAPI() []string { + scenarios := h.GetInstalledByType(SCENARIOS, true) + appsecRules := h.GetInstalledByType(APPSEC_RULES, true) + + ret := make([]string, len(scenarios)+len(appsecRules)) - retStr := make([]string, len(items)) + idx := 0 + for _, item := range scenarios { + ret[idx] = item.Name + idx += 1 + } - for idx, it := range items { - retStr[idx] = it.Name + for _, item := range appsecRules { + ret[idx] = item.Name + idx += 1 } - return retStr, nil + return ret } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index da4969ee8dd..42792413b5d 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -223,39 +223,30 @@ func (t *HubTestItem) InstallHub() error { ctx := context.Background() // install data for parsers if needed - ret := hub.GetItemMap(cwhub.PARSERS) - for parserName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", parserName, err) - } - - log.Debugf("parser '%s' installed successfully in runtime environment", parserName) + for _, item := range hub.GetInstalledByType(cwhub.PARSERS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("parser '%s' installed successfully in runtime environment", item.Name) } // install data for scenarios if needed - ret = hub.GetItemMap(cwhub.SCENARIOS) - for scenarioName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", scenarioName, err) - } - - log.Debugf("scenario '%s' installed successfully in runtime environment", scenarioName) + for _, item := range 
hub.GetInstalledByType(cwhub.SCENARIOS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("scenario '%s' installed successfully in runtime environment", item.Name) } // install data for postoverflows if needed - ret = hub.GetItemMap(cwhub.POSTOVERFLOWS) - for postoverflowName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", postoverflowName, err) - } - - log.Debugf("postoverflow '%s' installed successfully in runtime environment", postoverflowName) + for _, item := range hub.GetInstalledByType(cwhub.POSTOVERFLOWS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("postoverflow '%s' installed successfully in runtime environment", item.Name) } return nil diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go index 280d122ecc1..351de8ade56 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -66,21 +66,20 @@ func NewParsers(hub *cwhub.Hub) *Parsers { } for _, itemType := range []string{cwhub.PARSERS, cwhub.POSTOVERFLOWS} { - for _, hubParserItem := range hub.GetItemMap(itemType) { - if hubParserItem.State.Installed { - stagefile := Stagefile{ - Filename: hubParserItem.State.LocalPath, - Stage: hubParserItem.Stage, - } - if itemType == cwhub.PARSERS { - parsers.StageFiles = append(parsers.StageFiles, stagefile) - } - if itemType == cwhub.POSTOVERFLOWS { - parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile) - } + for _, hubParserItem := range hub.GetInstalledByType(itemType, false) { + stagefile := Stagefile{ + Filename: hubParserItem.State.LocalPath, + Stage: hubParserItem.Stage, + } + if itemType == cwhub.PARSERS { + parsers.StageFiles = 
append(parsers.StageFiles, stagefile) + } + if itemType == cwhub.POSTOVERFLOWS { + parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile) } } } + if parsers.StageFiles != nil { sort.Slice(parsers.StageFiles, func(i, j int) bool { return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename @@ -101,13 +100,17 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { patternsDir := cConfig.ConfigPaths.PatternDir log.Infof("Loading grok library %s", patternsDir) /* load base regexps for two grok parsers */ - parsers.Ctx, err = Init(map[string]interface{}{"patterns": patternsDir, - "data": cConfig.ConfigPaths.DataDir}) + parsers.Ctx, err = Init(map[string]interface{}{ + "patterns": patternsDir, + "data": cConfig.ConfigPaths.DataDir, + }) if err != nil { return parsers, fmt.Errorf("failed to load parser patterns : %v", err) } - parsers.Povfwctx, err = Init(map[string]interface{}{"patterns": patternsDir, - "data": cConfig.ConfigPaths.DataDir}) + parsers.Povfwctx, err = Init(map[string]interface{}{ + "patterns": patternsDir, + "data": cConfig.ConfigPaths.DataDir, + }) if err != nil { return parsers, fmt.Errorf("failed to load postovflw parser patterns : %v", err) } diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index d5154c1a0d7..830d0668cbb 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -51,7 +51,7 @@ setup() { config_enable_capi rune -0 cscli capi register --schmilblick githubciXXXXXXXXXXXXXXXXXXXXXXXX rune -1 cscli capi status - assert_stderr --partial "no scenarios installed, abort" + assert_stderr --partial "no scenarios or appsec-rules installed, abort" rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli capi status From da495e812c90a266f03dad3255ee09742cf86b41 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Aug 2024 17:05:55 +0200 Subject: [PATCH 258/581] pkg/cwhub: cache control / send etag header from file contents, 
check with HEAD req (#3187) --- go.mod | 2 +- go.sum | 4 ++-- pkg/cwhub/itemupgrade.go | 1 + pkg/cwhub/remote.go | 1 + 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index af9d7550b94..9377e116519 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.13 + github.com/crowdsecurity/go-cs-lib v0.0.15 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 282f10d6367..b76c7fccd1a 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.13 h1:asmtjIEPOibUK8eaYQCIR7XIBU/EX5vyAp1EbKFQJtY= -github.com/crowdsecurity/go-cs-lib v0.0.13/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= +github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= +github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 441948c9168..105e5ebec31 100644 --- 
a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -162,6 +162,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin New(). WithHTTPClient(hubClient). ToFile(destPath). + WithETagFn(downloader.SHA256). WithMakeDirs(true). WithLogger(logrus.WithField("url", url)). CompareContent(). diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 0678a7488f8..04e4fab972f 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -45,6 +45,7 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e New(). WithHTTPClient(hubClient). ToFile(destPath). + WithETagFn(downloader.SHA256). CompareContent(). WithLogger(logrus.WithField("url", url)). Download(ctx, url) From b754c39b6d96c5b6fd48ae682b91553b4a7bdb2f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 23 Aug 2024 17:13:42 +0200 Subject: [PATCH 259/581] pkg/cwhub: cache control / send etag header from file contents, check with HEAD req (#3189) --- go.mod | 2 +- go.sum | 4 ++-- pkg/cwhub/itemupgrade.go | 1 + pkg/cwhub/remote.go | 1 + 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index af9d7550b94..9377e116519 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.13 + github.com/crowdsecurity/go-cs-lib v0.0.15 github.com/crowdsecurity/grokky v0.2.1 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 282f10d6367..b76c7fccd1a 100644 --- a/go.sum +++ b/go.sum @@ -103,8 +103,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= 
github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.13 h1:asmtjIEPOibUK8eaYQCIR7XIBU/EX5vyAp1EbKFQJtY= -github.com/crowdsecurity/go-cs-lib v0.0.13/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= +github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= +github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go index 441948c9168..105e5ebec31 100644 --- a/pkg/cwhub/itemupgrade.go +++ b/pkg/cwhub/itemupgrade.go @@ -162,6 +162,7 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin New(). WithHTTPClient(hubClient). ToFile(destPath). + WithETagFn(downloader.SHA256). WithMakeDirs(true). WithLogger(logrus.WithField("url", url)). CompareContent(). diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 0678a7488f8..04e4fab972f 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -45,6 +45,7 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e New(). WithHTTPClient(hubClient). ToFile(destPath). + WithETagFn(downloader.SHA256). CompareContent(). WithLogger(logrus.WithField("url", url)). 
Download(ctx, url) From 9c0422f75b0b67209cc47680163cccac598fe832 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 26 Aug 2024 09:39:36 +0200 Subject: [PATCH 260/581] cscli refact: package 'clicapi', 'clilapi' (#3185) * extract functions to own files * package clilapi * package clicapi * package crowdsec-cli/reload --- .golangci.yml | 2 +- cmd/crowdsec-cli/{ => clicapi}/capi.go | 14 ++-- cmd/crowdsec-cli/cliconsole/console.go | 11 ++-- cmd/crowdsec-cli/{ => clilapi}/lapi.go | 16 +++-- cmd/crowdsec-cli/{ => clilapi}/lapi_test.go | 2 +- cmd/crowdsec-cli/clilapi/utils.go | 24 +++++++ cmd/crowdsec-cli/dashboard.go | 3 +- cmd/crowdsec-cli/idgen/machineid.go | 48 ++++++++++++++ cmd/crowdsec-cli/idgen/password.go | 32 ++++++++++ cmd/crowdsec-cli/itemcli.go | 11 ++-- cmd/crowdsec-cli/machines.go | 71 +-------------------- cmd/crowdsec-cli/main.go | 8 ++- cmd/crowdsec-cli/reload.go | 6 -- cmd/crowdsec-cli/reload/reload.go | 6 ++ cmd/crowdsec-cli/reload/reload_freebsd.go | 4 ++ cmd/crowdsec-cli/reload/reload_linux.go | 4 ++ cmd/crowdsec-cli/reload/reload_windows.go | 3 + cmd/crowdsec-cli/reload_freebsd.go | 4 -- cmd/crowdsec-cli/reload_linux.go | 4 -- cmd/crowdsec-cli/reload_windows.go | 3 - cmd/crowdsec-cli/simulation.go | 3 +- cmd/crowdsec-cli/support.go | 6 +- cmd/crowdsec-cli/utils.go | 23 ------- 23 files changed, 169 insertions(+), 139 deletions(-) rename cmd/crowdsec-cli/{ => clicapi}/capi.go (93%) rename cmd/crowdsec-cli/{ => clilapi}/lapi.go (97%) rename cmd/crowdsec-cli/{ => clilapi}/lapi_test.go (98%) create mode 100644 cmd/crowdsec-cli/clilapi/utils.go create mode 100644 cmd/crowdsec-cli/idgen/machineid.go create mode 100644 cmd/crowdsec-cli/idgen/password.go delete mode 100644 cmd/crowdsec-cli/reload.go create mode 100644 cmd/crowdsec-cli/reload/reload.go create mode 100644 cmd/crowdsec-cli/reload/reload_freebsd.go create mode 100644 cmd/crowdsec-cli/reload/reload_linux.go create mode 100644 
cmd/crowdsec-cli/reload/reload_windows.go delete mode 100644 cmd/crowdsec-cli/reload_freebsd.go delete mode 100644 cmd/crowdsec-cli/reload_linux.go delete mode 100644 cmd/crowdsec-cli/reload_windows.go diff --git a/.golangci.yml b/.golangci.yml index 2b216259770..86771f17f60 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -487,7 +487,7 @@ issues: - linters: - revive - path: "cmd/crowdsec-cli/machines.go" + path: "cmd/crowdsec-cli/idgen/password.go" text: "deep-exit: .*" - linters: diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/clicapi/capi.go similarity index 93% rename from cmd/crowdsec-cli/capi.go rename to cmd/crowdsec-cli/clicapi/capi.go index 64bca9f7caf..bf45613c776 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -1,4 +1,4 @@ -package main +package clicapi import ( "context" @@ -12,6 +12,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -21,11 +23,13 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter func() *csconfig.Config + type cliCapi struct { cfg configGetter } -func NewCLICapi(cfg configGetter) *cliCapi { +func New(cfg configGetter) *cliCapi { return &cliCapi{ cfg: cfg, } @@ -56,12 +60,12 @@ func (cli *cliCapi) NewCommand() *cobra.Command { func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { cfg := cli.cfg() - capiUser, err := generateID(capiUserPrefix) + capiUser, err := idgen.GenerateMachineID(capiUserPrefix) if err != nil { return fmt.Errorf("unable to generate machine id: %w", err) } - password := strfmt.Password(generatePassword(passwordLength)) + password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) apiurl, err := url.Parse(types.CAPIBaseURL) 
if err != nil { @@ -114,7 +118,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { fmt.Println(string(apiConfigDump)) } - log.Warning(reloadMessage) + log.Warning(reload.Message) return nil } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 158f33c9136..995a082c514 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -30,14 +31,12 @@ import ( type configGetter func() *csconfig.Config type cliConsole struct { - cfg func() *csconfig.Config - reloadMessage string + cfg configGetter } -func New(cfg configGetter, reloadMessage string) *cliConsole { +func New(cfg configGetter) *cliConsole { return &cliConsole{ cfg: cfg, - reloadMessage: reloadMessage, } } @@ -215,7 +214,7 @@ Enable given information push to the central API. 
Allows to empower the console` log.Infof("%v have been enabled", args) } - log.Info(cli.reloadMessage) + log.Info(reload.Message) return nil }, @@ -249,7 +248,7 @@ Disable given information push to the central API.`, log.Infof("%v have been disabled", args) } - log.Info(cli.reloadMessage) + log.Info(reload.Message) return nil }, diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go similarity index 97% rename from cmd/crowdsec-cli/lapi.go rename to cmd/crowdsec-cli/clilapi/lapi.go index 6e13dd94436..ec66daf16a4 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -1,4 +1,4 @@ -package main +package clilapi import ( "context" @@ -15,6 +15,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/apiclient" @@ -28,11 +30,13 @@ import ( const LAPIURLPrefix = "v1" +type configGetter func() *csconfig.Config + type cliLapi struct { - cfg configGetter + cfg configGetter } -func NewCLILapi(cfg configGetter) *cliLapi { +func New(cfg configGetter) *cliLapi { return &cliLapi{ cfg: cfg, } @@ -100,13 +104,13 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e cfg := cli.cfg() if lapiUser == "" { - lapiUser, err = generateID("") + lapiUser, err = idgen.GenerateMachineID("") if err != nil { return fmt.Errorf("unable to generate machine id: %w", err) } } - password := strfmt.Password(generatePassword(passwordLength)) + password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) if err != nil { @@ -158,7 +162,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e fmt.Printf("%s\n", string(apiConfigDump)) } - log.Warning(reloadMessage) + 
log.Warning(reload.Message) return nil } diff --git a/cmd/crowdsec-cli/lapi_test.go b/cmd/crowdsec-cli/clilapi/lapi_test.go similarity index 98% rename from cmd/crowdsec-cli/lapi_test.go rename to cmd/crowdsec-cli/clilapi/lapi_test.go index 018ecad8118..caf986d847a 100644 --- a/cmd/crowdsec-cli/lapi_test.go +++ b/cmd/crowdsec-cli/clilapi/lapi_test.go @@ -1,4 +1,4 @@ -package main +package clilapi import ( "testing" diff --git a/cmd/crowdsec-cli/clilapi/utils.go b/cmd/crowdsec-cli/clilapi/utils.go new file mode 100644 index 00000000000..e3ec65f2145 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/utils.go @@ -0,0 +1,24 @@ +package clilapi + +func removeFromSlice(val string, slice []string) []string { + var i int + var value string + + valueFound := false + + // get the index + for i, value = range slice { + if value == val { + valueFound = true + break + } + } + + if valueFound { + slice[i] = slice[len(slice)-1] + slice[len(slice)-1] = "" + slice = slice[:len(slice)-1] + } + + return slice +} diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 96a2fa38cb7..eb4a2a5956a 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -20,6 +20,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" ) @@ -137,7 +138,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if metabasePassword == "" { isValid := passwordIsValid(metabasePassword) for !isValid { - metabasePassword = generatePassword(16) + metabasePassword = idgen.GeneratePassword(16) isValid = passwordIsValid(metabasePassword) } } diff --git a/cmd/crowdsec-cli/idgen/machineid.go b/cmd/crowdsec-cli/idgen/machineid.go new file mode 100644 index 00000000000..4bd356b3abc --- /dev/null +++ b/cmd/crowdsec-cli/idgen/machineid.go @@ -0,0 +1,48 @@ +package idgen + +import ( + "fmt" + 
"strings" + + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/machineid" +) + +// Returns a unique identifier for each crowdsec installation, using an +// identifier of the OS installation where available, otherwise a random +// string. +func generateMachineIDPrefix() (string, error) { + prefix, err := machineid.ID() + if err == nil { + return prefix, nil + } + + log.Debugf("failed to get machine-id with usual files: %s", err) + + bID, err := uuid.NewRandom() + if err == nil { + return bID.String(), nil + } + + return "", fmt.Errorf("generating machine id: %w", err) +} + +// Generate a unique identifier, composed by a prefix and a random suffix. +// The prefix can be provided by a parameter to use in test environments. +func GenerateMachineID(prefix string) (string, error) { + var err error + if prefix == "" { + prefix, err = generateMachineIDPrefix() + } + + if err != nil { + return "", err + } + + prefix = strings.ReplaceAll(prefix, "-", "")[:32] + suffix := GeneratePassword(16) + + return prefix + suffix, nil +} diff --git a/cmd/crowdsec-cli/idgen/password.go b/cmd/crowdsec-cli/idgen/password.go new file mode 100644 index 00000000000..cd798fdcc48 --- /dev/null +++ b/cmd/crowdsec-cli/idgen/password.go @@ -0,0 +1,32 @@ +package idgen + +import ( + "math/big" + saferand "crypto/rand" + + log "github.com/sirupsen/logrus" +) + +const PasswordLength = 64 + +func GeneratePassword(length int) string { + upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" + lower := "abcdefghijklmnopqrstuvwxyz" + digits := "0123456789" + + charset := upper + lower + digits + charsetLength := len(charset) + + buf := make([]byte, length) + + for i := range length { + rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) + if err != nil { + log.Fatalf("failed getting data from prng for password generation : %s", err) + } + + buf[i] = charset[rInt.Int64()] + } + + return string(buf) +} diff --git a/cmd/crowdsec-cli/itemcli.go 
b/cmd/crowdsec-cli/itemcli.go index 85647a5f6e8..11ac1232eae 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -92,7 +93,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool } } - log.Info(reloadMessage) + log.Info(reload.Message) return nil } @@ -170,7 +171,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Info(reloadMessage) + log.Info(reload.Message) } return nil @@ -212,7 +213,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Info(reloadMessage) + log.Info(reload.Message) } return nil @@ -273,7 +274,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b log.Infof("Updated %d %s", updated, cli.name) if updated > 0 { - log.Info(reloadMessage) + log.Info(reload.Message) } return nil @@ -304,7 +305,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b } if updated > 0 { - log.Info(reloadMessage) + log.Info(reload.Message) } return nil diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index dcdb1963b49..34d0b1b9208 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -1,13 +1,11 @@ package main import ( - saferand "crypto/rand" "encoding/csv" "encoding/json" "errors" "fmt" "io" - "math/big" "os" "slices" "strings" @@ -16,15 +14,13 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" "github.com/go-openapi/strfmt" - "github.com/google/uuid" "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" 
"github.com/spf13/cobra" "gopkg.in/yaml.v3" - "github.com/crowdsecurity/machineid" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -34,67 +30,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const passwordLength = 64 - -func generatePassword(length int) string { - upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" - lower := "abcdefghijklmnopqrstuvwxyz" - digits := "0123456789" - - charset := upper + lower + digits - charsetLength := len(charset) - - buf := make([]byte, length) - - for i := range length { - rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) - if err != nil { - log.Fatalf("failed getting data from prng for password generation : %s", err) - } - - buf[i] = charset[rInt.Int64()] - } - - return string(buf) -} - -// Returns a unique identifier for each crowdsec installation, using an -// identifier of the OS installation where available, otherwise a random -// string. -func generateIDPrefix() (string, error) { - prefix, err := machineid.ID() - if err == nil { - return prefix, nil - } - - log.Debugf("failed to get machine-id with usual files: %s", err) - - bID, err := uuid.NewRandom() - if err == nil { - return bID.String(), nil - } - - return "", fmt.Errorf("generating machine id: %w", err) -} - -// Generate a unique identifier, composed by a prefix and a random suffix. -// The prefix can be provided by a parameter to use in test environments. 
-func generateID(prefix string) (string, error) { - var err error - if prefix == "" { - prefix, err = generateIDPrefix() - } - - if err != nil { - return "", err - } - - prefix = strings.ReplaceAll(prefix, "-", "")[:32] - suffix := generatePassword(16) - - return prefix + suffix, nil -} - // getLastHeartbeat returns the last heartbeat timestamp of a machine // and a boolean indicating if the machine is considered active or not. func getLastHeartbeat(m *ent.Machine) (string, bool) { @@ -364,7 +299,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return errors.New("please specify a machine name to add, or use --auto") } - machineID, err = generateID("") + machineID, err = idgen.GenerateMachineID("") if err != nil { return fmt.Errorf("unable to generate machine id: %w", err) } @@ -401,7 +336,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri return errors.New("please specify a password with --password or use --auto") } - machinePassword = generatePassword(passwordLength) + machinePassword = idgen.GeneratePassword(idgen.PasswordLength) } else if machinePassword == "" && interactive { qs := &survey.Password{ Message: "Please provide a password for the machine:", diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 49140b160ad..9124dde0607 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,8 +14,10 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -250,10 +252,10 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall 
cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICapi(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) + cmd.AddCommand(clicapi.New(cli.cfg).NewCommand()) + cmd.AddCommand(clilapi.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) - cmd.AddCommand(cliconsole.New(cli.cfg, reloadMessage).NewCommand()) + cmd.AddCommand(cliconsole.New(cli.cfg).NewCommand()) cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/reload.go b/cmd/crowdsec-cli/reload.go deleted file mode 100644 index 8dd59be8d05..00000000000 --- a/cmd/crowdsec-cli/reload.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !windows && !freebsd && !linux - -package main - -// generic message since we don't know the platform -const reloadMessage = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload.go b/cmd/crowdsec-cli/reload/reload.go new file mode 100644 index 00000000000..fe03af1ea79 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload.go @@ -0,0 +1,6 @@ +//go:build !windows && !freebsd && !linux + +package reload + +// generic message since we don't know the platform +const Message = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_freebsd.go b/cmd/crowdsec-cli/reload/reload_freebsd.go new file mode 100644 index 00000000000..0dac99f2315 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_freebsd.go @@ -0,0 +1,4 @@ +package reload + +// actually sudo is not that popular on freebsd, but this will do +const Message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/reload/reload_linux.go b/cmd/crowdsec-cli/reload/reload_linux.go new file mode 100644 index 00000000000..fbe16e5f168 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_linux.go @@ -0,0 +1,4 @@ +package reload + +// assume systemd, although gentoo and others may differ +const Message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_windows.go b/cmd/crowdsec-cli/reload/reload_windows.go new file mode 100644 index 00000000000..88642425ae2 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_windows.go @@ -0,0 +1,3 @@ +package reload + +const Message = "Please restart the crowdsec service for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_freebsd.go b/cmd/crowdsec-cli/reload_freebsd.go deleted file mode 100644 index 991d3ea6080..00000000000 --- a/cmd/crowdsec-cli/reload_freebsd.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -// actually sudo is not that popular on freebsd, but this will do -const reloadMessage = "Run 'sudo service crowdsec reload' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_linux.go b/cmd/crowdsec-cli/reload_linux.go deleted file mode 100644 index a74adfbcdfd..00000000000 --- a/cmd/crowdsec-cli/reload_linux.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -// assume systemd, although gentoo and others may differ -const reloadMessage = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload_windows.go b/cmd/crowdsec-cli/reload_windows.go deleted file mode 100644 index ec9a0b10741..00000000000 --- a/cmd/crowdsec-cli/reload_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package main - -const reloadMessage = "Please restart the crowdsec service for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/simulation.go index 12c9980d588..afcc0a2af37 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/simulation.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -44,7 +45,7 @@ cscli simulation disable crowdsecurity/ssh-bf`, }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { if cmd.Name() != "status" { - log.Info(reloadMessage) + log.Info(reload.Message) } }, } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index ef14f90df17..3ab1415f2ec 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -22,6 +22,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -237,7 +239,7 @@ func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "LAPI URL: %s\n", cred.URL) fmt.Fprintf(out, "LAPI username: %s\n", cred.Login) - if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + if err := clilapi.QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { return fmt.Errorf("could not authenticate to Local API (LAPI): %w", err) } @@ -260,7 +262,7 @@ func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) - auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + auth, enrolled, err := clicapi.QueryCAPIStatus(hub, cred.URL, cred.Login, 
cred.Password) if err != nil { return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) } diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go index f6c32094958..c51140836b8 100644 --- a/cmd/crowdsec-cli/utils.go +++ b/cmd/crowdsec-cli/utils.go @@ -38,26 +38,3 @@ func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value * return nil } - -func removeFromSlice(val string, slice []string) []string { - var i int - var value string - - valueFound := false - - // get the index - for i, value = range slice { - if value == val { - valueFound = true - break - } - } - - if valueFound { - slice[i] = slice[len(slice)-1] - slice[len(slice)-1] = "" - slice = slice[:len(slice)-1] - } - - return slice -} From c4431b63853af40e48ccffb71756ba023c24a327 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 26 Aug 2024 16:33:35 +0200 Subject: [PATCH 261/581] cscli refact: notifications, simulation, papi, setup (#3190) * package 'clinotifications' * package 'clisimulation' * package 'clipapi' * package 'cslisetup' --- .../{ => clinotifications}/notifications.go | 6 ++++-- .../{ => clinotifications}/notifications_table.go | 2 +- cmd/crowdsec-cli/{ => clipapi}/papi.go | 7 +++++-- cmd/crowdsec-cli/{ => clisetup}/setup.go | 6 ++++-- cmd/crowdsec-cli/{ => clisimulation}/simulation.go | 7 +++++-- cmd/crowdsec-cli/main.go | 12 ++++++++---- 6 files changed, 27 insertions(+), 13 deletions(-) rename cmd/crowdsec-cli/{ => clinotifications}/notifications.go (99%) rename cmd/crowdsec-cli/{ => clinotifications}/notifications_table.go (97%) rename cmd/crowdsec-cli/{ => clipapi}/papi.go (96%) rename cmd/crowdsec-cli/{ => clisetup}/setup.go (98%) rename cmd/crowdsec-cli/{ => clisimulation}/simulation.go (98%) diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go similarity index 99% rename from cmd/crowdsec-cli/notifications.go rename to 
cmd/crowdsec-cli/clinotifications/notifications.go index 8c6b6631b33..04be09354c2 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -1,4 +1,4 @@ -package main +package clinotifications import ( "context" @@ -40,11 +40,13 @@ type NotificationsCfg struct { ids []uint } +type configGetter func() *csconfig.Config + type cliNotifications struct { cfg configGetter } -func NewCLINotifications(cfg configGetter) *cliNotifications { +func New(cfg configGetter) *cliNotifications { return &cliNotifications{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/clinotifications/notifications_table.go similarity index 97% rename from cmd/crowdsec-cli/notifications_table.go rename to cmd/crowdsec-cli/clinotifications/notifications_table.go index 2976797bd8a..0b6a3f58efc 100644 --- a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/clinotifications/notifications_table.go @@ -1,4 +1,4 @@ -package main +package clinotifications import ( "io" diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/clipapi/papi.go similarity index 96% rename from cmd/crowdsec-cli/papi.go rename to cmd/crowdsec-cli/clipapi/papi.go index a2fa0a90871..9a9e4fcaa8b 100644 --- a/cmd/crowdsec-cli/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -1,4 +1,4 @@ -package main +package clipapi import ( "fmt" @@ -12,13 +12,16 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiserver" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) +type configGetter func() *csconfig.Config + type cliPapi struct { cfg configGetter } -func NewCLIPapi(cfg configGetter) *cliPapi { +func New(cfg configGetter) *cliPapi { return &cliPapi{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/clisetup/setup.go similarity index 98% rename from cmd/crowdsec-cli/setup.go rename to cmd/crowdsec-cli/clisetup/setup.go index d747af9225f..2a1a2bd0560 100644 --- 
a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -1,4 +1,4 @@ -package main +package clisetup import ( "bytes" @@ -18,11 +18,13 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/setup" ) +type configGetter func() *csconfig.Config + type cliSetup struct { cfg configGetter } -func NewCLISetup(cfg configGetter) *cliSetup { +func New(cfg configGetter) *cliSetup { return &cliSetup{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go similarity index 98% rename from cmd/crowdsec-cli/simulation.go rename to cmd/crowdsec-cli/clisimulation/simulation.go index afcc0a2af37..bf986f82c06 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -1,4 +1,4 @@ -package main +package clisimulation import ( "errors" @@ -12,14 +12,17 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) +type configGetter func() *csconfig.Config + type cliSimulation struct { cfg configGetter } -func NewCLISimulation(cfg configGetter) *cliSimulation { +func New(cfg configGetter) *cliSimulation { return &cliSimulation{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 9124dde0607..ab8b4f82518 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -19,6 +19,10 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisimulation" 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -249,7 +253,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) + cmd.AddCommand(clisimulation.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) cmd.AddCommand(clicapi.New(cli.cfg).NewCommand()) @@ -258,9 +262,9 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(cliconsole.New(cli.cfg).NewCommand()) cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) + cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) + cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIScenario(cli.cfg).NewCommand()) @@ -270,7 +274,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) if fflag.CscliSetup.IsEnabled() { - cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) + cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) } if len(os.Args) > 1 { From 4ca70e1d642fccabc32677d154bfce5b3517909c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 27 Aug 2024 12:41:45 +0200 Subject: [PATCH 262/581] implement GetFSType on openbsd with the correct statfs struct member (#3196) Co-authored-by: Robert Nagy --- pkg/types/getfstype.go | 2 +- 
pkg/types/getfstype_openbsd.go | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 pkg/types/getfstype_openbsd.go diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index c16eea5cf98..728e986bed0 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -1,4 +1,4 @@ -//go:build !windows && !freebsd +//go:build !windows && !freebsd && !openbsd package types diff --git a/pkg/types/getfstype_openbsd.go b/pkg/types/getfstype_openbsd.go new file mode 100644 index 00000000000..9ec254b7bec --- /dev/null +++ b/pkg/types/getfstype_openbsd.go @@ -0,0 +1,25 @@ +//go:build openbsd + +package types + +import ( + "fmt" + "syscall" +) + +func GetFSType(path string) (string, error) { + var fsStat syscall.Statfs_t + + if err := syscall.Statfs(path, &fsStat); err != nil { + return "", fmt.Errorf("failed to get filesystem type: %w", err) + } + + bs := fsStat.F_fstypename + + b := make([]byte, len(bs)) + for i, v := range bs { + b[i] = byte(v) + } + + return string(b), nil +} From 27559d6636f794c5bc50aa62485d455cddaaee7a Mon Sep 17 00:00:00 2001 From: Robert Nagy Date: Tue, 27 Aug 2024 12:41:53 +0200 Subject: [PATCH 263/581] implement GetFSType on openbsd with the correct statfs struct member (#3191) --- pkg/types/getfstype.go | 2 +- pkg/types/getfstype_openbsd.go | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 pkg/types/getfstype_openbsd.go diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index c16eea5cf98..728e986bed0 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -1,4 +1,4 @@ -//go:build !windows && !freebsd +//go:build !windows && !freebsd && !openbsd package types diff --git a/pkg/types/getfstype_openbsd.go b/pkg/types/getfstype_openbsd.go new file mode 100644 index 00000000000..9ec254b7bec --- /dev/null +++ b/pkg/types/getfstype_openbsd.go @@ -0,0 +1,25 @@ +//go:build openbsd + +package types + +import ( + "fmt" + "syscall" +) + 
+func GetFSType(path string) (string, error) { + var fsStat syscall.Statfs_t + + if err := syscall.Statfs(path, &fsStat); err != nil { + return "", fmt.Errorf("failed to get filesystem type: %w", err) + } + + bs := fsStat.F_fstypename + + b := make([]byte, len(bs)) + for i, v := range bs { + b[i] = byte(v) + } + + return string(b), nil +} From ec415ed069d36091203e99a59ac231995ad9e49f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 27 Aug 2024 13:07:05 +0200 Subject: [PATCH 264/581] cscli hub update: option --with-content to keep embedded items in index; use it in docker (#3192) --- Dockerfile | 3 +-- Dockerfile.debian | 3 +-- cmd/crowdsec-cli/hub.go | 10 ++++++++-- docker/docker_start.sh | 2 +- pkg/cwhub/remote.go | 33 ++++++++++++++++++++++++++++++--- test/lib/config/config-local | 2 +- 6 files changed, 42 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index d4c8978c9ae..93ba1010fc2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,8 +24,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ cd crowdsec-v* && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ - cscli hub update && \ - ./docker/preload-hub-items && \ + cscli hub update --with-content && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/Dockerfile.debian b/Dockerfile.debian index 73a44e8a4b8..dd55d2c3e48 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -29,8 +29,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ cd crowdsec-v* && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ - cscli hub update && \ - ./docker/preload-hub-items && \ + cscli hub update --with-content && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/hub.go index e6cba08940a..34bd61b1277 100644 --- a/cmd/crowdsec-cli/hub.go +++ 
b/cmd/crowdsec-cli/hub.go @@ -99,9 +99,10 @@ func (cli *cliHub) newListCmd() *cobra.Command { return cmd } -func (cli *cliHub) update(ctx context.Context) error { +func (cli *cliHub) update(ctx context.Context, withContent bool) error { local := cli.cfg().Hub remote := require.RemoteHub(ctx, cli.cfg()) + remote.EmbedItemContent = withContent // don't use require.Hub because if there is no index file, it would fail hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) @@ -125,6 +126,8 @@ func (cli *cliHub) update(ctx context.Context) error { } func (cli *cliHub) newUpdateCmd() *cobra.Command { + withContent := false + cmd := &cobra.Command{ Use: "update", Short: "Download the latest index (catalog of available configurations)", @@ -134,10 +137,13 @@ Fetches the .index.json file from the hub, containing the list of available conf Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.update(cmd.Context()) + return cli.update(cmd.Context(), withContent) }, } + flags := cmd.Flags() + flags.BoolVar(&withContent, "with-content", false, "Download index with embedded item content") + return cmd } diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 0ae8841e029..fb87c1eff9b 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -57,7 +57,7 @@ run_hub_update() { index_modification_time=$(stat -c %Y /etc/crowdsec/hub/.index.json 2>/dev/null) # Run cscli hub update if no date or if the index file is older than 24h if [ -z "$index_modification_time" ] || [ $(( $(date +%s) - index_modification_time )) -gt 86400 ]; then - cscli hub update + cscli hub update --with-content else echo "Skipping hub update, index file is recent" fi diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 04e4fab972f..8d2dc2dbb94 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -3,6 +3,7 @@ package cwhub import ( "context" "fmt" + "net/url" "github.com/sirupsen/logrus" @@ -11,9 +12,10 @@ 
import ( // RemoteHubCfg is used to retrieve index and items from the remote hub. type RemoteHubCfg struct { - Branch string - URLTemplate string - IndexPath string + Branch string + URLTemplate string + IndexPath string + EmbedItemContent bool } // urlTo builds the URL to download a file from the remote hub. @@ -30,6 +32,24 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { return fmt.Sprintf(r.URLTemplate, r.Branch, remotePath), nil } +// addURLParam adds the "with_content=true" parameter to the URL if it's not already present. +func addURLParam(rawURL string, param string, value string) (string, error) { + parsedURL, err := url.Parse(rawURL) + if err != nil { + return "", fmt.Errorf("failed to parse URL: %w", err) + } + + query := parsedURL.Query() + + if _, exists := query[param]; !exists { + query.Add(param, value) + } + + parsedURL.RawQuery = query.Encode() + + return parsedURL.String(), nil +} + // fetchIndex downloads the index from the hub and returns the content. func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, error) { if r == nil { @@ -41,6 +61,13 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e return false, fmt.Errorf("failed to build hub index request: %w", err) } + if r.EmbedItemContent { + url, err = addURLParam(url, "with_content", "true") + if err != nil { + return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) + } + } + downloaded, err := downloader. New(). WithHTTPClient(hubClient). 
diff --git a/test/lib/config/config-local b/test/lib/config/config-local index f80c26bc652..3e3c806b616 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -114,7 +114,7 @@ make_init_data() { ./instance-db config-yaml ./instance-db setup - "$CSCLI" --warning hub update + "$CSCLI" --warning hub update --with-content # preload some content and data files "$CSCLI" collections install crowdsecurity/linux --download-only From faa3cd3b43dc56399c3efc8ebaed9e3179b48615 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 27 Aug 2024 13:07:11 +0200 Subject: [PATCH 265/581] cscli hub update: option --with-content to keep embedded items in index; use it in docker (#3195) --- Dockerfile | 3 +-- Dockerfile.debian | 3 +-- cmd/crowdsec-cli/hub.go | 10 ++++++++-- docker/docker_start.sh | 2 +- pkg/cwhub/remote.go | 33 ++++++++++++++++++++++++++++++--- test/lib/config/config-local | 2 +- 6 files changed, 42 insertions(+), 11 deletions(-) diff --git a/Dockerfile b/Dockerfile index d30004e4683..731e08fb1a6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -24,8 +24,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ cd crowdsec-v* && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ - cscli hub update && \ - ./docker/preload-hub-items && \ + cscli hub update --with-content && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/Dockerfile.debian b/Dockerfile.debian index 4fda8c26599..ec961a4a1ec 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -29,8 +29,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ cd crowdsec-v* && \ ./wizard.sh --docker-mode && \ cd - >/dev/null && \ - cscli hub update && \ - ./docker/preload-hub-items && \ + cscli hub update --with-content && \ cscli collections install crowdsecurity/linux && \ cscli parsers install crowdsecurity/whitelists diff --git a/cmd/crowdsec-cli/hub.go 
b/cmd/crowdsec-cli/hub.go index 7e00eb64b33..70df30fc410 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/hub.go @@ -99,9 +99,10 @@ func (cli *cliHub) newListCmd() *cobra.Command { return cmd } -func (cli *cliHub) update(ctx context.Context) error { +func (cli *cliHub) update(ctx context.Context, withContent bool) error { local := cli.cfg().Hub remote := require.RemoteHub(ctx, cli.cfg()) + remote.EmbedItemContent = withContent // don't use require.Hub because if there is no index file, it would fail hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) @@ -125,6 +126,8 @@ func (cli *cliHub) update(ctx context.Context) error { } func (cli *cliHub) newUpdateCmd() *cobra.Command { + withContent := false + cmd := &cobra.Command{ Use: "update", Short: "Download the latest index (catalog of available configurations)", @@ -134,10 +137,13 @@ Fetches the .index.json file from the hub, containing the list of available conf Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.update(cmd.Context()) + return cli.update(cmd.Context(), withContent) }, } + flags := cmd.Flags() + flags.BoolVar(&withContent, "with-content", false, "Download index with embedded item content") + return cmd } diff --git a/docker/docker_start.sh b/docker/docker_start.sh index 0ae8841e029..fb87c1eff9b 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -57,7 +57,7 @@ run_hub_update() { index_modification_time=$(stat -c %Y /etc/crowdsec/hub/.index.json 2>/dev/null) # Run cscli hub update if no date or if the index file is older than 24h if [ -z "$index_modification_time" ] || [ $(( $(date +%s) - index_modification_time )) -gt 86400 ]; then - cscli hub update + cscli hub update --with-content else echo "Skipping hub update, index file is recent" fi diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 04e4fab972f..8d2dc2dbb94 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -3,6 +3,7 @@ 
package cwhub import ( "context" "fmt" + "net/url" "github.com/sirupsen/logrus" @@ -11,9 +12,10 @@ import ( // RemoteHubCfg is used to retrieve index and items from the remote hub. type RemoteHubCfg struct { - Branch string - URLTemplate string - IndexPath string + Branch string + URLTemplate string + IndexPath string + EmbedItemContent bool } // urlTo builds the URL to download a file from the remote hub. @@ -30,6 +32,24 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { return fmt.Sprintf(r.URLTemplate, r.Branch, remotePath), nil } +// addURLParam adds the "with_content=true" parameter to the URL if it's not already present. +func addURLParam(rawURL string, param string, value string) (string, error) { + parsedURL, err := url.Parse(rawURL) + if err != nil { + return "", fmt.Errorf("failed to parse URL: %w", err) + } + + query := parsedURL.Query() + + if _, exists := query[param]; !exists { + query.Add(param, value) + } + + parsedURL.RawQuery = query.Encode() + + return parsedURL.String(), nil +} + // fetchIndex downloads the index from the hub and returns the content. func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, error) { if r == nil { @@ -41,6 +61,13 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e return false, fmt.Errorf("failed to build hub index request: %w", err) } + if r.EmbedItemContent { + url, err = addURLParam(url, "with_content", "true") + if err != nil { + return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) + } + } + downloaded, err := downloader. New(). WithHTTPClient(hubClient). 
diff --git a/test/lib/config/config-local b/test/lib/config/config-local index f80c26bc652..3e3c806b616 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -114,7 +114,7 @@ make_init_data() { ./instance-db config-yaml ./instance-db setup - "$CSCLI" --warning hub update + "$CSCLI" --warning hub update --with-content # preload some content and data files "$CSCLI" collections install crowdsecurity/linux --download-only From 2e970b3eb9869c11bef2414758c62419e18fedc7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:42:24 +0200 Subject: [PATCH 266/581] cscli refact: package 'clihubtest' (#3174) * cscli refact: package 'clihubtest' * split hubtest.go subcommands in files * extract function getCoverage() * common function hubTestCoverageTable() * update cyclomatic lint * lint --- .golangci.yml | 2 +- cmd/crowdsec-cli/clihubtest/clean.go | 31 + cmd/crowdsec-cli/clihubtest/coverage.go | 166 ++++ cmd/crowdsec-cli/clihubtest/create.go | 158 ++++ cmd/crowdsec-cli/clihubtest/eval.go | 44 ++ cmd/crowdsec-cli/clihubtest/explain.go | 53 ++ cmd/crowdsec-cli/clihubtest/hubtest.go | 81 ++ cmd/crowdsec-cli/clihubtest/info.go | 44 ++ cmd/crowdsec-cli/clihubtest/list.go | 42 + cmd/crowdsec-cli/clihubtest/run.go | 195 +++++ .../{hubtest_table.go => clihubtest/table.go} | 48 +- cmd/crowdsec-cli/hubtest.go | 746 ------------------ cmd/crowdsec-cli/main.go | 3 +- 13 files changed, 820 insertions(+), 793 deletions(-) create mode 100644 cmd/crowdsec-cli/clihubtest/clean.go create mode 100644 cmd/crowdsec-cli/clihubtest/coverage.go create mode 100644 cmd/crowdsec-cli/clihubtest/create.go create mode 100644 cmd/crowdsec-cli/clihubtest/eval.go create mode 100644 cmd/crowdsec-cli/clihubtest/explain.go create mode 100644 cmd/crowdsec-cli/clihubtest/hubtest.go create mode 100644 cmd/crowdsec-cli/clihubtest/info.go create mode 100644 cmd/crowdsec-cli/clihubtest/list.go create mode 100644 
cmd/crowdsec-cli/clihubtest/run.go rename cmd/crowdsec-cli/{hubtest_table.go => clihubtest/table.go} (50%) delete mode 100644 cmd/crowdsec-cli/hubtest.go diff --git a/.golangci.yml b/.golangci.yml index 86771f17f60..62147b54101 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -103,7 +103,7 @@ linters-settings: disabled: true - name: cyclomatic # lower this after refactoring - arguments: [42] + arguments: [41] - name: defer disabled: true - name: empty-block diff --git a/cmd/crowdsec-cli/clihubtest/clean.go b/cmd/crowdsec-cli/clihubtest/clean.go new file mode 100644 index 00000000000..075d7961d84 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/clean.go @@ -0,0 +1,31 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliHubTest) NewCleanCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean", + Short: "clean [test_name]", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/coverage.go b/cmd/crowdsec-cli/clihubtest/coverage.go new file mode 100644 index 00000000000..b6e5b1e9c01 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/coverage.go @@ -0,0 +1,166 @@ +package clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + "math" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +// getCoverage returns the coverage and the percentage of tests that passed +func getCoverage(show bool, getCoverageFunc func() ([]hubtest.Coverage, error)) ([]hubtest.Coverage, int, error) { + if !show { + return nil, 0, nil + } + + coverage, err := 
getCoverageFunc() + if err != nil { + return nil, 0, fmt.Errorf("while getting coverage: %w", err) + } + + tested := 0 + + for _, test := range coverage { + if test.TestsCount > 0 { + tested++ + } + } + + // keep coverage 0 if there's no tests? + percent := 0 + if len(coverage) > 0 { + percent = int(math.Round((float64(tested) / float64(len(coverage)) * 100))) + } + + return coverage, percent, nil +} + +func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAppsecCov bool, showOnlyPercent bool) error { + cfg := cli.cfg() + + // for this one we explicitly don't do for appsec + if err := HubTest.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %+v", err) + } + + var err error + + // if all are false (flag by default), show them + if !showParserCov && !showScenarioCov && !showAppsecCov { + showParserCov = true + showScenarioCov = true + showAppsecCov = true + } + + parserCoverage, parserCoveragePercent, err := getCoverage(showParserCov, HubTest.GetParsersCoverage) + if err != nil { + return err + } + + scenarioCoverage, scenarioCoveragePercent, err := getCoverage(showScenarioCov, HubTest.GetScenariosCoverage) + if err != nil { + return err + } + + appsecRuleCoverage, appsecRuleCoveragePercent, err := getCoverage(showAppsecCov, HubTest.GetAppsecCoverage) + if err != nil { + return err + } + + if showOnlyPercent { + switch { + case showParserCov: + fmt.Printf("parsers=%d%%", parserCoveragePercent) + case showScenarioCov: + fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) + case showAppsecCov: + fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) + } + + return nil + } + + switch cfg.Cscli.Output { + case "human": + if showParserCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Parser", "Status", "Number of tests"}, parserCoverage) + } + + if showScenarioCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Scenario", "Status", "Number of tests"}, parserCoverage) + } + + if 
showAppsecCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Appsec Rule", "Status", "Number of tests"}, parserCoverage) + } + + fmt.Println() + + if showParserCov { + fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) + } + + if showScenarioCov { + fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) + } + + if showAppsecCov { + fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) + } + case "json": + dump, err := json.MarshalIndent(parserCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + + dump, err = json.MarshalIndent(scenarioCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + + dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + default: + return errors.New("only human/json output modes are supported") + } + + return nil +} + +func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { + var ( + showParserCov bool + showScenarioCov bool + showOnlyPercent bool + showAppsecCov bool + ) + + cmd := &cobra.Command{ + Use: "coverage", + Short: "coverage", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.coverage(showScenarioCov, showParserCov, showAppsecCov, showOnlyPercent) + }, + } + + cmd.PersistentFlags().BoolVar(&showOnlyPercent, "percent", false, "Show only percentages of coverage") + cmd.PersistentFlags().BoolVar(&showParserCov, "parsers", false, "Show only parsers coverage") + cmd.PersistentFlags().BoolVar(&showScenarioCov, "scenarios", false, "Show only scenarios coverage") + cmd.PersistentFlags().BoolVar(&showAppsecCov, "appsec", false, "Show only appsec coverage") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/create.go b/cmd/crowdsec-cli/clihubtest/create.go new file mode 100644 index 00000000000..523c9740cf7 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/create.go @@ -0,0 +1,158 @@ +package 
clihubtest + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "text/template" + + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) NewCreateCmd() *cobra.Command { + var ( + ignoreParsers bool + labels map[string]string + logType string + ) + + parsers := []string{} + postoverflows := []string{} + scenarios := []string{} + + cmd := &cobra.Command{ + Use: "create", + Short: "create [test_name]", + Example: `cscli hubtest create my-awesome-test --type syslog +cscli hubtest create my-nginx-custom-test --type nginx +cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios crowdsecurity/http-probing`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + testName := args[0] + testPath := filepath.Join(hubPtr.HubTestPath, testName) + if _, err := os.Stat(testPath); os.IsExist(err) { + return fmt.Errorf("test '%s' already exists in '%s', exiting", testName, testPath) + } + + if isAppsecTest { + logType = "appsec" + } + + if logType == "" { + return errors.New("please provide a type (--type) for the test") + } + + if err := os.MkdirAll(testPath, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %+v", testPath, err) + } + + configFilePath := filepath.Join(testPath, "config.yaml") + + configFileData := &hubtest.HubTestItemConfig{} + if logType == "appsec" { + // create empty nuclei template file + nucleiFileName := testName + ".yaml" + nucleiFilePath := filepath.Join(testPath, nucleiFileName) + + nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755) + if err != nil { + return err + } + + ntpl := template.Must(template.New("nuclei").Parse(hubtest.TemplateNucleiFile)) + if ntpl == nil { + return errors.New("unable to parse nuclei template") + } + ntpl.ExecuteTemplate(nucleiFile, "nuclei", struct{ TestName string }{TestName: testName}) + nucleiFile.Close() + 
configFileData.AppsecRules = []string{"./appsec-rules//your_rule_here.yaml"} + configFileData.NucleiTemplate = nucleiFileName + fmt.Println() + fmt.Printf(" Test name : %s\n", testName) + fmt.Printf(" Test path : %s\n", testPath) + fmt.Printf(" Config File : %s\n", configFilePath) + fmt.Printf(" Nuclei Template : %s\n", nucleiFilePath) + } else { + // create empty log file + logFileName := testName + ".log" + logFilePath := filepath.Join(testPath, logFileName) + logFile, err := os.Create(logFilePath) + if err != nil { + return err + } + logFile.Close() + + // create empty parser assertion file + parserAssertFilePath := filepath.Join(testPath, hubtest.ParserAssertFileName) + parserAssertFile, err := os.Create(parserAssertFilePath) + if err != nil { + return err + } + parserAssertFile.Close() + // create empty scenario assertion file + scenarioAssertFilePath := filepath.Join(testPath, hubtest.ScenarioAssertFileName) + scenarioAssertFile, err := os.Create(scenarioAssertFilePath) + if err != nil { + return err + } + scenarioAssertFile.Close() + + parsers = append(parsers, "crowdsecurity/syslog-logs") + parsers = append(parsers, "crowdsecurity/dateparse-enrich") + + if len(scenarios) == 0 { + scenarios = append(scenarios, "") + } + + if len(postoverflows) == 0 { + postoverflows = append(postoverflows, "") + } + configFileData.Parsers = parsers + configFileData.Scenarios = scenarios + configFileData.PostOverflows = postoverflows + configFileData.LogFile = logFileName + configFileData.LogType = logType + configFileData.IgnoreParsers = ignoreParsers + configFileData.Labels = labels + fmt.Println() + fmt.Printf(" Test name : %s\n", testName) + fmt.Printf(" Test path : %s\n", testPath) + fmt.Printf(" Log file : %s (please fill it with logs)\n", logFilePath) + fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) + fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) + fmt.Printf(" 
Configuration File : %s (please fill it with parsers, scenarios...)\n", configFilePath) + } + + fd, err := os.Create(configFilePath) + if err != nil { + return fmt.Errorf("open: %w", err) + } + data, err := yaml.Marshal(configFileData) + if err != nil { + return fmt.Errorf("marshal: %w", err) + } + _, err = fd.Write(data) + if err != nil { + return fmt.Errorf("write: %w", err) + } + if err := fd.Close(); err != nil { + return fmt.Errorf("close: %w", err) + } + + return nil + }, + } + + cmd.PersistentFlags().StringVarP(&logType, "type", "t", "", "Log type of the test") + cmd.Flags().StringSliceVarP(&parsers, "parsers", "p", parsers, "Parsers to add to test") + cmd.Flags().StringSliceVar(&postoverflows, "postoverflows", postoverflows, "Postoverflows to add to test") + cmd.Flags().StringSliceVarP(&scenarios, "scenarios", "s", scenarios, "Scenarios to add to test") + cmd.PersistentFlags().BoolVar(&ignoreParsers, "ignore-parsers", false, "Don't run test on parsers") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/eval.go b/cmd/crowdsec-cli/clihubtest/eval.go new file mode 100644 index 00000000000..204a0bc420e --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/eval.go @@ -0,0 +1,44 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliHubTest) NewEvalCmd() *cobra.Command { + var evalExpression string + + cmd := &cobra.Command{ + Use: "eval", + Short: "eval [test_name]", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("can't load test: %+v", err) + } + + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + return fmt.Errorf("can't load test results from '%s': %+v", test.ParserResultFile, err) + } + + output, err := test.ParserAssert.EvalExpression(evalExpression) + if err != nil { + return err + } + + fmt.Print(output) + } + + 
return nil + }, + } + + cmd.PersistentFlags().StringVarP(&evalExpression, "expr", "e", "", "Expression to eval") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go new file mode 100644 index 00000000000..ecaf520211e --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -0,0 +1,53 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/dumps" +) + +func (cli *cliHubTest) NewExplainCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "explain", + Short: "explain [test_name]", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("can't load test: %+v", err) + } + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { + return fmt.Errorf("unable to load parser result after run: %w", err) + } + } + + err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { + return fmt.Errorf("unable to load scenario result after run: %w", err) + } + } + opts := dumps.DumpOpts{} + dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/hubtest.go b/cmd/crowdsec-cli/clihubtest/hubtest.go new file mode 100644 index 00000000000..22607336177 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/hubtest.go @@ -0,0 +1,81 @@ 
+package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +type configGetter func() *csconfig.Config + +var ( + HubTest hubtest.HubTest + HubAppsecTests hubtest.HubTest + hubPtr *hubtest.HubTest + isAppsecTest bool +) + +type cliHubTest struct { + cfg configGetter +} + +func New(cfg configGetter) *cliHubTest { + return &cliHubTest{ + cfg: cfg, + } +} + +func (cli *cliHubTest) NewCommand() *cobra.Command { + var ( + hubPath string + crowdsecPath string + cscliPath string + ) + + cmd := &cobra.Command{ + Use: "hubtest", + Short: "Run functional tests on hub configurations", + Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", + Args: cobra.ExactArgs(0), + DisableAutoGenTag: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + var err error + HubTest, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, false) + if err != nil { + return fmt.Errorf("unable to load hubtest: %+v", err) + } + + HubAppsecTests, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, true) + if err != nil { + return fmt.Errorf("unable to load appsec specific hubtest: %+v", err) + } + + // commands will use the hubPtr, will point to the default hubTest object, or the one dedicated to appsec tests + hubPtr = &HubTest + if isAppsecTest { + hubPtr = &HubAppsecTests + } + + return nil + }, + } + + cmd.PersistentFlags().StringVar(&hubPath, "hub", ".", "Path to hub folder") + cmd.PersistentFlags().StringVar(&crowdsecPath, "crowdsec", "crowdsec", "Path to crowdsec") + cmd.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") + cmd.PersistentFlags().BoolVar(&isAppsecTest, "appsec", false, "Command relates to appsec tests") + + cmd.AddCommand(cli.NewCreateCmd()) + cmd.AddCommand(cli.NewRunCmd()) + cmd.AddCommand(cli.NewCleanCmd()) + cmd.AddCommand(cli.NewInfoCmd()) + 
cmd.AddCommand(cli.NewListCmd()) + cmd.AddCommand(cli.NewCoverageCmd()) + cmd.AddCommand(cli.NewEvalCmd()) + cmd.AddCommand(cli.NewExplainCmd()) + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/info.go b/cmd/crowdsec-cli/clihubtest/info.go new file mode 100644 index 00000000000..2e3fd132340 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/info.go @@ -0,0 +1,44 @@ +package clihubtest + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) NewInfoCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "info", + Short: "info [test_name]", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + fmt.Println() + fmt.Printf(" Test name : %s\n", test.Name) + fmt.Printf(" Test path : %s\n", test.Path) + if isAppsecTest { + fmt.Printf(" Nuclei Template : %s\n", test.Config.NucleiTemplate) + fmt.Printf(" Appsec Rules : %s\n", strings.Join(test.Config.AppsecRules, ", ")) + } else { + fmt.Printf(" Log file : %s\n", filepath.Join(test.Path, test.Config.LogFile)) + fmt.Printf(" Parser assertion file : %s\n", filepath.Join(test.Path, hubtest.ParserAssertFileName)) + fmt.Printf(" Scenario assertion file : %s\n", filepath.Join(test.Path, hubtest.ScenarioAssertFileName)) + } + fmt.Printf(" Configuration File : %s\n", filepath.Join(test.Path, "config.yaml")) + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/list.go b/cmd/crowdsec-cli/clihubtest/list.go new file mode 100644 index 00000000000..76c51927897 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/list.go @@ -0,0 +1,42 @@ +package clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +func (cli 
*cliHubTest) NewListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "list", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + + if err := hubPtr.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %w", err) + } + + switch cfg.Cscli.Output { + case "human": + hubTestListTable(color.Output, cfg.Cscli.Color, hubPtr.Tests) + case "json": + j, err := json.MarshalIndent(hubPtr.Tests, " ", " ") + if err != nil { + return err + } + fmt.Println(string(j)) + default: + return errors.New("only human/json output modes are supported") + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go new file mode 100644 index 00000000000..552ee87c16e --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -0,0 +1,195 @@ +package clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/AlecAivazis/survey/v2" + "github.com/fatih/color" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { + cfg := cli.cfg() + + if !runAll && len(args) == 0 { + return errors.New("please provide test to run or --all flag") + } + + hubPtr.NucleiTargetHost = NucleiTargetHost + hubPtr.AppSecHost = AppSecHost + + if runAll { + if err := hubPtr.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %+v", err) + } + } else { + for _, testName := range args { + _, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + } + } + + // set timezone to avoid DST issues + os.Setenv("TZ", "UTC") + + for _, test := range hubPtr.Tests { + if cfg.Cscli.Output == "human" { + log.Infof("Running test '%s'", 
test.Name) + } + + err := test.Run() + if err != nil { + log.Errorf("running test '%s' failed: %+v", test.Name, err) + } + } + + return nil +} + +func (cli *cliHubTest) NewRunCmd() *cobra.Command { + var ( + noClean bool + runAll bool + forceClean bool + NucleiTargetHost string + AppSecHost string + ) + + cmd := &cobra.Command{ + Use: "run", + Short: "run [test_name]", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + return cli.run(runAll, NucleiTargetHost, AppSecHost, args) + }, + PersistentPostRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + + success := true + testResult := make(map[string]bool) + for _, test := range hubPtr.Tests { + if test.AutoGen && !isAppsecTest { + if test.ParserAssert.AutoGenAssert { + log.Warningf("Assert file '%s' is empty, generating assertion:", test.ParserAssert.File) + fmt.Println() + fmt.Println(test.ParserAssert.AutoGenAssertData) + } + if test.ScenarioAssert.AutoGenAssert { + log.Warningf("Assert file '%s' is empty, generating assertion:", test.ScenarioAssert.File) + fmt.Println() + fmt.Println(test.ScenarioAssert.AutoGenAssertData) + } + if !noClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + + return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) + } + testResult[test.Name] = test.Success + if test.Success { + if cfg.Cscli.Output == "human" { + log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) + } + if !noClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + } else { + success = false + cleanTestEnv := false + if cfg.Cscli.Output == "human" { + if len(test.ParserAssert.Fails) > 0 { + fmt.Println() + log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) + for _, fail := 
range test.ParserAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + fmt.Println() + } + } + if len(test.ScenarioAssert.Fails) > 0 { + fmt.Println() + log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) + for _, fail := range test.ScenarioAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + fmt.Println() + } + } + if !forceClean && !noClean { + prompt := &survey.Confirm{ + Message: fmt.Sprintf("\nDo you want to remove runtime folder for test '%s'? (default: Yes)", test.Name), + Default: true, + } + if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { + return fmt.Errorf("unable to ask to remove runtime folder: %w", err) + } + } + } + + if cleanTestEnv || forceClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + } + } + + switch cfg.Cscli.Output { + case "human": + hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) + case "json": + jsonResult := make(map[string][]string, 0) + jsonResult["success"] = make([]string, 0) + jsonResult["fail"] = make([]string, 0) + for testName, success := range testResult { + if success { + jsonResult["success"] = append(jsonResult["success"], testName) + } else { + jsonResult["fail"] = append(jsonResult["fail"], testName) + } + } + jsonStr, err := json.Marshal(jsonResult) + if err != nil { + return fmt.Errorf("unable to json test result: %w", err) + } + fmt.Println(string(jsonStr)) + default: + return errors.New("only human/json output modes are supported") + } + + if !success { + return errors.New("some 
tests failed") + } + + return nil + }, + } + + cmd.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") + cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") + cmd.Flags().StringVar(&NucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") + cmd.Flags().StringVar(&AppSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") + cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") + + return cmd +} diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/clihubtest/table.go similarity index 50% rename from cmd/crowdsec-cli/hubtest_table.go rename to cmd/crowdsec-cli/clihubtest/table.go index 1fa0f990be2..2a105a1f5c1 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/clihubtest/table.go @@ -1,4 +1,4 @@ -package main +package clihubtest import ( "fmt" @@ -42,51 +42,9 @@ func hubTestListTable(out io.Writer, wantColor string, tests []*hubtest.HubTestI t.Render() } -func hubTestParserCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { +func hubTestCoverageTable(out io.Writer, wantColor string, headers []string, coverage []hubtest.Coverage) { t := cstable.NewLight(out, wantColor) - t.SetHeaders("Parser", "Status", "Number of tests") - t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - parserTested := 0 - - for _, test := range coverage { - status := emoji.RedCircle - if test.TestsCount > 0 { - status = emoji.GreenCircle - parserTested++ - } - - t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) - } - - t.Render() -} - -func hubTestAppsecRuleCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { - t := cstable.NewLight(out, wantColor) - t.SetHeaders("Appsec Rule", "Status", "Number of tests") - 
t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - parserTested := 0 - - for _, test := range coverage { - status := emoji.RedCircle - if test.TestsCount > 0 { - status = emoji.GreenCircle - parserTested++ - } - - t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) - } - - t.Render() -} - -func hubTestScenarioCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { - t := cstable.NewLight(out, wantColor) - t.SetHeaders("Scenario", "Status", "Number of tests") + t.SetHeaders(headers...) t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go deleted file mode 100644 index 2a4635d39f1..00000000000 --- a/cmd/crowdsec-cli/hubtest.go +++ /dev/null @@ -1,746 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "os" - "path/filepath" - "strings" - "text/template" - - "github.com/AlecAivazis/survey/v2" - "github.com/fatih/color" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/crowdsec/pkg/dumps" - "github.com/crowdsecurity/crowdsec/pkg/emoji" - "github.com/crowdsecurity/crowdsec/pkg/hubtest" -) - -var ( - HubTest hubtest.HubTest - HubAppsecTests hubtest.HubTest - hubPtr *hubtest.HubTest - isAppsecTest bool -) - -type cliHubTest struct { - cfg configGetter -} - -func NewCLIHubTest(cfg configGetter) *cliHubTest { - return &cliHubTest{ - cfg: cfg, - } -} - -func (cli *cliHubTest) NewCommand() *cobra.Command { - var ( - hubPath string - crowdsecPath string - cscliPath string - ) - - cmd := &cobra.Command{ - Use: "hubtest", - Short: "Run functional tests on hub configurations", - Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", - Args: 
cobra.ExactArgs(0), - DisableAutoGenTag: true, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - var err error - HubTest, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, false) - if err != nil { - return fmt.Errorf("unable to load hubtest: %+v", err) - } - - HubAppsecTests, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, true) - if err != nil { - return fmt.Errorf("unable to load appsec specific hubtest: %+v", err) - } - - // commands will use the hubPtr, will point to the default hubTest object, or the one dedicated to appsec tests - hubPtr = &HubTest - if isAppsecTest { - hubPtr = &HubAppsecTests - } - - return nil - }, - } - - cmd.PersistentFlags().StringVar(&hubPath, "hub", ".", "Path to hub folder") - cmd.PersistentFlags().StringVar(&crowdsecPath, "crowdsec", "crowdsec", "Path to crowdsec") - cmd.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") - cmd.PersistentFlags().BoolVar(&isAppsecTest, "appsec", false, "Command relates to appsec tests") - - cmd.AddCommand(cli.NewCreateCmd()) - cmd.AddCommand(cli.NewRunCmd()) - cmd.AddCommand(cli.NewCleanCmd()) - cmd.AddCommand(cli.NewInfoCmd()) - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewCoverageCmd()) - cmd.AddCommand(cli.NewEvalCmd()) - cmd.AddCommand(cli.NewExplainCmd()) - - return cmd -} - -func (cli *cliHubTest) NewCreateCmd() *cobra.Command { - var ( - ignoreParsers bool - labels map[string]string - logType string - ) - - parsers := []string{} - postoverflows := []string{} - scenarios := []string{} - - cmd := &cobra.Command{ - Use: "create", - Short: "create [test_name]", - Example: `cscli hubtest create my-awesome-test --type syslog -cscli hubtest create my-nginx-custom-test --type nginx -cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios crowdsecurity/http-probing`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - testName := args[0] - testPath := 
filepath.Join(hubPtr.HubTestPath, testName) - if _, err := os.Stat(testPath); os.IsExist(err) { - return fmt.Errorf("test '%s' already exists in '%s', exiting", testName, testPath) - } - - if isAppsecTest { - logType = "appsec" - } - - if logType == "" { - return errors.New("please provide a type (--type) for the test") - } - - if err := os.MkdirAll(testPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", testPath, err) - } - - configFilePath := filepath.Join(testPath, "config.yaml") - - configFileData := &hubtest.HubTestItemConfig{} - if logType == "appsec" { - // create empty nuclei template file - nucleiFileName := fmt.Sprintf("%s.yaml", testName) - nucleiFilePath := filepath.Join(testPath, nucleiFileName) - - nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755) - if err != nil { - return err - } - - ntpl := template.Must(template.New("nuclei").Parse(hubtest.TemplateNucleiFile)) - if ntpl == nil { - return errors.New("unable to parse nuclei template") - } - ntpl.ExecuteTemplate(nucleiFile, "nuclei", struct{ TestName string }{TestName: testName}) - nucleiFile.Close() - configFileData.AppsecRules = []string{"./appsec-rules//your_rule_here.yaml"} - configFileData.NucleiTemplate = nucleiFileName - fmt.Println() - fmt.Printf(" Test name : %s\n", testName) - fmt.Printf(" Test path : %s\n", testPath) - fmt.Printf(" Config File : %s\n", configFilePath) - fmt.Printf(" Nuclei Template : %s\n", nucleiFilePath) - } else { - // create empty log file - logFileName := fmt.Sprintf("%s.log", testName) - logFilePath := filepath.Join(testPath, logFileName) - logFile, err := os.Create(logFilePath) - if err != nil { - return err - } - logFile.Close() - - // create empty parser assertion file - parserAssertFilePath := filepath.Join(testPath, hubtest.ParserAssertFileName) - parserAssertFile, err := os.Create(parserAssertFilePath) - if err != nil { - return err - } - parserAssertFile.Close() - // create empty scenario 
assertion file - scenarioAssertFilePath := filepath.Join(testPath, hubtest.ScenarioAssertFileName) - scenarioAssertFile, err := os.Create(scenarioAssertFilePath) - if err != nil { - return err - } - scenarioAssertFile.Close() - - parsers = append(parsers, "crowdsecurity/syslog-logs") - parsers = append(parsers, "crowdsecurity/dateparse-enrich") - - if len(scenarios) == 0 { - scenarios = append(scenarios, "") - } - - if len(postoverflows) == 0 { - postoverflows = append(postoverflows, "") - } - configFileData.Parsers = parsers - configFileData.Scenarios = scenarios - configFileData.PostOverflows = postoverflows - configFileData.LogFile = logFileName - configFileData.LogType = logType - configFileData.IgnoreParsers = ignoreParsers - configFileData.Labels = labels - fmt.Println() - fmt.Printf(" Test name : %s\n", testName) - fmt.Printf(" Test path : %s\n", testPath) - fmt.Printf(" Log file : %s (please fill it with logs)\n", logFilePath) - fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) - fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) - fmt.Printf(" Configuration File : %s (please fill it with parsers, scenarios...)\n", configFilePath) - } - - fd, err := os.Create(configFilePath) - if err != nil { - return fmt.Errorf("open: %w", err) - } - data, err := yaml.Marshal(configFileData) - if err != nil { - return fmt.Errorf("marshal: %w", err) - } - _, err = fd.Write(data) - if err != nil { - return fmt.Errorf("write: %w", err) - } - if err := fd.Close(); err != nil { - return fmt.Errorf("close: %w", err) - } - - return nil - }, - } - - cmd.PersistentFlags().StringVarP(&logType, "type", "t", "", "Log type of the test") - cmd.Flags().StringSliceVarP(&parsers, "parsers", "p", parsers, "Parsers to add to test") - cmd.Flags().StringSliceVar(&postoverflows, "postoverflows", postoverflows, "Postoverflows to add to test") - cmd.Flags().StringSliceVarP(&scenarios, "scenarios", 
"s", scenarios, "Scenarios to add to test") - cmd.PersistentFlags().BoolVar(&ignoreParsers, "ignore-parsers", false, "Don't run test on parsers") - - return cmd -} - - -func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { - cfg := cli.cfg() - - if !runAll && len(args) == 0 { - return errors.New("please provide test to run or --all flag") - } - hubPtr.NucleiTargetHost = NucleiTargetHost - hubPtr.AppSecHost = AppSecHost - if runAll { - if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - } else { - for _, testName := range args { - _, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - } - } - - // set timezone to avoid DST issues - os.Setenv("TZ", "UTC") - for _, test := range hubPtr.Tests { - if cfg.Cscli.Output == "human" { - log.Infof("Running test '%s'", test.Name) - } - err := test.Run() - if err != nil { - log.Errorf("running test '%s' failed: %+v", test.Name, err) - } - } - - return nil -} - - -func (cli *cliHubTest) NewRunCmd() *cobra.Command { - var ( - noClean bool - runAll bool - forceClean bool - NucleiTargetHost string - AppSecHost string - ) - - cmd := &cobra.Command{ - Use: "run", - Short: "run [test_name]", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.run(runAll, NucleiTargetHost, AppSecHost, args) - }, - PersistentPostRunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - - success := true - testResult := make(map[string]bool) - for _, test := range hubPtr.Tests { - if test.AutoGen && !isAppsecTest { - if test.ParserAssert.AutoGenAssert { - log.Warningf("Assert file '%s' is empty, generating assertion:", test.ParserAssert.File) - fmt.Println() - fmt.Println(test.ParserAssert.AutoGenAssertData) - } - if test.ScenarioAssert.AutoGenAssert { - log.Warningf("Assert file '%s' is empty, generating assertion:", 
test.ScenarioAssert.File) - fmt.Println() - fmt.Println(test.ScenarioAssert.AutoGenAssertData) - } - if !noClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) - } - testResult[test.Name] = test.Success - if test.Success { - if cfg.Cscli.Output == "human" { - log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) - } - if !noClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - } else { - success = false - cleanTestEnv := false - if cfg.Cscli.Output == "human" { - if len(test.ParserAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) - for _, fail := range test.ParserAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } - if len(test.ScenarioAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) - for _, fail := range test.ScenarioAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } - if !forceClean && !noClean { - prompt := &survey.Confirm{ - Message: fmt.Sprintf("\nDo you want to remove runtime folder for test '%s'? 
(default: Yes)", test.Name), - Default: true, - } - if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { - return fmt.Errorf("unable to ask to remove runtime folder: %w", err) - } - } - } - - if cleanTestEnv || forceClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - } - } - - switch cfg.Cscli.Output { - case "human": - hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) - case "json": - jsonResult := make(map[string][]string, 0) - jsonResult["success"] = make([]string, 0) - jsonResult["fail"] = make([]string, 0) - for testName, success := range testResult { - if success { - jsonResult["success"] = append(jsonResult["success"], testName) - } else { - jsonResult["fail"] = append(jsonResult["fail"], testName) - } - } - jsonStr, err := json.Marshal(jsonResult) - if err != nil { - return fmt.Errorf("unable to json test result: %w", err) - } - fmt.Println(string(jsonStr)) - default: - return errors.New("only human/json output modes are supported") - } - - if !success { - return errors.New("some tests failed") - } - - return nil - }, - } - - cmd.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") - cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") - cmd.Flags().StringVar(&NucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") - cmd.Flags().StringVar(&AppSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") - cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") - - return cmd -} - -func (cli *cliHubTest) NewCleanCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "clean", - Short: "clean [test_name]", - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return 
fmt.Errorf("unable to load test '%s': %w", testName, err) - } - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - - return nil - }, - } - - return cmd -} - -func (cli *cliHubTest) NewInfoCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "info", - Short: "info [test_name]", - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - fmt.Println() - fmt.Printf(" Test name : %s\n", test.Name) - fmt.Printf(" Test path : %s\n", test.Path) - if isAppsecTest { - fmt.Printf(" Nuclei Template : %s\n", test.Config.NucleiTemplate) - fmt.Printf(" Appsec Rules : %s\n", strings.Join(test.Config.AppsecRules, ", ")) - } else { - fmt.Printf(" Log file : %s\n", filepath.Join(test.Path, test.Config.LogFile)) - fmt.Printf(" Parser assertion file : %s\n", filepath.Join(test.Path, hubtest.ParserAssertFileName)) - fmt.Printf(" Scenario assertion file : %s\n", filepath.Join(test.Path, hubtest.ScenarioAssertFileName)) - } - fmt.Printf(" Configuration File : %s\n", filepath.Join(test.Path, "config.yaml")) - } - - return nil - }, - } - - return cmd -} - -func (cli *cliHubTest) NewListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "list", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - - if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %w", err) - } - - switch cfg.Cscli.Output { - case "human": - hubTestListTable(color.Output, cfg.Cscli.Color, hubPtr.Tests) - case "json": - j, err := json.MarshalIndent(hubPtr.Tests, " ", " ") - if err != nil { - return err - } - fmt.Println(string(j)) - default: - return errors.New("only human/json output modes are supported") - } - - return nil - }, 
- } - - return cmd -} - -func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAppsecCov bool, showOnlyPercent bool) error { - cfg := cli.cfg() - - // for this one we explicitly don't do for appsec - if err := HubTest.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - - var err error - - scenarioCoverage := []hubtest.Coverage{} - parserCoverage := []hubtest.Coverage{} - appsecRuleCoverage := []hubtest.Coverage{} - scenarioCoveragePercent := 0 - parserCoveragePercent := 0 - appsecRuleCoveragePercent := 0 - - // if both are false (flag by default), show both - showAll := !showScenarioCov && !showParserCov && !showAppsecCov - - if showParserCov || showAll { - parserCoverage, err = HubTest.GetParsersCoverage() - if err != nil { - return fmt.Errorf("while getting parser coverage: %w", err) - } - - parserTested := 0 - - for _, test := range parserCoverage { - if test.TestsCount > 0 { - parserTested++ - } - } - - parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) - } - - if showScenarioCov || showAll { - scenarioCoverage, err = HubTest.GetScenariosCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } - - scenarioTested := 0 - - for _, test := range scenarioCoverage { - if test.TestsCount > 0 { - scenarioTested++ - } - } - - scenarioCoveragePercent = int(math.Round((float64(scenarioTested) / float64(len(scenarioCoverage)) * 100))) - } - - if showAppsecCov || showAll { - appsecRuleCoverage, err = HubTest.GetAppsecCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } - - appsecRuleTested := 0 - - for _, test := range appsecRuleCoverage { - if test.TestsCount > 0 { - appsecRuleTested++ - } - } - - appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) - } - - if showOnlyPercent { - switch { - case showAll: - 
fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) - case showParserCov: - fmt.Printf("parsers=%d%%", parserCoveragePercent) - case showScenarioCov: - fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) - case showAppsecCov: - fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) - } - - return nil - } - - switch cfg.Cscli.Output { - case "human": - if showParserCov || showAll { - hubTestParserCoverageTable(color.Output, cfg.Cscli.Color, parserCoverage) - } - - if showScenarioCov || showAll { - hubTestScenarioCoverageTable(color.Output, cfg.Cscli.Color, scenarioCoverage) - } - - if showAppsecCov || showAll { - hubTestAppsecRuleCoverageTable(color.Output, cfg.Cscli.Color, appsecRuleCoverage) - } - - fmt.Println() - - if showParserCov || showAll { - fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) - } - - if showScenarioCov || showAll { - fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) - } - - if showAppsecCov || showAll { - fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) - } - case "json": - dump, err := json.MarshalIndent(parserCoverage, "", " ") - if err != nil { - return err - } - - fmt.Printf("%s", dump) - - dump, err = json.MarshalIndent(scenarioCoverage, "", " ") - if err != nil { - return err - } - - fmt.Printf("%s", dump) - - dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") - if err != nil { - return err - } - - fmt.Printf("%s", dump) - default: - return errors.New("only human/json output modes are supported") - } - - return nil -} - -func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { - var ( - showParserCov bool - showScenarioCov bool - showOnlyPercent bool - showAppsecCov bool - ) - - cmd := &cobra.Command{ - Use: "coverage", - Short: "coverage", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.coverage(showScenarioCov, showParserCov, 
showAppsecCov, showOnlyPercent) - }, - } - - cmd.PersistentFlags().BoolVar(&showOnlyPercent, "percent", false, "Show only percentages of coverage") - cmd.PersistentFlags().BoolVar(&showParserCov, "parsers", false, "Show only parsers coverage") - cmd.PersistentFlags().BoolVar(&showScenarioCov, "scenarios", false, "Show only scenarios coverage") - cmd.PersistentFlags().BoolVar(&showAppsecCov, "appsec", false, "Show only appsec coverage") - - return cmd -} - -func (cli *cliHubTest) NewEvalCmd() *cobra.Command { - var evalExpression string - - cmd := &cobra.Command{ - Use: "eval", - Short: "eval [test_name]", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("can't load test: %+v", err) - } - - err = test.ParserAssert.LoadTest(test.ParserResultFile) - if err != nil { - return fmt.Errorf("can't load test results from '%s': %+v", test.ParserResultFile, err) - } - - output, err := test.ParserAssert.EvalExpression(evalExpression) - if err != nil { - return err - } - - fmt.Print(output) - } - - return nil - }, - } - - cmd.PersistentFlags().StringVarP(&evalExpression, "expr", "e", "", "Expression to eval") - - return cmd -} - -func (cli *cliHubTest) NewExplainCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "explain", - Short: "explain [test_name]", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := HubTest.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("can't load test: %+v", err) - } - err = test.ParserAssert.LoadTest(test.ParserResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { - return fmt.Errorf("unable to load 
parser result after run: %w", err) - } - } - - err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { - return fmt.Errorf("unable to load scenario result after run: %w", err) - } - } - opts := dumps.DumpOpts{} - dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) - } - - return nil - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index ab8b4f82518..12128f6091a 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" @@ -261,7 +262,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCompletionCmd()) cmd.AddCommand(cliconsole.New(cli.cfg).NewCommand()) cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) - cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) + cmd.AddCommand(clihubtest.New(cli.cfg).NewCommand()) cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) From eec32ad64b4b5fbdb039f36f2dd681f94c3597a2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 28 Aug 2024 10:52:49 +0200 Subject: [PATCH 267/581] cscli refact: extract packages ask, 
clientinfo (#3197) * cscli: extrack package 'crowdsec-cli/ask' * cscli: extract package 'crowdsec-cli/clientinfo' --- cmd/crowdsec-cli/ask/ask.go | 20 +++++++ cmd/crowdsec-cli/bouncers.go | 64 +++-------------------- cmd/crowdsec-cli/clientinfo/clientinfo.go | 39 ++++++++++++++ cmd/crowdsec-cli/machines.go | 16 +++--- 4 files changed, 76 insertions(+), 63 deletions(-) create mode 100644 cmd/crowdsec-cli/ask/ask.go create mode 100644 cmd/crowdsec-cli/clientinfo/clientinfo.go diff --git a/cmd/crowdsec-cli/ask/ask.go b/cmd/crowdsec-cli/ask/ask.go new file mode 100644 index 00000000000..484ccb30c8a --- /dev/null +++ b/cmd/crowdsec-cli/ask/ask.go @@ -0,0 +1,20 @@ +package ask + +import ( + "github.com/AlecAivazis/survey/v2" +) + +func YesNo(message string, defaultAnswer bool) (bool, error) { + var answer bool + + prompt := &survey.Confirm{ + Message: message, + Default: defaultAnswer, + } + + if err := survey.AskOne(prompt, &answer); err != nil { + return defaultAnswer, err + } + + return answer, nil +} diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go index d3edcea0db9..68ce1a2fa05 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/bouncers.go @@ -11,12 +11,13 @@ import ( "strings" "time" - "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" "github.com/jedib0t/go-pretty/v6/table" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" @@ -27,55 +28,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -type featureflagProvider interface { - GetFeatureflags() string -} - -type osProvider interface { - GetOsname() string - GetOsversion() string -} - -func getOSNameAndVersion(o osProvider) string { 
- ret := o.GetOsname() - if o.GetOsversion() != "" { - if ret != "" { - ret += "/" - } - - ret += o.GetOsversion() - } - - if ret == "" { - return "?" - } - - return ret -} - -func getFeatureFlagList(o featureflagProvider) []string { - if o.GetFeatureflags() == "" { - return nil - } - - return strings.Split(o.GetFeatureflags(), ",") -} - -func askYesNo(message string, defaultAnswer bool) (bool, error) { - var answer bool - - prompt := &survey.Confirm{ - Message: message, - Default: defaultAnswer, - } - - if err := survey.AskOne(prompt, &answer); err != nil { - return defaultAnswer, err - } - - return answer, nil -} - type cliBouncers struct { db *database.Client cfg configGetter @@ -171,8 +123,8 @@ func newBouncerInfo(b *ent.Bouncer) bouncerInfo { Version: b.Version, LastPull: b.LastPull, AuthType: b.AuthType, - OS: getOSNameAndVersion(b), - Featureflags: getFeatureFlagList(b), + OS: clientinfo.GetOSNameAndVersion(b), + Featureflags: clientinfo.GetFeatureFlagList(b), } } @@ -385,7 +337,7 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { func (cli *cliBouncers) prune(duration time.Duration, force bool) error { if duration < 2*time.Minute { - if yes, err := askYesNo( + if yes, err := ask.YesNo( "The duration you provided is less than 2 minutes. "+ "This may remove active bouncers. Continue?", false); err != nil { return err @@ -408,7 +360,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { cli.listHuman(color.Output, bouncers) if !force { - if yes, err := askYesNo( + if yes, err := ask.YesNo( "You are about to PERMANENTLY remove the above bouncers from the database. "+ "These will NOT be recoverable. 
Continue?", false); err != nil { return err @@ -478,10 +430,10 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {"Version", bouncer.Version}, {"Last Pull", lastPull}, {"Auth type", bouncer.AuthType}, - {"OS", getOSNameAndVersion(bouncer)}, + {"OS", clientinfo.GetOSNameAndVersion(bouncer)}, }) - for _, ff := range getFeatureFlagList(bouncer) { + for _, ff := range clientinfo.GetFeatureFlagList(bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } diff --git a/cmd/crowdsec-cli/clientinfo/clientinfo.go b/cmd/crowdsec-cli/clientinfo/clientinfo.go new file mode 100644 index 00000000000..0bf1d98804f --- /dev/null +++ b/cmd/crowdsec-cli/clientinfo/clientinfo.go @@ -0,0 +1,39 @@ +package clientinfo + +import ( + "strings" +) + +type featureflagProvider interface { + GetFeatureflags() string +} + +type osProvider interface { + GetOsname() string + GetOsversion() string +} + +func GetOSNameAndVersion(o osProvider) string { + ret := o.GetOsname() + if o.GetOsversion() != "" { + if ret != "" { + ret += "/" + } + + ret += o.GetOsversion() + } + + if ret == "" { + return "?" 
+ } + + return ret +} + +func GetFeatureFlagList(o featureflagProvider) []string { + if o.GetFeatureflags() == "" { + return nil + } + + return strings.Split(o.GetFeatureflags(), ",") +} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go index 34d0b1b9208..8b35245405f 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/machines.go @@ -19,6 +19,8 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" @@ -138,7 +140,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { hb = emoji.Warning + " " + hb } - t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, getOSNameAndVersion(m), m.AuthType, hb}) + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) } io.WriteString(out, t.Render() + "\n") @@ -171,8 +173,8 @@ func newMachineInfo(m *ent.Machine) machineInfo { Version: m.Version, IsValidated: m.IsValidated, AuthType: m.AuthType, - OS: getOSNameAndVersion(m), - Featureflags: getFeatureFlagList(m), + OS: clientinfo.GetOSNameAndVersion(m), + Featureflags: clientinfo.GetFeatureFlagList(m), Datasources: m.Datasources, } } @@ -466,7 +468,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { if duration < 2*time.Minute && !notValidOnly { - if yes, err := askYesNo( + if yes, err := ask.YesNo( "The duration you provided is less than 2 minutes. "+ "This can break installations if the machines are only temporarily disconnected. 
Continue?", false); err != nil { return err @@ -495,7 +497,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b cli.listHuman(color.Output, machines) if !force { - if yes, err := askYesNo( + if yes, err := ask.YesNo( "You are about to PERMANENTLY remove the above machines from the database. "+ "These will NOT be recoverable. Continue?", false); err != nil { return err @@ -588,7 +590,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { {"Last Heartbeat", machine.LastHeartbeat}, {"Validated?", machine.IsValidated}, {"CrowdSec version", machine.Version}, - {"OS", getOSNameAndVersion(machine)}, + {"OS", clientinfo.GetOSNameAndVersion(machine)}, {"Auth type", machine.AuthType}, }) @@ -596,7 +598,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) } - for _, ff := range getFeatureFlagList(machine) { + for _, ff := range clientinfo.GetFeatureFlagList(machine) { t.AppendRow(table.Row{"Feature Flags", ff}) } From b880df9a683d249c2152408fb48d99b11dd1217f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 28 Aug 2024 11:52:25 +0200 Subject: [PATCH 268/581] cscli refact: package 'clihub' (#3198) * cscli refact: package 'clihub' * check for errors --- .golangci.yml | 2 +- cmd/crowdsec-cli/{ => clihub}/hub.go | 28 ++++++++----- cmd/crowdsec-cli/{ => clihub}/item_metrics.go | 2 +- cmd/crowdsec-cli/{ => clihub}/items.go | 8 ++-- cmd/crowdsec-cli/{ => clihub}/utils_table.go | 2 +- cmd/crowdsec-cli/itemcli.go | 7 ++-- cmd/crowdsec-cli/main.go | 3 +- cmd/crowdsec-cli/support.go | 42 +++++++++---------- 8 files changed, 50 insertions(+), 44 deletions(-) rename cmd/crowdsec-cli/{ => clihub}/hub.go (90%) rename cmd/crowdsec-cli/{ => clihub}/item_metrics.go (99%) rename cmd/crowdsec-cli/{ => clihub}/items.go (95%) rename cmd/crowdsec-cli/{ => clihub}/utils_table.go (99%) diff --git 
a/.golangci.yml b/.golangci.yml index 62147b54101..c59ab372799 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -482,7 +482,7 @@ issues: - linters: - revive - path: "cmd/crowdsec-cli/item_metrics.go" + path: "cmd/crowdsec-cli/clihub/item_metrics.go" text: "deep-exit: .*" - linters: diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/clihub/hub.go similarity index 90% rename from cmd/crowdsec-cli/hub.go rename to cmd/crowdsec-cli/clihub/hub.go index 34bd61b1277..22568355546 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -1,9 +1,10 @@ -package main +package clihub import ( "context" "encoding/json" "fmt" + "io" "github.com/fatih/color" log "github.com/sirupsen/logrus" @@ -11,14 +12,17 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) +type configGetter = func() *csconfig.Config + type cliHub struct { cfg configGetter } -func NewCLIHub(cfg configGetter) *cliHub { +func New(cfg configGetter) *cliHub { return &cliHub{ cfg: cfg, } @@ -47,14 +51,9 @@ cscli hub upgrade`, return cmd } -func (cli *cliHub) list(all bool) error { +func (cli *cliHub) List(out io.Writer, hub *cwhub.Hub, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, nil, log.StandardLogger()) - if err != nil { - return err - } - for _, v := range hub.Warnings { log.Info(v) } @@ -65,14 +64,16 @@ func (cli *cliHub) list(all bool) error { items := make(map[string][]*cwhub.Item) + var err error + for _, itemType := range cwhub.ItemTypes { - items[itemType], err = selectItems(hub, itemType, nil, !all) + items[itemType], err = SelectItems(hub, itemType, nil, !all) if err != nil { return err } } - err = listItems(color.Output, cfg.Cscli.Color, cwhub.ItemTypes, items, true, cfg.Cscli.Output) + err = ListItems(out, cfg.Cscli.Color, cwhub.ItemTypes, items, true, cfg.Cscli.Output) if err != nil { return err } @@ -89,7 +90,12 @@ 
func (cli *cliHub) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(all) + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + if err != nil { + return err + } + + return cli.List(color.Output, hub, all) }, } diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/clihub/item_metrics.go similarity index 99% rename from cmd/crowdsec-cli/item_metrics.go rename to cmd/crowdsec-cli/clihub/item_metrics.go index f00ae08b00b..aaee63d1d38 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/clihub/item_metrics.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "net/http" diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/clihub/items.go similarity index 95% rename from cmd/crowdsec-cli/items.go rename to cmd/crowdsec-cli/clihub/items.go index 5a4fee4d582..4dd3c46e0f9 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "encoding/csv" @@ -16,7 +16,7 @@ import ( ) // selectItems returns a slice of items of a given type, selected by name and sorted by case-insensitive name -func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { +func SelectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { allItems := hub.GetItemsByType(itemType, true) itemNames := make([]string, len(allItems)) @@ -57,7 +57,7 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b return wantedItems, nil } -func listItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { +func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { switch output { case "human": nothingToDisplay := true @@ -146,7 +146,7 
@@ func listItems(out io.Writer, wantColor string, itemTypes []string, items map[st return nil } -func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { +func InspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { switch output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go similarity index 99% rename from cmd/crowdsec-cli/utils_table.go rename to cmd/crowdsec-cli/clihub/utils_table.go index 6df16cd85f5..018071d91bb 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "fmt" diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/itemcli.go index 11ac1232eae..a5629b425b9 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/itemcli.go @@ -15,6 +15,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -372,7 +373,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff continue } - if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { + if err = clihub.InspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { return err } @@ -428,12 +429,12 @@ func (cli cliItem) list(args []string, all bool) error { items := make(map[string][]*cwhub.Item) - items[cli.name], err = selectItems(hub, cli.name, args, !all) + items[cli.name], err = clihub.SelectItems(hub, cli.name, args, !all) if err != nil { return err } - return listItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, 
false, cfg.Cscli.Output) + return clihub.ListItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, false, cfg.Cscli.Output) } func (cli cliItem) newListCmd() *cobra.Command { diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 12128f6091a..2153ebfb7bb 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" @@ -249,7 +250,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) + cmd.AddCommand(clihub.New(cli.cfg).NewCommand()) cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/support.go index 3ab1415f2ec..c48c84668ea 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/support.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" @@ -38,7 +39,7 @@ const ( SUPPORT_VERSION_PATH = "version.txt" SUPPORT_FEATURES_PATH = "features.txt" SUPPORT_OS_INFO_PATH 
= "osinfo.txt" - SUPPORT_HUB_DIR = "hub/" + SUPPORT_HUB = "hub.txt" SUPPORT_BOUNCERS_PATH = "lapi/bouncers.txt" SUPPORT_AGENTS_PATH = "lapi/agents.txt" SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" @@ -163,26 +164,23 @@ func (cli *cliSupport) dumpOSInfo(zw *zip.Writer) error { return nil } -func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub, itemType string) error { - var err error - - out := new(bytes.Buffer) - - log.Infof("Collecting hub: %s", itemType) +func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub) error { + if hub == nil { + return errors.New("no hub connection") + } - items := make(map[string][]*cwhub.Item) + log.Infof("Collecting hub") - if items[itemType], err = selectItems(hub, itemType, nil, true); err != nil { - return fmt.Errorf("could not collect %s list: %w", itemType, err) - } + out := new(bytes.Buffer) - if err := listItems(out, cli.cfg().Cscli.Color, []string{itemType}, items, false, "human"); err != nil { - return fmt.Errorf("could not list %s: %w", itemType, err) + ch := clihub.New(cli.cfg) + if err := ch.List(out, hub, false); err != nil { + return err } stripped := stripAnsiString(out.String()) - cli.writeToZip(zw, SUPPORT_HUB_DIR+itemType+".txt", time.Now(), strings.NewReader(stripped)) + cli.writeToZip(zw, SUPPORT_HUB, time.Now(), strings.NewReader(stripped)) return nil } @@ -198,7 +196,9 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { // call the "cscli bouncers list" command directly, skip any preRun cm := cliBouncers{db: db, cfg: cli.cfg} - cm.list(out) + if err := cm.list(out); err != nil { + return err + } stripped := stripAnsiString(out.String()) @@ -218,7 +218,9 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { // call the "cscli machines list" command directly, skip any preRun cm := cliMachines{db: db, cfg: cli.cfg} - cm.list(out) + if err := cm.list(out); err != nil { + return err + } stripped := 
stripAnsiString(out.String()) @@ -513,12 +515,8 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { log.Warnf("could not collect main config file: %s", err) } - if hub != nil { - for _, itemType := range cwhub.ItemTypes { - if err = cli.dumpHubItems(zipWriter, hub, itemType); err != nil { - log.Warnf("could not collect %s information: %s", itemType, err) - } - } + if err = cli.dumpHubItems(zipWriter, hub); err != nil { + log.Warnf("could not collect hub information: %s", err) } if err = cli.dumpBouncers(zipWriter, db); err != nil { From 0fb64682fd17452784b81a42f6909bd322afcacb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 29 Aug 2024 15:21:29 +0200 Subject: [PATCH 269/581] cscli refact: package cli{support, machine, bouncer} (#3199) * cscli refact: clisupport (reuse lapi status, capi status) * cscli refact: package clibouncer, climachine * cscli refact: package clisupport --- cmd/crowdsec-cli/{ => clibouncer}/bouncers.go | 16 +++-- cmd/crowdsec-cli/clicapi/capi.go | 42 +++++++------ cmd/crowdsec-cli/clilapi/lapi.go | 41 ++++++------ cmd/crowdsec-cli/{ => climachine}/flag.go | 2 +- cmd/crowdsec-cli/{ => climachine}/machines.go | 15 +++-- cmd/crowdsec-cli/{ => clisupport}/support.go | 62 +++++++------------ cmd/crowdsec-cli/main.go | 9 ++- test/bats/01_cscli.bats | 12 ++-- test/bats/03_noagent.bats | 2 +- test/bats/04_capi.bats | 10 +-- test/bats/04_nocapi.bats | 2 +- test/bats/09_socket.bats | 12 ++-- 12 files changed, 117 insertions(+), 108 deletions(-) rename cmd/crowdsec-cli/{ => clibouncer}/bouncers.go (96%) rename cmd/crowdsec-cli/{ => climachine}/flag.go (96%) rename cmd/crowdsec-cli/{ => climachine}/machines.go (98%) rename cmd/crowdsec-cli/{ => clisupport}/support.go (90%) diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go similarity index 96% rename from cmd/crowdsec-cli/bouncers.go rename to cmd/crowdsec-cli/clibouncer/bouncers.go index 
68ce1a2fa05..0d1484bcc6b 100644 --- a/cmd/crowdsec-cli/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -1,4 +1,4 @@ -package main +package clibouncer import ( "encoding/csv" @@ -21,6 +21,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" @@ -28,12 +29,14 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter = func() *csconfig.Config + type cliBouncers struct { db *database.Client cfg configGetter } -func NewCLIBouncers(cfg configGetter) *cliBouncers { +func New(cfg configGetter) *cliBouncers { return &cliBouncers{ cfg: cfg, } @@ -156,8 +159,11 @@ func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { return nil } -func (cli *cliBouncers) list(out io.Writer) error { - bouncers, err := cli.db.ListBouncers() +func (cli *cliBouncers) List(out io.Writer, db *database.Client) error { + // XXX: must use the provided db object, the one in the struct might be nil + // (calling List directly skips the PersistentPreRunE) + + bouncers, err := db.ListBouncers() if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) } @@ -194,7 +200,7 @@ func (cli *cliBouncers) newListCmd() *cobra.Command { Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(color.Output) + return cli.List(color.Output, cli.db) }, } diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index bf45613c776..4d658e3a602 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "io" "net/url" "os" + 
"github.com/fatih/color" "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -23,7 +25,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -type configGetter func() *csconfig.Config +type configGetter = func() *csconfig.Config type cliCapi struct { cfg configGetter @@ -147,11 +149,11 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { return cmd } -// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. -func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { +// queryCAPIStatus checks if the Central API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. +func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { apiURL, err := url.Parse(credURL) if err != nil { - return false, false, fmt.Errorf("parsing api url: %w", err) + return false, false, err } itemsForAPI := hub.GetInstalledListForAPI() @@ -176,7 +178,7 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri }, }) if err != nil { - return false, false, fmt.Errorf("new client api: %w", err) + return false, false, err } pw := strfmt.Password(password) @@ -197,10 +199,11 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri if client.IsEnrolled() { return true, true, nil } + return true, false, nil } -func (cli *cliCapi) status() error { +func (cli *cliCapi) Status(out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() if err := require.CAPIRegistered(cfg); err != nil { @@ -209,24 +212,22 @@ func (cli *cliCapi) status() error { cred := cfg.API.Server.OnlineClient.Credentials - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) - 
log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) + fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + auth, enrolled, err := queryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) if err != nil { - return fmt.Errorf("CAPI: failed to authenticate to Central API (CAPI): %s", err) + return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) } + if auth { - log.Info("You can successfully interact with Central API (CAPI)") + fmt.Fprint(out, "You can successfully interact with Central API (CAPI)\n") } + if enrolled { - log.Info("Your instance is enrolled in the console") + fmt.Fprint(out, "Your instance is enrolled in the console\n") } + return nil } @@ -237,7 +238,12 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.status() + hub, err := require.Hub(cli.cfg(), nil, nil) + if err != nil { + return err + } + + return cli.Status(color.Output, hub) }, } diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index ec66daf16a4..2de962d896d 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" + "io" "net/url" "os" "slices" "sort" "strings" + "github.com/fatih/color" "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -30,10 +32,10 @@ import ( const LAPIURLPrefix = "v1" -type configGetter func() *csconfig.Config +type configGetter = func() *csconfig.Config type cliLapi struct { - cfg configGetter + cfg configGetter } func New(cfg configGetter) *cliLapi { @@ -42,11 +44,11 @@ func New(cfg configGetter) *cliLapi { } } -// QueryLAPIStatus checks 
if the Local API is reachable, and if the credentials are correct -func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { +// queryLAPIStatus checks if the Local API is reachable, and if the credentials are correct. +func queryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { apiURL, err := url.Parse(credURL) if err != nil { - return fmt.Errorf("parsing api url: %w", err) + return false, err } client, err := apiclient.NewDefaultClient(apiURL, @@ -54,7 +56,7 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri cwversion.UserAgent(), nil) if err != nil { - return fmt.Errorf("init default client: %w", err) + return false, err } pw := strfmt.Password(password) @@ -69,30 +71,26 @@ func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) if err != nil { - return err + return false, err } - return nil + return true, nil } -func (cli *cliLapi) status() error { +func (cli *cliLapi) Status(out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() cred := cfg.API.Client.Credentials - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Client.CredentialsFilePath) + fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { + _, err := queryLAPIStatus(hub, cred.URL, cred.Login, cred.Password) + if err != nil { return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) } - log.Infof("You can successfully interact with Local API (LAPI)") + fmt.Fprintf(out, "You can successfully interact with Local API (LAPI)\n") return 
nil } @@ -197,7 +195,12 @@ func (cli *cliLapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.status() + hub, err := require.Hub(cli.cfg(), nil, nil) + if err != nil { + return err + } + + return cli.Status(color.Output, hub) }, } diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/climachine/flag.go similarity index 96% rename from cmd/crowdsec-cli/flag.go rename to cmd/crowdsec-cli/climachine/flag.go index 1780d08e5f7..c3fefd896e1 100644 --- a/cmd/crowdsec-cli/flag.go +++ b/cmd/crowdsec-cli/climachine/flag.go @@ -1,4 +1,4 @@ -package main +package climachine // Custom types for flag validation and conversion. diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/climachine/machines.go similarity index 98% rename from cmd/crowdsec-cli/machines.go rename to cmd/crowdsec-cli/climachine/machines.go index 8b35245405f..bf8656105aa 100644 --- a/cmd/crowdsec-cli/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -1,4 +1,4 @@ -package main +package climachine import ( "encoding/csv" @@ -49,12 +49,14 @@ func getLastHeartbeat(m *ent.Machine) (string, bool) { return hb, true } +type configGetter = func() *csconfig.Config + type cliMachines struct { db *database.Client cfg configGetter } -func NewCLIMachines(cfg configGetter) *cliMachines { +func New(cfg configGetter) *cliMachines { return &cliMachines{ cfg: cfg, } @@ -208,8 +210,11 @@ func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { return nil } -func (cli *cliMachines) list(out io.Writer) error { - machines, err := cli.db.ListMachines() +func (cli *cliMachines) List(out io.Writer, db *database.Client) error { + // XXX: must use the provided db object, the one in the struct might be nil + // (calling List directly skips the PersistentPreRunE) + + machines, err := db.ListMachines() if err != nil { return fmt.Errorf("unable to list machines: %w", err) } @@ -247,7 +252,7 @@ func 
(cli *cliMachines) newListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(color.Output) + return cli.List(color.Output, cli.db) }, } diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/clisupport/support.go similarity index 90% rename from cmd/crowdsec-cli/support.go rename to cmd/crowdsec-cli/clisupport/support.go index c48c84668ea..55f0ec4b03e 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -1,4 +1,4 @@ -package main +package clisupport import ( "archive/zip" @@ -22,9 +22,11 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -165,15 +167,15 @@ func (cli *cliSupport) dumpOSInfo(zw *zip.Writer) error { } func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub) error { + log.Infof("Collecting hub") + if hub == nil { return errors.New("no hub connection") } - log.Infof("Collecting hub") - out := new(bytes.Buffer) - ch := clihub.New(cli.cfg) + if err := ch.List(out, hub, false); err != nil { return err } @@ -193,10 +195,9 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { } out := new(bytes.Buffer) + cm := clibouncer.New(cli.cfg) - // call the "cscli bouncers list" command directly, skip any preRun - cm := cliBouncers{db: db, cfg: cli.cfg} - if err := cm.list(out); err != nil { + if err := cm.List(out, db); err != nil { return err } @@ -215,10 +216,9 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db 
*database.Client) error { } out := new(bytes.Buffer) + cm := climachine.New(cli.cfg) - // call the "cscli machines list" command directly, skip any preRun - cm := cliMachines{db: db, cfg: cli.cfg} - if err := cm.list(out); err != nil { + if err := cm.List(out, db); err != nil { return err } @@ -232,22 +232,17 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { log.Info("Collecting LAPI status") - cfg := cli.cfg() - cred := cfg.API.Client.Credentials - out := new(bytes.Buffer) + cl := clilapi.New(cli.cfg) - fmt.Fprintf(out, "LAPI credentials file: %s\n", cfg.API.Client.CredentialsFilePath) - fmt.Fprintf(out, "LAPI URL: %s\n", cred.URL) - fmt.Fprintf(out, "LAPI username: %s\n", cred.Login) - - if err := clilapi.QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { - return fmt.Errorf("could not authenticate to Local API (LAPI): %w", err) + err := cl.Status(out, hub) + if err != nil { + fmt.Fprintf(out, "%s\n", err) } - fmt.Fprintln(out, "You can successfully interact with Local API (LAPI)") + stripped := stripAnsiString(out.String()) - cli.writeToZip(zw, SUPPORT_LAPI_STATUS_PATH, time.Now(), out) + cli.writeToZip(zw, SUPPORT_LAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) return nil } @@ -255,28 +250,17 @@ func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { log.Info("Collecting CAPI status") - cfg := cli.cfg() - cred := cfg.API.Server.OnlineClient.Credentials - out := new(bytes.Buffer) + cc := clicapi.New(cli.cfg) - fmt.Fprintf(out, "CAPI credentials file: %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) - fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) - fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) - - auth, enrolled, err := clicapi.QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + err := cc.Status(out, hub) 
if err != nil { - return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) - } - if auth { - fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + fmt.Fprintf(out, "%s\n", err) } - if enrolled { - fmt.Fprintln(out, "Your instance is enrolled in the console") - } + stripped := stripAnsiString(out.String()) - cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), out) + cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) return nil } @@ -389,11 +373,13 @@ func (cli *cliSupport) dumpCrash(zw *zip.Writer) error { return nil } +type configGetter func() *csconfig.Config + type cliSupport struct { cfg configGetter } -func NewCLISupport(cfg configGetter) *cliSupport { +func New(cfg configGetter) *cliSupport { return &cliSupport{ cfg: cfg, } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 2153ebfb7bb..2a1f5ac7ebe 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,17 +14,20 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisimulation" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisupport" 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -256,8 +259,8 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) cmd.AddCommand(clisimulation.New(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) + cmd.AddCommand(clibouncer.New(cli.cfg).NewCommand()) + cmd.AddCommand(climachine.New(cli.cfg).NewCommand()) cmd.AddCommand(clicapi.New(cli.cfg).NewCommand()) cmd.AddCommand(clilapi.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCompletionCmd()) @@ -265,7 +268,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) cmd.AddCommand(clihubtest.New(cli.cfg).NewCommand()) cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) + cmd.AddCommand(clisupport.New(cli.cfg).NewCommand()) cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 27cfe53212b..bda2362c02a 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -213,9 +213,9 @@ teardown() { rune -0 ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "Loaded credentials from" - assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli - missing LAPI credentials file" { @@ -260,7 +260,7 @@ teardown() { rune -0 
./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" rm "$LOCAL_API_CREDENTIALS".local @@ -272,7 +272,7 @@ teardown() { config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" # but if a variable is not defined, there is no specific error message unset URL @@ -299,7 +299,7 @@ teardown() { rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' + assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' } @test "cscli - bad LAPI password" { diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index 60731b90713..6be5101cee2 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats @@ -76,7 +76,7 @@ teardown() { config_disable_agent ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli metrics" { diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index 830d0668cbb..f17ce376d62 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -55,10 +55,10 @@ setup() { rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli capi status - assert_stderr --partial "Loaded credentials from" - assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial " on https://api.crowdsec.net/" - assert_stderr --partial "You can successfully interact with Central API (CAPI)" + 
assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output --partial " on https://api.crowdsec.net/" + assert_output --partial "You can successfully interact with Central API (CAPI)" } @test "cscli alerts list: receive a community pull when capi is enabled" { @@ -85,7 +85,7 @@ setup() { config_disable_agent ./instance-crowdsec start rune -0 cscli capi status - assert_stderr --partial "You can successfully interact with Central API (CAPI)" + assert_output --partial "You can successfully interact with Central API (CAPI)" } @test "capi register must be run from lapi" { diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index c02a75810b9..d22a6f0a953 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -66,7 +66,7 @@ teardown() { config_disable_capi ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli metrics" { diff --git a/test/bats/09_socket.bats b/test/bats/09_socket.bats index f770abaad2e..f861d8a40dc 100644 --- a/test/bats/09_socket.bats +++ b/test/bats/09_socket.bats @@ -37,22 +37,22 @@ teardown() { ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on $socket" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with username .* on $socket" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "crowdsec - listen on both socket and TCP" { ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on http://127.0.0.1:8080/" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with 
username .* on http://127.0.0.1:8080/" + assert_output --partial "You can successfully interact with Local API (LAPI)" config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on $socket" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with username .* on $socket" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli - authenticate new machine with socket" { From 8c0c10cd7ab5f6c0871e13f455d3e26a0148d243 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 2 Sep 2024 09:11:59 +0100 Subject: [PATCH 270/581] enhance: return an error if cscli dashboard is run within a container (#3207) --- cmd/crowdsec-cli/dashboard.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index eb4a2a5956a..31efd074bf9 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -23,6 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" + "github.com/crowdsecurity/go-cs-lib/version" ) var ( @@ -103,6 +104,10 @@ cscli dashboard remove log.Warn("cscli dashboard will be deprecated in version 1.7.0, read more at https://docs.crowdsec.net/blog/cscli_dashboard_deprecation/") + if version.System == "docker" { + return errors.New("cscli dashboard is not supported whilst running CrowdSec within a container please see: https://github.com/crowdsecurity/example-docker-compose/tree/main/basic") + } + return nil }, } From d2616766deba0cf9c4a67c7e634b1226db86f2ed Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 2 Sep 2024 13:13:40 +0200 Subject: [PATCH 271/581] Allow auto registration of machines in LAPI (#3202) Co-authored-by: marco --- cmd/crowdsec-cli/clilapi/lapi.go | 
27 +-- pkg/apiclient/client.go | 2 +- pkg/apiclient/config.go | 17 +- pkg/apiserver/alerts_test.go | 2 +- pkg/apiserver/apiserver.go | 3 +- pkg/apiserver/apiserver_test.go | 27 ++- pkg/apiserver/controllers/controller.go | 2 + pkg/apiserver/controllers/v1/controller.go | 17 +- pkg/apiserver/controllers/v1/machines.go | 54 +++++- pkg/apiserver/jwt_test.go | 2 +- pkg/apiserver/machines_test.go | 102 +++++++++- pkg/csconfig/api.go | 108 ++++++++--- pkg/csconfig/api_test.go | 6 + pkg/models/localapi_swagger.yaml | 7 + pkg/models/watcher_registration_request.go | 25 +++ test/bats/01_cscli.bats | 103 ---------- test/bats/01_cscli_lapi.bats | 213 +++++++++++++++++++++ 17 files changed, 546 insertions(+), 171 deletions(-) create mode 100644 test/bats/01_cscli_lapi.bats diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index 2de962d896d..a6b88101cbf 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -95,7 +95,7 @@ func (cli *cliLapi) Status(out io.Writer, hub *cwhub.Hub) error { return nil } -func (cli *cliLapi) register(apiURL string, outputFile string, machine string) error { +func (cli *cliLapi) register(apiURL string, outputFile string, machine string, token string) error { var err error lapiUser := machine @@ -116,11 +116,12 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e } _, err = apiclient.RegisterClient(&apiclient.Config{ - MachineID: lapiUser, - Password: password, - UserAgent: cwversion.UserAgent(), - URL: apiurl, - VersionPrefix: LAPIURLPrefix, + MachineID: lapiUser, + Password: password, + UserAgent: cwversion.UserAgent(), + RegistrationToken: token, + URL: apiurl, + VersionPrefix: LAPIURLPrefix, }, nil) if err != nil { return fmt.Errorf("api client register: %w", err) @@ -138,10 +139,12 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e dumpFile = "" } - apiCfg := csconfig.ApiCredentialsCfg{ - Login: lapiUser, - Password: 
password.String(), - URL: apiURL, + apiCfg := cfg.API.Client.Credentials + apiCfg.Login = lapiUser + apiCfg.Password = password.String() + + if apiURL != "" { + apiCfg.URL = apiURL } apiConfigDump, err := yaml.Marshal(apiCfg) @@ -212,6 +215,7 @@ func (cli *cliLapi) newRegisterCmd() *cobra.Command { apiURL string outputFile string machine string + token string ) cmd := &cobra.Command{ @@ -222,7 +226,7 @@ Keep in mind the machine needs to be validated by an administrator on LAPI side Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(apiURL, outputFile, machine) + return cli.register(apiURL, outputFile, machine, token) }, } @@ -230,6 +234,7 @@ Keep in mind the machine needs to be validated by an administrator on LAPI side flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)") flags.StringVarP(&outputFile, "file", "f", "", "output file destination") flags.StringVar(&machine, "machine", "", "Name of the machine to register with") + flags.StringVar(&token, "token", "", "Auto registration token to use") return cmd } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 3abd42cf009..5669fd24786 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -177,7 +177,7 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { c.Alerts = (*AlertsService)(&c.common) c.Auth = (*AuthService)(&c.common) - resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password}) + resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) /*if we have http status, return it*/ if err != nil { if resp != nil && resp.Response != nil { diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go index 
4dfeb3e863f..b08452e74e0 100644 --- a/pkg/apiclient/config.go +++ b/pkg/apiclient/config.go @@ -7,12 +7,13 @@ import ( ) type Config struct { - MachineID string - Password strfmt.Password - Scenarios []string - URL *url.URL - PapiURL *url.URL - VersionPrefix string - UserAgent string - UpdateScenario func() ([]string, error) + MachineID string + Password strfmt.Password + Scenarios []string + URL *url.URL + PapiURL *url.URL + VersionPrefix string + UserAgent string + RegistrationToken string + UpdateScenario func() ([]string, error) } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 812e33ae13b..891eb3a8f4a 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -71,7 +71,7 @@ func InitMachineTest(t *testing.T) (*gin.Engine, models.WatcherAuthResponse, csc } func LoginToTestAPI(t *testing.T, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") ValidateMachine(t, "test", config.API.Server.DbConfig) w := httptest.NewRecorder() diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index bd0b5d39bf4..31b31bcb82d 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -21,7 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" - "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -235,6 +235,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { Log: clog, ConsoleConfig: config.ConsoleConfig, DisableRemoteLapiRegistration: config.DisableRemoteLapiRegistration, + AutoRegisterCfg: config.AutoRegister, } var ( diff --git a/pkg/apiserver/apiserver_test.go 
b/pkg/apiserver/apiserver_test.go index b3f619f39c1..f48791ebcb8 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -29,10 +29,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +const ( + validRegistrationToken = "igheethauCaeteSaiyee3LosohPhahze" + invalidRegistrationToken = "vohl1feibechieG5coh8musheish2auj" +) + var ( testMachineID = "test" testPassword = strfmt.Password("test") - MachineTest = models.WatcherAuthRequest{ + MachineTest = models.WatcherRegistrationRequest{ MachineID: &testMachineID, Password: &testPassword, } @@ -65,6 +70,14 @@ func LoadTestConfig(t *testing.T) csconfig.Config { ShareTaintedScenarios: new(bool), ShareCustomScenarios: new(bool), }, + AutoRegister: &csconfig.LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(true), + Token: validRegistrationToken, + AllowedRanges: []string{ + "127.0.0.1/8", + "::1/128", + }, + }, } apiConfig := csconfig.APICfg{ @@ -75,6 +88,9 @@ func LoadTestConfig(t *testing.T) csconfig.Config { err := config.API.Server.LoadProfiles() require.NoError(t, err) + err = config.API.Server.LoadAutoRegister() + require.NoError(t, err) + return config } @@ -113,6 +129,9 @@ func LoadTestConfigForwardedFor(t *testing.T) csconfig.Config { err := config.API.Server.LoadProfiles() require.NoError(t, err) + err = config.API.Server.LoadAutoRegister() + require.NoError(t, err) + return config } @@ -251,8 +270,10 @@ func readDecisionsStreamResp(t *testing.T, resp *httptest.ResponseRecorder) (map return response, resp.Code } -func CreateTestMachine(t *testing.T, router *gin.Engine) string { - b, err := json.Marshal(MachineTest) +func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { + regReq := MachineTest + regReq.RegistrationToken = token + b, err := json.Marshal(regReq) require.NoError(t, err) body := string(b) diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 8175f431384..29f02723b70 100644 --- 
a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -29,6 +29,7 @@ type Controller struct { ConsoleConfig *csconfig.ConsoleConfig TrustedIPs []net.IPNet HandlerV1 *v1.Controller + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg DisableRemoteLapiRegistration bool } @@ -89,6 +90,7 @@ func (c *Controller) NewV1() error { PluginChannel: c.PluginChannel, ConsoleConfig: *c.ConsoleConfig, TrustedIPs: c.TrustedIPs, + AutoRegisterCfg: c.AutoRegisterCfg, } c.HandlerV1, err = v1.New(&v1Config) diff --git a/pkg/apiserver/controllers/v1/controller.go b/pkg/apiserver/controllers/v1/controller.go index ad76ad76616..6de4abe3b3b 100644 --- a/pkg/apiserver/controllers/v1/controller.go +++ b/pkg/apiserver/controllers/v1/controller.go @@ -23,9 +23,10 @@ type Controller struct { AlertsAddChan chan []*models.Alert DecisionDeleteChan chan []*models.Decision - PluginChannel chan csplugin.ProfileAlert - ConsoleConfig csconfig.ConsoleConfig - TrustedIPs []net.IPNet + PluginChannel chan csplugin.ProfileAlert + ConsoleConfig csconfig.ConsoleConfig + TrustedIPs []net.IPNet + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg } type ControllerV1Config struct { @@ -36,9 +37,10 @@ type ControllerV1Config struct { AlertsAddChan chan []*models.Alert DecisionDeleteChan chan []*models.Decision - PluginChannel chan csplugin.ProfileAlert - ConsoleConfig csconfig.ConsoleConfig - TrustedIPs []net.IPNet + PluginChannel chan csplugin.ProfileAlert + ConsoleConfig csconfig.ConsoleConfig + TrustedIPs []net.IPNet + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg } func New(cfg *ControllerV1Config) (*Controller, error) { @@ -59,9 +61,10 @@ func New(cfg *ControllerV1Config) (*Controller, error) { PluginChannel: cfg.PluginChannel, ConsoleConfig: cfg.ConsoleConfig, TrustedIPs: cfg.TrustedIPs, + AutoRegisterCfg: cfg.AutoRegisterCfg, } - v1.Middlewares, err = middlewares.NewMiddlewares(cfg.DbClient) + v1.Middlewares, err = middlewares.NewMiddlewares(cfg.DbClient) if err != 
nil { return v1, err } diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go index 84a6ef2583c..0030f7d3b39 100644 --- a/pkg/apiserver/controllers/v1/machines.go +++ b/pkg/apiserver/controllers/v1/machines.go @@ -1,15 +1,50 @@ package v1 import ( + "errors" + "net" "net/http" "github.com/gin-gonic/gin" "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) +func (c *Controller) shouldAutoRegister(token string, gctx *gin.Context) (bool, error) { + if !*c.AutoRegisterCfg.Enable { + return false, nil + } + + clientIP := net.ParseIP(gctx.ClientIP()) + + // Can probaby happen if using unix socket ? + if clientIP == nil { + log.Warnf("Failed to parse client IP for watcher self registration: %s", gctx.ClientIP()) + return false, nil + } + + if token == "" || c.AutoRegisterCfg == nil { + return false, nil + } + + // Check the token + if token != c.AutoRegisterCfg.Token { + return false, errors.New("invalid token for auto registration") + } + + // Check the source IP + for _, ipRange := range c.AutoRegisterCfg.AllowedRangesParsed { + if ipRange.Contains(clientIP) { + return true, nil + } + } + + return false, errors.New("IP not in allowed range for auto registration") +} + func (c *Controller) CreateMachine(gctx *gin.Context) { var input models.WatcherRegistrationRequest @@ -19,14 +54,27 @@ func (c *Controller) CreateMachine(gctx *gin.Context) { } if err := input.Validate(strfmt.Default); err != nil { - c.HandleDBErrors(gctx, err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) return } - if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), false, false, types.PasswordAuthType); err != nil { + autoRegister, err := c.shouldAutoRegister(input.RegistrationToken, gctx) + if err != nil { + log.WithFields(log.Fields{"ip": gctx.ClientIP(), "machine_id": 
*input.MachineID}).Errorf("Auto-register failed: %s", err) + gctx.JSON(http.StatusUnauthorized, gin.H{"message": err.Error()}) + + return + } + + if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { c.HandleDBErrors(gctx, err) return } - gctx.Status(http.StatusCreated) + if autoRegister { + log.WithFields(log.Fields{"ip": gctx.ClientIP(), "machine_id": *input.MachineID}).Info("Auto-registered machine") + gctx.Status(http.StatusAccepted) + } else { + gctx.Status(http.StatusCreated) + } } diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index 58f66cfc74f..aa6e84e416b 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -12,7 +12,7 @@ import ( func TestLogin(t *testing.T) { router, config := NewAPITest(t) - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") // Login with machine not validated yet w := httptest.NewRecorder() diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 08efa91c6c1..041a6bee528 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestCreateMachine(t *testing.T) { @@ -20,7 +22,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 400, w.Code) + assert.Equal(t, http.StatusBadRequest, w.Code) assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create machine with invalid input @@ -29,7 +31,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusUnprocessableEntity, w.Code) assert.Equal(t, `{"message":"validation failure list:\nmachine_id in body 
is required\npassword in body is required"}`, w.Body.String()) // Create machine @@ -43,7 +45,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) } @@ -62,7 +64,7 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { req.Header.Add("X-Real-Ip", "1.1.1.1") router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -85,7 +87,7 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { req.Header.Add("X-Real-IP", "1.1.1.1") router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -109,7 +111,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -122,7 +124,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { func TestCreateMachineAlreadyExist(t *testing.T) { router, _ := NewAPITest(t) - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") w := httptest.NewRecorder() req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) @@ -134,6 +136,90 @@ func TestCreateMachineAlreadyExist(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) + assert.Equal(t, http.StatusForbidden, w.Code) assert.Equal(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) } + +func TestAutoRegistration(t 
*testing.T) { + router, _ := NewAPITest(t) + + //Invalid registration token / valid source IP + regReq := MachineTest + regReq.RegistrationToken = invalidRegistrationToken + b, err := json.Marshal(regReq) + require.NoError(t, err) + + body := string(b) + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //Invalid registration token / invalid source IP + regReq = MachineTest + regReq.RegistrationToken = invalidRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "42.42.42.42:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //valid registration token / invalid source IP + regReq = MachineTest + regReq.RegistrationToken = validRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "42.42.42.42:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //Valid registration token / valid source IP + regReq = MachineTest + regReq.RegistrationToken = validRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusAccepted, w.Code) + + //No token / valid source 
IP + regReq = MachineTest + regReq.MachineID = ptr.Of("test2") + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusCreated, w.Code) +} diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index a23df957282..4a28b590e80 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -236,32 +236,40 @@ type CapiWhitelist struct { Cidrs []*net.IPNet `yaml:"cidrs,omitempty"` } +type LocalAPIAutoRegisterCfg struct { + Enable *bool `yaml:"enabled"` + Token string `yaml:"token"` + AllowedRanges []string `yaml:"allowed_ranges,omitempty"` + AllowedRangesParsed []*net.IPNet `yaml:"-"` +} + /*local api service configuration*/ type LocalApiServerCfg struct { - Enable *bool `yaml:"enable"` - ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 - ListenSocket string `yaml:"listen_socket,omitempty"` - TLS *TLSCfg `yaml:"tls"` - DbConfig *DatabaseCfg `yaml:"-"` - LogDir string `yaml:"-"` - LogMedia string `yaml:"-"` - OnlineClient *OnlineApiClientCfg `yaml:"online_client"` - ProfilesPath string `yaml:"profiles_path,omitempty"` - ConsoleConfigPath string `yaml:"console_path,omitempty"` - ConsoleConfig *ConsoleConfig `yaml:"-"` - Profiles []*ProfileCfg `yaml:"-"` - LogLevel *log.Level `yaml:"log_level"` - UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` - TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` - CompressLogs *bool `yaml:"-"` - LogMaxSize int `yaml:"-"` - LogMaxAge int `yaml:"-"` - LogMaxFiles int `yaml:"-"` - TrustedIPs []string `yaml:"trusted_ips,omitempty"` - PapiLogLevel *log.Level `yaml:"papi_log_level"` - DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` - CapiWhitelistsPath string 
`yaml:"capi_whitelists_path,omitempty"` - CapiWhitelists *CapiWhitelist `yaml:"-"` + Enable *bool `yaml:"enable"` + ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 + ListenSocket string `yaml:"listen_socket,omitempty"` + TLS *TLSCfg `yaml:"tls"` + DbConfig *DatabaseCfg `yaml:"-"` + LogDir string `yaml:"-"` + LogMedia string `yaml:"-"` + OnlineClient *OnlineApiClientCfg `yaml:"online_client"` + ProfilesPath string `yaml:"profiles_path,omitempty"` + ConsoleConfigPath string `yaml:"console_path,omitempty"` + ConsoleConfig *ConsoleConfig `yaml:"-"` + Profiles []*ProfileCfg `yaml:"-"` + LogLevel *log.Level `yaml:"log_level"` + UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` + TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` + CompressLogs *bool `yaml:"-"` + LogMaxSize int `yaml:"-"` + LogMaxAge int `yaml:"-"` + LogMaxFiles int `yaml:"-"` + TrustedIPs []string `yaml:"trusted_ips,omitempty"` + PapiLogLevel *log.Level `yaml:"papi_log_level"` + DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` + CapiWhitelistsPath string `yaml:"capi_whitelists_path,omitempty"` + CapiWhitelists *CapiWhitelist `yaml:"-"` + AutoRegister *LocalAPIAutoRegisterCfg `yaml:"auto_registration,omitempty"` } func (c *LocalApiServerCfg) ClientURL() string { @@ -348,6 +356,14 @@ func (c *Config) LoadAPIServer(inCli bool) error { log.Infof("loaded capi whitelist from %s: %d IPs, %d CIDRs", c.API.Server.CapiWhitelistsPath, len(c.API.Server.CapiWhitelists.Ips), len(c.API.Server.CapiWhitelists.Cidrs)) } + if err := c.API.Server.LoadAutoRegister(); err != nil { + return err + } + + if c.API.Server.AutoRegister != nil && c.API.Server.AutoRegister.Enable != nil && *c.API.Server.AutoRegister.Enable && !inCli { + log.Infof("auto LAPI registration enabled for ranges %+v", c.API.Server.AutoRegister.AllowedRanges) + } + c.API.Server.LogDir = c.Common.LogDir c.API.Server.LogMedia = c.Common.LogMedia c.API.Server.CompressLogs = 
c.Common.CompressLogs @@ -455,3 +471,47 @@ func (c *Config) LoadAPIClient() error { return c.API.Client.Load() } + +func (c *LocalApiServerCfg) LoadAutoRegister() error { + if c.AutoRegister == nil { + c.AutoRegister = &LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(false), + } + + return nil + } + + // Disable by default + if c.AutoRegister.Enable == nil { + c.AutoRegister.Enable = ptr.Of(false) + } + + if !*c.AutoRegister.Enable { + return nil + } + + if c.AutoRegister.Token == "" { + return errors.New("missing token value for api.server.auto_register") + } + + if len(c.AutoRegister.Token) < 32 { + return errors.New("token value for api.server.auto_register is too short (min 32 characters)") + } + + if c.AutoRegister.AllowedRanges == nil { + return errors.New("missing allowed_ranges value for api.server.auto_register") + } + + c.AutoRegister.AllowedRangesParsed = make([]*net.IPNet, 0, len(c.AutoRegister.AllowedRanges)) + + for _, ipRange := range c.AutoRegister.AllowedRanges { + _, ipNet, err := net.ParseCIDR(ipRange) + if err != nil { + return fmt.Errorf("auto_register: failed to parse allowed range '%s': %w", ipRange, err) + } + + c.AutoRegister.AllowedRangesParsed = append(c.AutoRegister.AllowedRangesParsed, ipNet) + } + + return nil +} diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 51a4c5ad602..96945202aa8 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -217,6 +217,12 @@ func TestLoadAPIServer(t *testing.T) { ProfilesPath: "./testdata/profiles.yaml", UseForwardedForHeaders: false, PapiLogLevel: &logLevel, + AutoRegister: &LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(false), + Token: "", + AllowedRanges: nil, + AllowedRangesParsed: nil, + }, }, }, { diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 9edfd12b82f..01bbe6f8bde 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -312,6 +312,9 @@ paths: '201': description: Watcher Created headers: {} + 
'202': + description: Watcher Validated + headers: {} '400': description: "400 response" schema: @@ -726,6 +729,10 @@ definitions: password: type: string format: password + registration_token: + type: string + minLength: 32 + maxLength: 255 required: - machine_id - password diff --git a/pkg/models/watcher_registration_request.go b/pkg/models/watcher_registration_request.go index 8be802ea3e7..673f0d59b9e 100644 --- a/pkg/models/watcher_registration_request.go +++ b/pkg/models/watcher_registration_request.go @@ -27,6 +27,11 @@ type WatcherRegistrationRequest struct { // Required: true // Format: password Password *strfmt.Password `json:"password"` + + // registration token + // Max Length: 255 + // Min Length: 32 + RegistrationToken string `json:"registration_token,omitempty"` } // Validate validates this watcher registration request @@ -41,6 +46,10 @@ func (m *WatcherRegistrationRequest) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateRegistrationToken(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} @@ -69,6 +78,22 @@ func (m *WatcherRegistrationRequest) validatePassword(formats strfmt.Registry) e return nil } +func (m *WatcherRegistrationRequest) validateRegistrationToken(formats strfmt.Registry) error { + if swag.IsZero(m.RegistrationToken) { // not required + return nil + } + + if err := validate.MinLength("registration_token", "body", m.RegistrationToken, 32); err != nil { + return err + } + + if err := validate.MaxLength("registration_token", "body", m.RegistrationToken, 255); err != nil { + return err + } + + return nil +} + // ContextValidate validates this watcher registration request based on context it is used func (m *WatcherRegistrationRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { return nil diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index bda2362c02a..264870501a5 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -209,109 +209,6 @@ teardown() { rm -rf -- "${backupdir:?}" } -@test "cscli lapi status" { - rune -0 ./instance-crowdsec start - rune -0 cscli lapi status - - assert_output --partial "Loaded credentials from" - assert_output --partial "Trying to authenticate with username" - assert_output --partial "You can successfully interact with Local API (LAPI)" -} - -@test "cscli - missing LAPI credentials file" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - rm -f "$LOCAL_API_CREDENTIALS" - rune -1 cscli lapi status - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" - - rune -1 cscli alerts list - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" - - rune -1 cscli decisions list - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" -} - -@test "cscli - empty LAPI credentials file" { - LOCAL_API_CREDENTIALS=$(config_get 
'.api.client.credentials_path') - : > "$LOCAL_API_CREDENTIALS" - rune -1 cscli lapi status - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" - - rune -1 cscli alerts list - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" - - rune -1 cscli decisions list - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" -} - -@test "cscli - LAPI credentials file can reference env variables" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - URL=$(config_get "$LOCAL_API_CREDENTIALS" '.url') - export URL - LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') - export LOGIN - PASSWORD=$(config_get "$LOCAL_API_CREDENTIALS" '.password') - export PASSWORD - - # shellcheck disable=SC2016 - echo '{"url":"$URL","login":"$LOGIN","password":"$PASSWORD"}' > "$LOCAL_API_CREDENTIALS".local - - config_set '.crowdsec_service.enable=false' - rune -0 ./instance-crowdsec start - - rune -0 cscli lapi status - assert_output --partial "You can successfully interact with Local API (LAPI)" - - rm "$LOCAL_API_CREDENTIALS".local - - # shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.url="$URL"' - # shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.login="$LOGIN"' - # shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' - - rune -0 cscli lapi status - assert_output --partial "You can successfully interact with Local API (LAPI)" - - # but if a variable is not defined, there is no specific error message - unset URL - rune -1 cscli lapi status - # shellcheck disable=SC2016 - assert_stderr --partial 'BaseURL must have a trailing slash' -} - -@test "cscli - missing LAPI client settings" { - config_set 'del(.api.client)' - rune -1 cscli lapi status - assert_stderr --partial "loading api client: no API client section in configuration" - - rune -1 
cscli alerts list - assert_stderr --partial "loading api client: no API client section in configuration" - - rune -1 cscli decisions list - assert_stderr --partial "loading api client: no API client section in configuration" -} - -@test "cscli - malformed LAPI url" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' - - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' -} - -@test "cscli - bad LAPI password" { - rune -0 ./instance-crowdsec start - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' - - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' -} - @test "'cscli completion' with or without configuration file" { rune -0 cscli completion bash assert_output --partial "# bash completion for cscli" diff --git a/test/bats/01_cscli_lapi.bats b/test/bats/01_cscli_lapi.bats new file mode 100644 index 00000000000..6e876576a6e --- /dev/null +++ b/test/bats/01_cscli_lapi.bats @@ -0,0 +1,213 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # don't run crowdsec here, not all tests require a running instance +} + +teardown() { + cd "$TEST_DIR" || exit 1 + ./instance-crowdsec stop +} + +#---------- + +@test "cscli lapi status" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi status + + assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output 
--partial "You can successfully interact with Local API (LAPI)" +} + +@test "cscli - missing LAPI credentials file" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + rm -f "$LOCAL_API_CREDENTIALS" + rune -1 cscli lapi status + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" + + rune -1 cscli alerts list + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" + + rune -1 cscli decisions list + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" +} + +@test "cscli - empty LAPI credentials file" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + : > "$LOCAL_API_CREDENTIALS" + rune -1 cscli lapi status + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" + + rune -1 cscli alerts list + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" + + rune -1 cscli decisions list + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" +} + +@test "cscli - LAPI credentials file can reference env variables" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + URL=$(config_get "$LOCAL_API_CREDENTIALS" '.url') + export URL + LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') + export LOGIN + PASSWORD=$(config_get "$LOCAL_API_CREDENTIALS" '.password') + export PASSWORD + + # shellcheck disable=SC2016 + echo '{"url":"$URL","login":"$LOGIN","password":"$PASSWORD"}' > "$LOCAL_API_CREDENTIALS".local + + config_set '.crowdsec_service.enable=false' + rune -0 ./instance-crowdsec start + + rune -0 cscli lapi status + assert_output --partial "You can successfully interact with Local API (LAPI)" + + rm "$LOCAL_API_CREDENTIALS".local + + # 
shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.url="$URL"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.login="$LOGIN"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' + + rune -0 cscli lapi status + assert_output --partial "You can successfully interact with Local API (LAPI)" + + # but if a variable is not defined, there is no specific error message + unset URL + rune -1 cscli lapi status + # shellcheck disable=SC2016 + assert_stderr --partial 'BaseURL must have a trailing slash' +} + +@test "cscli - missing LAPI client settings" { + config_set 'del(.api.client)' + rune -1 cscli lapi status + assert_stderr --partial "loading api client: no API client section in configuration" + + rune -1 cscli alerts list + assert_stderr --partial "loading api client: no API client section in configuration" + + rune -1 cscli decisions list + assert_stderr --partial "loading api client: no API client section in configuration" +} + +@test "cscli - malformed LAPI url" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' + + rune -1 cscli lapi status -o json + rune -0 jq -r '.msg' <(stderr) + assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' +} + +@test "cscli - bad LAPI password" { + rune -0 ./instance-crowdsec start + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' + + rune -1 cscli lapi status -o json + rune -0 jq -r '.msg' <(stderr) + assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' +} + +@test "cscli lapi register / machines validate" { + rune -1 cscli lapi register + assert_stderr --partial "connection refused" + + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + + rune -0 ./instance-crowdsec 
start + rune -0 cscli lapi register + assert_stderr --partial "Successfully registered to Local API" + assert_stderr --partial "Local API credentials written to '$LOCAL_API_CREDENTIALS'" + assert_stderr --partial "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." + + LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') + + rune -0 cscli machines inspect "$LOGIN" -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "null" + + rune -0 cscli machines validate "$LOGIN" + + rune -0 cscli machines inspect "$LOGIN" -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --machine" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi register --machine newmachine + rune -0 cscli machines validate newmachine + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --token (ignored)" { + # A token is ignored if the server is not configured with it + rune -1 cscli lapi register --machine newmachine --token meh + assert_stderr --partial "connection refused" + + rune -0 ./instance-crowdsec start + rune -1 cscli lapi register --machine newmachine --token meh + assert_stderr --partial '422 Unprocessable Entity: API error: http code 422, invalid request:' + assert_stderr --partial 'registration_token in body should be at least 32 chars long' + + rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 + assert_stderr --partial "Successfully registered to Local API" + + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "null" +} + +@test "cscli lapi register --token" { + config_set '.api.server.auto_registration.enabled=true' + config_set '.api.server.auto_registration.token="12345678901234567890123456789012"' + config_set '.api.server.auto_registration.allowed_ranges=["127.0.0.1/32"]' + + rune -0 
./instance-crowdsec start + + rune -1 cscli lapi register --machine malicious --token 123456789012345678901234badtoken + assert_stderr --partial "401 Unauthorized: API error: invalid token for auto registration" + rune -1 cscli machines inspect malicious -o json + assert_stderr --partial "unable to read machine data 'malicious': user 'malicious': user doesn't exist" + + rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 + assert_stderr --partial "Successfully registered to Local API" + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --token (bad source ip)" { + config_set '.api.server.auto_registration.enabled=true' + config_set '.api.server.auto_registration.token="12345678901234567890123456789012"' + config_set '.api.server.auto_registration.allowed_ranges=["127.0.0.2/32"]' + + rune -0 ./instance-crowdsec start + + rune -1 cscli lapi register --machine outofrange --token 12345678901234567890123456789012 + assert_stderr --partial "401 Unauthorized: API error: IP not in allowed range for auto registration" + rune -1 cscli machines inspect outofrange -o json + assert_stderr --partial "unable to read machine data 'outofrange': user 'outofrange': user doesn't exist" +} From 7c2ebe5ebb4b0ab89968f1ea2c3abc6a71c81b71 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 2 Sep 2024 14:00:28 +0200 Subject: [PATCH 272/581] Allow auto registration of machines in LAPI (backport) (#3209) --- cmd/crowdsec-cli/lapi.go | 27 +-- pkg/apiclient/client.go | 2 +- pkg/apiclient/config.go | 17 +- pkg/apiserver/alerts_test.go | 2 +- pkg/apiserver/apiserver.go | 3 +- pkg/apiserver/apiserver_test.go | 27 ++- pkg/apiserver/controllers/controller.go | 2 + pkg/apiserver/controllers/v1/controller.go | 17 +- pkg/apiserver/controllers/v1/machines.go | 54 +++++- pkg/apiserver/jwt_test.go | 2 +- pkg/apiserver/machines_test.go 
| 102 +++++++++- pkg/csconfig/api.go | 108 ++++++++--- pkg/csconfig/api_test.go | 6 + pkg/models/localapi_swagger.yaml | 7 + pkg/models/watcher_registration_request.go | 25 +++ test/bats/01_cscli.bats | 103 ---------- test/bats/01_cscli_lapi.bats | 213 +++++++++++++++++++++ 17 files changed, 546 insertions(+), 171 deletions(-) create mode 100644 test/bats/01_cscli_lapi.bats diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/lapi.go index 0b8bc59dad5..c0543f98733 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/lapi.go @@ -96,7 +96,7 @@ func (cli *cliLapi) status() error { return nil } -func (cli *cliLapi) register(apiURL string, outputFile string, machine string) error { +func (cli *cliLapi) register(apiURL string, outputFile string, machine string, token string) error { var err error lapiUser := machine @@ -117,11 +117,12 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e } _, err = apiclient.RegisterClient(&apiclient.Config{ - MachineID: lapiUser, - Password: password, - UserAgent: cwversion.UserAgent(), - URL: apiurl, - VersionPrefix: LAPIURLPrefix, + MachineID: lapiUser, + Password: password, + UserAgent: cwversion.UserAgent(), + RegistrationToken: token, + URL: apiurl, + VersionPrefix: LAPIURLPrefix, }, nil) if err != nil { return fmt.Errorf("api client register: %w", err) @@ -139,10 +140,12 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string) e dumpFile = "" } - apiCfg := csconfig.ApiCredentialsCfg{ - Login: lapiUser, - Password: password.String(), - URL: apiURL, + apiCfg := cfg.API.Client.Credentials + apiCfg.Login = lapiUser + apiCfg.Password = password.String() + + if apiURL != "" { + apiCfg.URL = apiURL } apiConfigDump, err := yaml.Marshal(apiCfg) @@ -208,6 +211,7 @@ func (cli *cliLapi) newRegisterCmd() *cobra.Command { apiURL string outputFile string machine string + token string ) cmd := &cobra.Command{ @@ -218,7 +222,7 @@ Keep in mind the machine needs to be validated by an 
administrator on LAPI side Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(apiURL, outputFile, machine) + return cli.register(apiURL, outputFile, machine, token) }, } @@ -226,6 +230,7 @@ Keep in mind the machine needs to be validated by an administrator on LAPI side flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)") flags.StringVarP(&outputFile, "file", "f", "", "output file destination") flags.StringVar(&machine, "machine", "", "Name of the machine to register with") + flags.StringVar(&token, "token", "", "Auto registration token to use") return cmd } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 3abd42cf009..5669fd24786 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -177,7 +177,7 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { c.Alerts = (*AlertsService)(&c.common) c.Auth = (*AuthService)(&c.common) - resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password}) + resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) /*if we have http status, return it*/ if err != nil { if resp != nil && resp.Response != nil { diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go index 4dfeb3e863f..b08452e74e0 100644 --- a/pkg/apiclient/config.go +++ b/pkg/apiclient/config.go @@ -7,12 +7,13 @@ import ( ) type Config struct { - MachineID string - Password strfmt.Password - Scenarios []string - URL *url.URL - PapiURL *url.URL - VersionPrefix string - UserAgent string - UpdateScenario func() ([]string, error) + MachineID string + Password strfmt.Password + Scenarios []string + URL *url.URL + PapiURL *url.URL + VersionPrefix string + UserAgent string + 
RegistrationToken string + UpdateScenario func() ([]string, error) } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 812e33ae13b..891eb3a8f4a 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -71,7 +71,7 @@ func InitMachineTest(t *testing.T) (*gin.Engine, models.WatcherAuthResponse, csc } func LoginToTestAPI(t *testing.T, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") ValidateMachine(t, "test", config.API.Server.DbConfig) w := httptest.NewRecorder() diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index bd0b5d39bf4..31b31bcb82d 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -21,7 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" - "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -235,6 +235,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { Log: clog, ConsoleConfig: config.ConsoleConfig, DisableRemoteLapiRegistration: config.DisableRemoteLapiRegistration, + AutoRegisterCfg: config.AutoRegister, } var ( diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index b3f619f39c1..f48791ebcb8 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -29,10 +29,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +const ( + validRegistrationToken = "igheethauCaeteSaiyee3LosohPhahze" + invalidRegistrationToken = "vohl1feibechieG5coh8musheish2auj" +) + var ( testMachineID = "test" testPassword = strfmt.Password("test") - MachineTest = models.WatcherAuthRequest{ + 
MachineTest = models.WatcherRegistrationRequest{ MachineID: &testMachineID, Password: &testPassword, } @@ -65,6 +70,14 @@ func LoadTestConfig(t *testing.T) csconfig.Config { ShareTaintedScenarios: new(bool), ShareCustomScenarios: new(bool), }, + AutoRegister: &csconfig.LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(true), + Token: validRegistrationToken, + AllowedRanges: []string{ + "127.0.0.1/8", + "::1/128", + }, + }, } apiConfig := csconfig.APICfg{ @@ -75,6 +88,9 @@ func LoadTestConfig(t *testing.T) csconfig.Config { err := config.API.Server.LoadProfiles() require.NoError(t, err) + err = config.API.Server.LoadAutoRegister() + require.NoError(t, err) + return config } @@ -113,6 +129,9 @@ func LoadTestConfigForwardedFor(t *testing.T) csconfig.Config { err := config.API.Server.LoadProfiles() require.NoError(t, err) + err = config.API.Server.LoadAutoRegister() + require.NoError(t, err) + return config } @@ -251,8 +270,10 @@ func readDecisionsStreamResp(t *testing.T, resp *httptest.ResponseRecorder) (map return response, resp.Code } -func CreateTestMachine(t *testing.T, router *gin.Engine) string { - b, err := json.Marshal(MachineTest) +func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { + regReq := MachineTest + regReq.RegistrationToken = token + b, err := json.Marshal(regReq) require.NoError(t, err) body := string(b) diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 8175f431384..29f02723b70 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -29,6 +29,7 @@ type Controller struct { ConsoleConfig *csconfig.ConsoleConfig TrustedIPs []net.IPNet HandlerV1 *v1.Controller + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg DisableRemoteLapiRegistration bool } @@ -89,6 +90,7 @@ func (c *Controller) NewV1() error { PluginChannel: c.PluginChannel, ConsoleConfig: *c.ConsoleConfig, TrustedIPs: c.TrustedIPs, + AutoRegisterCfg: c.AutoRegisterCfg, } 
c.HandlerV1, err = v1.New(&v1Config) diff --git a/pkg/apiserver/controllers/v1/controller.go b/pkg/apiserver/controllers/v1/controller.go index ad76ad76616..6de4abe3b3b 100644 --- a/pkg/apiserver/controllers/v1/controller.go +++ b/pkg/apiserver/controllers/v1/controller.go @@ -23,9 +23,10 @@ type Controller struct { AlertsAddChan chan []*models.Alert DecisionDeleteChan chan []*models.Decision - PluginChannel chan csplugin.ProfileAlert - ConsoleConfig csconfig.ConsoleConfig - TrustedIPs []net.IPNet + PluginChannel chan csplugin.ProfileAlert + ConsoleConfig csconfig.ConsoleConfig + TrustedIPs []net.IPNet + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg } type ControllerV1Config struct { @@ -36,9 +37,10 @@ type ControllerV1Config struct { AlertsAddChan chan []*models.Alert DecisionDeleteChan chan []*models.Decision - PluginChannel chan csplugin.ProfileAlert - ConsoleConfig csconfig.ConsoleConfig - TrustedIPs []net.IPNet + PluginChannel chan csplugin.ProfileAlert + ConsoleConfig csconfig.ConsoleConfig + TrustedIPs []net.IPNet + AutoRegisterCfg *csconfig.LocalAPIAutoRegisterCfg } func New(cfg *ControllerV1Config) (*Controller, error) { @@ -59,9 +61,10 @@ func New(cfg *ControllerV1Config) (*Controller, error) { PluginChannel: cfg.PluginChannel, ConsoleConfig: cfg.ConsoleConfig, TrustedIPs: cfg.TrustedIPs, + AutoRegisterCfg: cfg.AutoRegisterCfg, } - v1.Middlewares, err = middlewares.NewMiddlewares(cfg.DbClient) + v1.Middlewares, err = middlewares.NewMiddlewares(cfg.DbClient) if err != nil { return v1, err } diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go index 84a6ef2583c..0030f7d3b39 100644 --- a/pkg/apiserver/controllers/v1/machines.go +++ b/pkg/apiserver/controllers/v1/machines.go @@ -1,15 +1,50 @@ package v1 import ( + "errors" + "net" "net/http" "github.com/gin-gonic/gin" "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/models" 
"github.com/crowdsecurity/crowdsec/pkg/types" ) +func (c *Controller) shouldAutoRegister(token string, gctx *gin.Context) (bool, error) { + if !*c.AutoRegisterCfg.Enable { + return false, nil + } + + clientIP := net.ParseIP(gctx.ClientIP()) + + // Can probaby happen if using unix socket ? + if clientIP == nil { + log.Warnf("Failed to parse client IP for watcher self registration: %s", gctx.ClientIP()) + return false, nil + } + + if token == "" || c.AutoRegisterCfg == nil { + return false, nil + } + + // Check the token + if token != c.AutoRegisterCfg.Token { + return false, errors.New("invalid token for auto registration") + } + + // Check the source IP + for _, ipRange := range c.AutoRegisterCfg.AllowedRangesParsed { + if ipRange.Contains(clientIP) { + return true, nil + } + } + + return false, errors.New("IP not in allowed range for auto registration") +} + func (c *Controller) CreateMachine(gctx *gin.Context) { var input models.WatcherRegistrationRequest @@ -19,14 +54,27 @@ func (c *Controller) CreateMachine(gctx *gin.Context) { } if err := input.Validate(strfmt.Default); err != nil { - c.HandleDBErrors(gctx, err) + gctx.JSON(http.StatusUnprocessableEntity, gin.H{"message": err.Error()}) return } - if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), false, false, types.PasswordAuthType); err != nil { + autoRegister, err := c.shouldAutoRegister(input.RegistrationToken, gctx) + if err != nil { + log.WithFields(log.Fields{"ip": gctx.ClientIP(), "machine_id": *input.MachineID}).Errorf("Auto-register failed: %s", err) + gctx.JSON(http.StatusUnauthorized, gin.H{"message": err.Error()}) + + return + } + + if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { c.HandleDBErrors(gctx, err) return } - gctx.Status(http.StatusCreated) + if autoRegister { + log.WithFields(log.Fields{"ip": gctx.ClientIP(), "machine_id": 
*input.MachineID}).Info("Auto-registered machine") + gctx.Status(http.StatusAccepted) + } else { + gctx.Status(http.StatusCreated) + } } diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index 58f66cfc74f..aa6e84e416b 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -12,7 +12,7 @@ import ( func TestLogin(t *testing.T) { router, config := NewAPITest(t) - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") // Login with machine not validated yet w := httptest.NewRecorder() diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 08efa91c6c1..041a6bee528 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -9,6 +9,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestCreateMachine(t *testing.T) { @@ -20,7 +22,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 400, w.Code) + assert.Equal(t, http.StatusBadRequest, w.Code) assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create machine with invalid input @@ -29,7 +31,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusUnprocessableEntity, w.Code) assert.Equal(t, `{"message":"validation failure list:\nmachine_id in body is required\npassword in body is required"}`, w.Body.String()) // Create machine @@ -43,7 +45,7 @@ func TestCreateMachine(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) } @@ -62,7 +64,7 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { req.Header.Add("X-Real-Ip", "1.1.1.1") router.ServeHTTP(w, 
req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -85,7 +87,7 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { req.Header.Add("X-Real-IP", "1.1.1.1") router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -109,7 +111,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, "", w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -122,7 +124,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { func TestCreateMachineAlreadyExist(t *testing.T) { router, _ := NewAPITest(t) - body := CreateTestMachine(t, router) + body := CreateTestMachine(t, router, "") w := httptest.NewRecorder() req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) @@ -134,6 +136,90 @@ func TestCreateMachineAlreadyExist(t *testing.T) { req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) + assert.Equal(t, http.StatusForbidden, w.Code) assert.Equal(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) } + +func TestAutoRegistration(t *testing.T) { + router, _ := NewAPITest(t) + + //Invalid registration token / valid source IP + regReq := MachineTest + regReq.RegistrationToken = invalidRegistrationToken + b, err := json.Marshal(regReq) + require.NoError(t, err) + + body := string(b) + + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + 
router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //Invalid registration token / invalid source IP + regReq = MachineTest + regReq.RegistrationToken = invalidRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "42.42.42.42:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //valid registration token / invalid source IP + regReq = MachineTest + regReq.RegistrationToken = validRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "42.42.42.42:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusUnauthorized, w.Code) + + //Valid registration token / valid source IP + regReq = MachineTest + regReq.RegistrationToken = validRegistrationToken + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusAccepted, w.Code) + + //No token / valid source IP + regReq = MachineTest + regReq.MachineID = ptr.Of("test2") + b, err = json.Marshal(regReq) + require.NoError(t, err) + + body = string(b) + + w = httptest.NewRecorder() + req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:4242" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusCreated, w.Code) +} diff --git a/pkg/csconfig/api.go 
b/pkg/csconfig/api.go index a23df957282..4a28b590e80 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -236,32 +236,40 @@ type CapiWhitelist struct { Cidrs []*net.IPNet `yaml:"cidrs,omitempty"` } +type LocalAPIAutoRegisterCfg struct { + Enable *bool `yaml:"enabled"` + Token string `yaml:"token"` + AllowedRanges []string `yaml:"allowed_ranges,omitempty"` + AllowedRangesParsed []*net.IPNet `yaml:"-"` +} + /*local api service configuration*/ type LocalApiServerCfg struct { - Enable *bool `yaml:"enable"` - ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 - ListenSocket string `yaml:"listen_socket,omitempty"` - TLS *TLSCfg `yaml:"tls"` - DbConfig *DatabaseCfg `yaml:"-"` - LogDir string `yaml:"-"` - LogMedia string `yaml:"-"` - OnlineClient *OnlineApiClientCfg `yaml:"online_client"` - ProfilesPath string `yaml:"profiles_path,omitempty"` - ConsoleConfigPath string `yaml:"console_path,omitempty"` - ConsoleConfig *ConsoleConfig `yaml:"-"` - Profiles []*ProfileCfg `yaml:"-"` - LogLevel *log.Level `yaml:"log_level"` - UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` - TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` - CompressLogs *bool `yaml:"-"` - LogMaxSize int `yaml:"-"` - LogMaxAge int `yaml:"-"` - LogMaxFiles int `yaml:"-"` - TrustedIPs []string `yaml:"trusted_ips,omitempty"` - PapiLogLevel *log.Level `yaml:"papi_log_level"` - DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` - CapiWhitelistsPath string `yaml:"capi_whitelists_path,omitempty"` - CapiWhitelists *CapiWhitelist `yaml:"-"` + Enable *bool `yaml:"enable"` + ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 + ListenSocket string `yaml:"listen_socket,omitempty"` + TLS *TLSCfg `yaml:"tls"` + DbConfig *DatabaseCfg `yaml:"-"` + LogDir string `yaml:"-"` + LogMedia string `yaml:"-"` + OnlineClient *OnlineApiClientCfg `yaml:"online_client"` + ProfilesPath string `yaml:"profiles_path,omitempty"` + 
ConsoleConfigPath string `yaml:"console_path,omitempty"` + ConsoleConfig *ConsoleConfig `yaml:"-"` + Profiles []*ProfileCfg `yaml:"-"` + LogLevel *log.Level `yaml:"log_level"` + UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` + TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` + CompressLogs *bool `yaml:"-"` + LogMaxSize int `yaml:"-"` + LogMaxAge int `yaml:"-"` + LogMaxFiles int `yaml:"-"` + TrustedIPs []string `yaml:"trusted_ips,omitempty"` + PapiLogLevel *log.Level `yaml:"papi_log_level"` + DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` + CapiWhitelistsPath string `yaml:"capi_whitelists_path,omitempty"` + CapiWhitelists *CapiWhitelist `yaml:"-"` + AutoRegister *LocalAPIAutoRegisterCfg `yaml:"auto_registration,omitempty"` } func (c *LocalApiServerCfg) ClientURL() string { @@ -348,6 +356,14 @@ func (c *Config) LoadAPIServer(inCli bool) error { log.Infof("loaded capi whitelist from %s: %d IPs, %d CIDRs", c.API.Server.CapiWhitelistsPath, len(c.API.Server.CapiWhitelists.Ips), len(c.API.Server.CapiWhitelists.Cidrs)) } + if err := c.API.Server.LoadAutoRegister(); err != nil { + return err + } + + if c.API.Server.AutoRegister != nil && c.API.Server.AutoRegister.Enable != nil && *c.API.Server.AutoRegister.Enable && !inCli { + log.Infof("auto LAPI registration enabled for ranges %+v", c.API.Server.AutoRegister.AllowedRanges) + } + c.API.Server.LogDir = c.Common.LogDir c.API.Server.LogMedia = c.Common.LogMedia c.API.Server.CompressLogs = c.Common.CompressLogs @@ -455,3 +471,47 @@ func (c *Config) LoadAPIClient() error { return c.API.Client.Load() } + +func (c *LocalApiServerCfg) LoadAutoRegister() error { + if c.AutoRegister == nil { + c.AutoRegister = &LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(false), + } + + return nil + } + + // Disable by default + if c.AutoRegister.Enable == nil { + c.AutoRegister.Enable = ptr.Of(false) + } + + if !*c.AutoRegister.Enable { + return nil + } + + if 
c.AutoRegister.Token == "" { + return errors.New("missing token value for api.server.auto_register") + } + + if len(c.AutoRegister.Token) < 32 { + return errors.New("token value for api.server.auto_register is too short (min 32 characters)") + } + + if c.AutoRegister.AllowedRanges == nil { + return errors.New("missing allowed_ranges value for api.server.auto_register") + } + + c.AutoRegister.AllowedRangesParsed = make([]*net.IPNet, 0, len(c.AutoRegister.AllowedRanges)) + + for _, ipRange := range c.AutoRegister.AllowedRanges { + _, ipNet, err := net.ParseCIDR(ipRange) + if err != nil { + return fmt.Errorf("auto_register: failed to parse allowed range '%s': %w", ipRange, err) + } + + c.AutoRegister.AllowedRangesParsed = append(c.AutoRegister.AllowedRangesParsed, ipNet) + } + + return nil +} diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 51a4c5ad602..96945202aa8 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -217,6 +217,12 @@ func TestLoadAPIServer(t *testing.T) { ProfilesPath: "./testdata/profiles.yaml", UseForwardedForHeaders: false, PapiLogLevel: &logLevel, + AutoRegister: &LocalAPIAutoRegisterCfg{ + Enable: ptr.Of(false), + Token: "", + AllowedRanges: nil, + AllowedRangesParsed: nil, + }, }, }, { diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 9edfd12b82f..01bbe6f8bde 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -312,6 +312,9 @@ paths: '201': description: Watcher Created headers: {} + '202': + description: Watcher Validated + headers: {} '400': description: "400 response" schema: @@ -726,6 +729,10 @@ definitions: password: type: string format: password + registration_token: + type: string + minLength: 32 + maxLength: 255 required: - machine_id - password diff --git a/pkg/models/watcher_registration_request.go b/pkg/models/watcher_registration_request.go index 8be802ea3e7..673f0d59b9e 100644 --- 
a/pkg/models/watcher_registration_request.go +++ b/pkg/models/watcher_registration_request.go @@ -27,6 +27,11 @@ type WatcherRegistrationRequest struct { // Required: true // Format: password Password *strfmt.Password `json:"password"` + + // registration token + // Max Length: 255 + // Min Length: 32 + RegistrationToken string `json:"registration_token,omitempty"` } // Validate validates this watcher registration request @@ -41,6 +46,10 @@ func (m *WatcherRegistrationRequest) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateRegistrationToken(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -69,6 +78,22 @@ func (m *WatcherRegistrationRequest) validatePassword(formats strfmt.Registry) e return nil } +func (m *WatcherRegistrationRequest) validateRegistrationToken(formats strfmt.Registry) error { + if swag.IsZero(m.RegistrationToken) { // not required + return nil + } + + if err := validate.MinLength("registration_token", "body", m.RegistrationToken, 32); err != nil { + return err + } + + if err := validate.MaxLength("registration_token", "body", m.RegistrationToken, 255); err != nil { + return err + } + + return nil +} + // ContextValidate validates this watcher registration request based on context it is used func (m *WatcherRegistrationRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { return nil diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 27cfe53212b..264870501a5 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -209,109 +209,6 @@ teardown() { rm -rf -- "${backupdir:?}" } -@test "cscli lapi status" { - rune -0 ./instance-crowdsec start - rune -0 cscli lapi status - - assert_stderr --partial "Loaded credentials from" - assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" -} - -@test "cscli - 
missing LAPI credentials file" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - rm -f "$LOCAL_API_CREDENTIALS" - rune -1 cscli lapi status - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" - - rune -1 cscli alerts list - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" - - rune -1 cscli decisions list - assert_stderr --partial "loading api client: while reading yaml file: open ${LOCAL_API_CREDENTIALS}: no such file or directory" -} - -@test "cscli - empty LAPI credentials file" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - : > "$LOCAL_API_CREDENTIALS" - rune -1 cscli lapi status - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" - - rune -1 cscli alerts list - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" - - rune -1 cscli decisions list - assert_stderr --partial "no credentials or URL found in api client configuration '${LOCAL_API_CREDENTIALS}'" -} - -@test "cscli - LAPI credentials file can reference env variables" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - URL=$(config_get "$LOCAL_API_CREDENTIALS" '.url') - export URL - LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') - export LOGIN - PASSWORD=$(config_get "$LOCAL_API_CREDENTIALS" '.password') - export PASSWORD - - # shellcheck disable=SC2016 - echo '{"url":"$URL","login":"$LOGIN","password":"$PASSWORD"}' > "$LOCAL_API_CREDENTIALS".local - - config_set '.crowdsec_service.enable=false' - rune -0 ./instance-crowdsec start - - rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" - - rm "$LOCAL_API_CREDENTIALS".local - - # shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.url="$URL"' - # 
shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.login="$LOGIN"' - # shellcheck disable=SC2016 - config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' - - rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" - - # but if a variable is not defined, there is no specific error message - unset URL - rune -1 cscli lapi status - # shellcheck disable=SC2016 - assert_stderr --partial 'BaseURL must have a trailing slash' -} - -@test "cscli - missing LAPI client settings" { - config_set 'del(.api.client)' - rune -1 cscli lapi status - assert_stderr --partial "loading api client: no API client section in configuration" - - rune -1 cscli alerts list - assert_stderr --partial "loading api client: no API client section in configuration" - - rune -1 cscli decisions list - assert_stderr --partial "loading api client: no API client section in configuration" -} - -@test "cscli - malformed LAPI url" { - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' - - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' -} - -@test "cscli - bad LAPI password" { - rune -0 ./instance-crowdsec start - LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') - config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' - - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' -} - @test "'cscli completion' with or without configuration file" { rune -0 cscli completion bash assert_output --partial "# bash completion for cscli" diff --git a/test/bats/01_cscli_lapi.bats b/test/bats/01_cscli_lapi.bats new file mode 100644 index 00000000000..a503dfff8cf --- /dev/null +++ 
b/test/bats/01_cscli_lapi.bats @@ -0,0 +1,213 @@ +#!/usr/bin/env bats +# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # don't run crowdsec here, not all tests require a running instance +} + +teardown() { + cd "$TEST_DIR" || exit 1 + ./instance-crowdsec stop +} + +#---------- + +@test "cscli lapi status" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi status + + assert_stderr --partial "Loaded credentials from" + assert_stderr --partial "Trying to authenticate with username" + assert_stderr --partial "You can successfully interact with Local API (LAPI)" +} + +@test "cscli - missing LAPI credentials file" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + rm -f "$LOCAL_API_CREDENTIALS" + rune -1 cscli lapi status + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" + + rune -1 cscli alerts list + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" + + rune -1 cscli decisions list + assert_stderr --partial "loading api client: while reading yaml file: open $LOCAL_API_CREDENTIALS: no such file or directory" +} + +@test "cscli - empty LAPI credentials file" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + : > "$LOCAL_API_CREDENTIALS" + rune -1 cscli lapi status + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" + + rune -1 cscli alerts list + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" + + rune -1 cscli decisions list + assert_stderr --partial "no credentials or URL found in api client configuration '$LOCAL_API_CREDENTIALS'" +} + +@test 
"cscli - LAPI credentials file can reference env variables" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + URL=$(config_get "$LOCAL_API_CREDENTIALS" '.url') + export URL + LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') + export LOGIN + PASSWORD=$(config_get "$LOCAL_API_CREDENTIALS" '.password') + export PASSWORD + + # shellcheck disable=SC2016 + echo '{"url":"$URL","login":"$LOGIN","password":"$PASSWORD"}' > "$LOCAL_API_CREDENTIALS".local + + config_set '.crowdsec_service.enable=false' + rune -0 ./instance-crowdsec start + + rune -0 cscli lapi status + assert_stderr --partial "You can successfully interact with Local API (LAPI)" + + rm "$LOCAL_API_CREDENTIALS".local + + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.url="$URL"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.login="$LOGIN"' + # shellcheck disable=SC2016 + config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' + + rune -0 cscli lapi status + assert_stderr --partial "You can successfully interact with Local API (LAPI)" + + # but if a variable is not defined, there is no specific error message + unset URL + rune -1 cscli lapi status + # shellcheck disable=SC2016 + assert_stderr --partial 'BaseURL must have a trailing slash' +} + +@test "cscli - missing LAPI client settings" { + config_set 'del(.api.client)' + rune -1 cscli lapi status + assert_stderr --partial "loading api client: no API client section in configuration" + + rune -1 cscli alerts list + assert_stderr --partial "loading api client: no API client section in configuration" + + rune -1 cscli decisions list + assert_stderr --partial "loading api client: no API client section in configuration" +} + +@test "cscli - malformed LAPI url" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' + + rune -1 cscli lapi status -o json + rune -0 jq -r '.msg' <(stderr) + assert_output 'failed 
to authenticate to Local API (LAPI): parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' +} + +@test "cscli - bad LAPI password" { + rune -0 ./instance-crowdsec start + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' + + rune -1 cscli lapi status -o json + rune -0 jq -r '.msg' <(stderr) + assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' +} + +@test "cscli lapi register / machines validate" { + rune -1 cscli lapi register + assert_stderr --partial "connection refused" + + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + + rune -0 ./instance-crowdsec start + rune -0 cscli lapi register + assert_stderr --partial "Successfully registered to Local API" + assert_stderr --partial "Local API credentials written to '$LOCAL_API_CREDENTIALS'" + assert_stderr --partial "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." 
+ + LOGIN=$(config_get "$LOCAL_API_CREDENTIALS" '.login') + + rune -0 cscli machines inspect "$LOGIN" -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "null" + + rune -0 cscli machines validate "$LOGIN" + + rune -0 cscli machines inspect "$LOGIN" -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --machine" { + rune -0 ./instance-crowdsec start + rune -0 cscli lapi register --machine newmachine + rune -0 cscli machines validate newmachine + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --token (ignored)" { + # A token is ignored if the server is not configured with it + rune -1 cscli lapi register --machine newmachine --token meh + assert_stderr --partial "connection refused" + + rune -0 ./instance-crowdsec start + rune -1 cscli lapi register --machine newmachine --token meh + assert_stderr --partial '422 Unprocessable Entity: API error: http code 422, invalid request:' + assert_stderr --partial 'registration_token in body should be at least 32 chars long' + + rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 + assert_stderr --partial "Successfully registered to Local API" + + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "null" +} + +@test "cscli lapi register --token" { + config_set '.api.server.auto_registration.enabled=true' + config_set '.api.server.auto_registration.token="12345678901234567890123456789012"' + config_set '.api.server.auto_registration.allowed_ranges=["127.0.0.1/32"]' + + rune -0 ./instance-crowdsec start + + rune -1 cscli lapi register --machine malicious --token 123456789012345678901234badtoken + assert_stderr --partial "401 Unauthorized: API error: invalid token for auto registration" + rune -1 cscli machines inspect malicious -o json + assert_stderr --partial "unable to 
read machine data 'malicious': user 'malicious': user doesn't exist" + + rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 + assert_stderr --partial "Successfully registered to Local API" + rune -0 cscli machines inspect newmachine -o json + rune -0 jq -r '.isValidated' <(output) + assert_output "true" +} + +@test "cscli lapi register --token (bad source ip)" { + config_set '.api.server.auto_registration.enabled=true' + config_set '.api.server.auto_registration.token="12345678901234567890123456789012"' + config_set '.api.server.auto_registration.allowed_ranges=["127.0.0.2/32"]' + + rune -0 ./instance-crowdsec start + + rune -1 cscli lapi register --machine outofrange --token 12345678901234567890123456789012 + assert_stderr --partial "401 Unauthorized: API error: IP not in allowed range for auto registration" + rune -1 cscli machines inspect outofrange -o json + assert_stderr --partial "unable to read machine data 'outofrange': user 'outofrange': user doesn't exist" +} From dda0fa1dfabf8efeb297e88bbef6a8d41b8e6f8e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 2 Sep 2024 14:04:14 +0200 Subject: [PATCH 273/581] cscli dashboard: exit earlier on docker (#3210) --- cmd/crowdsec-cli/dashboard.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 31efd074bf9..13cebe3dbd5 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -71,6 +71,10 @@ cscli dashboard stop cscli dashboard remove `, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if version.System == "docker" { + return errors.New("cscli dashboard is not supported whilst running CrowdSec within a container please see: https://github.com/crowdsecurity/example-docker-compose/tree/main/basic") + } + cfg := cli.cfg() if err := require.LAPI(cfg); err != nil { return err @@ -104,10 +108,6 @@ cscli dashboard 
remove log.Warn("cscli dashboard will be deprecated in version 1.7.0, read more at https://docs.crowdsec.net/blog/cscli_dashboard_deprecation/") - if version.System == "docker" { - return errors.New("cscli dashboard is not supported whilst running CrowdSec within a container please see: https://github.com/crowdsecurity/example-docker-compose/tree/main/basic") - } - return nil }, } From a987dfcaf29d4fd12ecd50fab474ef45a92ca4b3 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 2 Sep 2024 13:17:27 +0100 Subject: [PATCH 274/581] backport: return an error if cscli dashboard is run within a container (#3208) * enhance: return an error if cscli dashboard is run within a container * move docker check on top --------- Co-authored-by: marco --- cmd/crowdsec-cli/dashboard.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 96a2fa38cb7..c61fc8eeded 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -22,6 +22,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" + "github.com/crowdsecurity/go-cs-lib/version" ) var ( @@ -69,6 +70,10 @@ cscli dashboard stop cscli dashboard remove `, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if version.System == "docker" { + return errors.New("cscli dashboard is not supported whilst running CrowdSec within a container please see: https://github.com/crowdsecurity/example-docker-compose/tree/main/basic") + } + cfg := cli.cfg() if err := require.LAPI(cfg); err != nil { return err From f0d6046c596b3b76ce3265474b58cca09ad761a4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 2 Sep 2024 14:30:25 +0200 Subject: [PATCH 275/581] cscli refact: package cliitem (#3204) --- .../{hubappsec.go => cliitem/appsec.go} | 6 +++--- .../{hubcollection.go => cliitem/collection.go} | 4 ++-- .../{hubcontext.go => 
cliitem/context.go} | 4 ++-- cmd/crowdsec-cli/{ => cliitem}/hubscenario.go | 4 ++-- cmd/crowdsec-cli/{itemcli.go => cliitem/item.go} | 5 ++++- .../{hubparser.go => cliitem/parser.go} | 4 ++-- .../postoverflow.go} | 4 ++-- .../{item_suggest.go => cliitem/suggest.go} | 2 +- cmd/crowdsec-cli/main.go | 15 ++++++++------- 9 files changed, 26 insertions(+), 22 deletions(-) rename cmd/crowdsec-cli/{hubappsec.go => cliitem/appsec.go} (96%) rename cmd/crowdsec-cli/{hubcollection.go => cliitem/collection.go} (95%) rename cmd/crowdsec-cli/{hubcontext.go => cliitem/context.go} (94%) rename cmd/crowdsec-cli/{ => cliitem}/hubscenario.go (95%) rename cmd/crowdsec-cli/{itemcli.go => cliitem/item.go} (99%) rename cmd/crowdsec-cli/{hubparser.go => cliitem/parser.go} (95%) rename cmd/crowdsec-cli/{hubpostoverflow.go => cliitem/postoverflow.go} (95%) rename cmd/crowdsec-cli/{item_suggest.go => cliitem/suggest.go} (99%) diff --git a/cmd/crowdsec-cli/hubappsec.go b/cmd/crowdsec-cli/cliitem/appsec.go similarity index 96% rename from cmd/crowdsec-cli/hubappsec.go rename to cmd/crowdsec-cli/cliitem/appsec.go index 1df3212f941..db567f86a32 100644 --- a/cmd/crowdsec-cli/hubappsec.go +++ b/cmd/crowdsec-cli/cliitem/appsec.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "fmt" @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIAppsecConfig(cfg configGetter) *cliItem { +func NewAppsecConfig(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.APPSEC_CONFIGS, @@ -47,7 +47,7 @@ cscli appsec-configs list crowdsecurity/vpatch`, } } -func NewCLIAppsecRule(cfg configGetter) *cliItem { +func NewAppsecRule(cfg configGetter) *cliItem { inspectDetail := func(item *cwhub.Item) error { // Only show the converted rules in human mode if cfg().Cscli.Output != "human" { diff --git a/cmd/crowdsec-cli/hubcollection.go b/cmd/crowdsec-cli/cliitem/collection.go similarity index 95% rename from cmd/crowdsec-cli/hubcollection.go rename to 
cmd/crowdsec-cli/cliitem/collection.go index 655b36eb1b8..ea91c1e537a 100644 --- a/cmd/crowdsec-cli/hubcollection.go +++ b/cmd/crowdsec-cli/cliitem/collection.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLICollection(cfg configGetter) *cliItem { +func NewCollection(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.COLLECTIONS, diff --git a/cmd/crowdsec-cli/hubcontext.go b/cmd/crowdsec-cli/cliitem/context.go similarity index 94% rename from cmd/crowdsec-cli/hubcontext.go rename to cmd/crowdsec-cli/cliitem/context.go index 2a777327379..7d110b8203d 100644 --- a/cmd/crowdsec-cli/hubcontext.go +++ b/cmd/crowdsec-cli/cliitem/context.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIContext(cfg configGetter) *cliItem { +func NewContext(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.CONTEXTS, diff --git a/cmd/crowdsec-cli/hubscenario.go b/cmd/crowdsec-cli/cliitem/hubscenario.go similarity index 95% rename from cmd/crowdsec-cli/hubscenario.go rename to cmd/crowdsec-cli/cliitem/hubscenario.go index 4434b9a2c45..a5e854b3c82 100644 --- a/cmd/crowdsec-cli/hubscenario.go +++ b/cmd/crowdsec-cli/cliitem/hubscenario.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIScenario(cfg configGetter) *cliItem { +func NewScenario(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.SCENARIOS, diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/cliitem/item.go similarity index 99% rename from cmd/crowdsec-cli/itemcli.go rename to cmd/crowdsec-cli/cliitem/item.go index a5629b425b9..28828eb9c95 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "cmp" @@ -18,6 +18,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -30,6 +31,8 @@ type cliHelp struct { example string } +type configGetter func() *csconfig.Config + type cliItem struct { cfg configGetter name string // plural, as used in the hub index diff --git a/cmd/crowdsec-cli/hubparser.go b/cmd/crowdsec-cli/cliitem/parser.go similarity index 95% rename from cmd/crowdsec-cli/hubparser.go rename to cmd/crowdsec-cli/cliitem/parser.go index cc856cbedb9..bc1d96bdaf0 100644 --- a/cmd/crowdsec-cli/hubparser.go +++ b/cmd/crowdsec-cli/cliitem/parser.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIParser(cfg configGetter) *cliItem { +func NewParser(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.PARSERS, diff --git a/cmd/crowdsec-cli/hubpostoverflow.go b/cmd/crowdsec-cli/cliitem/postoverflow.go similarity index 95% rename from cmd/crowdsec-cli/hubpostoverflow.go rename to cmd/crowdsec-cli/cliitem/postoverflow.go index 3fd45fd113d..ea53aef327d 100644 --- a/cmd/crowdsec-cli/hubpostoverflow.go +++ b/cmd/crowdsec-cli/cliitem/postoverflow.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIPostOverflow(cfg configGetter) *cliItem { +func NewPostOverflow(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.POSTOVERFLOWS, diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/cliitem/suggest.go similarity index 99% rename from cmd/crowdsec-cli/item_suggest.go rename to cmd/crowdsec-cli/cliitem/suggest.go index 7d3e1e728ae..5b080722af9 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ b/cmd/crowdsec-cli/cliitem/suggest.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "fmt" diff --git a/cmd/crowdsec-cli/main.go 
b/cmd/crowdsec-cli/main.go index 2a1f5ac7ebe..6f8e93e463c 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliitem" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" @@ -270,13 +271,13 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) cmd.AddCommand(clisupport.New(cli.cfg).NewCommand()) cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIScenario(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIPostOverflow(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIContext(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAppsecConfig(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewCollection(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewParser(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewScenario(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewPostOverflow(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewContext(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewAppsecConfig(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewAppsecRule(cli.cfg).NewCommand()) if fflag.CscliSetup.IsEnabled() { cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) From ae5e99ff13853fe48c0b6b54a806581387a01d9f Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 3 Sep 2024 12:08:18 +0200 Subject: [PATCH 276/581] update grokky and deps (#3206) * update grokky and deps * mod tidy --------- 
Co-authored-by: marco --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 9377e116519..bce1ca12316 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 github.com/crowdsecurity/go-cs-lib v0.0.15 - github.com/crowdsecurity/grokky v0.2.1 + github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/dghubble/sling v1.4.2 @@ -80,11 +80,11 @@ require ( github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 - github.com/wasilibs/go-re2 v1.3.0 + github.com/wasilibs/go-re2 v1.6.0 github.com/xhit/go-simple-mail/v2 v2.16.0 golang.org/x/crypto v0.22.0 golang.org/x/mod v0.15.0 - golang.org/x/sys v0.19.0 + golang.org/x/sys v0.24.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 google.golang.org/protobuf v1.33.0 @@ -152,7 +152,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magefile/mage v1.15.0 // indirect + github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect @@ -185,7 +185,7 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tetratelabs/wazero v1.2.1 // indirect + github.com/tetratelabs/wazero v1.8.0 // indirect github.com/tidwall/gjson v1.17.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect diff --git a/go.sum b/go.sum index 
b76c7fccd1a..eec85b5b2e9 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,8 @@ github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= -github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= -github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= +github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= +github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= github.com/crowdsecurity/machineid v1.0.2/go.mod h1:XWUSlnS0R0+u/JK5ulidwlbceNT3ZOCKteoVQEn6Luo= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -465,8 +465,8 @@ github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffkt github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= -github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a h1:tdPcGgyiH0K+SbsJBBm2oPyEIOTAvLBwD9TuUwVtZho= +github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson 
v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -661,8 +661,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tetratelabs/wazero v1.2.1 h1:J4X2hrGzJvt+wqltuvcSjHQ7ujQxA9gb6PeMs4qlUWs= -github.com/tetratelabs/wazero v1.2.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= +github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= +github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -690,8 +690,8 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wasilibs/go-re2 v1.3.0 h1:LFhBNzoStM3wMie6rN2slD1cuYH2CGiHpvNL3UtcsMw= -github.com/wasilibs/go-re2 v1.3.0/go.mod h1:AafrCXVvGRJJOImMajgJ2M7rVmWyisVK7sFshbxnVrg= +github.com/wasilibs/go-re2 v1.6.0 h1:CLlhDebt38wtl/zz4ww+hkXBMcxjrKFvTDXzFW2VOz8= +github.com/wasilibs/go-re2 v1.6.0/go.mod 
h1:prArCyErsypRBI/jFAFJEbzyHzjABKqkzlidF0SNA04= github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ= github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -841,8 +841,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 6797597c5d459ddaa75715f6dded326fe4d671ad Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 3 Sep 2024 12:08:44 +0200 Subject: [PATCH 277/581] update grokky and deps (backport) (#3211) * update grokky and deps * mod tidy --------- Co-authored-by: Sebastien Blot --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 9377e116519..bce1ca12316 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 github.com/crowdsecurity/go-cs-lib v0.0.15 - 
github.com/crowdsecurity/grokky v0.2.1 + github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/dghubble/sling v1.4.2 @@ -80,11 +80,11 @@ require ( github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 - github.com/wasilibs/go-re2 v1.3.0 + github.com/wasilibs/go-re2 v1.6.0 github.com/xhit/go-simple-mail/v2 v2.16.0 golang.org/x/crypto v0.22.0 golang.org/x/mod v0.15.0 - golang.org/x/sys v0.19.0 + golang.org/x/sys v0.24.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.56.3 google.golang.org/protobuf v1.33.0 @@ -152,7 +152,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magefile/mage v1.15.0 // indirect + github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect @@ -185,7 +185,7 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tetratelabs/wazero v1.2.1 // indirect + github.com/tetratelabs/wazero v1.8.0 // indirect github.com/tidwall/gjson v1.17.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect diff --git a/go.sum b/go.sum index b76c7fccd1a..eec85b5b2e9 100644 --- a/go.sum +++ b/go.sum @@ -105,8 +105,8 @@ github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod 
h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= -github.com/crowdsecurity/grokky v0.2.1 h1:t4VYnDlAd0RjDM2SlILalbwfCrQxtJSMGdQOR0zwkE4= -github.com/crowdsecurity/grokky v0.2.1/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= +github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= +github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= github.com/crowdsecurity/machineid v1.0.2/go.mod h1:XWUSlnS0R0+u/JK5ulidwlbceNT3ZOCKteoVQEn6Luo= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -465,8 +465,8 @@ github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffkt github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= -github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a h1:tdPcGgyiH0K+SbsJBBm2oPyEIOTAvLBwD9TuUwVtZho= +github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -661,8 +661,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o 
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/tetratelabs/wazero v1.2.1 h1:J4X2hrGzJvt+wqltuvcSjHQ7ujQxA9gb6PeMs4qlUWs= -github.com/tetratelabs/wazero v1.2.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= +github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= +github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -690,8 +690,8 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wasilibs/go-re2 v1.3.0 h1:LFhBNzoStM3wMie6rN2slD1cuYH2CGiHpvNL3UtcsMw= -github.com/wasilibs/go-re2 v1.3.0/go.mod h1:AafrCXVvGRJJOImMajgJ2M7rVmWyisVK7sFshbxnVrg= +github.com/wasilibs/go-re2 v1.6.0 h1:CLlhDebt38wtl/zz4ww+hkXBMcxjrKFvTDXzFW2VOz8= +github.com/wasilibs/go-re2 v1.6.0/go.mod h1:prArCyErsypRBI/jFAFJEbzyHzjABKqkzlidF0SNA04= github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ= github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -841,8 +841,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= From 5a50fd06bb9d7b6ac34dc8db5dee7662285c48af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 3 Sep 2024 12:25:30 +0200 Subject: [PATCH 278/581] refact: reduce code nesting (acquisition/file, tests) (#3200) * reduce if nesting * lint: gocritic (nestingReduce) * lint --- .golangci.yml | 2 +- cmd/crowdsec-cli/cliconsole/console.go | 32 ++-- pkg/acquisition/modules/file/file.go | 175 +++++++++--------- .../internal/parser/rfc3164/parse_test.go | 162 ++++++---------- pkg/exprhelpers/debugger_test.go | 36 ++-- pkg/leakybucket/manager_load.go | 60 +++--- 6 files changed, 218 insertions(+), 249 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c59ab372799..78b666d25b4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -27,7 +27,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 20 + min-complexity: 19 nlreturn: block-size: 5 diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 995a082c514..d15f25eaf69 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -31,12 
+31,12 @@ import ( type configGetter func() *csconfig.Config type cliConsole struct { - cfg configGetter + cfg configGetter } func New(cfg configGetter) *cliConsole { return &cliConsole{ - cfg: cfg, + cfg: cfg, } } @@ -88,23 +88,25 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st } for _, availableOpt := range csconfig.CONSOLE_CONFIGS { - if opt == availableOpt { - valid = true - enable := true - - for _, enabledOpt := range enableOpts { - if opt == enabledOpt { - enable = false - continue - } - } + if opt != availableOpt { + continue + } + + valid = true + enable := true - if enable { - enableOpts = append(enableOpts, opt) + for _, enabledOpt := range enableOpts { + if opt == enabledOpt { + enable = false + continue } + } - break + if enable { + enableOpts = append(enableOpts, opt) } + + break } if !valid { diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 34a7052f46f..4f7880baa89 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -426,118 +426,122 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { return nil } - if event.Op&fsnotify.Create == fsnotify.Create { - fi, err := os.Stat(event.Name) - if err != nil { - logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) - continue - } + if event.Op&fsnotify.Create != fsnotify.Create { + continue + } - if fi.IsDir() { - continue - } + fi, err := os.Stat(event.Name) + if err != nil { + logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) + continue + } - logger.Debugf("Detected new file %s", event.Name) + if fi.IsDir() { + continue + } - matched := false + logger.Debugf("Detected new file %s", event.Name) - for _, pattern := range f.config.Filenames { - logger.Debugf("Matching %s with %s", pattern, event.Name) + matched := false - matched, err = filepath.Match(pattern, event.Name) - if err != nil { - 
logger.Errorf("Could not match pattern : %s", err) - continue - } + for _, pattern := range f.config.Filenames { + logger.Debugf("Matching %s with %s", pattern, event.Name) - if matched { - logger.Debugf("Matched %s with %s", pattern, event.Name) - break - } + matched, err = filepath.Match(pattern, event.Name) + if err != nil { + logger.Errorf("Could not match pattern : %s", err) + continue } - if !matched { - continue + if matched { + logger.Debugf("Matched %s with %s", pattern, event.Name) + break } + } - // before opening the file, check if we need to specifically avoid it. (XXX) - skip := false + if !matched { + continue + } - for _, pattern := range f.exclude_regexps { - if pattern.MatchString(event.Name) { - f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) + // before opening the file, check if we need to specifically avoid it. (XXX) + skip := false - skip = true + for _, pattern := range f.exclude_regexps { + if pattern.MatchString(event.Name) { + f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) - break - } - } + skip = true - if skip { - continue + break } + } - f.tailMapMutex.RLock() - if f.tails[event.Name] { - f.tailMapMutex.RUnlock() - // we already have a tail on it, do not start a new one - logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) + if skip { + continue + } - break - } + f.tailMapMutex.RLock() + if f.tails[event.Name] { f.tailMapMutex.RUnlock() - // cf. 
https://github.com/crowdsecurity/crowdsec/issues/1168 - // do not rely on stat, reclose file immediately as it's opened by Tail - fd, err := os.Open(event.Name) - if err != nil { - f.logger.Errorf("unable to read %s : %s", event.Name, err) - continue - } - if err := fd.Close(); err != nil { - f.logger.Errorf("unable to close %s : %s", event.Name, err) - continue - } + // we already have a tail on it, do not start a new one + logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) - pollFile := false - if f.config.PollWithoutInotify != nil { - pollFile = *f.config.PollWithoutInotify - } else { - networkFS, fsType, err := types.IsNetworkFS(event.Name) - if err != nil { - f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) - } - f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) - if networkFS { - pollFile = true - } - } + break + } + f.tailMapMutex.RUnlock() + // cf. https://github.com/crowdsecurity/crowdsec/issues/1168 + // do not rely on stat, reclose file immediately as it's opened by Tail + fd, err := os.Open(event.Name) + if err != nil { + f.logger.Errorf("unable to read %s : %s", event.Name, err) + continue + } - filink, err := os.Lstat(event.Name) + if err = fd.Close(); err != nil { + f.logger.Errorf("unable to close %s : %s", event.Name, err) + continue + } + pollFile := false + if f.config.PollWithoutInotify != nil { + pollFile = *f.config.PollWithoutInotify + } else { + networkFS, fsType, err := types.IsNetworkFS(event.Name) if err != nil { - logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) - continue + f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) } - if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { - logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. 
Consider setting poll_without_inotify to true in your configuration", event.Name) - } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) - //Slightly different parameters for Location, as we want to read the first lines of the newly created file - tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) - if err != nil { - logger.Errorf("Could not start tailing file %s : %s", event.Name, err) - break + if networkFS { + pollFile = true } + } + + filink, err := os.Lstat(event.Name) + if err != nil { + logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) + continue + } - f.tailMapMutex.Lock() - f.tails[event.Name] = true - f.tailMapMutex.Unlock() - t.Go(func() error { - defer trace.CatchPanic("crowdsec/acquis/tailfile") - return f.tailFile(out, t, tail) - }) + if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { + logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. 
Consider setting poll_without_inotify to true in your configuration", event.Name) } + + // Slightly different parameters for Location, as we want to read the first lines of the newly created file + tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) + if err != nil { + logger.Errorf("Could not start tailing file %s : %s", event.Name, err) + break + } + + f.tailMapMutex.Lock() + f.tails[event.Name] = true + f.tailMapMutex.Unlock() + t.Go(func() error { + defer trace.CatchPanic("crowdsec/acquis/tailfile") + return f.tailFile(out, t, tail) + }) case err, ok := <-f.watcher.Errors: if !ok { return nil @@ -571,8 +575,9 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai return nil case <-tail.Dying(): // our tailer is dying - err := tail.Err() errMsg := fmt.Sprintf("file reader of %s died", tail.Filename) + + err := tail.Err() if err != nil { errMsg = fmt.Sprintf(errMsg+" : %s", err) } diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go index 8fb5089a61f..3af6614bce6 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go @@ -4,6 +4,10 @@ import ( "fmt" "testing" "time" + + "github.com/stretchr/testify/assert" + + "github.com/crowdsecurity/go-cs-lib/cstest" ) func TestPri(t *testing.T) { @@ -26,28 +30,20 @@ func TestPri(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parsePRI() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if 
r.PRI != test.expected { - t.Errorf("expected %d, got %d", test.expected, r.PRI) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.PRI) }) } } func TestTimestamp(t *testing.T) { - tests := []struct { input string expected string @@ -68,25 +64,19 @@ func TestTimestamp(t *testing.T) { if test.currentYear { opts = append(opts, WithCurrentYear()) } + r := NewRFC3164Parser(opts...) r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseTimestamp() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Timestamp.Format(time.RFC3339) != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Timestamp.Format(time.RFC3339)) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Timestamp.Format(time.RFC3339)) }) } } @@ -121,25 +111,19 @@ func TestHostname(t *testing.T) { if test.strictHostname { opts = append(opts, WithStrictHostname()) } + r := NewRFC3164Parser(opts...) 
r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseHostname() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Hostname != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Hostname) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Hostname) }) } } @@ -164,27 +148,16 @@ func TestTag(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseTag() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else { - if r.Tag != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Tag) - } - if r.PID != test.expectedPID { - t.Errorf("expected %s, got %s", test.expected, r.Message) - } - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Tag) + assert.Equal(t, test.expectedPID, r.PID) }) } } @@ -207,22 +180,15 @@ func TestMessage(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseMessage() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Message != test.expected { - 
t.Errorf("expected message %s, got %s", test.expected, r.Tag) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Message) }) } } @@ -236,6 +202,7 @@ func TestParse(t *testing.T) { Message string PRI int } + tests := []struct { input string expected expected @@ -326,39 +293,20 @@ func TestParse(t *testing.T) { for _, test := range tests { t.Run(test.input, func(t *testing.T) { r := NewRFC3164Parser(test.opts...) + err := r.Parse([]byte(test.input)) - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error '%s', got '%s'", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: '%s'", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error '%s', got no error", test.expectedErr) - } else { - if r.Timestamp != test.expected.Timestamp { - t.Errorf("expected timestamp '%s', got '%s'", test.expected.Timestamp, r.Timestamp) - } - if r.Hostname != test.expected.Hostname { - t.Errorf("expected hostname '%s', got '%s'", test.expected.Hostname, r.Hostname) - } - if r.Tag != test.expected.Tag { - t.Errorf("expected tag '%s', got '%s'", test.expected.Tag, r.Tag) - } - if r.PID != test.expected.PID { - t.Errorf("expected pid '%s', got '%s'", test.expected.PID, r.PID) - } - if r.Message != test.expected.Message { - t.Errorf("expected message '%s', got '%s'", test.expected.Message, r.Message) - } - if r.PRI != test.expected.PRI { - t.Errorf("expected pri '%d', got '%d'", test.expected.PRI, r.PRI) - } - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected.Timestamp, r.Timestamp) + assert.Equal(t, test.expected.Hostname, r.Hostname) + assert.Equal(t, test.expected.Tag, r.Tag) + assert.Equal(t, test.expected.PID, r.PID) + assert.Equal(t, test.expected.Message, r.Message) + assert.Equal(t, test.expected.PRI, r.PRI) }) } } diff 
--git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index efdcbc1a769..32144454084 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -26,6 +26,7 @@ type ExprDbgTest struct { func UpperTwo(params ...any) (any, error) { s := params[0].(string) v := params[1].(string) + return strings.ToUpper(s) + strings.ToUpper(v), nil } @@ -33,6 +34,7 @@ func UpperThree(params ...any) (any, error) { s := params[0].(string) v := params[1].(string) x := params[2].(string) + return strings.ToUpper(s) + strings.ToUpper(v) + strings.ToUpper(x), nil } @@ -41,6 +43,7 @@ func UpperN(params ...any) (any, error) { v := params[1].(string) x := params[2].(string) y := params[3].(string) + return strings.ToUpper(s) + strings.ToUpper(v) + strings.ToUpper(x) + strings.ToUpper(y), nil } @@ -76,9 +79,9 @@ func TestBaseDbg(t *testing.T) { // use '%#v' to dump in golang syntax // use regexp to clear empty/default fields: // [a-z]+: (false|\[\]string\(nil\)|""), - //ConditionResult:(*bool) + // ConditionResult:(*bool) - //Missing multi parametes function + // Missing multi parametes function tests := []ExprDbgTest{ { Name: "nil deref", @@ -272,6 +275,7 @@ func TestBaseDbg(t *testing.T) { } logger := log.WithField("test", "exprhelpers") + for _, test := range tests { if test.LogLevel != 0 { log.SetLevel(test.LogLevel) @@ -308,10 +312,13 @@ func TestBaseDbg(t *testing.T) { t.Fatalf("test %s : unexpected compile error : %s", test.Name, err) } } + if test.Name == "nil deref" { test.Env["nilvar"] = nil } + outdbg, ret, err := RunWithDebug(prog, test.Env, logger) + if test.ExpectedFailRuntime { if err == nil { t.Fatalf("test %s : expected runtime error", test.Name) @@ -321,25 +328,30 @@ func TestBaseDbg(t *testing.T) { t.Fatalf("test %s : unexpected runtime error : %s", test.Name, err) } } + log.SetLevel(log.DebugLevel) DisplayExprDebug(prog, outdbg, logger, ret) + if len(outdbg) != len(test.ExpectedOutputs) { t.Errorf("failed test %s", 
test.Name) t.Errorf("%#v", outdbg) - //out, _ := yaml.Marshal(outdbg) - //fmt.Printf("%s", string(out)) + // out, _ := yaml.Marshal(outdbg) + // fmt.Printf("%s", string(out)) t.Fatalf("test %s : expected %d outputs, got %d", test.Name, len(test.ExpectedOutputs), len(outdbg)) - } + for i, out := range outdbg { - if !reflect.DeepEqual(out, test.ExpectedOutputs[i]) { - spew.Config.DisableMethods = true - t.Errorf("failed test %s", test.Name) - t.Errorf("expected : %#v", test.ExpectedOutputs[i]) - t.Errorf("got : %#v", out) - t.Fatalf("%d/%d : mismatch", i, len(outdbg)) + if reflect.DeepEqual(out, test.ExpectedOutputs[i]) { + // DisplayExprDebug(prog, outdbg, logger, ret) + continue } - //DisplayExprDebug(prog, outdbg, logger, ret) + + spew.Config.DisableMethods = true + + t.Errorf("failed test %s", test.Name) + t.Errorf("expected : %#v", test.ExpectedOutputs[i]) + t.Errorf("got : %#v", out) + t.Fatalf("%d/%d : mismatch", i, len(outdbg)) } } } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 1d523759f2b..6055a5308b5 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -509,37 +509,39 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac found := false for _, h := range bucketFactories { - if h.Name == v.Name { - log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) - // check in which mode the bucket was - if v.Mode == types.TIMEMACHINE { - tbucket = NewTimeMachine(h) - } else if v.Mode == types.LIVE { - tbucket = NewLeaky(h) - } else { - log.Errorf("Unknown bucket type : %d", v.Mode) - } - /*Trying to restore queue state*/ - tbucket.Queue = v.Queue - /*Trying to set the limiter to the saved values*/ - tbucket.Limiter.Load(v.SerializedState) - tbucket.In = make(chan *types.Event) - tbucket.Mapkey = k - tbucket.Signal = make(chan bool, 1) - tbucket.First_ts = v.First_ts - tbucket.Last_ts = v.Last_ts - tbucket.Ovflw_ts = v.Ovflw_ts - tbucket.Total_count = 
v.Total_count - buckets.Bucket_map.Store(k, tbucket) - h.tomb.Go(func() error { - return LeakRoutine(tbucket) - }) - <-tbucket.Signal - - found = true + if h.Name != v.Name { + continue + } - break + log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) + // check in which mode the bucket was + if v.Mode == types.TIMEMACHINE { + tbucket = NewTimeMachine(h) + } else if v.Mode == types.LIVE { + tbucket = NewLeaky(h) + } else { + log.Errorf("Unknown bucket type : %d", v.Mode) } + /*Trying to restore queue state*/ + tbucket.Queue = v.Queue + /*Trying to set the limiter to the saved values*/ + tbucket.Limiter.Load(v.SerializedState) + tbucket.In = make(chan *types.Event) + tbucket.Mapkey = k + tbucket.Signal = make(chan bool, 1) + tbucket.First_ts = v.First_ts + tbucket.Last_ts = v.Last_ts + tbucket.Ovflw_ts = v.Ovflw_ts + tbucket.Total_count = v.Total_count + buckets.Bucket_map.Store(k, tbucket) + h.tomb.Go(func() error { + return LeakRoutine(tbucket) + }) + <-tbucket.Signal + + found = true + + break } if !found { From bc6be99b973f34cb5948caa09e4ac329f048cbca Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 3 Sep 2024 12:37:38 +0200 Subject: [PATCH 279/581] cscli refact: package clialert, clidecision (#3203) * cscli refact: package clialert, clidecision * refact: function SanitizeScope() * lint --- cmd/crowdsec-cli/{ => clialert}/alerts.go | 17 +++++--- cmd/crowdsec-cli/clialert/sanitize.go | 26 ++++++++++++ .../{alerts_table.go => clialert/table.go} | 2 +- .../{ => clidecision}/decisions.go | 24 +++++++---- .../{ => clidecision}/decisions_import.go | 6 +-- .../{ => clidecision}/decisions_table.go | 2 +- cmd/crowdsec-cli/main.go | 6 ++- cmd/crowdsec-cli/utils.go | 40 ------------------- pkg/apiserver/controllers/v1/alerts.go | 22 ++-------- pkg/types/event.go | 38 ++++++++++++++---- test/bats/90_decisions.bats | 4 +- 11 files changed, 98 insertions(+), 89 deletions(-) rename cmd/crowdsec-cli/{ => 
clialert}/alerts.go (97%) create mode 100644 cmd/crowdsec-cli/clialert/sanitize.go rename cmd/crowdsec-cli/{alerts_table.go => clialert/table.go} (99%) rename cmd/crowdsec-cli/{ => clidecision}/decisions.go (96%) rename cmd/crowdsec-cli/{ => clidecision}/decisions_import.go (99%) rename cmd/crowdsec-cli/{ => clidecision}/decisions_table.go (98%) delete mode 100644 cmd/crowdsec-cli/utils.go diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go similarity index 97% rename from cmd/crowdsec-cli/alerts.go rename to cmd/crowdsec-cli/clialert/alerts.go index 37f9ab435c7..13013153a79 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -1,4 +1,4 @@ -package main +package clialert import ( "context" @@ -24,6 +24,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -183,12 +184,14 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return nil } +type configGetter func() *csconfig.Config + type cliAlerts struct { client *apiclient.ApiClient cfg configGetter } -func NewCLIAlerts(getconfig configGetter) *cliAlerts { +func New(getconfig configGetter) *cliAlerts { return &cliAlerts{ cfg: getconfig, } @@ -235,8 +238,10 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { } func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { - if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, - alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { + var err error + + *alertListFilter.ScopeEquals, err = SanitizeScope(*alertListFilter.ScopeEquals, 
*alertListFilter.IPEquals, *alertListFilter.RangeEquals) + if err != nil { return err } @@ -378,8 +383,8 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ var err error if !AlertDeleteAll { - if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, - alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { + *alertDeleteFilter.ScopeEquals, err = SanitizeScope(*alertDeleteFilter.ScopeEquals, *alertDeleteFilter.IPEquals, *alertDeleteFilter.RangeEquals) + if err != nil { return err } diff --git a/cmd/crowdsec-cli/clialert/sanitize.go b/cmd/crowdsec-cli/clialert/sanitize.go new file mode 100644 index 00000000000..87b110649da --- /dev/null +++ b/cmd/crowdsec-cli/clialert/sanitize.go @@ -0,0 +1,26 @@ +package clialert + +import ( + "fmt" + "net" + + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// SanitizeScope validates ip and range and sets the scope accordingly to our case convention. +func SanitizeScope(scope, ip, ipRange string) (string, error) { + if ipRange != "" { + _, _, err := net.ParseCIDR(ipRange) + if err != nil { + return "", fmt.Errorf("%s is not a valid range", ipRange) + } + } + + if ip != "" { + if net.ParseIP(ip) == nil { + return "", fmt.Errorf("%s is not a valid ip", ip) + } + } + + return types.NormalizeScope(scope), nil +} diff --git a/cmd/crowdsec-cli/alerts_table.go b/cmd/crowdsec-cli/clialert/table.go similarity index 99% rename from cmd/crowdsec-cli/alerts_table.go rename to cmd/crowdsec-cli/clialert/table.go index 29383457ced..5dec63ec152 100644 --- a/cmd/crowdsec-cli/alerts_table.go +++ b/cmd/crowdsec-cli/clialert/table.go @@ -1,4 +1,4 @@ -package main +package clialert import ( "fmt" diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go similarity index 96% rename from cmd/crowdsec-cli/decisions.go rename to cmd/crowdsec-cli/clidecision/decisions.go index d485c90254f..5ecb3fc3304 100644 --- 
a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -1,4 +1,4 @@ -package main +package clidecision import ( "context" @@ -17,7 +17,9 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -114,12 +116,14 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin return nil } +type configGetter func() *csconfig.Config + type cliDecisions struct { client *apiclient.ApiClient cfg configGetter } -func NewCLIDecisions(cfg configGetter) *cliDecisions { +func New(cfg configGetter) *cliDecisions { return &cliDecisions{ cfg: cfg, } @@ -170,8 +174,9 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { var err error - /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil { + + *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, *filter.IPEquals, *filter.RangeEquals) + if err != nil { return err } @@ -326,8 +331,10 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a stopAt := time.Now().UTC().Format(time.RFC3339) createdAt := time.Now().UTC().Format(time.RFC3339) - /*take care of shorthand options*/ - if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + var err error + + addScope, err = clialert.SanitizeScope(addScope, addIP, addRange) + if err != nil { return err } @@ -381,7 +388,7 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a } alerts = 
append(alerts, &alert) - _, _, err := cli.client.Alerts.Add(context.Background(), alerts) + _, _, err = cli.client.Alerts.Add(context.Background(), alerts) if err != nil { return err } @@ -435,7 +442,8 @@ func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDeci var err error /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil { + *delFilter.ScopeEquals, err = clialert.SanitizeScope(*delFilter.ScopeEquals, *delFilter.IPEquals, *delFilter.RangeEquals) + if err != nil { return err } diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/clidecision/decisions_import.go similarity index 99% rename from cmd/crowdsec-cli/decisions_import.go rename to cmd/crowdsec-cli/clidecision/decisions_import.go index 338c1b7fb3e..10d92f88876 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/clidecision/decisions_import.go @@ -1,4 +1,4 @@ -package main +package clidecision import ( "bufio" @@ -122,8 +122,8 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } var ( - content []byte - fin *os.File + content []byte + fin *os.File ) // set format if the file has a json or csv extension diff --git a/cmd/crowdsec-cli/decisions_table.go b/cmd/crowdsec-cli/clidecision/decisions_table.go similarity index 98% rename from cmd/crowdsec-cli/decisions_table.go rename to cmd/crowdsec-cli/clidecision/decisions_table.go index 02952f93b85..90a0ae1176b 100644 --- a/cmd/crowdsec-cli/decisions_table.go +++ b/cmd/crowdsec-cli/clidecision/decisions_table.go @@ -1,4 +1,4 @@ -package main +package clidecision import ( "fmt" diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 6f8e93e463c..01179cf93be 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,9 +14,11 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clidecision" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" @@ -257,8 +259,8 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(clihub.New(cli.cfg).NewCommand()) cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) + cmd.AddCommand(clidecision.New(cli.cfg).NewCommand()) + cmd.AddCommand(clialert.New(cli.cfg).NewCommand()) cmd.AddCommand(clisimulation.New(cli.cfg).NewCommand()) cmd.AddCommand(clibouncer.New(cli.cfg).NewCommand()) cmd.AddCommand(climachine.New(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go deleted file mode 100644 index c51140836b8..00000000000 --- a/cmd/crowdsec-cli/utils.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "fmt" - "net" - "strings" - - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *string) error { - /*if a range is provided, change the scope*/ - if *ipRange != "" { - _, _, err := net.ParseCIDR(*ipRange) - if err != nil { - return fmt.Errorf("%s isn't a valid range", *ipRange) - } - } - - if *ip != "" { - ipRepr := net.ParseIP(*ip) - if ipRepr == nil { - return fmt.Errorf("%s isn't a valid ip", *ip) - } - } - - // avoid confusion on scope (ip vs Ip and range vs Range) - switch strings.ToLower(*scope) { - case "ip": - *scope = types.Ip - case "range": - 
*scope = types.Range - case "country": - *scope = types.Country - case "as": - *scope = types.AS - } - - return nil -} diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 82dc51d6879..3d4309b1347 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -6,7 +6,6 @@ import ( "net" "net/http" "strconv" - "strings" "time" "github.com/gin-gonic/gin" @@ -124,21 +123,6 @@ func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uin } } -func normalizeScope(scope string) string { - switch strings.ToLower(scope) { - case "ip": - return types.Ip - case "range": - return types.Range - case "as": - return types.AS - case "country": - return types.Country - default: - return scope - } -} - // CreateAlert writes the alerts received in the body to the database func (c *Controller) CreateAlert(gctx *gin.Context) { var input models.AddAlertsRequest @@ -160,12 +144,12 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { for _, alert := range input { // normalize scope for alert.Source and decisions if alert.Source.Scope != nil { - *alert.Source.Scope = normalizeScope(*alert.Source.Scope) + *alert.Source.Scope = types.NormalizeScope(*alert.Source.Scope) } for _, decision := range alert.Decisions { if decision.Scope != nil { - *decision.Scope = normalizeScope(*decision.Scope) + *decision.Scope = types.NormalizeScope(*decision.Scope) } } @@ -296,8 +280,8 @@ func (c *Controller) FindAlerts(gctx *gin.Context) { // FindAlertByID returns the alert associated with the ID func (c *Controller) FindAlertByID(gctx *gin.Context) { alertIDStr := gctx.Param("alert_id") - alertID, err := strconv.Atoi(alertIDStr) + alertID, err := strconv.Atoi(alertIDStr) if err != nil { gctx.JSON(http.StatusBadRequest, gin.H{"message": "alert_id must be valid integer"}) return diff --git a/pkg/types/event.go b/pkg/types/event.go index 76a447bdc8c..e016d0294c4 100644 --- a/pkg/types/event.go +++ 
b/pkg/types/event.go @@ -2,6 +2,7 @@ package types import ( "net" + "strings" "time" "github.com/expr-lang/expr/vm" @@ -19,11 +20,11 @@ const ( // Event is the structure representing a runtime event (log or overflow) type Event struct { /* is it a log or an overflow */ - Type int `yaml:"Type,omitempty" json:"Type,omitempty"` //Can be types.LOG (0) or types.OVFLOW (1) - ExpectMode int `yaml:"ExpectMode,omitempty" json:"ExpectMode,omitempty"` //how to buckets should handle event : types.TIMEMACHINE or types.LIVE + Type int `yaml:"Type,omitempty" json:"Type,omitempty"` // Can be types.LOG (0) or types.OVFLOW (1) + ExpectMode int `yaml:"ExpectMode,omitempty" json:"ExpectMode,omitempty"` // how to buckets should handle event : types.TIMEMACHINE or types.LIVE Whitelisted bool `yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` WhitelistReason string `yaml:"WhitelistReason,omitempty" json:"whitelist_reason,omitempty"` - //should add whitelist reason ? + // should add whitelist reason ? 
/* the current stage of the line being parsed */ Stage string `yaml:"Stage,omitempty" json:"Stage,omitempty"` /* original line (produced by acquisition) */ @@ -36,11 +37,11 @@ type Event struct { Unmarshaled map[string]interface{} `yaml:"Unmarshaled,omitempty" json:"Unmarshaled,omitempty"` /* Overflow */ Overflow RuntimeAlert `yaml:"Overflow,omitempty" json:"Alert,omitempty"` - Time time.Time `yaml:"Time,omitempty" json:"Time,omitempty"` //parsed time `json:"-"` `` + Time time.Time `yaml:"Time,omitempty" json:"Time,omitempty"` // parsed time `json:"-"` `` StrTime string `yaml:"StrTime,omitempty" json:"StrTime,omitempty"` StrTimeFormat string `yaml:"StrTimeFormat,omitempty" json:"StrTimeFormat,omitempty"` MarshaledTime string `yaml:"MarshaledTime,omitempty" json:"MarshaledTime,omitempty"` - Process bool `yaml:"Process,omitempty" json:"Process,omitempty"` //can be set to false to avoid processing line + Process bool `yaml:"Process,omitempty" json:"Process,omitempty"` // can be set to false to avoid processing line Appsec AppsecEvent `yaml:"Appsec,omitempty" json:"Appsec,omitempty"` /* Meta is the only part that will make it to the API - it should be normalized */ Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` @@ -50,7 +51,9 @@ func (e *Event) SetMeta(key string, value string) bool { if e.Meta == nil { e.Meta = make(map[string]string) } + e.Meta[key] = value + return true } @@ -58,7 +61,9 @@ func (e *Event) SetParsed(key string, value string) bool { if e.Parsed == nil { e.Parsed = make(map[string]string) } + e.Parsed[key] = value + return true } @@ -90,11 +95,13 @@ func (e *Event) GetMeta(key string) string { } } } + return "" } func (e *Event) ParseIPSources() []net.IP { var srcs []net.IP + switch e.Type { case LOG: if _, ok := e.Meta["source_ip"]; ok { @@ -105,6 +112,7 @@ func (e *Event) ParseIPSources() []net.IP { srcs = append(srcs, net.ParseIP(k)) } } + return srcs } @@ -131,8 +139,8 @@ type RuntimeAlert struct { Whitelisted bool 
`yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` Reprocess bool `yaml:"Reprocess,omitempty" json:"Reprocess,omitempty"` Sources map[string]models.Source `yaml:"Sources,omitempty" json:"Sources,omitempty"` - Alert *models.Alert `yaml:"Alert,omitempty" json:"Alert,omitempty"` //this one is a pointer to APIAlerts[0] for convenience. - //APIAlerts will be populated at the end when there is more than one source + Alert *models.Alert `yaml:"Alert,omitempty" json:"Alert,omitempty"` // this one is a pointer to APIAlerts[0] for convenience. + // APIAlerts will be populated at the end when there is more than one source APIAlerts []models.Alert `yaml:"APIAlerts,omitempty" json:"APIAlerts,omitempty"` } @@ -141,5 +149,21 @@ func (r RuntimeAlert) GetSources() []string { for key := range r.Sources { ret = append(ret, key) } + return ret } + +func NormalizeScope(scope string) string { + switch strings.ToLower(scope) { + case "ip": + return Ip + case "range": + return Range + case "as": + return AS + case "country": + return Country + default: + return scope + } +} diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index c7ed214ffc9..b892dc84015 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -108,12 +108,12 @@ teardown() { # invalid json rune -1 cscli decisions import -i - <<<'{"blah":"blah"}' --format json assert_stderr --partial 'Parsing json' - assert_stderr --partial 'json: cannot unmarshal object into Go value of type []main.decisionRaw' + assert_stderr --partial 'json: cannot unmarshal object into Go value of type []clidecision.decisionRaw' # json with extra data rune -1 cscli decisions import -i - <<<'{"values":"1.2.3.4","blah":"blah"}' --format json assert_stderr --partial 'Parsing json' - assert_stderr --partial 'json: cannot unmarshal object into Go value of type []main.decisionRaw' + assert_stderr --partial 'json: cannot unmarshal object into Go value of type []clidecision.decisionRaw' #---------- # CSV From 
fb0117e77845904e0b58251d72460b6c3e26d00e Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 3 Sep 2024 14:27:36 +0100 Subject: [PATCH 280/581] enhance: add additional explain options to hubtest (#3162) * enhance: add additional explain options to hubtest * Revert "enhance: add additional explain options to hubtest" This reverts commit b24632f3eb473e3c42885f31827764d2b7eebe2d. * enhance: add additional explain options to hubtest --------- Co-authored-by: marco --- cmd/crowdsec-cli/clihubtest/explain.go | 77 +++++++++++++++++--------- 1 file changed, 51 insertions(+), 26 deletions(-) diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index ecaf520211e..4183b6a515d 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -8,7 +8,52 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/dumps" ) + +func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("can't load test: %+v", err) + } + + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { + return fmt.Errorf("unable to load parser result after run: %w", err) + } + } + + err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { + return fmt.Errorf("unable to load scenario result after run: %w", err) + } + } + + opts := dumps.DumpOpts{ + Details: details, + SkipOk: skipOk, + } + + dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) + 
+ return nil +} + + func (cli *cliHubTest) NewExplainCmd() *cobra.Command { + var ( + details bool + skipOk bool + ) + cmd := &cobra.Command{ Use: "explain", Short: "explain [test_name]", @@ -16,38 +61,18 @@ func (cli *cliHubTest) NewExplainCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { for _, testName := range args { - test, err := HubTest.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("can't load test: %+v", err) + if err := cli.explain(testName, details, skipOk); err != nil { + return err } - err = test.ParserAssert.LoadTest(test.ParserResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { - return fmt.Errorf("unable to load parser result after run: %w", err) - } - } - - err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { - return fmt.Errorf("unable to load scenario result after run: %w", err) - } - } - opts := dumps.DumpOpts{} - dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) } return nil }, } + flags := cmd.Flags() + flags.BoolVarP(&details, "verbose", "v", false, "Display individual changes") + flags.BoolVar(&skipOk, "failures", false, "Only show failed lines") + return cmd } From ace942a36d01e88b387d0b9a7a86864c89767fae Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:31:00 +0200 Subject: [PATCH 281/581] tests: prevent "make bats-fixture" to run if instance-data is locked (#3201) * tests: prevent "make bats-fixture" to run if instance-data is locked * lint --- test/instance-data | 14 
++++++++++++-- test/run-tests | 18 +++++++++--------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/test/instance-data b/test/instance-data index e4e76d3980a..e7fd05a9e54 100755 --- a/test/instance-data +++ b/test/instance-data @@ -1,16 +1,26 @@ #!/usr/bin/env bash +set -eu + +die() { + echo >&2 "$@" + exit 1 +} + #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . ./.environment.sh +if [[ -f "$LOCAL_INIT_DIR/.lock" ]] && [[ "$1" != "unlock" ]]; then + die "init data is locked: are you doing some manual test? if so, please finish what you are doing, run 'instance-data unlock' and retry" +fi + backend_script="./lib/config/config-${CONFIG_BACKEND}" if [[ ! -x "$backend_script" ]]; then - echo "unknown config backend '${CONFIG_BACKEND}'" >&2 - exit 1 + die "unknown config backend '${CONFIG_BACKEND}'" fi exec "$backend_script" "$@" diff --git a/test/run-tests b/test/run-tests index 6fe3bd004e2..957eb663b9c 100755 --- a/test/run-tests +++ b/test/run-tests @@ -10,12 +10,12 @@ die() { # shellcheck disable=SC1007 TEST_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # shellcheck source=./.environment.sh -. "${TEST_DIR}/.environment.sh" +. "$TEST_DIR/.environment.sh" -"${TEST_DIR}/bin/check-requirements" +"$TEST_DIR/bin/check-requirements" echo "Running tests..." -echo "DB_BACKEND: ${DB_BACKEND}" +echo "DB_BACKEND: $DB_BACKEND" if [[ -z "$TEST_COVERAGE" ]]; then echo "Coverage report: no" else @@ -24,23 +24,23 @@ fi [[ -f "$LOCAL_INIT_DIR/.lock" ]] && die "init data is locked: are you doing some manual test? 
if so, please finish what you are doing, run 'instance-data unlock' and retry" -dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" +dump_backend="$(cat "$LOCAL_INIT_DIR/.backend")" if [[ "$DB_BACKEND" != "$dump_backend" ]]; then - die "Can't run with backend '${DB_BACKEND}' because the test data was build with '${dump_backend}'" + die "Can't run with backend '$DB_BACKEND' because the test data was build with '$dump_backend'" fi if [[ $# -ge 1 ]]; then echo "test files: $*" - "${TEST_DIR}/lib/bats-core/bin/bats" \ + "$TEST_DIR/lib/bats-core/bin/bats" \ --jobs 1 \ --timing \ --print-output-on-failure \ "$@" else - echo "test files: ${TEST_DIR}/bats ${TEST_DIR}/dyn-bats" - "${TEST_DIR}/lib/bats-core/bin/bats" \ + echo "test files: $TEST_DIR/bats $TEST_DIR/dyn-bats" + "$TEST_DIR/lib/bats-core/bin/bats" \ --jobs 1 \ --timing \ --print-output-on-failure \ - "${TEST_DIR}/bats" "${TEST_DIR}/dyn-bats" + "$TEST_DIR/bats" "$TEST_DIR/dyn-bats" fi From 4851945a3c807216d0445a2c7ca0c78942ba0dd1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:32:15 +0200 Subject: [PATCH 282/581] fix appsec/tls issues by cloning http transport (#3214) --- pkg/apiclient/client.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 5669fd24786..2cb68f597f3 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -85,6 +85,11 @@ func NewClient(config *Config) (*ApiClient, error) { transport, baseURL := createTransport(config.URL) if transport != nil { t.Transport = transport + } else { + // can be httpmock.MockTransport + if ht, ok := http.DefaultTransport.(*http.Transport); ok { + t.Transport = ht.Clone() + } } t.URL = baseURL @@ -96,8 +101,8 @@ func NewClient(config *Config) (*ApiClient, error) { tlsconfig.Certificates = []tls.Certificate{*Cert} } - if ht, ok := http.DefaultTransport.(*http.Transport); ok { - ht.TLSClientConfig = &tlsconfig + if 
t.Transport != nil { + t.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} @@ -124,6 +129,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt client.Transport = transport } else { if ht, ok := http.DefaultTransport.(*http.Transport); ok { + ht = ht.Clone() tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} tlsconfig.RootCAs = CaCertPool @@ -165,7 +171,8 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { tlsconfig.Certificates = []tls.Certificate{*Cert} } - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig + client.Transport = http.DefaultTransport.(*http.Transport).Clone() + client.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } } else if client.Transport == nil && transport != nil { client.Transport = transport From 57dee1abf9e6f31d70f3556bad57e6d91f163918 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:36:23 +0200 Subject: [PATCH 283/581] fix appsec/tls issues by cloning http transport (#3213) --- pkg/apiclient/client.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 5669fd24786..2cb68f597f3 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -85,6 +85,11 @@ func NewClient(config *Config) (*ApiClient, error) { transport, baseURL := createTransport(config.URL) if transport != nil { t.Transport = transport + } else { + // can be httpmock.MockTransport + if ht, ok := http.DefaultTransport.(*http.Transport); ok { + t.Transport = ht.Clone() + } } t.URL = baseURL @@ -96,8 +101,8 @@ func NewClient(config *Config) (*ApiClient, error) { tlsconfig.Certificates = []tls.Certificate{*Cert} } - if ht, ok := http.DefaultTransport.(*http.Transport); ok { - 
ht.TLSClientConfig = &tlsconfig + if t.Transport != nil { + t.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} @@ -124,6 +129,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt client.Transport = transport } else { if ht, ok := http.DefaultTransport.(*http.Transport); ok { + ht = ht.Clone() tlsconfig := tls.Config{InsecureSkipVerify: InsecureSkipVerify} tlsconfig.RootCAs = CaCertPool @@ -165,7 +171,8 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { tlsconfig.Certificates = []tls.Certificate{*Cert} } - http.DefaultTransport.(*http.Transport).TLSClientConfig = &tlsconfig + client.Transport = http.DefaultTransport.(*http.Transport).Clone() + client.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } } else if client.Transport == nil && transport != nil { client.Transport = transport From c8750f604c1f47adcff8ee7a07b8aca4312a14a4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 9 Sep 2024 11:27:20 +0200 Subject: [PATCH 284/581] refact: pkg/apiclient set and use default user agent (#3219) * default user agent * DRY default user agent * useragent.go * moved to pkg/apiclient/useragent * lint * rename useragent.DefaultUserAgent() -> useragent.Default() --- cmd/crowdsec-cli/clialert/alerts.go | 2 -- cmd/crowdsec-cli/clicapi/capi.go | 3 -- cmd/crowdsec-cli/cliconsole/console.go | 2 -- cmd/crowdsec-cli/clidecision/decisions.go | 2 -- cmd/crowdsec-cli/clilapi/lapi.go | 4 +-- .../clinotifications/notifications.go | 2 -- cmd/crowdsec/lapiclient.go | 2 -- .../loki/internal/lokiclient/loki_client.go | 4 +-- pkg/apiclient/alerts_service_test.go | 17 ++++------- pkg/apiclient/auth_service_test.go | 19 ++++++------ pkg/apiclient/client.go | 23 ++++++++++++--- pkg/apiclient/client_http_test.go | 6 +--- 
pkg/apiclient/client_test.go | 9 ------ pkg/apiclient/decisions_service_test.go | 29 ++++++++++--------- pkg/apiclient/useragent/useragent.go | 9 ++++++ pkg/apiserver/apic.go | 2 -- pkg/apiserver/apic_metrics_test.go | 3 +- pkg/apiserver/apic_test.go | 18 +++++------- pkg/cticlient/client.go | 5 ++-- pkg/cwhub/cwhub.go | 4 +-- pkg/cwversion/version.go | 8 ++--- pkg/metabase/api.go | 4 +-- 22 files changed, 82 insertions(+), 95 deletions(-) create mode 100644 pkg/apiclient/useragent/useragent.go diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 13013153a79..c5e27394f77 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -25,7 +25,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -217,7 +216,6 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 4d658e3a602..49f68dd6b9e 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -20,7 +20,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -77,7 +76,6 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { _, err = 
apiclient.RegisterClient(&apiclient.Config{ MachineID: capiUser, Password: password, - UserAgent: cwversion.UserAgent(), URL: apiurl, VersionPrefix: "v3", }, nil) @@ -168,7 +166,6 @@ func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri MachineID: login, Password: passwd, Scenarios: itemsForAPI, - UserAgent: cwversion.UserAgent(), URL: apiURL, // I don't believe papi is neede to check enrollement // PapiURL: papiURL, diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index d15f25eaf69..e4b4039bdd2 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -24,7 +24,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -124,7 +123,6 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, Scenarios: hub.GetInstalledListForAPI(), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v3", }) diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index 5ecb3fc3304..b82ebe3086e 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -20,7 +20,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -152,7 +151,6 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, 
Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index a6b88101cbf..fa229002512 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -24,7 +24,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/parser" @@ -53,7 +52,7 @@ func queryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri client, err := apiclient.NewDefaultClient(apiURL, LAPIURLPrefix, - cwversion.UserAgent(), + "", nil) if err != nil { return false, err @@ -118,7 +117,6 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string, t _, err = apiclient.RegisterClient(&apiclient.Config{ MachineID: lapiUser, Password: password, - UserAgent: cwversion.UserAgent(), RegistrationToken: token, URL: apiurl, VersionPrefix: LAPIURLPrefix, diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 04be09354c2..87a94243c5d 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -29,7 +29,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csprofiles" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -464,7 +463,6 @@ func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Al client, err := apiclient.NewClient(&apiclient.Config{ MachineID: 
cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index cbafb460042..4556306825c 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -11,7 +11,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -34,7 +33,6 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. MachineID: credentials.Login, Password: password, Scenarios: itemsForAPI, - UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 420da6e391c..846e833abea 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -16,7 +16,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) type LokiClient struct { @@ -319,6 +319,6 @@ func NewLokiClient(config Config) *LokiClient { if config.Username != "" || config.Password != "" { headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) } - headers["User-Agent"] = cwversion.UserAgent() + headers["User-Agent"] = useragent.Default() return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config, requestHeaders: headers} } diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 12ef2d295f4..0d1ff41685f 100644 --- 
a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -14,7 +14,6 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -35,7 +34,6 @@ func TestAlertsListAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -180,16 +178,16 @@ func TestAlertsListAsMachine(t *testing.T) { }, } - //log.Debugf("data : -> %s", spew.Sdump(alerts)) - //log.Debugf("resp : -> %s", spew.Sdump(resp)) - //log.Debugf("expected : -> %s", spew.Sdump(expected)) - //first one returns data + // log.Debugf("data : -> %s", spew.Sdump(alerts)) + // log.Debugf("resp : -> %s", spew.Sdump(resp)) + // log.Debugf("expected : -> %s", spew.Sdump(expected)) + // first one returns data alerts, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, expected, *alerts) - //this one doesn't + // this one doesn't filter := AlertsListOpts{IPEquals: ptr.Of("1.2.3.4")} alerts, resp, err = client.Alerts.List(context.Background(), filter) @@ -214,7 +212,6 @@ func TestAlertsGetAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -360,7 +357,7 @@ func TestAlertsGetAsMachine(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *alerts) - //fail + // fail _, _, err = client.Alerts.GetByID(context.Background(), 2) cstest.RequireErrorMessage(t, err, "API error: object not found") } @@ -388,7 +385,6 @@ func TestAlertsCreateAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: 
"test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -430,7 +426,6 @@ func TestAlertsDeleteAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index 6c9abc0edef..344e377ad0f 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -14,7 +14,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -36,11 +35,13 @@ func initBasicMuxMock(t *testing.T, mux *http.ServeMux, path string) { mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) newStr := buf.String() var payload BasicMockPayload + err := json.Unmarshal([]byte(newStr), &payload) if err != nil || payload.MachineID == "" || payload.Password == "" { log.Printf("Bad payload") @@ -48,8 +49,8 @@ func initBasicMuxMock(t *testing.T, mux *http.ServeMux, path string) { } var responseBody string - responseCode, hasFoundErrorMock := loginsForMockErrorCases[payload.MachineID] + responseCode, hasFoundErrorMock := loginsForMockErrorCases[payload.MachineID] if !hasFoundErrorMock { responseCode = http.StatusOK responseBody = `{"code":200,"expire":"2029-11-30T14:14:24+01:00","token":"toto"}` @@ -76,7 +77,7 @@ func TestWatcherRegister(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} initBasicMuxMock(t, mux, "/watchers") log.Printf("URL is %s", urlx) @@ -87,7 +88,6 @@ func TestWatcherRegister(t 
*testing.T) { clientconfig := Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", } @@ -113,7 +113,7 @@ func TestWatcherAuth(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} initBasicMuxMock(t, mux, "/watchers/login") log.Printf("URL is %s", urlx) @@ -121,11 +121,10 @@ func TestWatcherAuth(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok auth + // ok auth clientConfig := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -174,7 +173,7 @@ func TestWatcherUnregister(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") @@ -184,6 +183,7 @@ func TestWatcherUnregister(t *testing.T) { mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) @@ -206,7 +206,6 @@ func TestWatcherUnregister(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -229,6 +228,7 @@ func TestWatcherEnroll(t *testing.T) { mux.HandleFunc("/watchers/enroll", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) newStr := 
buf.String() @@ -260,7 +260,6 @@ func TestWatcherEnroll(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 2cb68f597f3..02a99037a04 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -12,6 +12,7 @@ import ( "github.com/golang-jwt/jwt/v4" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -66,11 +67,16 @@ type service struct { } func NewClient(config *Config) (*ApiClient, error) { + userAgent := config.UserAgent + if userAgent == "" { + userAgent = useragent.Default() + } + t := &JWTTransport{ MachineID: &config.MachineID, Password: &config.Password, Scenarios: config.Scenarios, - UserAgent: config.UserAgent, + UserAgent: userAgent, VersionPrefix: config.VersionPrefix, UpdateScenario: config.UpdateScenario, RetryConfig: NewRetryConfig( @@ -105,7 +111,7 @@ func NewClient(config *Config) (*ApiClient, error) { t.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } - c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} + c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: userAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -143,6 +149,10 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt } } + if userAgent == "" { + userAgent = useragent.Default() + } + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: userAgent, URLPrefix: prefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) @@ -178,15 +188,20 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { 
client.Transport = transport } - c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + userAgent := config.UserAgent + if userAgent == "" { + userAgent = useragent.Default() + } + + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: userAgent, URLPrefix: config.VersionPrefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) c.Auth = (*AuthService)(&c.common) resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) - /*if we have http status, return it*/ if err != nil { + /*if we have http status, return it*/ if resp != nil && resp.Response != nil { return nil, fmt.Errorf("api register (%s) http %s: %w", c.BaseURL, resp.Response.Status, err) } diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index 4bdfe1d0da5..45cd8410a8e 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -10,22 +10,19 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestNewRequestInvalid(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //missing slash in uri + // missing slash in uri apiURL, err := url.Parse(urlx) require.NoError(t, err) client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -57,7 +54,6 @@ func TestNewRequestTimeout(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index bd83e512afc..e632ff428c0 100644 --- 
a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -17,8 +17,6 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) /*this is a ripoff of google/go-github approach : @@ -97,7 +95,6 @@ func TestNewClientOk(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -134,7 +131,6 @@ func TestNewClientOk_UnixSocket(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -172,7 +168,6 @@ func TestNewClientKo(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -250,7 +245,6 @@ func TestNewClientRegisterKO(t *testing.T) { _, err = RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -281,7 +275,6 @@ func TestNewClientRegisterOK(t *testing.T) { client, err := RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -314,7 +307,6 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { client, err := RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -344,7 +336,6 @@ func TestNewClientBadAnswer(t *testing.T) { _, err = RegisterClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) diff --git a/pkg/apiclient/decisions_service_test.go 
b/pkg/apiclient/decisions_service_test.go index 6942cfc9d85..54c44f43eda 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -13,7 +13,6 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/modelscapi" ) @@ -26,6 +25,7 @@ func TestDecisionsList(t *testing.T) { mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "GET") + if r.URL.RawQuery == "ip=1.2.3.4" { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) @@ -34,14 +34,14 @@ func TestDecisionsList(t *testing.T) { } else { w.WriteHeader(http.StatusOK) w.Write([]byte(`null`)) - //no results + // no results } }) apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -68,7 +68,7 @@ func TestDecisionsList(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) - //Empty return + // Empty return decisionsFilter = DecisionsListOpts{IPEquals: ptr.Of("1.2.3.5")} decisions, resp, err = newcli.Decisions.List(context.Background(), decisionsFilter) require.NoError(t, err) @@ -85,6 +85,7 @@ func TestDecisionsStream(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { if r.URL.RawQuery == "startup=true" { w.WriteHeader(http.StatusOK) @@ -99,6 +100,7 @@ func TestDecisionsStream(t *testing.T) { mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodDelete) + if r.Method == http.MethodDelete { w.WriteHeader(http.StatusOK) } @@ 
-107,7 +109,7 @@ func TestDecisionsStream(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -134,14 +136,14 @@ func TestDecisionsStream(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) - //and second call, we get empty lists + // and second call, we get empty lists decisions, resp, err = newcli.Decisions.GetStream(context.Background(), DecisionsStreamOpts{Startup: false}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Empty(t, decisions.New) assert.Empty(t, decisions.Deleted) - //delete stream + // delete stream resp, err = newcli.Decisions.StopStream(context.Background()) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) @@ -156,6 +158,7 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { if r.URL.RawQuery == "startup=true" { w.WriteHeader(http.StatusOK) @@ -170,7 +173,7 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -220,6 +223,7 @@ func TestDecisionsStreamV3(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], @@ -231,7 +235,7 @@ func TestDecisionsStreamV3(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -305,7 +309,7 @@ 
func TestDecisionsFromBlocklist(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -391,7 +395,7 @@ func TestDeleteDecisions(t *testing.T) { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) w.Write([]byte(`{"nbDeleted":"1"}`)) - //w.Write([]byte(`{"message":"0 deleted alerts"}`)) + // w.Write([]byte(`{"message":"0 deleted alerts"}`)) }) log.Printf("URL is %s", urlx) @@ -402,7 +406,6 @@ func TestDeleteDecisions(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -468,6 +471,7 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { got, err := o.addQueryParamsToURL(baseURLString) cstest.RequireErrorContains(t, err, tt.expectedErr) + if tt.expectedErr != "" { return } @@ -502,7 +506,6 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { // client, err := NewClient(&Config{ // MachineID: "test_login", // Password: "test_password", -// UserAgent: cwversion.UserAgent(), // URL: apiURL, // VersionPrefix: "v1", // }) diff --git a/pkg/apiclient/useragent/useragent.go b/pkg/apiclient/useragent/useragent.go new file mode 100644 index 00000000000..5a62ce1ac06 --- /dev/null +++ b/pkg/apiclient/useragent/useragent.go @@ -0,0 +1,9 @@ +package useragent + +import ( + "github.com/crowdsecurity/go-cs-lib/version" +) + +func Default() string { + return "crowdsec/" + version.String() + "-" + version.System +} diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 5b850cbff0d..73061637ad9 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -23,7 +23,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" 
"github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" @@ -221,7 +220,6 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ MachineID: config.Credentials.Login, Password: password, - UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v3", diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index d1e48ac90a3..78b16f9c8b7 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestAPICSendMetrics(t *testing.T) { @@ -70,7 +69,7 @@ func TestAPICSendMetrics(t *testing.T) { apiClient, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 546a236251f..51887006ad4 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -26,7 +26,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" @@ -676,7 +675,7 @@ func TestAPICWhitelists(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -817,7 +816,7 @@ func TestAPICPullTop(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -832,8 +831,7 @@ func TestAPICPullTop(t *testing.T) { alerts := 
api.dbClient.Ent.Alert.Query().AllX(context.Background()) validDecisions := api.dbClient.Ent.Decision.Query().Where( decision.UntilGT(time.Now())). - AllX(context.Background(), - ) + AllX(context.Background()) decisionScenarioFreq := make(map[string]int) alertScenario := make(map[string]int) @@ -905,7 +903,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -997,7 +995,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -1024,7 +1022,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -1107,7 +1105,7 @@ func TestAPICPush(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -1171,7 +1169,7 @@ func TestAPICPull(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index b817121e222..90112d80abf 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -8,8 +8,9 @@ import ( "net/http" "strings" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) const ( @@ -46,7 +47,7 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map } req.Header.Set("X-Api-Key", c.apiKey) - req.Header.Set("User-Agent", cwversion.UserAgent()) + req.Header.Set("User-Agent", useragent.Default()) resp, err := c.httpClient.Do(req) if err != nil { diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index d8607e7e562..683f1853b43 100644 --- a/pkg/cwhub/cwhub.go +++ 
b/pkg/cwhub/cwhub.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) // hubTransport wraps a Transport to set a custom User-Agent. @@ -16,7 +16,7 @@ type hubTransport struct { } func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { - req.Header.Set("User-Agent", cwversion.UserAgent()) + req.Header.Set("User-Agent", useragent.Default()) return t.RoundTripper.RoundTrip(req) } diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 28d5c2a621c..b208467aef5 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -7,6 +7,8 @@ import ( goversion "github.com/hashicorp/go-version" "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) var ( @@ -28,7 +30,7 @@ func FullString() string { ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) ret += fmt.Sprintf("Platform: %s\n", version.System) ret += fmt.Sprintf("libre2: %s\n", Libre2) - ret += fmt.Sprintf("User-Agent: %s\n", UserAgent()) + ret += fmt.Sprintf("User-Agent: %s\n", useragent.Default()) ret += fmt.Sprintf("Constraint_parser: %s\n", Constraint_parser) ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) @@ -37,10 +39,6 @@ func FullString() string { return ret } -func UserAgent() string { - return "crowdsec/" + version.String() + "-" + version.System -} - // VersionStrip remove the tag from the version string, used to match with a hub branch func VersionStrip() string { ret := strings.Split(version.Version, "~") diff --git a/pkg/metabase/api.go b/pkg/metabase/api.go index 387e8d151e0..08e10188678 100644 --- a/pkg/metabase/api.go +++ b/pkg/metabase/api.go @@ -9,7 +9,7 @@ import ( "github.com/dghubble/sling" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + 
"github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) type MBClient struct { @@ -38,7 +38,7 @@ var ( func NewMBClient(url string) (*MBClient, error) { httpClient := &http.Client{Timeout: 20 * time.Second} return &MBClient{ - CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", cwversion.UserAgent()), + CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", useragent.Default()), Client: httpClient, }, nil } From 4d10e9df00c0abcde9f749db4a546354fd664033 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:32:13 +0200 Subject: [PATCH 285/581] refact: cscli papi (#3222) * cscli papi status - fix nil deref + func test * cscli papi: extract methods status(), sync() * papi status -> stdout * fix nil deref * cscli support dump: include papi status * lint --- cmd/crowdsec-cli/clipapi/papi.go | 144 ++++++++++++++----------- cmd/crowdsec-cli/clisupport/support.go | 28 ++++- cmd/crowdsec-cli/require/require.go | 8 ++ test/bats/04_capi.bats | 15 ++- 4 files changed, 129 insertions(+), 66 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index 9a9e4fcaa8b..0752267707b 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -2,8 +2,10 @@ package clipapi import ( "fmt" + "io" "time" + "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/tomb.v2" @@ -13,9 +15,10 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiserver" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" ) -type configGetter func() *csconfig.Config +type configGetter = func() *csconfig.Config type cliPapi struct { cfg configGetter @@ -46,104 +49,119 @@ func (cli *cliPapi) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewStatusCmd()) - cmd.AddCommand(cli.NewSyncCmd()) + cmd.AddCommand(cli.newStatusCmd()) 
+ cmd.AddCommand(cli.newSyncCmd()) return cmd } -func (cli *cliPapi) NewStatusCmd() *cobra.Command { +func (cli *cliPapi) Status(out io.Writer, db *database.Client) error { + cfg := cli.cfg() + + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + if err != nil { + return fmt.Errorf("unable to initialize API client: %w", err) + } + + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) + if err != nil { + return fmt.Errorf("unable to initialize PAPI client: %w", err) + } + + perms, err := papi.GetPermissions() + if err != nil { + return fmt.Errorf("unable to get PAPI permissions: %w", err) + } + + lastTimestampStr, err := db.GetConfigItem(apiserver.PapiPullKey) + if err != nil { + lastTimestampStr = ptr.Of("never") + } + + // both can and did happen + if lastTimestampStr == nil || *lastTimestampStr == "0001-01-01T00:00:00Z" { + lastTimestampStr = ptr.Of("never") + } + + fmt.Fprint(out, "You can successfully interact with Polling API (PAPI)\n") + fmt.Fprintf(out, "Console plan: %s\n", perms.Plan) + fmt.Fprintf(out, "Last order received: %s\n", *lastTimestampStr) + fmt.Fprint(out, "PAPI subscriptions:\n") + + for _, sub := range perms.Categories { + fmt.Fprintf(out, " - %s\n", sub) + } + + return nil +} + +func (cli *cliPapi) newStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Get status of the Polling API", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - var err error cfg := cli.cfg() db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { return err } - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) - if err != nil { - return fmt.Errorf("unable to initialize API client: %w", err) - } + return cli.Status(color.Output, db) + }, + } - papi, err := apiserver.NewPAPI(apic, db, 
cfg.API.Server.ConsoleConfig, log.GetLevel()) - if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %w", err) - } + return cmd +} - perms, err := papi.GetPermissions() - if err != nil { - return fmt.Errorf("unable to get PAPI permissions: %w", err) - } - var lastTimestampStr *string - lastTimestampStr, err = db.GetConfigItem(apiserver.PapiPullKey) - if err != nil { - lastTimestampStr = ptr.Of("never") - } - log.Infof("You can successfully interact with Polling API (PAPI)") - log.Infof("Console plan: %s", perms.Plan) - log.Infof("Last order received: %s", *lastTimestampStr) +func (cli *cliPapi) sync(out io.Writer, db *database.Client) error { + cfg := cli.cfg() + t := tomb.Tomb{} - log.Infof("PAPI subscriptions:") - for _, sub := range perms.Categories { - log.Infof(" - %s", sub) - } + apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + if err != nil { + return fmt.Errorf("unable to initialize API client: %w", err) + } - return nil - }, + t.Go(apic.Push) + + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) + if err != nil { + return fmt.Errorf("unable to initialize PAPI client: %w", err) } - return cmd + t.Go(papi.SyncDecisions) + + err = papi.PullOnce(time.Time{}, true) + if err != nil { + return fmt.Errorf("unable to sync decisions: %w", err) + } + + log.Infof("Sending acknowledgements to CAPI") + + apic.Shutdown() + papi.Shutdown() + t.Wait() + time.Sleep(5 * time.Second) // FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done + + return nil } -func (cli *cliPapi) NewSyncCmd() *cobra.Command { +func (cli *cliPapi) newSyncCmd() *cobra.Command { cmd := &cobra.Command{ Use: "sync", Short: "Sync with the Polling API, pulling all non-expired orders for the instance", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - var err error cfg := 
cli.cfg() - t := tomb.Tomb{} - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) if err != nil { return err } - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) - if err != nil { - return fmt.Errorf("unable to initialize API client: %w", err) - } - - t.Go(apic.Push) - - papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) - if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %w", err) - } - - t.Go(papi.SyncDecisions) - - err = papi.PullOnce(time.Time{}, true) - if err != nil { - return fmt.Errorf("unable to sync decisions: %w", err) - } - - log.Infof("Sending acknowledgements to CAPI") - - apic.Shutdown() - papi.Shutdown() - t.Wait() - time.Sleep(5 * time.Second) // FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done - - return nil + return cli.sync(color.Output, db) }, } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 55f0ec4b03e..f161c66c802 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -28,6 +28,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -47,6 +48,7 @@ const ( SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" SUPPORT_LAPI_STATUS_PATH = "lapi_status.txt" SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" + SUPPORT_PAPI_STATUS_PATH = "papi_status.txt" SUPPORT_ACQUISITION_DIR = "config/acquis/" SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" SUPPORT_CRASH_DIR = "crash/" @@ -195,9 +197,9 @@ func (cli 
*cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { } out := new(bytes.Buffer) - cm := clibouncer.New(cli.cfg) + cb := clibouncer.New(cli.cfg) - if err := cm.List(out, db); err != nil { + if err := cb.List(out, db); err != nil { return err } @@ -265,6 +267,24 @@ func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { return nil } +func (cli *cliSupport) dumpPAPIStatus(zw *zip.Writer, db *database.Client) error { + log.Info("Collecting PAPI status") + + out := new(bytes.Buffer) + cp := clipapi.New(cli.cfg) + + err := cp.Status(out, db) + if err != nil { + fmt.Fprintf(out, "%s\n", err) + } + + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, SUPPORT_PAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) + + return nil +} + func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { log.Info("Collecting crowdsec config") @@ -517,6 +537,10 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { if err = cli.dumpCAPIStatus(zipWriter, hub); err != nil { log.Warnf("could not collect CAPI status: %s", err) } + + if err = cli.dumpPAPIStatus(zipWriter, db); err != nil { + log.Warnf("could not collect PAPI status: %s", err) + } } if !skipLAPI { diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 15d8bce682d..191eee55bc5 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -34,6 +34,14 @@ func CAPI(c *csconfig.Config) error { } func PAPI(c *csconfig.Config) error { + if err := CAPI(c); err != nil { + return err + } + + if err := CAPIRegistered(c); err != nil { + return err + } + if c.API.Server.OnlineClient.Credentials.PapiURL == "" { return errors.New("no PAPI URL in configuration") } diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index f17ce376d62..7ba6bfa4428 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -46,13 +46,26 @@ setup() { assert_stderr --regexp "no configuration for 
Central API \(CAPI\) in '$(echo $CONFIG_YAML|sed s#//#/#g)'" } -@test "cscli capi status" { +@test "cscli {capi,papi} status" { ./instance-data load config_enable_capi + + # should not panic with no credentials, but return an error + rune -1 cscli papi status + assert_stderr --partial "the Central API (CAPI) must be configured with 'cscli capi register'" + rune -0 cscli capi register --schmilblick githubciXXXXXXXXXXXXXXXXXXXXXXXX rune -1 cscli capi status assert_stderr --partial "no scenarios or appsec-rules installed, abort" + rune -1 cscli papi status + assert_stderr --partial "no PAPI URL in configuration" + + rune -0 cscli console enable console_management + rune -1 cscli papi status + assert_stderr --partial "unable to get PAPI permissions" + assert_stderr --partial "Forbidden for plan" + rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli capi status assert_output --partial "Loaded credentials from" From 57539f61b46ddf8d5f6266a793458d51bea4cd74 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:38:15 +0200 Subject: [PATCH 286/581] refact cscli - don't export functions if not required (#3224) * unexport subcommand constructors * unexport internal methods * lint + rename local variables --- cmd/crowdsec-cli/clialert/alerts.go | 86 +++++++++---------- cmd/crowdsec-cli/clialert/table.go | 2 +- cmd/crowdsec-cli/clihub/item_metrics.go | 26 +++--- cmd/crowdsec-cli/clihub/items.go | 6 +- cmd/crowdsec-cli/clihubtest/clean.go | 2 +- cmd/crowdsec-cli/clihubtest/coverage.go | 2 +- cmd/crowdsec-cli/clihubtest/create.go | 2 +- cmd/crowdsec-cli/clihubtest/eval.go | 2 +- cmd/crowdsec-cli/clihubtest/explain.go | 6 +- cmd/crowdsec-cli/clihubtest/hubtest.go | 16 ++-- cmd/crowdsec-cli/clihubtest/info.go | 2 +- cmd/crowdsec-cli/clihubtest/list.go | 2 +- cmd/crowdsec-cli/clihubtest/run.go | 18 ++-- .../clinotifications/notifications.go | 16 ++-- cmd/crowdsec-cli/clisetup/setup.go | 27 +++--- 
cmd/crowdsec-cli/clisimulation/simulation.go | 12 +-- 16 files changed, 113 insertions(+), 114 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index c5e27394f77..757a84927e5 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -29,7 +29,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func DecisionsFromAlert(alert *models.Alert) string { +func decisionsFromAlert(alert *models.Alert) string { ret := "" decMap := make(map[string]int) @@ -77,7 +77,7 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi *alertItem.Scenario, alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), - DecisionsFromAlert(alertItem), + decisionsFromAlert(alertItem), *alertItem.StartAt, } if printMachine { @@ -227,10 +227,10 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewInspectCmd()) - cmd.AddCommand(cli.NewFlushCmd()) - cmd.AddCommand(cli.NewDeleteCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newInspectCmd()) + cmd.AddCommand(cli.newFlushCmd()) + cmd.AddCommand(cli.newDeleteCmd()) return cmd } @@ -323,7 +323,7 @@ func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, return nil } -func (cli *cliAlerts) NewListCmd() *cobra.Command { +func (cli *cliAlerts) newListCmd() *cobra.Command { alertListFilter := apiclient.AlertsListOpts{ ScopeEquals: new(string), ValueEquals: new(string), @@ -377,53 +377,53 @@ cscli alerts list --type ban`, return cmd } -func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, ActiveDecision *bool, AlertDeleteAll bool, delAlertByID string, contained *bool) error { +func (cli *cliAlerts) delete(delFilter apiclient.AlertsDeleteOpts, activeDecision *bool, deleteAll bool, delAlertByID string, contained *bool) error { var err error - if !AlertDeleteAll { - *alertDeleteFilter.ScopeEquals, err = 
SanitizeScope(*alertDeleteFilter.ScopeEquals, *alertDeleteFilter.IPEquals, *alertDeleteFilter.RangeEquals) + if !deleteAll { + *delFilter.ScopeEquals, err = SanitizeScope(*delFilter.ScopeEquals, *delFilter.IPEquals, *delFilter.RangeEquals) if err != nil { return err } - if ActiveDecision != nil { - alertDeleteFilter.ActiveDecisionEquals = ActiveDecision + if activeDecision != nil { + delFilter.ActiveDecisionEquals = activeDecision } - if *alertDeleteFilter.ScopeEquals == "" { - alertDeleteFilter.ScopeEquals = nil + if *delFilter.ScopeEquals == "" { + delFilter.ScopeEquals = nil } - if *alertDeleteFilter.ValueEquals == "" { - alertDeleteFilter.ValueEquals = nil + if *delFilter.ValueEquals == "" { + delFilter.ValueEquals = nil } - if *alertDeleteFilter.ScenarioEquals == "" { - alertDeleteFilter.ScenarioEquals = nil + if *delFilter.ScenarioEquals == "" { + delFilter.ScenarioEquals = nil } - if *alertDeleteFilter.IPEquals == "" { - alertDeleteFilter.IPEquals = nil + if *delFilter.IPEquals == "" { + delFilter.IPEquals = nil } - if *alertDeleteFilter.RangeEquals == "" { - alertDeleteFilter.RangeEquals = nil + if *delFilter.RangeEquals == "" { + delFilter.RangeEquals = nil } if contained != nil && *contained { - alertDeleteFilter.Contains = new(bool) + delFilter.Contains = new(bool) } limit := 0 - alertDeleteFilter.Limit = &limit + delFilter.Limit = &limit } else { limit := 0 - alertDeleteFilter = apiclient.AlertsDeleteOpts{Limit: &limit} + delFilter = apiclient.AlertsDeleteOpts{Limit: &limit} } var alerts *models.DeleteAlertsResponse if delAlertByID == "" { - alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) + alerts, _, err = cli.client.Alerts.Delete(context.Background(), delFilter) if err != nil { return fmt.Errorf("unable to delete alerts: %w", err) } @@ -439,14 +439,14 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ return nil } -func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { +func (cli 
*cliAlerts) newDeleteCmd() *cobra.Command { var ( - ActiveDecision *bool - AlertDeleteAll bool + activeDecision *bool + deleteAll bool delAlertByID string ) - alertDeleteFilter := apiclient.AlertsDeleteOpts{ + delFilter := apiclient.AlertsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), ScenarioEquals: new(string), @@ -467,12 +467,12 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, Aliases: []string{"remove"}, Args: cobra.ExactArgs(0), PreRunE: func(cmd *cobra.Command, _ []string) error { - if AlertDeleteAll { + if deleteAll { return nil } - if *alertDeleteFilter.ScopeEquals == "" && *alertDeleteFilter.ValueEquals == "" && - *alertDeleteFilter.ScenarioEquals == "" && *alertDeleteFilter.IPEquals == "" && - *alertDeleteFilter.RangeEquals == "" && delAlertByID == "" { + if *delFilter.ScopeEquals == "" && *delFilter.ValueEquals == "" && + *delFilter.ScenarioEquals == "" && *delFilter.IPEquals == "" && + *delFilter.RangeEquals == "" && delAlertByID == "" { _ = cmd.Usage() return errors.New("at least one filter or --all must be specified") } @@ -480,19 +480,19 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.delete(alertDeleteFilter, ActiveDecision, AlertDeleteAll, delAlertByID, contained) + return cli.delete(delFilter, activeDecision, deleteAll, delAlertByID, contained) }, } flags := cmd.Flags() flags.SortFlags = false - flags.StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") - flags.StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - flags.StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. 
crowdsecurity/ssh-bf)") - flags.StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - flags.StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVar(delFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") + flags.StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") + flags.StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") flags.StringVar(&delAlertByID, "id", "", "alert ID") - flags.BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts") + flags.BoolVarP(&deleteAll, "all", "a", false, "delete all alerts") flags.BoolVar(contained, "contained", false, "query decisions contained by range") return cmd @@ -538,7 +538,7 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { return nil } -func (cli *cliAlerts) NewInspectCmd() *cobra.Command { +func (cli *cliAlerts) newInspectCmd() *cobra.Command { var details bool cmd := &cobra.Command{ @@ -561,7 +561,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { return cmd } -func (cli *cliAlerts) NewFlushCmd() *cobra.Command { +func (cli *cliAlerts) newFlushCmd() *cobra.Command { var ( maxItems int maxAge string diff --git a/cmd/crowdsec-cli/clialert/table.go b/cmd/crowdsec-cli/clialert/table.go index 5dec63ec152..1416e1e435c 100644 --- a/cmd/crowdsec-cli/clialert/table.go +++ b/cmd/crowdsec-cli/clialert/table.go @@ -38,7 +38,7 @@ func alertsTable(out io.Writer, wantColor string, alerts *models.GetAlertsRespon *alertItem.Scenario, alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), - DecisionsFromAlert(alertItem), + 
decisionsFromAlert(alertItem), *alertItem.StartAt, } diff --git a/cmd/crowdsec-cli/clihub/item_metrics.go b/cmd/crowdsec-cli/clihub/item_metrics.go index aaee63d1d38..f4af8f635db 100644 --- a/cmd/crowdsec-cli/clihub/item_metrics.go +++ b/cmd/crowdsec-cli/clihub/item_metrics.go @@ -16,22 +16,22 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func ShowMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { +func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: - metrics := GetParserMetric(prometheusURL, hubItem.Name) + metrics := getParserMetric(prometheusURL, hubItem.Name) parserMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.SCENARIOS: - metrics := GetScenarioMetric(prometheusURL, hubItem.Name) + metrics := getScenarioMetric(prometheusURL, hubItem.Name) scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: for _, sub := range hubItem.SubItems() { - if err := ShowMetrics(prometheusURL, sub, wantColor); err != nil { + if err := showMetrics(prometheusURL, sub, wantColor); err != nil { return err } } case cwhub.APPSEC_RULES: - metrics := GetAppsecRuleMetric(prometheusURL, hubItem.Name) + metrics := getAppsecRuleMetric(prometheusURL, hubItem.Name) appsecMetricsTable(color.Output, wantColor, hubItem.Name, metrics) default: // no metrics for this item type } @@ -39,11 +39,11 @@ func ShowMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) er return nil } -// GetParserMetric is a complete rip from prom2json -func GetParserMetric(url string, itemName string) map[string]map[string]int { +// getParserMetric is a complete rip from prom2json +func getParserMetric(url string, itemName string) map[string]map[string]int { stats := make(map[string]map[string]int) - result := GetPrometheusMetric(url) + result := getPrometheusMetric(url) for idx, fam := range result { if !strings.HasPrefix(fam.Name, 
"cs_") { continue @@ -131,7 +131,7 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { return stats } -func GetScenarioMetric(url string, itemName string) map[string]int { +func getScenarioMetric(url string, itemName string) map[string]int { stats := make(map[string]int) stats["instantiation"] = 0 @@ -140,7 +140,7 @@ func GetScenarioMetric(url string, itemName string) map[string]int { stats["pour"] = 0 stats["underflow"] = 0 - result := GetPrometheusMetric(url) + result := getPrometheusMetric(url) for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -195,13 +195,13 @@ func GetScenarioMetric(url string, itemName string) map[string]int { return stats } -func GetAppsecRuleMetric(url string, itemName string) map[string]int { +func getAppsecRuleMetric(url string, itemName string) map[string]int { stats := make(map[string]int) stats["inband_hits"] = 0 stats["outband_hits"] = 0 - results := GetPrometheusMetric(url) + results := getPrometheusMetric(url) for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -260,7 +260,7 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { return stats } -func GetPrometheusMetric(url string) []*prom2json.Family { +func getPrometheusMetric(url string) []*prom2json.Family { mfChan := make(chan *dto.MetricFamily, 1024) // Start with the DefaultTransport for sane defaults. 
diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index 4dd3c46e0f9..0ab89654dac 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -146,7 +146,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st return nil } -func InspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { +func InspectItem(item *cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { switch output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) @@ -174,10 +174,10 @@ func InspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusUR fmt.Println() } - if showMetrics { + if wantMetrics { fmt.Printf("\nCurrent metrics: \n") - if err := ShowMetrics(prometheusURL, item, wantColor); err != nil { + if err := showMetrics(prometheusURL, item, wantColor); err != nil { return err } } diff --git a/cmd/crowdsec-cli/clihubtest/clean.go b/cmd/crowdsec-cli/clihubtest/clean.go index 075d7961d84..e3b40b6bd57 100644 --- a/cmd/crowdsec-cli/clihubtest/clean.go +++ b/cmd/crowdsec-cli/clihubtest/clean.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliHubTest) NewCleanCmd() *cobra.Command { +func (cli *cliHubTest) newCleanCmd() *cobra.Command { cmd := &cobra.Command{ Use: "clean", Short: "clean [test_name]", diff --git a/cmd/crowdsec-cli/clihubtest/coverage.go b/cmd/crowdsec-cli/clihubtest/coverage.go index b6e5b1e9c01..5a4f231caf5 100644 --- a/cmd/crowdsec-cli/clihubtest/coverage.go +++ b/cmd/crowdsec-cli/clihubtest/coverage.go @@ -140,7 +140,7 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp return nil } -func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { +func (cli *cliHubTest) newCoverageCmd() *cobra.Command { var ( showParserCov bool showScenarioCov bool diff --git a/cmd/crowdsec-cli/clihubtest/create.go b/cmd/crowdsec-cli/clihubtest/create.go 
index 523c9740cf7..e0834f7e569 100644 --- a/cmd/crowdsec-cli/clihubtest/create.go +++ b/cmd/crowdsec-cli/clihubtest/create.go @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func (cli *cliHubTest) NewCreateCmd() *cobra.Command { +func (cli *cliHubTest) newCreateCmd() *cobra.Command { var ( ignoreParsers bool labels map[string]string diff --git a/cmd/crowdsec-cli/clihubtest/eval.go b/cmd/crowdsec-cli/clihubtest/eval.go index 204a0bc420e..83e9eae9c15 100644 --- a/cmd/crowdsec-cli/clihubtest/eval.go +++ b/cmd/crowdsec-cli/clihubtest/eval.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliHubTest) NewEvalCmd() *cobra.Command { +func (cli *cliHubTest) newEvalCmd() *cobra.Command { var evalExpression string cmd := &cobra.Command{ diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index 4183b6a515d..dbe10fa7ec0 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -8,7 +8,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/dumps" ) - func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error { test, err := HubTest.LoadTestItem(testName) if err != nil { @@ -39,7 +38,7 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error opts := dumps.DumpOpts{ Details: details, - SkipOk: skipOk, + SkipOk: skipOk, } dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) @@ -47,8 +46,7 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error return nil } - -func (cli *cliHubTest) NewExplainCmd() *cobra.Command { +func (cli *cliHubTest) newExplainCmd() *cobra.Command { var ( details bool skipOk bool diff --git a/cmd/crowdsec-cli/clihubtest/hubtest.go b/cmd/crowdsec-cli/clihubtest/hubtest.go index 22607336177..3420e21e1e2 100644 --- a/cmd/crowdsec-cli/clihubtest/hubtest.go +++ b/cmd/crowdsec-cli/clihubtest/hubtest.go @@ -68,14 +68,14 @@ func (cli 
*cliHubTest) NewCommand() *cobra.Command { cmd.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") cmd.PersistentFlags().BoolVar(&isAppsecTest, "appsec", false, "Command relates to appsec tests") - cmd.AddCommand(cli.NewCreateCmd()) - cmd.AddCommand(cli.NewRunCmd()) - cmd.AddCommand(cli.NewCleanCmd()) - cmd.AddCommand(cli.NewInfoCmd()) - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewCoverageCmd()) - cmd.AddCommand(cli.NewEvalCmd()) - cmd.AddCommand(cli.NewExplainCmd()) + cmd.AddCommand(cli.newCreateCmd()) + cmd.AddCommand(cli.newRunCmd()) + cmd.AddCommand(cli.newCleanCmd()) + cmd.AddCommand(cli.newInfoCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newCoverageCmd()) + cmd.AddCommand(cli.newEvalCmd()) + cmd.AddCommand(cli.newExplainCmd()) return cmd } diff --git a/cmd/crowdsec-cli/clihubtest/info.go b/cmd/crowdsec-cli/clihubtest/info.go index 2e3fd132340..a5d760eea01 100644 --- a/cmd/crowdsec-cli/clihubtest/info.go +++ b/cmd/crowdsec-cli/clihubtest/info.go @@ -10,7 +10,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func (cli *cliHubTest) NewInfoCmd() *cobra.Command { +func (cli *cliHubTest) newInfoCmd() *cobra.Command { cmd := &cobra.Command{ Use: "info", Short: "info [test_name]", diff --git a/cmd/crowdsec-cli/clihubtest/list.go b/cmd/crowdsec-cli/clihubtest/list.go index 76c51927897..3e76824a18e 100644 --- a/cmd/crowdsec-cli/clihubtest/list.go +++ b/cmd/crowdsec-cli/clihubtest/list.go @@ -9,7 +9,7 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliHubTest) NewListCmd() *cobra.Command { +func (cli *cliHubTest) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list", diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 552ee87c16e..57956ce67dc 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -16,15 +16,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func (cli *cliHubTest) 
run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { +func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost string, args []string) error { cfg := cli.cfg() if !runAll && len(args) == 0 { return errors.New("please provide test to run or --all flag") } - hubPtr.NucleiTargetHost = NucleiTargetHost - hubPtr.AppSecHost = AppSecHost + hubPtr.NucleiTargetHost = nucleiTargetHost + hubPtr.AppSecHost = appSecHost if runAll { if err := hubPtr.LoadAllTests(); err != nil { @@ -56,13 +56,13 @@ func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost stri return nil } -func (cli *cliHubTest) NewRunCmd() *cobra.Command { +func (cli *cliHubTest) newRunCmd() *cobra.Command { var ( noClean bool runAll bool forceClean bool - NucleiTargetHost string - AppSecHost string + nucleiTargetHost string + appSecHost string ) cmd := &cobra.Command{ @@ -70,7 +70,7 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { Short: "run [test_name]", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { - return cli.run(runAll, NucleiTargetHost, AppSecHost, args) + return cli.run(runAll, nucleiTargetHost, appSecHost, args) }, PersistentPostRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -187,8 +187,8 @@ func (cli *cliHubTest) NewRunCmd() *cobra.Command { cmd.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") - cmd.Flags().StringVar(&NucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") - cmd.Flags().StringVar(&AppSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") + cmd.Flags().StringVar(&nucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") + cmd.Flags().StringVar(&appSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") 
cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") return cmd diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 87a94243c5d..eb568ca5fa6 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -72,10 +72,10 @@ func (cli *cliNotifications) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewInspectCmd()) - cmd.AddCommand(cli.NewReinjectCmd()) - cmd.AddCommand(cli.NewTestCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newInspectCmd()) + cmd.AddCommand(cli.newReinjectCmd()) + cmd.AddCommand(cli.newTestCmd()) return cmd } @@ -152,7 +152,7 @@ func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, return ncfgs, nil } -func (cli *cliNotifications) NewListCmd() *cobra.Command { +func (cli *cliNotifications) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list notifications plugins", @@ -201,7 +201,7 @@ func (cli *cliNotifications) NewListCmd() *cobra.Command { return cmd } -func (cli *cliNotifications) NewInspectCmd() *cobra.Command { +func (cli *cliNotifications) newInspectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "inspect", Short: "Inspect notifications plugin", @@ -260,7 +260,7 @@ func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args [ return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli cliNotifications) NewTestCmd() *cobra.Command { +func (cli cliNotifications) newTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb @@ -351,7 +351,7 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { return cmd } -func (cli *cliNotifications) NewReinjectCmd() *cobra.Command { +func (cli *cliNotifications) newReinjectCmd() *cobra.Command { var ( alertOverride string alert *models.Alert diff --git a/cmd/crowdsec-cli/clisetup/setup.go 
b/cmd/crowdsec-cli/clisetup/setup.go index 2a1a2bd0560..8aee45b4287 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -39,10 +39,10 @@ func (cli *cliSetup) NewCommand() *cobra.Command { DisableAutoGenTag: true, } - cmd.AddCommand(cli.NewDetectCmd()) - cmd.AddCommand(cli.NewInstallHubCmd()) - cmd.AddCommand(cli.NewDataSourcesCmd()) - cmd.AddCommand(cli.NewValidateCmd()) + cmd.AddCommand(cli.newDetectCmd()) + cmd.AddCommand(cli.newInstallHubCmd()) + cmd.AddCommand(cli.newDataSourcesCmd()) + cmd.AddCommand(cli.newValidateCmd()) return cmd } @@ -76,23 +76,24 @@ func (f *detectFlags) bind(cmd *cobra.Command) { flags.BoolVar(&f.outYaml, "yaml", false, "output yaml, not json") } -func (cli *cliSetup) NewDetectCmd() *cobra.Command { +func (cli *cliSetup) newDetectCmd() *cobra.Command { f := detectFlags{} cmd := &cobra.Command{ Use: "detect", Short: "detect running services, generate a setup file", DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(_ *cobra.Command, args []string) error { return cli.detect(f) }, } f.bind(cmd) + return cmd } -func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { +func (cli *cliSetup) newInstallHubCmd() *cobra.Command { var dryRun bool cmd := &cobra.Command{ @@ -100,7 +101,7 @@ func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { Short: "install items from a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { return cli.install(cmd.Context(), dryRun, args[0]) }, } @@ -111,7 +112,7 @@ func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { return cmd } -func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { +func (cli *cliSetup) newDataSourcesCmd() *cobra.Command { var toDir string cmd := &cobra.Command{ @@ -119,7 +120,7 @@ func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { Short: "generate datasource (acquisition) 
configuration from a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { return cli.dataSources(args[0], toDir) }, } @@ -130,13 +131,13 @@ func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { return cmd } -func (cli *cliSetup) NewValidateCmd() *cobra.Command { +func (cli *cliSetup) newValidateCmd() *cobra.Command { cmd := &cobra.Command{ Use: "validate [setup_file]", Short: "validate a setup file", Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { return cli.validate(args[0]) }, } @@ -165,7 +166,7 @@ func (cli *cliSetup) detect(f detectFlags) error { } if !f.snubSystemd { - _, err := exec.LookPath("systemctl") + _, err = exec.LookPath("systemctl") if err != nil { log.Debug("systemctl not available: snubbing systemd") diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index bf986f82c06..9d9defd78e7 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -55,14 +55,14 @@ cscli simulation disable crowdsecurity/ssh-bf`, cmd.Flags().SortFlags = false cmd.PersistentFlags().SortFlags = false - cmd.AddCommand(cli.NewEnableCmd()) - cmd.AddCommand(cli.NewDisableCmd()) - cmd.AddCommand(cli.NewStatusCmd()) + cmd.AddCommand(cli.newEnableCmd()) + cmd.AddCommand(cli.newDisableCmd()) + cmd.AddCommand(cli.newStatusCmd()) return cmd } -func (cli *cliSimulation) NewEnableCmd() *cobra.Command { +func (cli *cliSimulation) newEnableCmd() *cobra.Command { var forceGlobalSimulation bool cmd := &cobra.Command{ @@ -122,7 +122,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { return cmd } -func (cli *cliSimulation) NewDisableCmd() *cobra.Command { +func (cli *cliSimulation) newDisableCmd() *cobra.Command { var forceGlobalSimulation 
bool cmd := &cobra.Command{ @@ -169,7 +169,7 @@ func (cli *cliSimulation) NewDisableCmd() *cobra.Command { return cmd } -func (cli *cliSimulation) NewStatusCmd() *cobra.Command { +func (cli *cliSimulation) newStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Show simulation mode status", From 584a19f3f18763b4a1ccba0f2c2249754bdc154a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:07:30 +0200 Subject: [PATCH 287/581] fix #3225: info->debug for trigger bucket overflow (#3227) * fix #3225: info->debug for trigger bucket overflow * lint --- pkg/leakybucket/trigger.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go index b6af1431888..7558f696dc7 100644 --- a/pkg/leakybucket/trigger.go +++ b/pkg/leakybucket/trigger.go @@ -16,25 +16,31 @@ func (t *Trigger) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *type // Pour makes the bucket overflow all the time // TriggerPour unconditionally overflows return func(msg types.Event, l *Leaky) *types.Event { + now := time.Now().UTC() + if l.Mode == types.TIMEMACHINE { var d time.Time + err := d.UnmarshalText([]byte(msg.MarshaledTime)) if err != nil { log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) - d = time.Now().UTC() + + d = now } + l.logger.Debugf("yay timemachine overflow time : %s --> %s", d, msg.MarshaledTime) l.Last_ts = d l.First_ts = d l.Ovflw_ts = d } else { - l.Last_ts = time.Now().UTC() - l.First_ts = time.Now().UTC() - l.Ovflw_ts = time.Now().UTC() + l.Last_ts = now + l.First_ts = now + l.Ovflw_ts = now } + l.Total_count = 1 - l.logger.Infof("Bucket overflow") + l.logger.Debug("Bucket overflow") l.Queue.Add(msg) l.Out <- l.Queue From d5c587cf2b7e4ee5ae74b2249016814fc6769e91 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:08:22 +0200 Subject: [PATCH 
288/581] refact / split APIServer.Run() method (#3215) --- pkg/apiserver/apiserver.go | 124 ++++++++++++++++++++----------------- 1 file changed, 67 insertions(+), 57 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 31b31bcb82d..42dcb219379 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -301,6 +301,72 @@ func (s *APIServer) Router() (*gin.Engine, error) { return s.router, nil } +func (s *APIServer) apicPush() error { + if err := s.apic.Push(); err != nil { + log.Errorf("capi push: %s", err) + return err + } + + return nil +} + +func (s *APIServer) apicPull() error { + if err := s.apic.Pull(); err != nil { + log.Errorf("capi pull: %s", err) + return err + } + + return nil +} + +func (s *APIServer) papiPull() error { + if err := s.papi.Pull(); err != nil { + log.Errorf("papi pull: %s", err) + return err + } + + return nil +} + +func (s *APIServer) papiSync() error { + if err := s.papi.SyncDecisions(); err != nil { + log.Errorf("capi decisions sync: %s", err) + return err + } + + return nil +} + +func (s *APIServer) initAPIC() { + s.apic.pushTomb.Go(s.apicPush) + s.apic.pullTomb.Go(s.apicPull) + + // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios + if s.apic.apiClient.IsEnrolled() { + if s.consoleConfig.IsPAPIEnabled() { + if s.papi.URL != "" { + log.Info("Starting PAPI decision receiver") + s.papi.pullTomb.Go(s.papiPull) + s.papi.syncTomb.Go(s.papiSync) + } else { + log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the console. 
Run cscli console enable console_management to add it.") + } + } else { + log.Warningf("Machine is not allowed to synchronize decisions, you can enable it with `cscli console enable console_management`") + } + } + + s.apic.metricsTomb.Go(func() error { + s.apic.SendMetrics(make(chan bool)) + return nil + }) + + s.apic.metricsTomb.Go(func() error { + s.apic.SendUsageMetrics() + return nil + }) +} + func (s *APIServer) Run(apiReady chan bool) error { defer trace.CatchPanic("lapi/runServer") @@ -316,63 +382,7 @@ func (s *APIServer) Run(apiReady chan bool) error { } if s.apic != nil { - s.apic.pushTomb.Go(func() error { - if err := s.apic.Push(); err != nil { - log.Errorf("capi push: %s", err) - return err - } - - return nil - }) - - s.apic.pullTomb.Go(func() error { - if err := s.apic.Pull(); err != nil { - log.Errorf("capi pull: %s", err) - return err - } - - return nil - }) - - // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios - if s.apic.apiClient.IsEnrolled() { - if s.consoleConfig.IsPAPIEnabled() { - if s.papi.URL != "" { - log.Info("Starting PAPI decision receiver") - s.papi.pullTomb.Go(func() error { - if err := s.papi.Pull(); err != nil { - log.Errorf("papi pull: %s", err) - return err - } - - return nil - }) - - s.papi.syncTomb.Go(func() error { - if err := s.papi.SyncDecisions(); err != nil { - log.Errorf("capi decisions sync: %s", err) - return err - } - - return nil - }) - } else { - log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the console. 
Run cscli console enable console_management to add it.") - } - } else { - log.Warningf("Machine is not allowed to synchronize decisions, you can enable it with `cscli console enable console_management`") - } - } - - s.apic.metricsTomb.Go(func() error { - s.apic.SendMetrics(make(chan bool)) - return nil - }) - - s.apic.metricsTomb.Go(func() error { - s.apic.SendUsageMetrics() - return nil - }) + s.initAPIC() } s.httpServerTomb.Go(func() error { From cae76baa3a3af7b6279155c2a95a814de95848b4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:26:39 +0200 Subject: [PATCH 289/581] refact acquisition: build profiles (optionally exclude datasources from final binary) (#3217) example $ make BUILD_PROFILE=minimal or $ make EXCLUDE=datasource_s3,datasource_kinesis --- .github/workflows/go-tests.yml | 5 + Makefile | 63 ++++++ cmd/crowdsec/appsec.go | 18 ++ cmd/crowdsec/appsec_stub.go | 11 ++ cmd/crowdsec/crowdsec.go | 11 +- pkg/acquisition/acquisition.go | 185 +++++++++++------- pkg/acquisition/acquisition_test.go | 27 ++- pkg/acquisition/appsec.go | 12 ++ pkg/acquisition/cloudwatch.go | 12 ++ pkg/acquisition/docker.go | 12 ++ pkg/acquisition/file.go | 12 ++ pkg/acquisition/journalctl.go | 12 ++ pkg/acquisition/k8s.go | 12 ++ pkg/acquisition/kafka.go | 12 ++ pkg/acquisition/kinesis.go | 12 ++ pkg/acquisition/loki.go | 12 ++ .../modules/appsec/appsec_lnx_test.go | 2 +- .../modules/appsec/appsec_win_test.go | 1 - pkg/acquisition/s3.go | 12 ++ pkg/acquisition/syslog.go | 12 ++ pkg/acquisition/wineventlog.go | 12 ++ pkg/cwversion/constraint/constraint.go | 32 +++ pkg/cwversion/version.go | 58 +++--- pkg/leakybucket/manager_load.go | 6 +- pkg/parser/stage.go | 6 +- pkg/setup/detect.go | 6 +- pkg/setup/detect_test.go | 2 +- test/bats.mk | 6 + test/bats/01_crowdsec.bats | 37 +++- 29 files changed, 485 insertions(+), 135 deletions(-) create mode 100644 cmd/crowdsec/appsec.go create mode 100644 cmd/crowdsec/appsec_stub.go create 
mode 100644 pkg/acquisition/appsec.go create mode 100644 pkg/acquisition/cloudwatch.go create mode 100644 pkg/acquisition/docker.go create mode 100644 pkg/acquisition/file.go create mode 100644 pkg/acquisition/journalctl.go create mode 100644 pkg/acquisition/k8s.go create mode 100644 pkg/acquisition/kafka.go create mode 100644 pkg/acquisition/kinesis.go create mode 100644 pkg/acquisition/loki.go create mode 100644 pkg/acquisition/s3.go create mode 100644 pkg/acquisition/syslog.go create mode 100644 pkg/acquisition/wineventlog.go create mode 100644 pkg/cwversion/constraint/constraint.go diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3709c695231..df5c0b4fb88 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -142,6 +142,11 @@ jobs: make build BUILD_STATIC=1 make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + # check if some component stubs are missing + - name: "Build profile: minimal" + run: | + make build BUILD_PROFILE=minimal + - name: Run tests again, dynamic run: | make clean build diff --git a/Makefile b/Makefile index 207b5d610f0..6bd3cbb7980 100644 --- a/Makefile +++ b/Makefile @@ -115,6 +115,68 @@ STRIP_SYMBOLS := -s -w DISABLE_OPTIMIZATION := endif +#-------------------------------------- + +# Handle optional components and build profiles, to save space on the final binaries. + +# Keep it safe for now until we decide how to expand on the idea. Either choose a profile or exclude components manually. +# For example if we want to disable some component by default, or have opt-in components (INCLUDE?). 
+ +ifeq ($(and $(BUILD_PROFILE),$(EXCLUDE)),1) +$(error "Cannot specify both BUILD_PROFILE and EXCLUDE") +endif + +COMPONENTS := \ + datasource_appsec \ + datasource_cloudwatch \ + datasource_docker \ + datasource_file \ + datasource_k8saudit \ + datasource_kafka \ + datasource_journalctl \ + datasource_kinesis \ + datasource_loki \ + datasource_s3 \ + datasource_syslog \ + datasource_wineventlog + +comma := , +space := $(empty) $(empty) + +# Predefined profiles + +# keep only datasource-file +EXCLUDE_MINIMAL := $(subst $(space),$(comma),$(filter-out datasource_file,,$(COMPONENTS))) + +# example +# EXCLUDE_MEDIUM := datasource_kafka,datasource_kinesis,datasource_s3 + +BUILD_PROFILE ?= default + +# Set the EXCLUDE_LIST based on the chosen profile, unless EXCLUDE is already set +ifeq ($(BUILD_PROFILE),minimal) +EXCLUDE ?= $(EXCLUDE_MINIMAL) +else ifneq ($(BUILD_PROFILE),default) +$(error Invalid build profile specified: $(BUILD_PROFILE). Valid profiles are: minimal, default) +endif + +# Create list of excluded components from the EXCLUDE variable +EXCLUDE_LIST := $(subst $(comma),$(space),$(EXCLUDE)) + +INVALID_COMPONENTS := $(filter-out $(COMPONENTS),$(EXCLUDE_LIST)) +ifneq ($(INVALID_COMPONENTS),) +$(error Invalid optional components specified in EXCLUDE: $(INVALID_COMPONENTS). 
Valid components are: $(COMPONENTS)) +endif + +# Convert the excluded components to "no_" form +COMPONENT_TAGS := $(foreach component,$(EXCLUDE_LIST),no_$(component)) + +ifneq ($(COMPONENT_TAGS),) +GO_TAGS := $(GO_TAGS),$(subst $(space),$(comma),$(COMPONENT_TAGS)) +endif + +#-------------------------------------- + export LD_OPTS=-ldflags "$(STRIP_SYMBOLS) $(EXTLDFLAGS) $(LD_OPTS_VARS)" \ -trimpath -tags $(GO_TAGS) $(DISABLE_OPTIMIZATION) @@ -130,6 +192,7 @@ build: build-info crowdsec cscli plugins ## Build crowdsec, cscli and plugins .PHONY: build-info build-info: ## Print build information $(info Building $(BUILD_VERSION) ($(BUILD_TAG)) $(BUILD_TYPE) for $(GOOS)/$(GOARCH)) + $(info Excluded components: $(EXCLUDE_LIST)) ifneq (,$(RE2_FAIL)) $(error $(RE2_FAIL)) diff --git a/cmd/crowdsec/appsec.go b/cmd/crowdsec/appsec.go new file mode 100644 index 00000000000..cb02b137dcd --- /dev/null +++ b/cmd/crowdsec/appsec.go @@ -0,0 +1,18 @@ +// +build !no_datasource_appsec + +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func LoadAppsecRules(hub *cwhub.Hub) error { + if err := appsec.LoadAppsecRules(hub); err != nil { + return fmt.Errorf("while loading appsec rules: %w", err) + } + + return nil +} diff --git a/cmd/crowdsec/appsec_stub.go b/cmd/crowdsec/appsec_stub.go new file mode 100644 index 00000000000..4a65b32a9ad --- /dev/null +++ b/cmd/crowdsec/appsec_stub.go @@ -0,0 +1,11 @@ +//go:build no_datasource_appsec + +package main + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func LoadAppsecRules(hub *cwhub.Hub) error { + return nil +} diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 5aafc6b0dfe..460e8ab4328 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -14,7 +14,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" 
"github.com/crowdsecurity/crowdsec/pkg/alertcontext" - "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -43,12 +42,13 @@ func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, [] return nil, nil, fmt.Errorf("while loading parsers: %w", err) } - if err := LoadBuckets(cConfig, hub); err != nil { + if err = LoadBuckets(cConfig, hub); err != nil { return nil, nil, fmt.Errorf("while loading scenarios: %w", err) } - if err := appsec.LoadAppsecRules(hub); err != nil { - return nil, nil, fmt.Errorf("while loading appsec rules: %w", err) + // can be nerfed by a build flag + if err = LoadAppsecRules(hub); err != nil { + return nil, nil, err } datasources, err := LoadAcquisition(cConfig) @@ -82,6 +82,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return nil }) } + parserWg.Done() return nil @@ -108,6 +109,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return runPour(inputEventChan, holders, buckets, cConfig) }) } + bucketWg.Done() return nil @@ -134,6 +136,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient) }) } + outputWg.Done() return nil diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 634557021f1..38bf228abbc 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -18,18 +18,6 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - appsecacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec" - cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch" - dockeracquisition 
"github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/docker" - fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" - journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl" - kafkaacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kafka" - kinesisacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kinesis" - k8sauditacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kubernetesaudit" - lokiacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" - s3acquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/s3" - syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog" - wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -64,29 +52,72 @@ type DataSource interface { Dump() interface{} } -var AcquisitionSources = map[string]func() DataSource{ - "file": func() DataSource { return &fileacquisition.FileSource{} }, - "journalctl": func() DataSource { return &journalctlacquisition.JournalCtlSource{} }, - "cloudwatch": func() DataSource { return &cloudwatchacquisition.CloudwatchSource{} }, - "syslog": func() DataSource { return &syslogacquisition.SyslogSource{} }, - "docker": func() DataSource { return &dockeracquisition.DockerSource{} }, - "kinesis": func() DataSource { return &kinesisacquisition.KinesisSource{} }, - "wineventlog": func() DataSource { return &wineventlogacquisition.WinEventLogSource{} }, - "kafka": func() DataSource { return &kafkaacquisition.KafkaSource{} }, - "k8s-audit": func() DataSource { return &k8sauditacquisition.KubernetesAuditSource{} }, - "loki": func() DataSource { return &lokiacquisition.LokiSource{} }, - "s3": func() DataSource { return &s3acquisition.S3Source{} }, 
- "appsec": func() DataSource { return &appsecacquisition.AppsecSource{} }, -} +var ( + // We declare everything here so we can tell if they are unsupported, or excluded from the build + AcquisitionSources = map[string]func() DataSource{ + "appsec": nil, + "cloudwatch": nil, + "docker": nil, + "file": nil, + "journalctl": nil, + "k8s-audit": nil, + "kafka": nil, + "kinesis": nil, + "loki": nil, + "s3": nil, + "syslog": nil, + "wineventlog": nil, + } + transformRuntimes = map[string]*vm.Program{} +) -var transformRuntimes = map[string]*vm.Program{} +func GetDataSourceIface(dataSourceType string) (DataSource, error) { + source, ok := AcquisitionSources[dataSourceType] + if !ok { + return nil, fmt.Errorf("unknown data source %s", dataSourceType) + } -func GetDataSourceIface(dataSourceType string) DataSource { - source := AcquisitionSources[dataSourceType] if source == nil { - return nil + return nil, fmt.Errorf("data source %s is not built in this version of crowdsec", dataSourceType) + } + + return source(), nil +} + +// registerDataSource registers a datasource in the AcquisitionSources map. +// It must be called in the init() function of the datasource package, and the datasource name +// must be declared with a nil value in the map, to allow for conditional compilation. +func registerDataSource(dataSourceType string, dsGetter func() DataSource) { + _, ok := AcquisitionSources[dataSourceType] + if !ok { + panic("datasource must be declared in the map: " + dataSourceType) + } + + AcquisitionSources[dataSourceType] = dsGetter +} + +// setupLogger creates a logger for the datasource to use at runtime. 
+func setupLogger(source, name string, level *log.Level) (*log.Entry, error) { + clog := log.New() + if err := types.ConfigureLogger(clog); err != nil { + return nil, fmt.Errorf("while configuring datasource logger: %w", err) + } + + if level != nil { + clog.SetLevel(*level) + } + + fields := log.Fields{ + "type": source, + } + + if name != "" { + fields["name"] = name } - return source() + + subLogger := clog.WithFields(fields) + + return subLogger, nil } // DataSourceConfigure creates and returns a DataSource object from a configuration, @@ -100,33 +131,27 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics if err != nil { return nil, fmt.Errorf("unable to marshal back interface: %w", err) } - if dataSrc := GetDataSourceIface(commonConfig.Source); dataSrc != nil { - /* this logger will then be used by the datasource at runtime */ - clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { - return nil, fmt.Errorf("while configuring datasource logger: %w", err) - } - if commonConfig.LogLevel != nil { - clog.SetLevel(*commonConfig.LogLevel) - } - customLog := log.Fields{ - "type": commonConfig.Source, - } - if commonConfig.Name != "" { - customLog["name"] = commonConfig.Name - } - subLogger := clog.WithFields(customLog) - /* check eventual dependencies are satisfied (ie. 
journald will check journalctl availability) */ - if err := dataSrc.CanRun(); err != nil { - return nil, &DataSourceUnavailableError{Name: commonConfig.Source, Err: err} - } - /* configure the actual datasource */ - if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { - return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) - } - return &dataSrc, nil + + dataSrc, err := GetDataSourceIface(commonConfig.Source) + if err != nil { + return nil, err + } + + subLogger, err := setupLogger(commonConfig.Source, commonConfig.Name, commonConfig.LogLevel) + if err != nil { + return nil, err + } + + /* check eventual dependencies are satisfied (ie. journald will check journalctl availability) */ + if err := dataSrc.CanRun(); err != nil { + return nil, &DataSourceUnavailableError{Name: commonConfig.Source, Err: err} + } + /* configure the actual datasource */ + if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { + return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) } - return nil, fmt.Errorf("cannot find source %s", commonConfig.Source) + + return &dataSrc, nil } // detectBackwardCompatAcquis: try to magically detect the type for backward compat (type was not mandatory then) @@ -134,12 +159,15 @@ func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string { if _, ok := sub.Config["filename"]; ok { return "file" } + if _, ok := sub.Config["filenames"]; ok { return "file" } + if _, ok := sub.Config["journalctl_filter"]; ok { return "journalctl" } + return "" } @@ -150,29 +178,35 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr if len(frags) == 1 { return nil, fmt.Errorf("%s isn't valid dsn (no protocol)", dsn) } - dataSrc := GetDataSourceIface(frags[0]) - if dataSrc == nil { - return nil, fmt.Errorf("no acquisition for protocol %s://", frags[0]) + + dataSrc, err := GetDataSourceIface(frags[0]) + if err != 
nil { + return nil, fmt.Errorf("no acquisition for protocol %s:// - %w", frags[0], err) } - /* this logger will then be used by the datasource at runtime */ - clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { - return nil, fmt.Errorf("while configuring datasource logger: %w", err) + + subLogger, err := setupLogger(dsn, "", nil) + if err != nil { + return nil, err } - subLogger := clog.WithField("type", dsn) + uniqueId := uuid.NewString() + if transformExpr != "" { vm, err := expr.Compile(transformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { return nil, fmt.Errorf("while compiling transform expression '%s': %w", transformExpr, err) } + transformRuntimes[uniqueId] = vm } - err := dataSrc.ConfigureByDSN(dsn, labels, subLogger, uniqueId) + + err = dataSrc.ConfigureByDSN(dsn, labels, subLogger, uniqueId) if err != nil { return nil, fmt.Errorf("while configuration datasource for %s: %w", dsn, err) } + sources = append(sources, dataSrc) + return sources, nil } @@ -219,27 +253,31 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig break } - //for backward compat ('type' was not mandatory, detect it) + // for backward compat ('type' was not mandatory, detect it) if guessType := detectBackwardCompatAcquis(sub); guessType != "" { sub.Source = guessType } - //it's an empty item, skip it + // it's an empty item, skip it if len(sub.Labels) == 0 { if sub.Source == "" { log.Debugf("skipping empty item in %s", acquisFile) continue } if sub.Source != "docker" { - //docker is the only source that can be empty + // docker is the only source that can be empty return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) } } if sub.Source == "" { return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx) } - if GetDataSourceIface(sub.Source) == nil { - return nil, fmt.Errorf("unknown data source %s in %s (position: %d)", 
sub.Source, acquisFile, idx) + + // pre-check that the source is valid + _, err := GetDataSourceIface(sub.Source) + if err != nil { + return nil, fmt.Errorf("in file %s (position: %d) - %w", acquisFile, idx, err) } + uniqueId := uuid.NewString() sub.UniqueId = uniqueId src, err := DataSourceConfigure(sub, metrics_level) @@ -341,7 +379,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb } for i := range len(sources) { - subsrc := sources[i] //ensure its a copy + subsrc := sources[i] // ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) AcquisTomb.Go(func() error { @@ -369,7 +407,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb err = subsrc.OneShotAcquisition(outChan, AcquisTomb) } if err != nil { - //if one of the acqusition returns an error, we kill the others to properly shutdown + // if one of the acqusition returns an error, we kill the others to properly shutdown AcquisTomb.Kill(err) } return nil @@ -378,5 +416,6 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb /*return only when acquisition is over (cat) or never (tail)*/ err := AcquisTomb.Wait() + return err } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index a5eecbc20ed..e39199f9cdb 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -79,13 +79,8 @@ func (f *MockSourceCantRun) GetName() string { return "mock_cant_run" } // appendMockSource is only used to add mock source for tests func appendMockSource() { - if GetDataSourceIface("mock") == nil { - AcquisitionSources["mock"] = func() DataSource { return &MockSource{} } - } - - if GetDataSourceIface("mock_cant_run") == nil { - AcquisitionSources["mock_cant_run"] = func() DataSource { return &MockSourceCantRun{} } - } + AcquisitionSources["mock"] = func() DataSource { return &MockSource{} } + AcquisitionSources["mock_cant_run"] = func() 
DataSource { return &MockSourceCantRun{} } } func TestDataSourceConfigure(t *testing.T) { @@ -150,7 +145,7 @@ labels: log_level: debug source: tutu `, - ExpectedError: "cannot find source tutu", + ExpectedError: "unknown data source tutu", }, { TestName: "mismatch_config", @@ -184,6 +179,7 @@ wowo: ajsajasjas yaml.Unmarshal([]byte(tc.String), &common) ds, err := DataSourceConfigure(common, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.ExpectedError) + if tc.ExpectedError != "" { return } @@ -270,7 +266,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ AcquisitionFiles: []string{"test_files/bad_source.yaml"}, }, - ExpectedError: "unknown data source does_not_exist in test_files/bad_source.yaml", + ExpectedError: "in file test_files/bad_source.yaml (position: 0) - unknown data source does_not_exist", }, { TestName: "invalid_filetype_config", @@ -284,6 +280,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { t.Run(tc.TestName, func(t *testing.T) { dss, err := LoadAcquisitionFromFile(&tc.Config, nil) cstest.RequireErrorContains(t, err, tc.ExpectedError) + if tc.ExpectedError != "" { return } @@ -329,6 +326,7 @@ func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) erro return nil } + func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return errors.New("can't run in tail") } @@ -367,12 +365,14 @@ func (f *MockTail) GetMode() string { return "tail" } func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { return errors.New("can't run in cat mode") } + func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt } + <-t.Dying() return nil @@ -386,7 +386,7 @@ func (f *MockTail) ConfigureByDSN(string, map[string]string, *log.Entry, string) } func (f *MockTail) GetUuid() string { return "" } -//func StartAcquisition(sources []DataSource, output 
chan types.Event, AcquisTomb *tomb.Tomb) error { +// func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { func TestStartAcquisitionCat(t *testing.T) { sources := []DataSource{ @@ -456,6 +456,7 @@ func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) evt.Line.Src = "test" out <- evt } + t.Kill(errors.New("got error (tomb)")) return errors.New("got error") @@ -485,7 +486,7 @@ READLOOP: } } assert.Equal(t, 10, count) - //acquisTomb.Kill(nil) + // acquisTomb.Kill(nil) time.Sleep(1 * time.Second) cstest.RequireErrorContains(t, acquisTomb.Err(), "got error (tomb)") } @@ -542,9 +543,7 @@ func TestConfigureByDSN(t *testing.T) { }, } - if GetDataSourceIface("mockdsn") == nil { - AcquisitionSources["mockdsn"] = func() DataSource { return &MockSourceByDSN{} } - } + AcquisitionSources["mockdsn"] = func() DataSource { return &MockSourceByDSN{} } for _, tc := range tests { t.Run(tc.dsn, func(t *testing.T) { diff --git a/pkg/acquisition/appsec.go b/pkg/acquisition/appsec.go new file mode 100644 index 00000000000..81616d3d2b8 --- /dev/null +++ b/pkg/acquisition/appsec.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_appsec + +package acquisition + +import ( + appsecacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("appsec", func() DataSource { return &appsecacquisition.AppsecSource{} }) +} diff --git a/pkg/acquisition/cloudwatch.go b/pkg/acquisition/cloudwatch.go new file mode 100644 index 00000000000..e6b3d3e3e53 --- /dev/null +++ b/pkg/acquisition/cloudwatch.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_cloudwatch + +package acquisition + +import ( + cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("cloudwatch", func() DataSource { return &cloudwatchacquisition.CloudwatchSource{} }) +} diff --git 
a/pkg/acquisition/docker.go b/pkg/acquisition/docker.go new file mode 100644 index 00000000000..3bf792a039a --- /dev/null +++ b/pkg/acquisition/docker.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_docker + +package acquisition + +import ( + dockeracquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/docker" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("docker", func() DataSource { return &dockeracquisition.DockerSource{} }) +} diff --git a/pkg/acquisition/file.go b/pkg/acquisition/file.go new file mode 100644 index 00000000000..1ff2e4a3c0e --- /dev/null +++ b/pkg/acquisition/file.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_file + +package acquisition + +import ( + fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("file", func() DataSource { return &fileacquisition.FileSource{} }) +} diff --git a/pkg/acquisition/journalctl.go b/pkg/acquisition/journalctl.go new file mode 100644 index 00000000000..691f961ae77 --- /dev/null +++ b/pkg/acquisition/journalctl.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_journalctl + +package acquisition + +import ( + journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("journalctl", func() DataSource { return &journalctlacquisition.JournalCtlSource{} }) +} diff --git a/pkg/acquisition/k8s.go b/pkg/acquisition/k8s.go new file mode 100644 index 00000000000..cb9446be285 --- /dev/null +++ b/pkg/acquisition/k8s.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_k8saudit + +package acquisition + +import ( + k8sauditacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kubernetesaudit" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("k8s-audit", func() DataSource { return &k8sauditacquisition.KubernetesAuditSource{} }) +} diff --git a/pkg/acquisition/kafka.go 
b/pkg/acquisition/kafka.go new file mode 100644 index 00000000000..7d315d87feb --- /dev/null +++ b/pkg/acquisition/kafka.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_kafka + +package acquisition + +import ( + kafkaacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kafka" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("kafka", func() DataSource { return &kafkaacquisition.KafkaSource{} }) +} diff --git a/pkg/acquisition/kinesis.go b/pkg/acquisition/kinesis.go new file mode 100644 index 00000000000..b41372e7fb9 --- /dev/null +++ b/pkg/acquisition/kinesis.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_kinesis + +package acquisition + +import ( + kinesisacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kinesis" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("kinesis", func() DataSource { return &kinesisacquisition.KinesisSource{} }) +} diff --git a/pkg/acquisition/loki.go b/pkg/acquisition/loki.go new file mode 100644 index 00000000000..1eed6686591 --- /dev/null +++ b/pkg/acquisition/loki.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_loki + +package acquisition + +import ( + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("loki", func() DataSource { return &loki.LokiSource{} }) +} diff --git a/pkg/acquisition/modules/appsec/appsec_lnx_test.go b/pkg/acquisition/modules/appsec/appsec_lnx_test.go index 3e40a1f970c..61dfc536f5e 100644 --- a/pkg/acquisition/modules/appsec/appsec_lnx_test.go +++ b/pkg/acquisition/modules/appsec/appsec_lnx_test.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package appsecacquisition @@ -16,6 +15,7 @@ import ( func TestAppsecRuleTransformsOthers(t *testing.T) { log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ { name: "normalizepath", diff --git a/pkg/acquisition/modules/appsec/appsec_win_test.go b/pkg/acquisition/modules/appsec/appsec_win_test.go index 
e85d75df251..a6b8f3a0340 100644 --- a/pkg/acquisition/modules/appsec/appsec_win_test.go +++ b/pkg/acquisition/modules/appsec/appsec_win_test.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package appsecacquisition diff --git a/pkg/acquisition/s3.go b/pkg/acquisition/s3.go new file mode 100644 index 00000000000..73343b0408d --- /dev/null +++ b/pkg/acquisition/s3.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_s3 + +package acquisition + +import ( + s3acquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/s3" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("s3", func() DataSource { return &s3acquisition.S3Source{} }) +} diff --git a/pkg/acquisition/syslog.go b/pkg/acquisition/syslog.go new file mode 100644 index 00000000000..f62cc23b916 --- /dev/null +++ b/pkg/acquisition/syslog.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_syslog + +package acquisition + +import ( + syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("syslog", func() DataSource { return &syslogacquisition.SyslogSource{} }) +} diff --git a/pkg/acquisition/wineventlog.go b/pkg/acquisition/wineventlog.go new file mode 100644 index 00000000000..0c4889a3f5c --- /dev/null +++ b/pkg/acquisition/wineventlog.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_wineventlog + +package acquisition + +import ( + wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("wineventlog", func() DataSource { return &wineventlogacquisition.WinEventLogSource{} }) +} diff --git a/pkg/cwversion/constraint/constraint.go b/pkg/cwversion/constraint/constraint.go new file mode 100644 index 00000000000..67593f9ebbc --- /dev/null +++ b/pkg/cwversion/constraint/constraint.go @@ -0,0 +1,32 @@ +package constraint + +import ( + "fmt" + + goversion "github.com/hashicorp/go-version" +) + +const ( 
+ Parser = ">= 1.0, <= 3.0" + Scenario = ">= 1.0, <= 3.0" + API = "v1" + Acquis = ">= 1.0, < 2.0" +) + +func Satisfies(strvers string, constraint string) (bool, error) { + vers, err := goversion.NewVersion(strvers) + if err != nil { + return false, fmt.Errorf("failed to parse '%s': %w", strvers, err) + } + + constraints, err := goversion.NewConstraint(constraint) + if err != nil { + return false, fmt.Errorf("failed to parse constraint '%s'", constraint) + } + + if !constraints.Check(vers) { + return false, nil + } + + return true, nil +} diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index b208467aef5..867098e7d5a 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -4,11 +4,12 @@ import ( "fmt" "strings" - goversion "github.com/hashicorp/go-version" - + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" ) var ( @@ -16,14 +17,19 @@ var ( Libre2 = "WebAssembly" ) -const ( - Constraint_parser = ">= 1.0, <= 3.0" - Constraint_scenario = ">= 1.0, <= 3.0" - Constraint_api = "v1" - Constraint_acquis = ">= 1.0, < 2.0" -) - func FullString() string { + dsBuilt := []string{} + dsExcluded := []string{} + + for _, ds := range maptools.SortedKeys(acquisition.AcquisitionSources) { + if acquisition.AcquisitionSources[ds] != nil { + dsBuilt = append(dsBuilt, ds) + continue + } + + dsExcluded = append(dsExcluded, ds) + } + ret := fmt.Sprintf("version: %s\n", version.String()) ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) @@ -31,10 +37,18 @@ func FullString() string { ret += fmt.Sprintf("Platform: %s\n", version.System) ret += fmt.Sprintf("libre2: %s\n", Libre2) ret += fmt.Sprintf("User-Agent: %s\n", useragent.Default()) - ret += fmt.Sprintf("Constraint_parser: %s\n", 
Constraint_parser) - ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) - ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) - ret += fmt.Sprintf("Constraint_acquis: %s\n", Constraint_acquis) + ret += fmt.Sprintf("Constraint_parser: %s\n", constraint.Parser) + ret += fmt.Sprintf("Constraint_scenario: %s\n", constraint.Scenario) + ret += fmt.Sprintf("Constraint_api: %s\n", constraint.API) + ret += fmt.Sprintf("Constraint_acquis: %s\n", constraint.Acquis) + + if len(dsBuilt) > 0 { + ret += fmt.Sprintf("Built data sources: %s\n", strings.Join(dsBuilt, ", ")) + } + + if len(dsExcluded) > 0 { + ret += fmt.Sprintf("Excluded data sources: %s\n", strings.Join(dsExcluded, ", ")) + } return ret } @@ -46,21 +60,3 @@ func VersionStrip() string { return ret[0] } - -func Satisfies(strvers string, constraint string) (bool, error) { - vers, err := goversion.NewVersion(strvers) - if err != nil { - return false, fmt.Errorf("failed to parse '%s': %w", strvers, err) - } - - constraints, err := goversion.NewConstraint(constraint) - if err != nil { - return false, fmt.Errorf("failed to parse constraint '%s'", constraint) - } - - if !constraints.Check(vers) { - return false, nil - } - - return true, nil -} diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 6055a5308b5..1ae70fbfab3 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -292,13 +292,13 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.FormatVersion = "1.0" } - ok, err := 
cwversion.Satisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario) + ok, err := constraint.Satisfies(bucketFactory.FormatVersion, constraint.Scenario) if err != nil { return nil, nil, fmt.Errorf("failed to check version: %w", err) } if !ok { - log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, cwversion.Constraint_scenario) + log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, constraint.Scenario) continue } diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index fe538023b61..b98db350254 100644 --- a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -21,7 +21,7 @@ import ( log "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v2" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" ) @@ -85,12 +85,12 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ( log.Tracef("no version in %s, assuming '1.0'", node.Name) node.FormatVersion = "1.0" } - ok, err := cwversion.Satisfies(node.FormatVersion, cwversion.Constraint_parser) + ok, err := constraint.Satisfies(node.FormatVersion, constraint.Parser) if err != nil { return nil, fmt.Errorf("failed to check version : %s", err) } if !ok { - log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, cwversion.Constraint_parser) + log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, constraint.Parser) continue } diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 55af951bf89..01368091a6b 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -73,9 +73,9 @@ func validateDataSource(opaqueDS DataSourceItem) error { // source must be known - ds := acquisition.GetDataSourceIface(commonDS.Source) - if ds == nil { - return fmt.Errorf("unknown source 
'%s'", commonDS.Source) + ds, err := acquisition.GetDataSourceIface(commonDS.Source) + if err != nil { + return err } // unmarshal and validate the rest with the specific implementation diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index c744e7d6796..6f61b5dac78 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -871,7 +871,7 @@ func TestDetectDatasourceValidation(t *testing.T) { datasource: source: wombat`, expected: setup.Setup{Setup: []setup.ServiceSetup{}}, - expectedErr: "invalid datasource for foobar: unknown source 'wombat'", + expectedErr: "invalid datasource for foobar: unknown data source wombat", }, { name: "source is misplaced", config: ` diff --git a/test/bats.mk b/test/bats.mk index 8f507cb659b..631cc55579b 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -38,6 +38,7 @@ define ENV := export TEST_DIR="$(TEST_DIR)" export LOCAL_DIR="$(LOCAL_DIR)" export BIN_DIR="$(BIN_DIR)" +# append .min to the binary names to use the minimal profile export CROWDSEC="$(CROWDSEC)" export CSCLI="$(CSCLI)" export CONFIG_YAML="$(CONFIG_DIR)/config.yaml" @@ -75,6 +76,11 @@ bats-update-tools: ## Install/update tools required for functional tests # Build and installs crowdsec in a local directory. Rebuilds if already exists. 
bats-build: bats-environment ## Build binaries for functional tests @$(MKDIR) $(BIN_DIR) $(LOG_DIR) $(PID_DIR) $(BATS_PLUGIN_DIR) + # minimal profile + @$(MAKE) build DEBUG=1 TEST_COVERAGE=$(TEST_COVERAGE) DEFAULT_CONFIGDIR=$(CONFIG_DIR) DEFAULT_DATADIR=$(DATA_DIR) BUILD_PROFILE=minimal + @install -m 0755 cmd/crowdsec/crowdsec $(BIN_DIR)/crowdsec.min + @install -m 0755 cmd/crowdsec-cli/cscli $(BIN_DIR)/cscli.min + # default profile @$(MAKE) build DEBUG=1 TEST_COVERAGE=$(TEST_COVERAGE) DEFAULT_CONFIGDIR=$(CONFIG_DIR) DEFAULT_DATADIR=$(DATA_DIR) @install -m 0755 cmd/crowdsec/crowdsec cmd/crowdsec-cli/cscli $(BIN_DIR)/ @install -m 0755 cmd/notification-*/notification-* $(BATS_PLUGIN_DIR)/ diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 83072b0f159..aa5830a6bae 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -199,7 +199,42 @@ teardown() { assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" } -@test "crowdsec (disabled datasources)" { +@test "crowdsec (datasource not built)" { + config_set '.common.log_media="stdout"' + + # a datasource cannot run - it's not built in the log processor executable + + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + mkdir -p "$ACQUIS_DIR" + cat >"$ACQUIS_DIR"/foo.yaml <<-EOT + source: journalctl + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + #shellcheck disable=SC2016 + rune -1 wait-for \ + --err "crowdsec init: while loading acquisition config: in file $ACQUIS_DIR/foo.yaml (position: 0) - data source journalctl is not built in this version of crowdsec" \ + env PATH='' "$CROWDSEC".min + + # auto-detection of journalctl_filter still works + cat >"$ACQUIS_DIR"/foo.yaml <<-EOT + source: whatever + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + #shellcheck disable=SC2016 + rune -1 wait-for \ + --err "crowdsec init: while loading acquisition config: in 
file $ACQUIS_DIR/foo.yaml (position: 0) - data source journalctl is not built in this version of crowdsec" \ + env PATH='' "$CROWDSEC".min +} + +@test "crowdsec (disabled datasource)" { if is_package_testing; then # we can't hide journalctl in package testing # because crowdsec is run from systemd From 6810b41dd872670d58e028b1cfa4d12bffc8b19b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:28:16 +0200 Subject: [PATCH 290/581] refact pkg/database: context propagation (start) (#3226) * refact pkg/database: context propagation (part) * more context propagation (usagemetrics) * propagate errors when updating metrics --- cmd/crowdsec/metrics.go | 6 ++++-- pkg/apiserver/controllers/v1/usagemetrics.go | 15 ++++++++------- pkg/database/alerts.go | 4 +--- pkg/database/bouncers.go | 5 +++-- pkg/database/decisions.go | 5 +++-- pkg/database/machines.go | 5 +++-- pkg/database/metrics.go | 5 +++-- 7 files changed, 25 insertions(+), 20 deletions(-) diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index d3c6e172091..ff280fc3512 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -118,7 +118,9 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha return } - decisions, err := dbClient.QueryDecisionCountByScenario() + ctx := r.Context() + + decisions, err := dbClient.QueryDecisionCountByScenario(ctx) if err != nil { log.Errorf("Error querying decisions for metrics: %v", err) next.ServeHTTP(w, r) @@ -138,7 +140,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha "include_capi": {"false"}, } - alerts, err := dbClient.AlertsCountPerScenario(alertsFilter) + alerts, err := dbClient.AlertsCountPerScenario(ctx, alertsFilter) if err != nil { log.Errorf("Error querying alerts for metrics: %v", err) next.ServeHTTP(w, r) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 
74f27bb6cf4..27b1b819a54 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -1,6 +1,7 @@ package v1 import ( + "context" "encoding/json" "errors" "net/http" @@ -18,17 +19,15 @@ import ( ) // updateBaseMetrics updates the base metrics for a machine or bouncer -func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { +func (c *Controller) updateBaseMetrics(ctx context.Context, machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { switch { case machineID != "": - c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems, datasources) + return c.DBClient.MachineUpdateBaseMetrics(ctx, machineID, baseMetrics, hubItems, datasources) case bouncer != nil: - c.DBClient.BouncerUpdateBaseMetrics(bouncer.Name, bouncer.Type, baseMetrics) + return c.DBClient.BouncerUpdateBaseMetrics(ctx, bouncer.Name, bouncer.Type, baseMetrics) default: return errors.New("no machineID or bouncerName set") } - - return nil } // UsageMetrics receives metrics from log processors and remediation components @@ -172,7 +171,9 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { } } - err := c.updateBaseMetrics(machineID, bouncer, baseMetrics, hubItems, datasources) + ctx := gctx.Request.Context() + + err := c.updateBaseMetrics(ctx, machineID, bouncer, baseMetrics, hubItems, datasources) if err != nil { logger.Errorf("Failed to update base metrics: %s", err) c.HandleDBErrors(gctx, err) @@ -190,7 +191,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { receivedAt := time.Now().UTC() - if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { + if _, err := c.DBClient.CreateMetric(ctx, generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { 
logger.Error(err) c.HandleDBErrors(gctx, err) diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 0f6d87fb1b6..3e3e480c7d6 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -941,14 +941,12 @@ func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]str return alerts.Where(preds...), nil } -func (c *Client) AlertsCountPerScenario(filters map[string][]string) (map[string]int, error) { +func (c *Client) AlertsCountPerScenario(ctx context.Context, filters map[string][]string) (map[string]int, error) { var res []struct { Scenario string Count int } - ctx := context.TODO() - query := c.Ent.Alert.Query() query, err := BuildAlertRequestFromFilter(query, filters) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index f79e9580afe..a7378bbb203 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strings" "time" @@ -20,7 +21,7 @@ func (e *BouncerNotFoundError) Error() string { return fmt.Sprintf("'%s' does not exist", e.BouncerName) } -func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { +func (c *Client) BouncerUpdateBaseMetrics(ctx context.Context, bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -32,7 +33,7 @@ func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string SetOsversion(*os.Version). SetFeatureflags(features). SetType(bouncerType). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update base bouncer metrics in database: %w", err) } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index fc582247e59..5fd4757c883 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strconv" "strings" @@ -173,7 +174,7 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( return data, nil } -func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) { +func (c *Client) QueryDecisionCountByScenario(ctx context.Context) ([]*DecisionsByScenario, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -186,7 +187,7 @@ func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) var r []*DecisionsByScenario - err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(c.CTX, &r) + err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(ctx, &r) if err != nil { c.Log.Warningf("QueryDecisionCountByScenario : %s", err) return nil, errors.Wrap(QueryFail, "count all decisions with filters") diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 3c8cbabbfa7..27d737e625e 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strings" "time" @@ -29,7 +30,7 @@ func (e *MachineNotFoundError) Error() string { return fmt.Sprintf("'%s' does not exist", e.MachineID) } -func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { +func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { os := 
baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -63,7 +64,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B SetLastHeartbeat(heartbeat). SetHubstate(hubState). SetDatasources(datasources). - Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update base machine metrics in database: %w", err) } diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 7626c39f6f1..1619fcc923b 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "time" @@ -8,14 +9,14 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) -func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) (*ent.Metric, error) { +func (c *Client) CreateMetric(ctx context.Context, generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) (*ent.Metric, error) { metric, err := c.Ent.Metric. Create(). SetGeneratedType(generatedType). SetGeneratedBy(generatedBy). SetReceivedAt(receivedAt). SetPayload(payload). 
- Save(c.CTX) + Save(ctx) if err != nil { c.Log.Warningf("CreateMetric: %s", err) return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, receivedAt, InsertFail) From 8a74faed43103a03d6499d6528b635fc35abe839 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 12 Sep 2024 21:16:55 +0200 Subject: [PATCH 291/581] context propagation: cscli {capi,lapi,papi} (#3228) * context propagation: lapi status, capi status, papi status * context propagation: lapi register, capi register * lint --- cmd/crowdsec-cli/clicapi/capi.go | 20 ++++++++--------- cmd/crowdsec-cli/clilapi/lapi.go | 30 +++++++++++++------------- cmd/crowdsec-cli/clipapi/papi.go | 11 ++++++---- cmd/crowdsec-cli/clisupport/support.go | 18 ++++++++-------- pkg/apiclient/auth_service_test.go | 6 ++++-- pkg/apiclient/client.go | 4 ++-- pkg/apiclient/client_test.go | 16 ++++++++++---- pkg/apiserver/papi.go | 4 ++-- 8 files changed, 61 insertions(+), 48 deletions(-) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 49f68dd6b9e..fbc50066287 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -58,7 +58,7 @@ func (cli *cliCapi) NewCommand() *cobra.Command { return cmd } -func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { +func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputFile string) error { cfg := cli.cfg() capiUser, err := idgen.GenerateMachineID(capiUserPrefix) @@ -73,7 +73,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { return fmt.Errorf("unable to parse api url %s: %w", types.CAPIBaseURL, err) } - _, err = apiclient.RegisterClient(&apiclient.Config{ + _, err = apiclient.RegisterClient(ctx, &apiclient.Config{ MachineID: capiUser, Password: password, URL: apiurl, @@ -134,8 +134,8 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { Short: "Register to Central API (CAPI)", Args: 
cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(capiUserPrefix, outputFile) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.register(cmd.Context(), capiUserPrefix, outputFile) }, } @@ -148,7 +148,7 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { } // queryCAPIStatus checks if the Central API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. -func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { +func queryCAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { apiURL, err := url.Parse(credURL) if err != nil { return false, false, err @@ -186,7 +186,7 @@ func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri Scenarios: itemsForAPI, } - authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), t) + authResp, _, err := client.Auth.AuthenticateWatcher(ctx, t) if err != nil { return false, false, err } @@ -200,7 +200,7 @@ func queryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return true, false, nil } -func (cli *cliCapi) Status(out io.Writer, hub *cwhub.Hub) error { +func (cli *cliCapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() if err := require.CAPIRegistered(cfg); err != nil { @@ -212,7 +212,7 @@ func (cli *cliCapi) Status(out io.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - auth, enrolled, err := queryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + auth, enrolled, err := queryCAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) if err != nil { return fmt.Errorf("failed to authenticate to Central API (CAPI): 
%w", err) } @@ -234,13 +234,13 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Short: "Check status with the Central API (CAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { return err } - return cli.Status(color.Output, hub) + return cli.Status(cmd.Context(), color.Output, hub) }, } diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index fa229002512..eff7ae64476 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -44,7 +44,7 @@ func New(cfg configGetter) *cliLapi { } // queryLAPIStatus checks if the Local API is reachable, and if the credentials are correct. -func queryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { +func queryLAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { apiURL, err := url.Parse(credURL) if err != nil { return false, err @@ -76,7 +76,7 @@ func queryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri return true, nil } -func (cli *cliLapi) Status(out io.Writer, hub *cwhub.Hub) error { +func (cli *cliLapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() cred := cfg.API.Client.Credentials @@ -84,7 +84,7 @@ func (cli *cliLapi) Status(out io.Writer, hub *cwhub.Hub) error { fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Client.CredentialsFilePath) fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - _, err := queryLAPIStatus(hub, cred.URL, cred.Login, cred.Password) + _, err := queryLAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) if err != nil { return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) } @@ -94,7 +94,7 @@ func (cli *cliLapi) Status(out io.Writer, hub 
*cwhub.Hub) error { return nil } -func (cli *cliLapi) register(apiURL string, outputFile string, machine string, token string) error { +func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile string, machine string, token string) error { var err error lapiUser := machine @@ -114,7 +114,7 @@ func (cli *cliLapi) register(apiURL string, outputFile string, machine string, t return fmt.Errorf("parsing api url: %w", err) } - _, err = apiclient.RegisterClient(&apiclient.Config{ + _, err = apiclient.RegisterClient(ctx, &apiclient.Config{ MachineID: lapiUser, Password: password, RegistrationToken: token, @@ -195,13 +195,13 @@ func (cli *cliLapi) newStatusCmd() *cobra.Command { Short: "Check authentication to Local API (LAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), nil, nil) if err != nil { return err } - return cli.Status(color.Output, hub) + return cli.Status(cmd.Context(), color.Output, hub) }, } @@ -223,8 +223,8 @@ func (cli *cliLapi) newRegisterCmd() *cobra.Command { Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(apiURL, outputFile, machine, token) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.register(cmd.Context(), apiURL, outputFile, machine, token) }, } @@ -513,14 +513,14 @@ func detectStaticField(grokStatics []parser.ExtraField) []string { for _, static := range grokStatics { if static.Parsed != "" { - fieldName := fmt.Sprintf("evt.Parsed.%s", static.Parsed) + fieldName := "evt.Parsed." + static.Parsed if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } } if static.Meta != "" { - fieldName := fmt.Sprintf("evt.Meta.%s", static.Meta) + fieldName := "evt.Meta." 
+ static.Meta if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -546,7 +546,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { if node.Grok.RunTimeRegexp != nil { for _, capturedField := range node.Grok.RunTimeRegexp.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -558,7 +558,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { // ignore error (parser does not exist?) if err == nil { for _, capturedField := range grokCompiled.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -593,7 +593,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { for _, subnode := range node.LeavesNodes { if subnode.Grok.RunTimeRegexp != nil { for _, capturedField := range subnode.Grok.RunTimeRegexp.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -605,7 +605,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { if err == nil { // ignore error (parser does not exist?) for _, capturedField := range grokCompiled.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." 
+ capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index 0752267707b..747b8c01b9b 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -1,6 +1,7 @@ package clipapi import ( + "context" "fmt" "io" "time" @@ -55,7 +56,7 @@ func (cli *cliPapi) NewCommand() *cobra.Command { return cmd } -func (cli *cliPapi) Status(out io.Writer, db *database.Client) error { +func (cli *cliPapi) Status(ctx context.Context, out io.Writer, db *database.Client) error { cfg := cli.cfg() apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) @@ -68,7 +69,7 @@ func (cli *cliPapi) Status(out io.Writer, db *database.Client) error { return fmt.Errorf("unable to initialize PAPI client: %w", err) } - perms, err := papi.GetPermissions() + perms, err := papi.GetPermissions(ctx) if err != nil { return fmt.Errorf("unable to get PAPI permissions: %w", err) } @@ -103,12 +104,14 @@ func (cli *cliPapi) newStatusCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { return err } - return cli.Status(color.Output, db) + return cli.Status(ctx, color.Output, db) }, } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index f161c66c802..e9837b03fe7 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -231,13 +231,13 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { return nil } -func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { +func (cli *cliSupport) dumpLAPIStatus(ctx context.Context, zw *zip.Writer, hub *cwhub.Hub) error { 
log.Info("Collecting LAPI status") out := new(bytes.Buffer) cl := clilapi.New(cli.cfg) - err := cl.Status(out, hub) + err := cl.Status(ctx, out, hub) if err != nil { fmt.Fprintf(out, "%s\n", err) } @@ -249,13 +249,13 @@ func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { return nil } -func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { +func (cli *cliSupport) dumpCAPIStatus(ctx context.Context, zw *zip.Writer, hub *cwhub.Hub) error { log.Info("Collecting CAPI status") out := new(bytes.Buffer) cc := clicapi.New(cli.cfg) - err := cc.Status(out, hub) + err := cc.Status(ctx, out, hub) if err != nil { fmt.Fprintf(out, "%s\n", err) } @@ -267,13 +267,13 @@ func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { return nil } -func (cli *cliSupport) dumpPAPIStatus(zw *zip.Writer, db *database.Client) error { +func (cli *cliSupport) dumpPAPIStatus(ctx context.Context, zw *zip.Writer, db *database.Client) error { log.Info("Collecting PAPI status") out := new(bytes.Buffer) cp := clipapi.New(cli.cfg) - err := cp.Status(out, db) + err := cp.Status(ctx, out, db) if err != nil { fmt.Fprintf(out, "%s\n", err) } @@ -534,17 +534,17 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { } if !skipCAPI { - if err = cli.dumpCAPIStatus(zipWriter, hub); err != nil { + if err = cli.dumpCAPIStatus(ctx, zipWriter, hub); err != nil { log.Warnf("could not collect CAPI status: %s", err) } - if err = cli.dumpPAPIStatus(zipWriter, db); err != nil { + if err = cli.dumpPAPIStatus(ctx, zipWriter, db); err != nil { log.Warnf("could not collect PAPI status: %s", err) } } if !skipLAPI { - if err = cli.dumpLAPIStatus(zipWriter, hub); err != nil { + if err = cli.dumpLAPIStatus(ctx, zipWriter, hub); err != nil { log.Warnf("could not collect LAPI status: %s", err) } diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index 344e377ad0f..d22c9394014 100644 --- 
a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -92,7 +92,9 @@ func TestWatcherRegister(t *testing.T) { VersionPrefix: "v1", } - client, err := RegisterClient(&clientconfig, &http.Client{}) + ctx := context.Background() + + client, err := RegisterClient(ctx, &clientconfig, &http.Client{}) require.NoError(t, err) log.Printf("->%T", client) @@ -102,7 +104,7 @@ func TestWatcherRegister(t *testing.T) { for _, errorCodeToTest := range errorCodesToTest { clientconfig.MachineID = fmt.Sprintf("login_%d", errorCodeToTest) - client, err = RegisterClient(&clientconfig, &http.Client{}) + client, err = RegisterClient(ctx, &clientconfig, &http.Client{}) require.Nil(t, client, "nil expected for the response code %d", errorCodeToTest) require.Error(t, err, "error expected for the response code %d", errorCodeToTest) } diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 02a99037a04..47d97a28344 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -167,7 +167,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt return c, nil } -func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { +func RegisterClient(ctx context.Context, config *Config, client *http.Client) (*ApiClient, error) { transport, baseURL := createTransport(config.URL) if client == nil { @@ -199,7 +199,7 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { c.Alerts = (*AlertsService)(&c.common) c.Auth = (*AuthService)(&c.common) - resp, err := c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) + resp, err := c.Auth.RegisterWatcher(ctx, models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) if err != nil { /*if we have http status, return it*/ if resp != nil && 
resp.Response != nil { diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index e632ff428c0..d1f58f33ad2 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -242,7 +242,9 @@ func TestNewClientRegisterKO(t *testing.T) { apiURL, err := url.Parse("http://127.0.0.1:4242/") require.NoError(t, err) - _, err = RegisterClient(&Config{ + ctx := context.Background() + + _, err = RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", URL: apiURL, @@ -272,7 +274,9 @@ func TestNewClientRegisterOK(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - client, err := RegisterClient(&Config{ + ctx := context.Background() + + client, err := RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", URL: apiURL, @@ -304,7 +308,9 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { t.Fatalf("parsing api url: %s", apiURL) } - client, err := RegisterClient(&Config{ + ctx := context.Background() + + client, err := RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", URL: apiURL, @@ -333,7 +339,9 @@ func TestNewClientBadAnswer(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - _, err = RegisterClient(&Config{ + ctx := context.Background() + + _, err = RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", URL: apiURL, diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 0d0fd0ecd42..0a69f086a7f 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -156,11 +156,11 @@ func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { return nil } -func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { +func (p *Papi) GetPermissions(ctx context.Context) (PapiPermCheckSuccess, error) { httpClient := p.apiClient.GetClient() papiCheckUrl := fmt.Sprintf("%s%s%s", p.URL, types.PAPIVersion, types.PAPIPermissionsUrl) - req, err := 
http.NewRequest(http.MethodGet, papiCheckUrl, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, papiCheckUrl, nil) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request: %w", err) } From 1591a0c46ee3c3b1643d7078887f4bc262ed155b Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 13 Sep 2024 10:57:48 +0200 Subject: [PATCH 292/581] Update go-re2 (#3230) --- go.mod | 5 +++-- go.sum | 12 ++++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index bce1ca12316..ec8566db84a 100644 --- a/go.mod +++ b/go.mod @@ -78,9 +78,9 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.12.2 github.com/spf13/cobra v1.8.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 - github.com/wasilibs/go-re2 v1.6.0 + github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 golang.org/x/crypto v0.22.0 golang.org/x/mod v0.15.0 @@ -195,6 +195,7 @@ require ( github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect diff --git a/go.sum b/go.sum index eec85b5b2e9..ff73dc56332 100644 --- a/go.sum +++ b/go.sum @@ -644,8 +644,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= 
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -659,8 +660,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= @@ -690,10 +692,12 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wasilibs/go-re2 v1.6.0 
h1:CLlhDebt38wtl/zz4ww+hkXBMcxjrKFvTDXzFW2VOz8= -github.com/wasilibs/go-re2 v1.6.0/go.mod h1:prArCyErsypRBI/jFAFJEbzyHzjABKqkzlidF0SNA04= +github.com/wasilibs/go-re2 v1.7.0 h1:bYhl8gn+a9h01dxwotNycxkiFPTiSgwUrIz8KZJ90Lc= +github.com/wasilibs/go-re2 v1.7.0/go.mod h1:sUsZMLflgl+LNivDE229omtmvjICmOseT9xOy199VDU= github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ= github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 h1:OvLBa8SqJnZ6P+mjlzc2K7PM22rRUPE1x32G9DTPrC4= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52/go.mod h1:jMeV4Vpbi8osrE/pKUxRZkVaA0EX7NZN0A9/oRzgpgY= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= From 188f580fc475b6f03bba5ddf6c1cf8b8b6967d6b Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 13 Sep 2024 11:37:44 +0200 Subject: [PATCH 293/581] Update go-re2 (#3230) (#3234) --- go.mod | 5 +++-- go.sum | 12 ++++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index bce1ca12316..ec8566db84a 100644 --- a/go.mod +++ b/go.mod @@ -78,9 +78,9 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.12.2 github.com/spf13/cobra v1.8.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 - github.com/wasilibs/go-re2 v1.6.0 + github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 golang.org/x/crypto v0.22.0 golang.org/x/mod v0.15.0 @@ -195,6 +195,7 @@ require ( github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect + 
github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect diff --git a/go.sum b/go.sum index eec85b5b2e9..ff73dc56332 100644 --- a/go.sum +++ b/go.sum @@ -644,8 +644,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -659,8 +660,9 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= @@ -690,10 +692,12 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/wasilibs/go-re2 v1.6.0 h1:CLlhDebt38wtl/zz4ww+hkXBMcxjrKFvTDXzFW2VOz8= -github.com/wasilibs/go-re2 v1.6.0/go.mod h1:prArCyErsypRBI/jFAFJEbzyHzjABKqkzlidF0SNA04= +github.com/wasilibs/go-re2 v1.7.0 h1:bYhl8gn+a9h01dxwotNycxkiFPTiSgwUrIz8KZJ90Lc= +github.com/wasilibs/go-re2 v1.7.0/go.mod h1:sUsZMLflgl+LNivDE229omtmvjICmOseT9xOy199VDU= github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ= github.com/wasilibs/nottinygc v0.4.0/go.mod h1:oDcIotskuYNMpqMF23l7Z8uzD4TC0WXHK8jetlB3HIo= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 h1:OvLBa8SqJnZ6P+mjlzc2K7PM22rRUPE1x32G9DTPrC4= +github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52/go.mod h1:jMeV4Vpbi8osrE/pKUxRZkVaA0EX7NZN0A9/oRzgpgY= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= From ce085dc4cd11c2ca650ff373afa599417c77bdad Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 17 Sep 2024 13:19:14 +0200 Subject: [PATCH 294/581] logs and user messages: use "parse" and "serialize" instead of marshal/unmarshal 
(#3240) --- cmd/crowdsec-cli/clialert/alerts.go | 4 ++-- cmd/crowdsec-cli/clibouncer/bouncers.go | 6 +++--- cmd/crowdsec-cli/clicapi/capi.go | 2 +- cmd/crowdsec-cli/cliconsole/console.go | 6 +++--- cmd/crowdsec-cli/clihub/items.go | 4 ++-- cmd/crowdsec-cli/clihubtest/create.go | 2 +- cmd/crowdsec-cli/cliitem/appsec.go | 2 +- cmd/crowdsec-cli/clilapi/lapi.go | 2 +- cmd/crowdsec-cli/climachine/machines.go | 8 ++++---- cmd/crowdsec-cli/climetrics/list.go | 2 +- cmd/crowdsec-cli/climetrics/store.go | 2 +- cmd/crowdsec-cli/clinotifications/notifications.go | 8 ++++---- cmd/crowdsec-cli/clisetup/setup.go | 2 +- cmd/crowdsec-cli/clisimulation/simulation.go | 4 ++-- cmd/crowdsec-cli/config_backup.go | 2 +- cmd/crowdsec-cli/config_restore.go | 2 +- cmd/crowdsec-cli/config_show.go | 6 +++--- cmd/crowdsec/pour.go | 4 ++-- cmd/notification-file/main.go | 2 +- pkg/acquisition/acquisition.go | 2 +- pkg/acquisition/modules/appsec/appsec.go | 2 +- pkg/acquisition/modules/kafka/kafka.go | 2 +- pkg/acquisition/modules/kubernetesaudit/k8s_audit.go | 2 +- .../modules/wineventlog/wineventlog_windows.go | 2 +- pkg/alertcontext/config.go | 2 +- pkg/apiserver/apic_metrics.go | 4 ++-- pkg/apiserver/controllers/v1/alerts.go | 2 +- pkg/apiserver/controllers/v1/usagemetrics.go | 2 +- pkg/apiserver/papi.go | 6 +++--- pkg/appsec/loader.go | 2 +- pkg/csconfig/api.go | 4 ++-- pkg/csconfig/api_test.go | 2 +- pkg/csconfig/config_test.go | 2 +- pkg/csconfig/console.go | 2 +- pkg/csconfig/crowdsec_service.go | 2 +- pkg/csconfig/simulation.go | 2 +- pkg/csconfig/simulation_test.go | 4 ++-- pkg/csplugin/broker_test.go | 4 ++-- pkg/cwhub/hub.go | 2 +- pkg/cwhub/sync.go | 2 +- pkg/database/ent/machine.go | 4 ++-- pkg/database/errors.go | 4 ++-- pkg/hubtest/coverage.go | 4 ++-- pkg/hubtest/hubtest_item.go | 4 ++-- pkg/leakybucket/buckets_test.go | 4 ++-- pkg/leakybucket/manager_load.go | 2 +- pkg/leakybucket/manager_run.go | 4 ++-- pkg/leakybucket/overflows.go | 6 +++--- pkg/leakybucket/timemachine.go | 2 
+- pkg/leakybucket/trigger.go | 2 +- pkg/parser/enrich_date.go | 4 ++-- pkg/parser/enrich_unmarshal.go | 2 +- pkg/parser/parsing_test.go | 2 +- pkg/setup/detect.go | 2 +- pkg/setup/install.go | 2 +- test/bats/07_setup.bats | 2 +- 56 files changed, 87 insertions(+), 87 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 757a84927e5..006d7ac7d8c 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -521,14 +521,14 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { case "json": data, err := json.MarshalIndent(alert, "", " ") if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + return fmt.Errorf("unable to serialize alert with id %s: %w", alertID, err) } fmt.Printf("%s\n", string(data)) case "raw": data, err := yaml.Marshal(alert) if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + return fmt.Errorf("unable to serialize alert with id %s: %w", alertID, err) } fmt.Println(string(data)) diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 0d1484bcc6b..25c80d16404 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -181,7 +181,7 @@ func (cli *cliBouncers) List(out io.Writer, db *database.Client) error { enc.SetIndent("", " ") if err := enc.Encode(info); err != nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil @@ -234,7 +234,7 @@ func (cli *cliBouncers) add(bouncerName string, key string) error { case "json": j, err := json.Marshal(key) if err != nil { - return errors.New("unable to marshal api key") + return errors.New("unable to serialize api key") } fmt.Print(string(j)) @@ -458,7 +458,7 @@ func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { enc.SetIndent("", " ") if err := enc.Encode(newBouncerInfo(bouncer)); err != 
nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index fbc50066287..24c3ba054a9 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -104,7 +104,7 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) + return fmt.Errorf("unable to serialize api credentials: %w", err) } if dumpFile != "" { diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index e4b4039bdd2..af1ba316c2d 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -280,7 +280,7 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command { } data, err := json.MarshalIndent(out, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) case "raw": @@ -318,7 +318,7 @@ func (cli *cliConsole) dumpConfig() error { out, err := yaml.Marshal(serverCfg.ConsoleConfig) if err != nil { - return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err) + return fmt.Errorf("while serializing ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err) } if serverCfg.ConsoleConfigPath == "" { @@ -361,7 +361,7 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { if changed { fileContent, err := yaml.Marshal(cfg.API.Server.OnlineClient.Credentials) if err != nil { - return fmt.Errorf("cannot marshal credentials: %w", err) + return fmt.Errorf("cannot serialize credentials: %w", err) } log.Infof("Updating credentials file: %s", cfg.API.Server.OnlineClient.CredentialsFilePath) diff --git a/cmd/crowdsec-cli/clihub/items.go 
b/cmd/crowdsec-cli/clihub/items.go index 0ab89654dac..f86fe65a2a1 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -106,7 +106,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st x, err := json.MarshalIndent(hubStatus, "", " ") if err != nil { - return fmt.Errorf("failed to unmarshal: %w", err) + return fmt.Errorf("failed to parse: %w", err) } out.Write(x) @@ -158,7 +158,7 @@ func InspectItem(item *cwhub.Item, wantMetrics bool, output string, prometheusUR case "json": b, err := json.MarshalIndent(*item, "", " ") if err != nil { - return fmt.Errorf("unable to marshal item: %w", err) + return fmt.Errorf("unable to serialize item: %w", err) } fmt.Print(string(b)) diff --git a/cmd/crowdsec-cli/clihubtest/create.go b/cmd/crowdsec-cli/clihubtest/create.go index e0834f7e569..3822bed8903 100644 --- a/cmd/crowdsec-cli/clihubtest/create.go +++ b/cmd/crowdsec-cli/clihubtest/create.go @@ -134,7 +134,7 @@ cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios } data, err := yaml.Marshal(configFileData) if err != nil { - return fmt.Errorf("marshal: %w", err) + return fmt.Errorf("serialize: %w", err) } _, err = fd.Write(data) if err != nil { diff --git a/cmd/crowdsec-cli/cliitem/appsec.go b/cmd/crowdsec-cli/cliitem/appsec.go index db567f86a32..44afa2133bd 100644 --- a/cmd/crowdsec-cli/cliitem/appsec.go +++ b/cmd/crowdsec-cli/cliitem/appsec.go @@ -62,7 +62,7 @@ func NewAppsecRule(cfg configGetter) *cliItem { } if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { - return fmt.Errorf("unable to unmarshal yaml file %s: %w", item.State.LocalPath, err) + return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) } for _, ruleType := range appsec_rule.SupportedTypes() { diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index eff7ae64476..75fdc5c239d 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ 
b/cmd/crowdsec-cli/clilapi/lapi.go @@ -147,7 +147,7 @@ func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile stri apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) + return fmt.Errorf("unable to serialize api credentials: %w", err) } if dumpFile != "" { diff --git a/cmd/crowdsec-cli/climachine/machines.go b/cmd/crowdsec-cli/climachine/machines.go index bf8656105aa..3df176d786d 100644 --- a/cmd/crowdsec-cli/climachine/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -232,7 +232,7 @@ func (cli *cliMachines) List(out io.Writer, db *database.Client) error { enc.SetIndent("", " ") if err := enc.Encode(info); err != nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil @@ -378,7 +378,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) + return fmt.Errorf("unable to serialize api credentials: %w", err) } if dumpFile != "" && dumpFile != "-" { @@ -626,7 +626,7 @@ func (cli *cliMachines) inspect(machine *ent.Machine) error { enc.SetIndent("", " ") if err := enc.Encode(newMachineInfo(machine)); err != nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil @@ -648,7 +648,7 @@ func (cli *cliMachines) inspectHub(machine *ent.Machine) error { enc.SetIndent("", " ") if err := enc.Encode(machine.Hubstate); err != nil { - return errors.New("failed to marshal") + return errors.New("failed to serialize") } return nil diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index d3afbef0669..ba827634052 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -68,7 +68,7 @@ func (cli *cliMetrics) list() error { case "json": x, err := 
json.MarshalIndent(allMetrics, "", " ") if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) + return fmt.Errorf("failed to serialize metric types: %w", err) } fmt.Println(string(x)) diff --git a/cmd/crowdsec-cli/climetrics/store.go b/cmd/crowdsec-cli/climetrics/store.go index 5de50558e89..55fab5dbd7f 100644 --- a/cmd/crowdsec-cli/climetrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -260,7 +260,7 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, case "json": x, err := json.MarshalIndent(want, "", " ") if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) + return fmt.Errorf("failed to serialize metrics: %w", err) } out.Write(x) default: diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index eb568ca5fa6..314f97db23e 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -172,7 +172,7 @@ func (cli *cliNotifications) newListCmd() *cobra.Command { } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(ncfgs, "", " ") if err != nil { - return fmt.Errorf("failed to marshal notification configuration: %w", err) + return fmt.Errorf("failed to serialize notification configuration: %w", err) } fmt.Printf("%s", string(x)) } else if cfg.Cscli.Output == "raw" { @@ -231,7 +231,7 @@ func (cli *cliNotifications) newInspectCmd() *cobra.Command { } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(cfg, "", " ") if err != nil { - return fmt.Errorf("failed to marshal notification configuration: %w", err) + return fmt.Errorf("failed to serialize notification configuration: %w", err) } fmt.Printf("%s", string(x)) } @@ -331,7 +331,7 @@ func (cli cliNotifications) newTestCmd() *cobra.Command { CreatedAt: time.Now().UTC().Format(time.RFC3339), } if err := yaml.Unmarshal([]byte(alertOverride), alert); err != nil { - return 
fmt.Errorf("failed to unmarshal alert override: %w", err) + return fmt.Errorf("failed to parse alert override: %w", err) } pluginBroker.PluginChannel <- csplugin.ProfileAlert{ @@ -387,7 +387,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not if alertOverride != "" { if err := json.Unmarshal([]byte(alertOverride), alert); err != nil { - return fmt.Errorf("can't unmarshal data in the alert flag: %w", err) + return fmt.Errorf("can't parse data in the alert flag: %w", err) } } diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go index 8aee45b4287..269cdfb78e9 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -227,7 +227,7 @@ func setupAsString(cs setup.Setup, outYaml bool) (string, error) { ) wrap := func(err error) error { - return fmt.Errorf("while marshaling setup: %w", err) + return fmt.Errorf("while serializing setup: %w", err) } indentLevel := 2 diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index 9d9defd78e7..8136aa213c3 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -220,7 +220,7 @@ func (cli *cliSimulation) dumpSimulationFile() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal simulation configuration: %w", err) + return fmt.Errorf("unable to serialize simulation configuration: %w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) @@ -242,7 +242,7 @@ func (cli *cliSimulation) disableGlobalSimulation() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal new simulation configuration: %w", err) + return fmt.Errorf("unable to serialize new simulation configuration: %w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) diff --git 
a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index e8ac6213530..d23aff80a78 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ b/cmd/crowdsec-cli/config_backup.go @@ -74,7 +74,7 @@ func (cli *cliConfig) backupHub(dirPath string) error { upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") if err != nil { - return fmt.Errorf("failed marshaling upstream parsers: %w", err) + return fmt.Errorf("failed to serialize upstream parsers: %w", err) } err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0o644) diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index fc3670165f8..c32328485ec 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -40,7 +40,7 @@ func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { err = json.Unmarshal(file, &upstreamList) if err != nil { - return fmt.Errorf("error unmarshaling %s: %w", upstreamListFN, err) + return fmt.Errorf("error parsing %s: %w", upstreamListFN, err) } for _, toinstall := range upstreamList { diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index e411f5a322b..2d3ac488ba2 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -50,7 +50,7 @@ func (cli *cliConfig) showKey(key string) error { case "json": data, err := json.MarshalIndent(output, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) @@ -212,14 +212,14 @@ func (cli *cliConfig) show() error { case "json": data, err := json.MarshalIndent(cfg, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) case "raw": data, err := yaml.Marshal(cfg) if err != nil { - return fmt.Errorf("failed to marshal 
configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 388c7a6c1b3..1382a909ab3 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -32,7 +32,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea if parsed.MarshaledTime != "" { z := &time.Time{} if err := z.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { - log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err) + log.Warningf("Failed to parse time from event '%s' : %s", parsed.MarshaledTime, err) } else { log.Warning("Starting buckets garbage collection ...") @@ -61,7 +61,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea if len(parsed.MarshaledTime) != 0 { if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { - log.Warningf("failed to unmarshal time from event : %s", err) + log.Warningf("failed to parse time from event : %s", err) } } } diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go index 7fc529cff41..f6649b1f395 100644 --- a/cmd/notification-file/main.go +++ b/cmd/notification-file/main.go @@ -210,7 +210,7 @@ func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (* d := PluginConfig{} err := yaml.Unmarshal(config.Config, &d) if err != nil { - logger.Error("Failed to unmarshal config", "error", err) + logger.Error("Failed to parse config", "error", err) return &protobufs.Empty{}, err } FileWriteMutex = &sync.Mutex{} diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 38bf228abbc..a737881dd4d 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -129,7 +129,7 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics // once to DataSourceCommonCfg, and then later to the dedicated type of the datasource 
yamlConfig, err := yaml.Marshal(commonConfig) if err != nil { - return nil, fmt.Errorf("unable to marshal back interface: %w", err) + return nil, fmt.Errorf("unable to serialize back interface: %w", err) } dataSrc, err := GetDataSourceIface(commonConfig.Source) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 5b0661a21b7..8a93326c7e3 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -393,7 +393,7 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { rw.WriteHeader(statusCode) body, err := json.Marshal(appsecResponse) if err != nil { - logger.Errorf("unable to marshal response: %s", err) + logger.Errorf("unable to serialize response: %s", err) rw.WriteHeader(http.StatusInternalServerError) } else { rw.Write(body) diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index ca0a7556fca..a0d7fc39bcc 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -82,7 +82,7 @@ func (k *KafkaSource) UnmarshalConfig(yamlConfig []byte) error { k.Config.Mode = configuration.TAIL_MODE } - k.logger.Debugf("successfully unmarshaled kafka configuration : %+v", k.Config) + k.logger.Debugf("successfully parsed kafka configuration : %+v", k.Config) return err } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index e48a074b764..8ba5b2d06e0 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -196,7 +196,7 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R } bytesEvent, err := json.Marshal(auditEvent) if err != nil { - ka.logger.Errorf("Error marshaling audit event: %s", err) + ka.logger.Errorf("Error serializing audit event: %s", err) continue } ka.logger.Tracef("Got audit event: %s", 
string(bytesEvent)) diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index c6b10b7c38c..4f2384d71db 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -149,7 +149,7 @@ func (w *WinEventLogSource) buildXpathQuery() (string, error) { queryList := QueryList{Select: Select{Path: w.config.EventChannel, Query: query}} xpathQuery, err := xml.Marshal(queryList) if err != nil { - w.logger.Errorf("Marshal failed: %v", err) + w.logger.Errorf("Serialize failed: %v", err) return "", err } w.logger.Debugf("xpathQuery: %s", xpathQuery) diff --git a/pkg/alertcontext/config.go b/pkg/alertcontext/config.go index da05c937b18..6ef877619e4 100644 --- a/pkg/alertcontext/config.go +++ b/pkg/alertcontext/config.go @@ -133,7 +133,7 @@ func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error { feedback, err := json.Marshal(c.Crowdsec.ContextToSend) if err != nil { - return fmt.Errorf("marshaling console context: %s", err) + return fmt.Errorf("serializing console context: %s", err) } log.Debugf("console context to send: %s", feedback) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 176984f1ad6..5c6a550a6a0 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -70,7 +70,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) if err != nil { - log.Errorf("unable to unmarshal bouncer metric (%s)", err) + log.Errorf("unable to parse bouncer metric (%s)", err) continue } @@ -132,7 +132,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) if err != nil { - log.Errorf("unable to unmarshal log processor metric (%s)", err) + log.Errorf("unable to parse log processor metric (%s)", err) continue } diff --git 
a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 3d4309b1347..84b3094865c 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -63,7 +63,7 @@ func FormatOneAlert(alert *ent.Alert) *models.Alert { var Metas models.Meta if err := json.Unmarshal([]byte(eventItem.Serialized), &Metas); err != nil { - log.Errorf("unable to unmarshall events meta '%s' : %s", eventItem.Serialized, err) + log.Errorf("unable to parse events meta '%s' : %s", eventItem.Serialized, err) } outputAlert.Events = append(outputAlert.Events, &models.Event{ diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 27b1b819a54..5b2c3e3b1a9 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -183,7 +183,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { jsonPayload, err := json.Marshal(payload) if err != nil { - logger.Errorf("Failed to marshal usage metrics: %s", err) + logger.Errorf("Failed to serialize usage metrics: %s", err) c.HandleDBErrors(gctx, err) return diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 0a69f086a7f..89ad93930a1 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -245,7 +245,7 @@ func (p *Papi) Pull() error { if lastTimestampStr == nil { binTime, err := lastTimestamp.MarshalText() if err != nil { - return fmt.Errorf("failed to marshal last timestamp: %w", err) + return fmt.Errorf("failed to serialize last timestamp: %w", err) } if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil { @@ -255,7 +255,7 @@ func (p *Papi) Pull() error { } } else { if err := lastTimestamp.UnmarshalText([]byte(*lastTimestampStr)); err != nil { - return fmt.Errorf("failed to unmarshal last timestamp: %w", err) + return fmt.Errorf("failed to parse last timestamp: %w", err) } } @@ -268,7 +268,7 @@ func (p *Papi) Pull() error { binTime, err := 
newTime.MarshalText() if err != nil { - return fmt.Errorf("failed to marshal last timestamp: %w", err) + return fmt.Errorf("failed to serialize last timestamp: %w", err) } err = p.handleEvent(event, false) diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 9a3bfb6b668..c724010cec2 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -28,7 +28,7 @@ func LoadAppsecRules(hubInstance *cwhub.Hub) error { err = yaml.UnmarshalStrict(content, &rule) if err != nil { - log.Warnf("unable to unmarshal file %s : %s", hubAppsecRuleItem.State.LocalPath, err) + log.Warnf("unable to parse file %s : %s", hubAppsecRuleItem.State.LocalPath, err) continue } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 4a28b590e80..3014b729a9e 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -99,7 +99,7 @@ func (o *OnlineApiClientCfg) Load() error { err = dec.Decode(o.Credentials) if err != nil { if !errors.Is(err, io.EOF) { - return fmt.Errorf("failed unmarshaling api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) + return fmt.Errorf("failed to parse api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) } } @@ -134,7 +134,7 @@ func (l *LocalApiClientCfg) Load() error { err = dec.Decode(&l.Credentials) if err != nil { if !errors.Is(err, io.EOF) { - return fmt.Errorf("failed unmarshaling api client credential configuration file '%s': %w", l.CredentialsFilePath, err) + return fmt.Errorf("failed to parse api client credential configuration file '%s': %w", l.CredentialsFilePath, err) } } diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 96945202aa8..dff3c3afc8c 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -101,7 +101,7 @@ func TestLoadOnlineApiClientCfg(t *testing.T) { CredentialsFilePath: "./testdata/bad_lapi-secrets.yaml", }, expected: &ApiCredentialsCfg{}, - expectedErr: "failed unmarshaling api server credentials", + expectedErr: "failed to parse 
api server credentials", }, { name: "missing field configuration", diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go index 11f1f0cf68d..b69954de178 100644 --- a/pkg/csconfig/config_test.go +++ b/pkg/csconfig/config_test.go @@ -42,5 +42,5 @@ func TestNewCrowdSecConfig(t *testing.T) { func TestDefaultConfig(t *testing.T) { x := NewDefaultConfig() _, err := yaml.Marshal(x) - require.NoError(t, err, "failed marshaling config: %s", err) + require.NoError(t, err, "failed to serialize config: %s", err) } diff --git a/pkg/csconfig/console.go b/pkg/csconfig/console.go index 4c14f5f7d49..21ecbf3d736 100644 --- a/pkg/csconfig/console.go +++ b/pkg/csconfig/console.go @@ -95,7 +95,7 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error { err = yaml.Unmarshal(yamlFile, c.ConsoleConfig) if err != nil { - return fmt.Errorf("unmarshaling console config file '%s': %w", c.ConsoleConfigPath, err) + return fmt.Errorf("parsing console config file '%s': %w", c.ConsoleConfigPath, err) } if c.ConsoleConfig.ShareCustomScenarios == nil { diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 7820595b46f..7a611a856ee 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -143,7 +143,7 @@ func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { // XXX: MakeDirs out, err := yaml.Marshal(c.ContextToSend) if err != nil { - return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) + return fmt.Errorf("while serializing ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) } if err = os.MkdirAll(filepath.Dir(c.ConsoleContextPath), 0700); err != nil { diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index 947b47e3c1e..afc4ea4f044 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -52,7 +52,7 @@ func (c *Config) LoadSimulation() error { if err := dec.Decode(&simCfg); err != nil { if !errors.Is(err, io.EOF) { - return 
fmt.Errorf("while unmarshaling simulation file '%s': %w", c.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("while parsing simulation file '%s': %w", c.ConfigPaths.SimulationFilePath, err) } } diff --git a/pkg/csconfig/simulation_test.go b/pkg/csconfig/simulation_test.go index a678d7edd49..a1e5f0a5b02 100644 --- a/pkg/csconfig/simulation_test.go +++ b/pkg/csconfig/simulation_test.go @@ -60,7 +60,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", + expectedErr: "while parsing simulation file './testdata/config.yaml': yaml: unmarshal errors", }, { name: "basic bad file content", @@ -71,7 +71,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", + expectedErr: "while parsing simulation file './testdata/config.yaml': yaml: unmarshal errors", }, } diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index f2179acb2c1..48f5a71f773 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -38,7 +38,7 @@ func (s *PluginSuite) readconfig() PluginConfig { require.NoError(t, err, "unable to read config file %s", s.pluginConfig) err = yaml.Unmarshal(orig, &config) - require.NoError(t, err, "unable to unmarshal config file") + require.NoError(t, err, "unable to parse config file") return config } @@ -46,7 +46,7 @@ func (s *PluginSuite) readconfig() PluginConfig { func (s *PluginSuite) writeconfig(config PluginConfig) { t := s.T() data, err := yaml.Marshal(&config) - require.NoError(t, err, "unable to marshal config file") + require.NoError(t, err, "unable to serialize config file") err = os.WriteFile(s.pluginConfig, data, 0o644) require.NoError(t, err, "unable to write config file %s", s.pluginConfig) diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 
a4e81e2c3e2..f74a794a512 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -79,7 +79,7 @@ func (h *Hub) parseIndex() error { } if err := json.Unmarshal(bidx, &h.items); err != nil { - return fmt.Errorf("failed to unmarshal index: %w", err) + return fmt.Errorf("failed to parse index: %w", err) } h.logger.Debugf("%d item types in hub index", len(ItemTypes)) diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 81d41d55971..7ed14086adf 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -210,7 +210,7 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { err = yaml.Unmarshal(itemContent, &itemName) if err != nil { - return nil, fmt.Errorf("failed to unmarshal %s: %w", path, err) + return nil, fmt.Errorf("failed to parse %s: %w", path, err) } if itemName.Name != "" { diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 76127065791..1b8122060d1 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -202,7 +202,7 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field hubstate", values[i]) } else if value != nil && len(*value) > 0 { if err := json.Unmarshal(*value, &m.Hubstate); err != nil { - return fmt.Errorf("unmarshal field hubstate: %w", err) + return fmt.Errorf("parsing field hubstate: %w", err) } } case machine.FieldDatasources: @@ -210,7 +210,7 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field datasources", values[i]) } else if value != nil && len(*value) > 0 { if err := json.Unmarshal(*value, &m.Datasources); err != nil { - return fmt.Errorf("unmarshal field datasources: %w", err) + return fmt.Errorf("parsing field datasources: %w", err) } } default: diff --git a/pkg/database/errors.go b/pkg/database/errors.go index 8e96f52d7ce..77f92707e51 100644 --- a/pkg/database/errors.go +++ b/pkg/database/errors.go @@ -13,8 +13,8 @@ var ( ItemNotFound = 
errors.New("object not found") ParseTimeFail = errors.New("unable to parse time") ParseDurationFail = errors.New("unable to parse duration") - MarshalFail = errors.New("unable to marshal") - UnmarshalFail = errors.New("unable to unmarshal") + MarshalFail = errors.New("unable to serialize") + UnmarshalFail = errors.New("unable to parse") BulkError = errors.New("unable to insert bulk") ParseType = errors.New("unable to parse type") InvalidIPOrRange = errors.New("invalid ip address / range") diff --git a/pkg/hubtest/coverage.go b/pkg/hubtest/coverage.go index 4156def06d7..e42c1e23455 100644 --- a/pkg/hubtest/coverage.go +++ b/pkg/hubtest/coverage.go @@ -57,7 +57,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { - return nil, fmt.Errorf("unmarshal: %v", err) + return nil, fmt.Errorf("parsing: %v", err) } for _, appsecRulesFile := range configFileData.AppsecRules { @@ -70,7 +70,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { err = yaml.Unmarshal(yamlFile, appsecRuleData) if err != nil { - return nil, fmt.Errorf("unmarshal: %v", err) + return nil, fmt.Errorf("parsing: %v", err) } appsecRuleName := appsecRuleData.Name diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 42792413b5d..bc9c8955d0d 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -111,7 +111,7 @@ func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { - return nil, fmt.Errorf("unmarshal: %w", err) + return nil, fmt.Errorf("parsing: %w", err) } parserAssertFilePath := filepath.Join(testPath, ParserAssertFileName) @@ -201,7 +201,7 @@ func (t *HubTestItem) InstallHub() error { b, err := yaml.Marshal(n) if err != nil { - return fmt.Errorf("unable to marshal overrides: %w", err) + return fmt.Errorf("unable to serialize overrides: %w", err) } tgtFilename := 
fmt.Sprintf("%s/parsers/s00-raw/00_overrides.yaml", t.RuntimePath) diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 989e03944c3..1da906cb555 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -136,7 +136,7 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er } if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil { - t.Fatalf("failed unmarshaling %s : %s", stagecfg, err) + t.Fatalf("failed to parse %s : %s", stagecfg, err) } files := []string{} @@ -201,7 +201,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res var ts time.Time if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != nil { - t.Fatalf("Failed to unmarshal time from input event : %s", err) + t.Fatalf("Failed to parse time from input event : %s", err) } if latest_ts.IsZero() { diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 1ae70fbfab3..1b62b29dc3c 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -493,7 +493,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac } if err := json.Unmarshal(body, &state); err != nil { - return fmt.Errorf("can't unmarshal state file %s: %w", file, err) + return fmt.Errorf("can't parse state file %s: %w", file, err) } for k, v := range state { diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 673b372d81e..053f9be05da 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -132,7 +132,7 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) }) bbuckets, err := json.MarshalIndent(serialized, "", " ") if err != nil { - return "", fmt.Errorf("failed to unmarshal buckets: %s", err) + return "", fmt.Errorf("failed to parse buckets: %s", err) } size, err := tmpFd.Write(bbuckets) if err != nil { @@ -203,7 +203,7 @@ func 
PourItemToBucket(bucket *Leaky, holder BucketFactory, buckets *Buckets, par var d time.Time err = d.UnmarshalText([]byte(parsed.MarshaledTime)) if err != nil { - holder.logger.Warningf("Failed unmarshaling event time (%s) : %v", parsed.MarshaledTime, err) + holder.logger.Warningf("Failed to parse event time (%s) : %v", parsed.MarshaledTime, err) } if d.After(lastTs.Add(bucket.Duration)) { bucket.logger.Tracef("bucket is expired (curr event: %s, bucket deadline: %s), kill", d, lastTs.Add(bucket.Duration)) diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index e67698e8473..39b0e6a0ec4 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -231,7 +231,7 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { raw, err := evt.Time.MarshalText() if err != nil { - log.Warningf("while marshaling time '%s' : %s", evt.Time.String(), err) + log.Warningf("while serializing time '%s' : %s", evt.Time.String(), err) } else { *ovflwEvent.Timestamp = string(raw) } @@ -286,12 +286,12 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { */ start_at, err := leaky.First_ts.MarshalText() if err != nil { - log.Warningf("failed to marshal start ts %s : %s", leaky.First_ts.String(), err) + log.Warningf("failed to serialize start ts %s : %s", leaky.First_ts.String(), err) } stop_at, err := leaky.Ovflw_ts.MarshalText() if err != nil { - log.Warningf("failed to marshal ovflw ts %s : %s", leaky.First_ts.String(), err) + log.Warningf("failed to serialize ovflw ts %s : %s", leaky.First_ts.String(), err) } capacity := int32(leaky.Capacity) diff --git a/pkg/leakybucket/timemachine.go b/pkg/leakybucket/timemachine.go index e72bb1a464c..34073d1cc5c 100644 --- a/pkg/leakybucket/timemachine.go +++ b/pkg/leakybucket/timemachine.go @@ -24,7 +24,7 @@ func TimeMachinePour(l *Leaky, msg types.Event) { err = d.UnmarshalText([]byte(msg.MarshaledTime)) if err != nil { - log.Warningf("Failed unmarshaling event time (%s) : %v", 
msg.MarshaledTime, err) + log.Warningf("Failed to parse event time (%s) : %v", msg.MarshaledTime, err) return } diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go index 7558f696dc7..d13e57856f9 100644 --- a/pkg/leakybucket/trigger.go +++ b/pkg/leakybucket/trigger.go @@ -23,7 +23,7 @@ func (t *Trigger) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *type err := d.UnmarshalText([]byte(msg.MarshaledTime)) if err != nil { - log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) + log.Warningf("Failed to parse event time (%s) : %v", msg.MarshaledTime, err) d = now } diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go index 748a466d7c3..40c8de39da5 100644 --- a/pkg/parser/enrich_date.go +++ b/pkg/parser/enrich_date.go @@ -18,7 +18,7 @@ func parseDateWithFormat(date, format string) (string, time.Time) { } retstr, err := t.MarshalText() if err != nil { - log.Warningf("Failed marshaling '%v'", t) + log.Warningf("Failed to serialize '%v'", t) return "", time.Time{} } return string(retstr), t @@ -98,7 +98,7 @@ func ParseDate(in string, p *types.Event, plog *log.Entry) (map[string]string, e now := time.Now().UTC() retstr, err := now.MarshalText() if err != nil { - plog.Warning("Failed marshaling current time") + plog.Warning("Failed to serialize current time") return ret, err } ret["MarshaledTime"] = string(retstr) diff --git a/pkg/parser/enrich_unmarshal.go b/pkg/parser/enrich_unmarshal.go index 7ff91b70aea..dbdd9d3f583 100644 --- a/pkg/parser/enrich_unmarshal.go +++ b/pkg/parser/enrich_unmarshal.go @@ -11,7 +11,7 @@ import ( func unmarshalJSON(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { err := json.Unmarshal([]byte(p.Line.Raw), &p.Unmarshaled) if err != nil { - plog.Errorf("could not unmarshal JSON: %s", err) + plog.Errorf("could not parse JSON: %s", err) return nil, err } plog.Tracef("unmarshaled JSON: %+v", p.Unmarshaled) diff --git a/pkg/parser/parsing_test.go 
b/pkg/parser/parsing_test.go index 0542c69c049..269d51a1ba2 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -132,7 +132,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } if err = yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { - return fmt.Errorf("failed unmarshaling %s: %w", parser_cfg_file, err) + return fmt.Errorf("failed to parse %s: %w", parser_cfg_file, err) } pnodes, err = LoadStages(parser_configs, pctx, ectx) diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 01368091a6b..073b221b10c 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -545,7 +545,7 @@ func Detect(detectReader io.Reader, opts DetectOptions) (Setup, error) { // } // err = yaml.Unmarshal(svc.AcquisYAML, svc.DataSource) // if err != nil { - // return Setup{}, fmt.Errorf("while unmarshaling datasource for service %s: %w", name, err) + // return Setup{}, fmt.Errorf("while parsing datasource for service %s: %w", name, err) // } // } diff --git a/pkg/setup/install.go b/pkg/setup/install.go index fc5bd380fd9..d63a1ee1775 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -40,7 +40,7 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { dec2.KnownFields(true) if err := dec2.Decode(&ret); err != nil { - return ret, fmt.Errorf("while unmarshaling setup file: %w", err) + return ret, fmt.Errorf("while parsing setup file: %w", err) } return ret, nil diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index 2106d3ab6b2..f832ac572d2 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -819,6 +819,6 @@ update-notifier-motd.timer enabled enabled setup: alsdk al; sdf EOT - assert_output "while unmarshaling setup file: yaml: line 2: could not find expected ':'" + assert_output "while parsing setup file: yaml: line 2: could not find expected ':'" assert_stderr --partial "invalid setup file" } From f97b9c84da473ac60249bd2d6d7a865a4bdf7310 Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 17 Sep 2024 15:19:30 +0200 Subject: [PATCH 295/581] CI: update golangci-lint to v1.61, yq to 4.44.3 (#3241) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- test/bats.mk | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index e70d6e352f1..a31e42cf702 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -57,6 +57,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.60 + version: v1.61 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index df5c0b4fb88..d4e3a3d843a 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -163,6 +163,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.60 + version: v1.61 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.golangci.yml b/.golangci.yml index 78b666d25b4..e90f3841ffd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -185,6 +185,7 @@ linters: # DEPRECATED by golangi-lint # - execinquery + - exportloopref # # Redundant @@ -217,7 +218,6 @@ linters: # - durationcheck # check for two durations multiplied together # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. 
- # - exportloopref # checks for pointers to enclosing loop variables # - ginkgolinter # enforces standards of using ginkgo and gomega # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. # - gochecknoinits # Checks that no init functions are present in Go code diff --git a/Dockerfile b/Dockerfile index 93ba1010fc2..450ea69017f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,7 +16,7 @@ RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold core cd re2-${RE2_VERSION} && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.43.1 + go install github.com/mikefarah/yq/v4@v4.44.3 COPY . . diff --git a/Dockerfile.debian b/Dockerfile.debian index dd55d2c3e48..8bf2698c786 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -21,7 +21,7 @@ RUN apt-get update && \ make && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.43.1 + go install github.com/mikefarah/yq/v4@v4.44.3 COPY . . 
diff --git a/test/bats.mk b/test/bats.mk index 631cc55579b..72ac8863f72 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -67,8 +67,8 @@ bats-check-requirements: ## Check dependencies for functional tests @$(TEST_DIR)/bin/check-requirements bats-update-tools: ## Install/update tools required for functional tests - # yq v4.43.1 - GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@c35ec752e38ea0c096d3c44e13cfc0797ac394d8 + # yq v4.44.3 + GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@bbdd97482f2d439126582a59689eb1c855944955 # cfssl v1.6.5 GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda From 5f22c78fcf9d3f70c89cde702974bd42236f55dd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 17 Sep 2024 16:10:56 +0200 Subject: [PATCH 296/581] enable linters: copyloopvar, intrange (#3184) * enable linters: copyloopvar, intrange * lint --- .golangci.yml | 13 +++---------- pkg/acquisition/acquisition.go | 4 ++-- pkg/acquisition/modules/docker/utils.go | 2 +- pkg/apiclient/decisions_service.go | 2 +- pkg/apiserver/apic_test.go | 5 +++-- pkg/csplugin/broker_win_test.go | 1 - pkg/csplugin/utils_windows.go | 2 +- pkg/csplugin/utils_windows_test.go | 1 - pkg/cwhub/sync.go | 2 +- pkg/leakybucket/manager_run.go | 2 +- pkg/setup/detect_test.go | 1 - 11 files changed, 13 insertions(+), 22 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e90f3841ffd..1787f0487a4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -197,12 +197,9 @@ linters: - funlen # revive - gocognit # revive - # - # Disabled until fixed for go 1.22 - # + # Disabled atm - - copyloopvar # copyloopvar is a linter detects places where loop variables are copied - - intrange # intrange is a linter to find places where for loops could make use of an integer range. 
+ - intrange # intrange is a linter to find places where for loops could make use of an integer range. # # Enabled @@ -212,6 +209,7 @@ linters: # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name # - bidichk # Checks for dangerous unicode character sequences # - bodyclose # checks whether HTTP response body is closed successfully + # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied # - decorder # check declaration order and count of types, constants, variables and functions # - depguard # Go linter that checks if package imports are in a list of acceptable packages # - dupword # checks for duplicate words in the source code @@ -490,11 +488,6 @@ issues: path: "cmd/crowdsec-cli/idgen/password.go" text: "deep-exit: .*" - - linters: - - revive - path: "cmd/crowdsec-cli/utils.go" - text: "deep-exit: .*" - - linters: - revive path: "pkg/leakybucket/overflows.go" diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index a737881dd4d..b2493bbb9b7 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -304,7 +304,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig func GetMetrics(sources []DataSource, aggregated bool) error { var metrics []prometheus.Collector - for i := range len(sources) { + for i := range sources { if aggregated { metrics = sources[i].GetMetrics() } else { @@ -378,7 +378,7 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb return nil } - for i := range len(sources) { + for i := range sources { subsrc := sources[i] // ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) diff --git a/pkg/acquisition/modules/docker/utils.go b/pkg/acquisition/modules/docker/utils.go index c724f581194..6a0d494097f 100644 --- a/pkg/acquisition/modules/docker/utils.go +++ b/pkg/acquisition/modules/docker/utils.go @@ -22,7 +22,7 @@ func 
parseKeyToMap(m map[string]interface{}, key string, value string) { return } - for i := range len(parts) { + for i := range parts { if parts[i] == "" { return } diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go index 388a870f999..98f26cad9ae 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -144,7 +144,7 @@ func (s *DecisionsService) FetchV3Decisions(ctx context.Context, url string) (*m partialDecisions := make([]*models.Decision, len(decisionsGroup.Decisions)) for idx, decision := range decisionsGroup.Decisions { - decision := decision // fix exportloopref linter message + decision := decision //nolint:copyloopvar // fix exportloopref linter message partialDecisions[idx] = &models.Decision{ Scenario: &scenarioDeleted, Scope: decisionsGroup.Scope, diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 51887006ad4..058e25079e0 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -1091,7 +1091,6 @@ func TestAPICPush(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { api := getAPIC(t) api.pushInterval = time.Millisecond @@ -1114,8 +1113,10 @@ func TestAPICPush(t *testing.T) { httpmock.RegisterResponder("POST", "http://api.crowdsec.net/api/signals", httpmock.NewBytesResponder(200, []byte{})) + // capture the alerts to avoid datarace + alerts := tc.alerts go func() { - api.AlertsAddChan <- tc.alerts + api.AlertsAddChan <- alerts time.Sleep(time.Second) api.Shutdown() diff --git a/pkg/csplugin/broker_win_test.go b/pkg/csplugin/broker_win_test.go index 97a3ad33deb..b7956bdcc0a 100644 --- a/pkg/csplugin/broker_win_test.go +++ b/pkg/csplugin/broker_win_test.go @@ -54,7 +54,6 @@ func (s *PluginSuite) TestBrokerInit() { } for _, tc := range tests { - tc := tc s.Run(tc.name, func() { t := s.T() if tc.action != nil { diff --git a/pkg/csplugin/utils_windows.go b/pkg/csplugin/utils_windows.go index 
8d4956ceeeb..91002079398 100644 --- a/pkg/csplugin/utils_windows.go +++ b/pkg/csplugin/utils_windows.go @@ -116,7 +116,7 @@ func CheckPerms(path string) error { */ aceCount := rs.Field(3).Uint() - for i := uint64(0); i < aceCount; i++ { + for i := range aceCount { ace := &AccessAllowedAce{} ret, _, _ := procGetAce.Call(uintptr(unsafe.Pointer(dacl)), uintptr(i), uintptr(unsafe.Pointer(&ace))) if ret == 0 { diff --git a/pkg/csplugin/utils_windows_test.go b/pkg/csplugin/utils_windows_test.go index 6a76e1215e5..1eb4dfb9033 100644 --- a/pkg/csplugin/utils_windows_test.go +++ b/pkg/csplugin/utils_windows_test.go @@ -37,7 +37,6 @@ func TestGetPluginNameAndTypeFromPath(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { got, got1, err := getPluginTypeAndSubtypeFromPath(tc.path) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 7ed14086adf..c82822e64ef 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -24,7 +24,7 @@ func isYAMLFileName(path string) bool { // returns error if the symlink is dangling or too many symlinks are followed func resolveSymlink(path string) (string, error) { const maxSymlinks = 10 // Prevent infinite loops - for i := 0; i < maxSymlinks; i++ { + for range maxSymlinks { fi, err := os.Lstat(path) if err != nil { return "", err // dangling link diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 053f9be05da..2858d8b5635 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -298,7 +298,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event)) } //find the relevant holders (scenarios) - for idx := range len(holders) { + for idx := range holders { //for idx, holder := range holders { //evaluate bucket's condition diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 
6f61b5dac78..588e74dab54 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -184,7 +184,6 @@ func TestNormalizeVersion(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.version, func(t *testing.T) { t.Parallel() actual := setup.NormalizeVersion(tc.version) From b93b240bd4a7f094f1143db19553dc442028cb79 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 17 Sep 2024 17:07:30 +0200 Subject: [PATCH 297/581] lint: enable (some) gocritic checks (#3238) * lint: gocritic configuration * lint: octal literals * lint: gocritic (filepath.Join) * ling: gocritic (badRegexp, regexpSimplify) * lint: gocritic (preferStringWriter) * lint: gocritic (emtpyStringTest) * filepath.Clean(a+b) -> filepath.Join(a, b) * gofumpt --- .golangci.yml | 47 ++++++++++++------- cmd/crowdsec-cli/clialert/alerts.go | 2 +- cmd/crowdsec-cli/clibouncer/bouncers.go | 4 +- cmd/crowdsec-cli/clihub/utils_table.go | 8 ++-- cmd/crowdsec-cli/climachine/machines.go | 6 +-- cmd/crowdsec-cli/climetrics/list.go | 2 +- cmd/crowdsec-cli/climetrics/statacquis.go | 4 +- cmd/crowdsec-cli/climetrics/statalert.go | 4 +- .../climetrics/statappsecengine.go | 4 +- cmd/crowdsec-cli/climetrics/statappsecrule.go | 2 +- cmd/crowdsec-cli/climetrics/statbucket.go | 4 +- cmd/crowdsec-cli/climetrics/statdecision.go | 4 +- cmd/crowdsec-cli/climetrics/statlapi.go | 4 +- .../climetrics/statlapibouncer.go | 4 +- .../climetrics/statlapidecision.go | 4 +- .../climetrics/statlapimachine.go | 4 +- cmd/crowdsec-cli/climetrics/statparser.go | 4 +- cmd/crowdsec-cli/climetrics/statstash.go | 4 +- cmd/crowdsec-cli/climetrics/statwhitelist.go | 4 +- cmd/crowdsec-cli/idgen/password.go | 2 +- cmd/crowdsec/pour.go | 2 +- .../modules/cloudwatch/cloudwatch.go | 21 ++++----- pkg/acquisition/modules/docker/docker.go | 4 +- pkg/acquisition/modules/file/file.go | 6 +-- .../modules/journalctl/journalctl.go | 7 +-- pkg/acquisition/modules/s3/s3.go | 15 +++--- pkg/apiclient/resperr.go 
| 4 +- pkg/apiserver/controllers/v1/decisions.go | 23 +++++---- pkg/csconfig/config_paths.go | 10 ++-- pkg/csconfig/crowdsec_service.go | 4 +- pkg/csconfig/simulation.go | 2 +- pkg/csplugin/listfiles_test.go | 2 +- pkg/csplugin/utils.go | 4 +- pkg/cwhub/cwhub_test.go | 2 +- pkg/database/utils.go | 5 +- pkg/dumps/parser_dump.go | 6 +-- pkg/exprhelpers/debugger.go | 5 +- pkg/exprhelpers/helpers.go | 5 +- pkg/fflag/features.go | 2 +- pkg/hubtest/hubtest.go | 4 +- pkg/hubtest/nucleirunner.go | 6 +-- pkg/hubtest/regexp.go | 4 +- pkg/hubtest/utils.go | 2 +- pkg/longpollclient/client.go | 6 +-- pkg/setup/units.go | 2 +- 45 files changed, 143 insertions(+), 131 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 1787f0487a4..4918fb99fe3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -178,6 +178,37 @@ linters-settings: # Allow blocks to end with comments allow-trailing-comment: true + gocritic: + enable-all: true + disabled-checks: + - typeDefFirst + - paramTypeCombine + - httpNoBody + - ifElseChain + - importShadow + - hugeParam + - rangeValCopy + - commentedOutCode + - commentedOutImport + - unnamedResult + - sloppyReassign + - appendCombine + - captLocal + - typeUnparen + - commentFormatting + - deferInLoop # + - sprintfQuotedString # + - whyNoLint + - equalFold # + - unnecessaryBlock # + - ptrToRefParam # + - stringXbytes # + - appendAssign # + - tooManyResultsChecker + - unnecessaryDefer + - docStub + - preferFprint + linters: enable-all: true disable: @@ -363,22 +394,6 @@ issues: - errcheck text: "Error return value of `.*` is not checked" - - linters: - - gocritic - text: "ifElseChain: rewrite if-else to switch statement" - - - linters: - - gocritic - text: "captLocal: `.*' should not be capitalized" - - - linters: - - gocritic - text: "appendAssign: append result not assigned to the same slice" - - - linters: - - gocritic - text: "commentFormatting: put a space between `//` and comment text" - # Will fix, trivial - just beware of merge conflicts - 
linters: diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 006d7ac7d8c..0965e1e13d0 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -44,7 +44,7 @@ func decisionsFromAlert(alert *models.Alert) string { } for _, key := range maptools.SortedKeys(decMap) { - if len(ret) > 0 { + if ret != "" { ret += " " } diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 25c80d16404..0c0fc8851c9 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -97,7 +97,7 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } // bouncerInfo contains only the data we want for inspect/list @@ -443,7 +443,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { diff --git a/cmd/crowdsec-cli/clihub/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go index 018071d91bb..98f14341b10 100644 --- a/cmd/crowdsec-cli/clihub/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -22,7 +22,7 @@ func listHubItemTable(out io.Writer, wantColor string, title string, items []*cw } io.WriteString(out, title+"\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -35,7 +35,7 @@ func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metric }) io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + 
io.WriteString(out, t.Render()+"\n") } func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -55,7 +55,7 @@ func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metr }) io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { @@ -80,6 +80,6 @@ func parserMetricsTable(out io.Writer, wantColor string, itemName string, metric if showTable { io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } } diff --git a/cmd/crowdsec-cli/climachine/machines.go b/cmd/crowdsec-cli/climachine/machines.go index 3df176d786d..30948f43056 100644 --- a/cmd/crowdsec-cli/climachine/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -123,7 +123,7 @@ func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } } @@ -145,7 +145,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } // machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. 
@@ -611,7 +611,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Collections", coll.Name}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func (cli *cliMachines) inspect(machine *ent.Machine) error { diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index ba827634052..ddb2baac14d 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -64,7 +64,7 @@ func (cli *cliMetrics) list() error { t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/climetrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go index 827dcf036c3..0af2e796f40 100644 --- a/cmd/crowdsec-cli/climetrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -37,8 +37,8 @@ func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go index e48dd6c924f..942eceaa75c 100644 --- a/cmd/crowdsec-cli/climetrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -38,8 +38,8 @@ func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git 
a/cmd/crowdsec-cli/climetrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go index 4a249e11687..d924375247f 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -34,8 +34,8 @@ func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, sh log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go index 2f859d70cfb..e06a7c2e2b3 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -41,7 +41,7 @@ func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, show log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { io.WriteString(out, fmt.Sprintf("Appsec '%s' Rules Metrics:\n", appsecEngine)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statbucket.go b/cmd/crowdsec-cli/climetrics/statbucket.go index 507d9f3a476..1882fe21df1 100644 --- a/cmd/crowdsec-cli/climetrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -35,8 +35,8 @@ func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statdecision.go 
b/cmd/crowdsec-cli/climetrics/statdecision.go index 145665cfba2..b862f49ff12 100644 --- a/cmd/crowdsec-cli/climetrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -53,8 +53,8 @@ func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go index 45b384708bf..9559eacf0f4 100644 --- a/cmd/crowdsec-cli/climetrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -49,8 +49,8 @@ func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go index 828ccb33413..5e5f63a79d3 100644 --- a/cmd/crowdsec-cli/climetrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -35,8 +35,8 @@ func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go index ffc999555c1..44f0e8f4b87 100644 --- a/cmd/crowdsec-cli/climetrics/statlapidecision.go +++ b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -57,8 +57,8 @@ func (s statLapiDecision) Table(out io.Writer, 
wantColor string, noUnit bool, sh if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go index 09abe2dd44b..0e6693bea82 100644 --- a/cmd/crowdsec-cli/climetrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -35,8 +35,8 @@ func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go index 0b3512052b9..520e68f9adf 100644 --- a/cmd/crowdsec-cli/climetrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -36,8 +36,8 @@ func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go index 5938ac05fc8..2729de931a1 100644 --- a/cmd/crowdsec-cli/climetrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -52,8 +52,8 @@ func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") 
+ io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go index ccb7e52153b..7f533b45b4b 100644 --- a/cmd/crowdsec-cli/climetrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -36,8 +36,8 @@ func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showE log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/idgen/password.go b/cmd/crowdsec-cli/idgen/password.go index cd798fdcc48..e0faa4daacc 100644 --- a/cmd/crowdsec-cli/idgen/password.go +++ b/cmd/crowdsec-cli/idgen/password.go @@ -1,8 +1,8 @@ package idgen import ( - "math/big" saferand "crypto/rand" + "math/big" log "github.com/sirupsen/logrus" ) diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 1382a909ab3..2fc7d7e42c9 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -59,7 +59,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea globalBucketPourKo.Inc() } - if len(parsed.MarshaledTime) != 0 { + if parsed.MarshaledTime != "" { if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { log.Warningf("failed to parse time from event : %s", err) } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 1a78ae6fa7a..d6f33b68050 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -57,16 +57,16 @@ type CloudwatchSource struct { // CloudwatchSourceConfiguration allows user to define one or more streams to monitor within a cloudwatch log group type CloudwatchSourceConfiguration 
struct { configuration.DataSourceCommonCfg `yaml:",inline"` - GroupName string `yaml:"group_name"` //the group name to be monitored - StreamRegexp *string `yaml:"stream_regexp,omitempty"` //allow to filter specific streams + GroupName string `yaml:"group_name"` // the group name to be monitored + StreamRegexp *string `yaml:"stream_regexp,omitempty"` // allow to filter specific streams StreamName *string `yaml:"stream_name,omitempty"` StartTime, EndTime *time.Time `yaml:"-"` - DescribeLogStreamsLimit *int64 `yaml:"describelogstreams_limit,omitempty"` //batch size for DescribeLogStreamsPagesWithContext + DescribeLogStreamsLimit *int64 `yaml:"describelogstreams_limit,omitempty"` // batch size for DescribeLogStreamsPagesWithContext GetLogEventsPagesLimit *int64 `yaml:"getlogeventspages_limit,omitempty"` - PollNewStreamInterval *time.Duration `yaml:"poll_new_stream_interval,omitempty"` //frequency at which we poll for new streams within the log group - MaxStreamAge *time.Duration `yaml:"max_stream_age,omitempty"` //monitor only streams that have been updated within $duration - PollStreamInterval *time.Duration `yaml:"poll_stream_interval,omitempty"` //frequency at which we poll each stream - StreamReadTimeout *time.Duration `yaml:"stream_read_timeout,omitempty"` //stop monitoring streams that haven't been updated within $duration, might be reopened later tho + PollNewStreamInterval *time.Duration `yaml:"poll_new_stream_interval,omitempty"` // frequency at which we poll for new streams within the log group + MaxStreamAge *time.Duration `yaml:"max_stream_age,omitempty"` // monitor only streams that have been updated within $duration + PollStreamInterval *time.Duration `yaml:"poll_stream_interval,omitempty"` // frequency at which we poll each stream + StreamReadTimeout *time.Duration `yaml:"stream_read_timeout,omitempty"` // stop monitoring streams that haven't been updated within $duration, might be reopened later tho AwsApiCallTimeout *time.Duration 
`yaml:"aws_api_timeout,omitempty"` AwsProfile *string `yaml:"aws_profile,omitempty"` PrependCloudwatchTimestamp *bool `yaml:"prepend_cloudwatch_timestamp,omitempty"` @@ -86,7 +86,7 @@ type LogStreamTailConfig struct { logger *log.Entry ExpectMode int t tomb.Tomb - StartTime, EndTime time.Time //only used for CatMode + StartTime, EndTime time.Time // only used for CatMode } var ( @@ -111,7 +111,7 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { return fmt.Errorf("cannot parse CloudwatchSource configuration: %w", err) } - if len(cw.Config.GroupName) == 0 { + if cw.Config.GroupName == "" { return errors.New("group_name is mandatory for CloudwatchSource") } @@ -357,7 +357,6 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig // LogStreamManager receives the potential streams to monitor, and starts a go routine when needed func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outChan chan types.Event) error { - cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName) pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval) @@ -638,7 +637,7 @@ func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tom func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string - var head = true + head := true /*convert the times*/ startTime := cfg.StartTime.UTC().Unix() * 1000 endTime := cfg.EndTime.UTC().Unix() * 1000 diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 9a6e13feee4..44fee0a99a2 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -397,14 +397,13 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) *Container for _, containerName := range d.Config.ContainerName { for _, name := range container.Names { - if strings.HasPrefix(name, "/") && len(name) > 0 { + if 
strings.HasPrefix(name, "/") && name != "" { name = name[1:] } if name == containerName { return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } - } for _, cont := range d.compiledContainerID { @@ -419,7 +418,6 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) *Container return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} } } - } if d.Config.UseContainerLabels { diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 4f7880baa89..85b4c1b5b32 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -73,7 +73,7 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { f.logger.Tracef("FileAcquisition configuration: %+v", f.config) } - if len(f.config.Filename) != 0 { + if f.config.Filename != "" { f.config.Filenames = append(f.config.Filenames, f.config.Filename) } @@ -202,11 +202,11 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger args := strings.Split(dsn, "?") - if len(args[0]) == 0 { + if args[0] == "" { return errors.New("empty file:// DSN") } - if len(args) == 2 && len(args[1]) != 0 { + if len(args) == 2 && args[1] != "" { params, err := url.ParseQuery(args[1]) if err != nil { return fmt.Errorf("could not parse file args: %w", err) diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 762dfe9ba12..1336fac4578 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -223,7 +223,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, } qs := strings.TrimPrefix(dsn, "journalctl://") - if len(qs) == 0 { + if qs == "" { return errors.New("empty journalctl:// DSN") } @@ -267,7 +267,6 @@ func (j *JournalCtlSource) OneShotAcquisition(out chan 
types.Event, t *tomb.Tomb err := j.runJournalCtl(out, t) j.logger.Debug("Oneshot journalctl acquisition is done") return err - } func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { @@ -277,11 +276,13 @@ func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.To }) return nil } + func (j *JournalCtlSource) CanRun() error { - //TODO: add a more precise check on version or something ? + // TODO: add a more precise check on version or something ? _, err := exec.LookPath(journalctlCmd) return err } + func (j *JournalCtlSource) Dump() interface{} { return j } diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 9ef4d2ba757..a9835ab4974 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -93,10 +93,12 @@ type S3Event struct { } `json:"detail"` } -const PollMethodList = "list" -const PollMethodSQS = "sqs" -const SQSFormatEventBridge = "eventbridge" -const SQSFormatS3Notification = "s3notification" +const ( + PollMethodList = "list" + PollMethodSQS = "sqs" + SQSFormatEventBridge = "eventbridge" + SQSFormatS3Notification = "s3notification" +) var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -467,6 +469,7 @@ func (s *S3Source) GetUuid() string { func (s *S3Source) GetMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, objectsRead, sqsMessagesReceived} } + func (s *S3Source) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, objectsRead, sqsMessagesReceived} } @@ -567,11 +570,11 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger * }) dsn = strings.TrimPrefix(dsn, "s3://") args := strings.Split(dsn, "?") - if len(args[0]) == 0 { + if args[0] == "" { return errors.New("empty s3:// DSN") } - if len(args) == 2 && len(args[1]) != 0 { + if len(args) == 2 && args[1] != "" { params, err := url.ParseQuery(args[1]) if err != nil { return 
fmt.Errorf("could not parse s3 args: %w", err) diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index 00689147332..1b0786f9882 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -19,7 +19,7 @@ func (e *ErrorResponse) Error() string { message := ptr.OrEmpty(e.Message) errors := "" - if len(e.Errors) > 0 { + if e.Errors != "" { errors = fmt.Sprintf(" (%s)", e.Errors) } @@ -51,7 +51,7 @@ func CheckResponse(r *http.Response) error { // try to unmarshal and if there are no 'message' or 'errors' fields, display the body as is, // the API is following a different convention err := json.Unmarshal(data, ret) - if err != nil || (ret.Message == nil && len(ret.Errors) == 0) { + if err != nil || (ret.Message == nil && ret.Errors == "") { ret.Message = ptr.Of(fmt.Sprintf("http code %d, response: %s", r.StatusCode, string(data))) return ret } diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 3d8e0232224..54e9b0290cc 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -160,7 +160,7 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun if needComma { //respBuffer.Write([]byte(",")) - gctx.Writer.Write([]byte(",")) + gctx.Writer.WriteString(",") } else { needComma = true } @@ -212,7 +212,7 @@ func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPul if needComma { //respBuffer.Write([]byte(",")) - gctx.Writer.Write([]byte(",")) + gctx.Writer.WriteString(",") } else { needComma = true } @@ -244,7 +244,7 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B gctx.Writer.Header().Set("Content-Type", "application/json") gctx.Writer.Header().Set("Transfer-Encoding", "chunked") gctx.Writer.WriteHeader(http.StatusOK) - gctx.Writer.Write([]byte(`{"new": [`)) //No need to check for errors, the doc says it always returns nil + gctx.Writer.WriteString(`{"new": [`) //No 
need to check for errors, the doc says it always returns nil // if the blocker just started, return all decisions if val, ok := gctx.Request.URL.Query()["startup"]; ok && val[0] == "true" { @@ -252,48 +252,47 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B err := writeStartupDecisions(gctx, filters, c.DBClient.QueryAllDecisionsWithFilters) if err != nil { log.Errorf("failed sending new decisions for startup: %v", err) - gctx.Writer.Write([]byte(`], "deleted": []}`)) + gctx.Writer.WriteString(`], "deleted": []}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`], "deleted": [`)) + gctx.Writer.WriteString(`], "deleted": [`) //Expired decisions err = writeStartupDecisions(gctx, filters, c.DBClient.QueryExpiredDecisionsWithFilters) if err != nil { log.Errorf("failed sending expired decisions for startup: %v", err) - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString(`]}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString(`]}`) gctx.Writer.Flush() } else { err = writeDeltaDecisions(gctx, filters, bouncerInfo.LastPull, c.DBClient.QueryNewDecisionsSinceWithFilters) if err != nil { log.Errorf("failed sending new decisions for delta: %v", err) - gctx.Writer.Write([]byte(`], "deleted": []}`)) + gctx.Writer.WriteString(`], "deleted": []}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`], "deleted": [`)) + gctx.Writer.WriteString(`], "deleted": [`) err = writeDeltaDecisions(gctx, filters, bouncerInfo.LastPull, c.DBClient.QueryExpiredDecisionsSinceWithFilters) - if err != nil { log.Errorf("failed sending expired decisions for delta: %v", err) - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString("]}") gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString("]}") gctx.Writer.Flush() } diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go index 7675b90d7dd..a8d39a664f3 100644 --- 
a/pkg/csconfig/config_paths.go +++ b/pkg/csconfig/config_paths.go @@ -10,7 +10,7 @@ type ConfigurationPaths struct { ConfigDir string `yaml:"config_dir"` DataDir string `yaml:"data_dir,omitempty"` SimulationFilePath string `yaml:"simulation_path,omitempty"` - HubIndexFile string `yaml:"index_path,omitempty"` //path of the .index.json + HubIndexFile string `yaml:"index_path,omitempty"` // path of the .index.json HubDir string `yaml:"hub_dir,omitempty"` PluginDir string `yaml:"plugin_dir,omitempty"` NotificationDir string `yaml:"notification_dir,omitempty"` @@ -28,18 +28,18 @@ func (c *Config) loadConfigurationPaths() error { } if c.ConfigPaths.HubDir == "" { - c.ConfigPaths.HubDir = filepath.Clean(c.ConfigPaths.ConfigDir + "/hub") + c.ConfigPaths.HubDir = filepath.Join(c.ConfigPaths.ConfigDir, "hub") } if c.ConfigPaths.HubIndexFile == "" { - c.ConfigPaths.HubIndexFile = filepath.Clean(c.ConfigPaths.HubDir + "/.index.json") + c.ConfigPaths.HubIndexFile = filepath.Join(c.ConfigPaths.HubDir, ".index.json") } if c.ConfigPaths.PatternDir == "" { - c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns/") + c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns") } - var configPathsCleanup = []*string{ + configPathsCleanup := []*string{ &c.ConfigPaths.HubDir, &c.ConfigPaths.HubIndexFile, &c.ConfigPaths.ConfigDir, diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 7a611a856ee..cf796805dee 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -146,11 +146,11 @@ func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { return fmt.Errorf("while serializing ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) } - if err = os.MkdirAll(filepath.Dir(c.ConsoleContextPath), 0700); err != nil { + if err = os.MkdirAll(filepath.Dir(c.ConsoleContextPath), 0o700); err != nil { return fmt.Errorf("while creating directories for %s: %w", c.ConsoleContextPath, err) } - if 
err := os.WriteFile(c.ConsoleContextPath, out, 0600); err != nil { + if err := os.WriteFile(c.ConsoleContextPath, out, 0o600); err != nil { return fmt.Errorf("while dumping console config to %s: %w", c.ConsoleContextPath, err) } diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index afc4ea4f044..c9041df464a 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -37,7 +37,7 @@ func (c *Config) LoadSimulation() error { simCfg := SimulationConfig{} if c.ConfigPaths.SimulationFilePath == "" { - c.ConfigPaths.SimulationFilePath = filepath.Clean(c.ConfigPaths.ConfigDir + "/simulation.yaml") + c.ConfigPaths.SimulationFilePath = filepath.Join(c.ConfigPaths.ConfigDir, "simulation.yaml") } patcher := yamlpatch.NewPatcher(c.ConfigPaths.SimulationFilePath, ".local") diff --git a/pkg/csplugin/listfiles_test.go b/pkg/csplugin/listfiles_test.go index a4188804149..c476d7a4e4a 100644 --- a/pkg/csplugin/listfiles_test.go +++ b/pkg/csplugin/listfiles_test.go @@ -21,7 +21,7 @@ func TestListFilesAtPath(t *testing.T) { require.NoError(t, err) _, err = os.Create(filepath.Join(dir, "slack")) require.NoError(t, err) - err = os.Mkdir(filepath.Join(dir, "somedir"), 0755) + err = os.Mkdir(filepath.Join(dir, "somedir"), 0o755) require.NoError(t, err) _, err = os.Create(filepath.Join(dir, "somedir", "inner")) require.NoError(t, err) diff --git a/pkg/csplugin/utils.go b/pkg/csplugin/utils.go index 2e7f0c80528..571d78add56 100644 --- a/pkg/csplugin/utils.go +++ b/pkg/csplugin/utils.go @@ -123,10 +123,10 @@ func pluginIsValid(path string) error { mode := details.Mode() perm := uint32(mode) - if (perm & 00002) != 0 { + if (perm & 0o0002) != 0 { return fmt.Errorf("plugin at %s is world writable, world writable plugins are invalid", path) } - if (perm & 00020) != 0 { + if (perm & 0o0020) != 0 { return fmt.Errorf("plugin at %s is group writable, group writable plugins are invalid", path) } if (mode & os.ModeSetgid) != 0 { diff --git a/pkg/cwhub/cwhub_test.go 
b/pkg/cwhub/cwhub_test.go index a4641483622..17e7a0dc723 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -146,7 +146,7 @@ func setResponseByPath() { "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_parser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_subparser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), "/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml": fileToStringX("./testdata/collection_v1.yaml"), - "/crowdsecurity/master/.index.json": fileToStringX("./testdata/index1.json"), + "/crowdsecurity/master/.index.json": fileToStringX("./testdata/index1.json"), "/crowdsecurity/master/scenarios/crowdsecurity/foobar_scenario.yaml": `filter: true name: crowdsecurity/foobar_scenario`, "/crowdsecurity/master/scenarios/crowdsecurity/barfoo_scenario.yaml": `filter: true diff --git a/pkg/database/utils.go b/pkg/database/utils.go index f1c06565635..8148df56f24 100644 --- a/pkg/database/utils.go +++ b/pkg/database/utils.go @@ -42,7 +42,8 @@ func LastAddress(n *net.IPNet) net.IP { ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], - ip[15] | ^n.Mask[15]} + ip[15] | ^n.Mask[15], + } } return net.IPv4( @@ -74,7 +75,7 @@ func ParseDuration(d string) (time.Duration, error) { if strings.HasSuffix(d, "d") { days := strings.Split(d, "d")[0] - if len(days) == 0 { + if days == "" { return 0, fmt.Errorf("'%s' can't be parsed as duration", d) } diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index d43f3cdc1b9..bc8f78dc203 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -259,7 +259,7 @@ func (t *tree) displayResults(opts DumpOpts) { } if updated > 0 { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } @@ -267,7 +267,7 @@ func (t *tree) displayResults(opts 
DumpOpts) { } if deleted > 0 { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } @@ -275,7 +275,7 @@ func (t *tree) displayResults(opts DumpOpts) { } if whitelisted { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 711aa491078..2e47af6d1de 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -53,9 +53,8 @@ type OpOutput struct { } func (o *OpOutput) String() string { - ret := fmt.Sprintf("%*c", o.CodeDepth, ' ') - if len(o.Code) != 0 { + if o.Code != "" { ret += fmt.Sprintf("[%s]", o.Code) } ret += " " @@ -70,7 +69,7 @@ func (o *OpOutput) String() string { indent = 0 } ret = fmt.Sprintf("%*cBLOCK_END [%s]", indent, ' ', o.Code) - if len(o.StrConditionResult) > 0 { + if o.StrConditionResult != "" { ret += fmt.Sprintf(" -> %s", o.StrConditionResult) } return ret diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 17ce468f623..2ca7d0be79a 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -213,7 +213,7 @@ func FileInit(fileFolder string, filename string, fileType string) error { if strings.HasPrefix(scanner.Text(), "#") { // allow comments continue } - if len(scanner.Text()) == 0 { //skip empty lines + if scanner.Text() == "" { //skip empty lines continue } @@ -254,7 +254,6 @@ func Distinct(params ...any) (any, error) { } } return ret, nil - } func FlattenDistinct(params ...any) (any, error) { @@ -280,6 +279,7 @@ func flatten(args []interface{}, v reflect.Value) []interface{} { return args } + func existsInFileMaps(filename string, ftype string) (bool, error) { ok := false var err error @@ -765,7 +765,6 @@ func B64Decode(params ...any) (any, error) { } func ParseKV(params ...any) (any, error) { - blob := params[0].(string) target := params[1].(map[string]interface{}) prefix := params[2].(string) diff --git a/pkg/fflag/features.go b/pkg/fflag/features.go index 3a106984a66..c8a3d7755ea 
100644 --- a/pkg/fflag/features.go +++ b/pkg/fflag/features.go @@ -97,7 +97,7 @@ type FeatureRegister struct { features map[string]*Feature } -var featureNameRexp = regexp.MustCompile(`^[a-z0-9_\.]+$`) +var featureNameRexp = regexp.MustCompile(`^[a-z0-9_.]+$`) func validateFeatureName(featureName string) error { if featureName == "" { diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index a4ca275c310..93f5abaa879 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -83,7 +83,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT } if isAppsecTest { - HubTestPath := filepath.Join(hubPath, "./.appsec-tests/") + HubTestPath := filepath.Join(hubPath, ".appsec-tests") hubIndexFile := filepath.Join(hubPath, ".index.json") local := &csconfig.LocalHubCfg{ @@ -119,7 +119,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT }, nil } - HubTestPath := filepath.Join(hubPath, "./.tests/") + HubTestPath := filepath.Join(hubPath, ".tests") hubIndexFile := filepath.Join(hubPath, ".index.json") diff --git a/pkg/hubtest/nucleirunner.go b/pkg/hubtest/nucleirunner.go index 0bf2013dd8d..32c81eb64d8 100644 --- a/pkg/hubtest/nucleirunner.go +++ b/pkg/hubtest/nucleirunner.go @@ -42,11 +42,11 @@ func (nc *NucleiConfig) RunNucleiTemplate(testName string, templatePath string, err := cmd.Run() - if err := os.WriteFile(outputPrefix+"_stdout.txt", out.Bytes(), 0644); err != nil { + if err := os.WriteFile(outputPrefix+"_stdout.txt", out.Bytes(), 0o644); err != nil { log.Warningf("Error writing stdout: %s", err) } - if err := os.WriteFile(outputPrefix+"_stderr.txt", outErr.Bytes(), 0644); err != nil { + if err := os.WriteFile(outputPrefix+"_stderr.txt", outErr.Bytes(), 0o644); err != nil { log.Warningf("Error writing stderr: %s", err) } @@ -56,7 +56,7 @@ func (nc *NucleiConfig) RunNucleiTemplate(testName string, templatePath string, log.Warningf("Stderr saved to %s", outputPrefix+"_stderr.txt") 
log.Warningf("Nuclei generated output saved to %s", outputPrefix+".json") return err - } else if len(out.String()) == 0 { + } else if out.String() == "" { log.Warningf("Stdout saved to %s", outputPrefix+"_stdout.txt") log.Warningf("Stderr saved to %s", outputPrefix+"_stderr.txt") log.Warningf("Nuclei generated output saved to %s", outputPrefix+".json") diff --git a/pkg/hubtest/regexp.go b/pkg/hubtest/regexp.go index f9165eae3d1..8b2fcc928dd 100644 --- a/pkg/hubtest/regexp.go +++ b/pkg/hubtest/regexp.go @@ -5,7 +5,7 @@ import ( ) var ( - variableRE = regexp.MustCompile(`(?P[^ =]+) == .*`) - parserResultRE = regexp.MustCompile(`^results\["[^"]+"\]\["(?P[^"]+)"\]\[[0-9]+\]\.Evt\..*`) + variableRE = regexp.MustCompile(`(?P[^ =]+) == .*`) + parserResultRE = regexp.MustCompile(`^results\["[^"]+"\]\["(?P[^"]+)"\]\[[0-9]+\]\.Evt\..*`) scenarioResultRE = regexp.MustCompile(`^results\[[0-9]+\].Overflow.Alert.GetScenario\(\) == "(?P[^"]+)"`) ) diff --git a/pkg/hubtest/utils.go b/pkg/hubtest/utils.go index a7373fcc0bf..b42a73461f3 100644 --- a/pkg/hubtest/utils.go +++ b/pkg/hubtest/utils.go @@ -91,7 +91,7 @@ func CopyDir(src string, dest string) error { return errors.New("Source " + file.Name() + " is not a directory!") } - err = os.MkdirAll(dest, 0755) + err = os.MkdirAll(dest, 0o755) if err != nil { return err } diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 0603b7a5e80..5a7af0bfa63 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -74,11 +74,9 @@ func (c *LongPollClient) doQuery() (*http.Response, error) { } func (c *LongPollClient) poll() error { - logger := c.logger.WithField("method", "poll") resp, err := c.doQuery() - if err != nil { return err } @@ -122,7 +120,7 @@ func (c *LongPollClient) poll() error { logger.Tracef("got response: %+v", pollResp) - if len(pollResp.ErrorMessage) > 0 { + if pollResp.ErrorMessage != "" { if pollResp.ErrorMessage == timeoutMessage { logger.Debugf("got timeout message") return 
nil @@ -209,7 +207,7 @@ func (c *LongPollClient) PullOnce(since time.Time) ([]Event, error) { c.logger.Tracef("got response: %+v", pollResp) - if len(pollResp.ErrorMessage) > 0 { + if pollResp.ErrorMessage != "" { if pollResp.ErrorMessage == timeoutMessage { c.logger.Debugf("got timeout message") break diff --git a/pkg/setup/units.go b/pkg/setup/units.go index ab1eec6f33e..861513d3f1d 100644 --- a/pkg/setup/units.go +++ b/pkg/setup/units.go @@ -35,7 +35,7 @@ func systemdUnitList() ([]string, error) { for scanner.Scan() { line := scanner.Text() - if len(line) == 0 { + if line == "" { break // the rest of the output is footer } From 519693270eba7515b0201749d8670503de527d02 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 18 Sep 2024 15:00:19 +0200 Subject: [PATCH 298/581] refact: alerts query (#3216) * refact alerts: log messages * refact: AlertPredicatesFromFilter --- .golangci.yml | 2 +- pkg/database/alerts.go | 312 ++++++++++++++++++++++------------------- 2 files changed, 170 insertions(+), 144 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 4918fb99fe3..34837437355 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -103,7 +103,7 @@ linters-settings: disabled: true - name: cyclomatic # lower this after refactoring - arguments: [41] + arguments: [39] - name: defer disabled: true - name: empty-block diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 3e3e480c7d6..3dfb0dc8197 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -456,14 +456,14 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err) + c.Log.Errorf("creating alert: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err) startAtTime = time.Now().UTC() } stopAtTime, err := 
time.Parse(time.RFC3339, *alertItem.StopAt) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse stopAtTime '%s', defaulting to now: %s", *alertItem.StopAt, err) + c.Log.Errorf("creating alert: Failed to parse stopAtTime '%s', defaulting to now: %s", *alertItem.StopAt, err) stopAtTime = time.Now().UTC() } @@ -483,7 +483,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ for i, eventItem := range alertItem.Events { ts, err := time.Parse(time.RFC3339, *eventItem.Timestamp) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) + c.Log.Errorf("creating alert: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) ts = time.Now().UTC() } @@ -694,7 +694,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str return nil, fmt.Errorf("machine '%s': %w", machineID, err) } - c.Log.Debugf("CreateAlertBulk: Machine Id %s doesn't exist", machineID) + c.Log.Debugf("creating alert: machine %s doesn't exist", machineID) owner = nil } @@ -724,6 +724,160 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str return alertIDs, nil } +func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { + /* the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok && v[0] == "false" { + *predicates = append(*predicates, alert.SimulatedEQ(false)) + } +} + +func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { + if _, ok := filter["origin"]; ok { + filter["include_capi"] = []string{"true"} + } +} + +func handleScopeFilter(scope string, predicates *[]predicate.Alert) { + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + + 
*predicates = append(*predicates, alert.SourceScopeEQ(scope)) +} + +func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { + duration, err := ParseDuration(value) + if err != nil { + return fmt.Errorf("while parsing duration: %w", err) + } + + timePoint := time.Now().UTC().Add(-duration) + if timePoint.IsZero() { + return fmt.Errorf("empty time now() - %s", timePoint.String()) + } + + switch param { + case "since": + *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) + case "created_before": + *predicates = append(*predicates, alert.CreatedAtLTE(timePoint)) + case "until": + *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) + } + + return nil +} + +func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } +} + +func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip < query.start_ip + alert.HasDecisionsWith(decision.StartIPLT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix <= query.start_suffix + 
alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip > query.end_ip + alert.HasDecisionsWith(decision.EndIPGT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix >= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), + ), + ), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip > query.start_ip + alert.HasDecisionsWith(decision.StartIPGT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix >= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip < query.end_ip + alert.HasDecisionsWith(decision.EndIPLT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix <= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), + ), + ), + )) + } +} + +func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { + if ip_sz == 4 { + handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz == 16 { + handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz != 0 { + return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + + return nil +} + +func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error { + if value == "false" { + *predicates = append(*predicates, alert.And( + // do not show alerts with active decisions having origin CAPI or lists + alert.And( + 
alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), + ), + alert.Not( + alert.And( + // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI + alert.Not(alert.HasDecisions()), + alert.Or( + alert.SourceScopeHasPrefix(types.ListOrigin+":"), + alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), + ), + ), + ), + )) + } else if value != "true" { + log.Errorf("invalid bool '%s' for include_capi", value) + } + + return nil +} + func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { predicates := make([]predicate.Alert, 0) @@ -739,16 +893,8 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ - /*the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ - if v, ok := filter["simulated"]; ok { - if v[0] == "false" { - predicates = append(predicates, alert.SimulatedEQ(false)) - } - } - - if _, ok := filter["origin"]; ok { - filter["include_capi"] = []string{"true"} - } + handleSimulatedFilter(filter, &predicates) + handleOriginFilter(filter, &predicates) for param, value := range filter { switch param { @@ -758,14 +904,7 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } case "scope": - scope := value[0] - if strings.ToLower(scope) == "ip" { - scope = types.Ip - } else if strings.ToLower(scope) == "range" { - scope = types.Range - } - - predicates = append(predicates, alert.SourceScopeEQ(scope)) + handleScopeFilter(value[0], &predicates) case "value": predicates = append(predicates, alert.SourceValueEQ(value[0])) case 
"scenario": @@ -775,68 +914,18 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e if err != nil { return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) } - case "since": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) - } - - since := time.Now().UTC().Add(-duration) - if since.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", since.String()) - } - - predicates = append(predicates, alert.StartedAtGTE(since)) - case "created_before": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) - } + case "since", "created_before", "until": + if err := handleTimeFilters(param, value[0], &predicates); err != nil { + return nil, err - since := time.Now().UTC().Add(-duration) - if since.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", since.String()) } - - predicates = append(predicates, alert.CreatedAtLTE(since)) - case "until": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) - } - - until := time.Now().UTC().Add(-duration) - if until.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", until.String()) - } - - predicates = append(predicates, alert.StartedAtLTE(until)) case "decision_type": predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) case "origin": predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) case "include_capi": // allows to exclude one or more specific origins - if value[0] == "false" { - predicates = append(predicates, alert.And( - // do not show alerts with active decisions having origin CAPI or lists - alert.And( - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), - ), - alert.Not( - 
alert.And( - // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI - alert.Not(alert.HasDecisions()), - alert.Or( - alert.SourceScopeHasPrefix(types.ListOrigin+":"), - alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), - ), - ), - ), - ), - ) - } else if value[0] != "true" { - log.Errorf("Invalid bool '%s' for include_capi", value[0]) + if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { + return nil, err } case "has_active_decision": if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { @@ -861,72 +950,9 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e } } - if ip_sz == 4 { - if contains { /*decision contains {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } else { /*decision is contained within {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } - } else if ip_sz == 16 { - if contains { /*decision contains {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip < query.start_ip - alert.HasDecisionsWith(decision.StartIPLT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix <= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), - )), - alert.Or( - // decision.end_ip > query.end_ip - alert.HasDecisionsWith(decision.EndIPGT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - 
alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix >= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), - ), - ), - )) - } else { /*decision is contained within {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip > query.start_ip - alert.HasDecisionsWith(decision.StartIPGT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix >= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), - )), - alert.Or( - // decision.end_ip < query.end_ip - alert.HasDecisionsWith(decision.EndIPLT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix <= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), - ), - ), - )) - } - } else if ip_sz != 0 { - return nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { + return nil, err + } return predicates, nil From b14201aa840cea1e61eb7a7550e4b301e09adbff Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 09:31:28 +0200 Subject: [PATCH 299/581] CI: delegate pipenv cache management to the setup-python action (#3243) --- .github/workflows/docker-tests.yml | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 228a0829984..918f3bcaf1d 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -53,23 +53,12 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.x" - - - name: "Install pipenv" - run: | - cd docker/test - python -m pip 
install --upgrade pipenv wheel - - - name: "Cache virtualenvs" - id: cache-pipenv - uses: actions/cache@v4 - with: - path: ~/.local/share/virtualenvs - key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} + cache: 'pipenv' - name: "Install dependencies" - if: steps.cache-pipenv.outputs.cache-hit != 'true' run: | cd docker/test + python -m pip install --upgrade pipenv wheel pipenv install --deploy - name: "Create Docker network" From 7c5d4d8b3d3e4fcc9f7383cfd3455a1e4e7f1c14 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:06:31 +0200 Subject: [PATCH 300/581] context propagation: explicit ctx parameter in unit tests (#3229) --- pkg/acquisition/modules/loki/loki_test.go | 19 +++-- pkg/apiserver/alerts_test.go | 97 +++++++++++++---------- pkg/apiserver/api_key_test.go | 9 ++- pkg/apiserver/apiserver_test.go | 16 +++- pkg/apiserver/jwt_test.go | 19 +++-- pkg/apiserver/machines_test.go | 58 +++++++++----- 6 files changed, 131 insertions(+), 87 deletions(-) diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 5f41cd4c62e..2fd2b61e995 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -95,7 +95,6 @@ query: > delayFor: 1 * time.Second, }, { - config: ` mode: tail source: loki @@ -111,7 +110,6 @@ query: > testName: "Correct config with password", }, { - config: ` mode: tail source: loki @@ -261,7 +259,7 @@ func TestConfigureDSN(t *testing.T) { } } -func feedLoki(logger *log.Entry, n int, title string) error { +func feedLoki(ctx context.Context, logger *log.Entry, n int, title string) error { streams := LogStreams{ Streams: []LogStream{ { @@ -286,7 +284,7 @@ func feedLoki(logger *log.Entry, n int, title string) error { return err } - req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, 
"http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) if err != nil { return err } @@ -344,12 +342,13 @@ since: 1h subLogger := logger.WithField("type", "loki") lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) - if err != nil { t.Fatalf("Unexpected error : %s", err) } - err = feedLoki(subLogger, 20, title) + ctx := context.Background() + + err = feedLoki(ctx, subLogger, 20, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -421,6 +420,8 @@ query: > }, } + ctx := context.Background() + for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { logger := log.New() @@ -472,7 +473,7 @@ query: > } }) - err = feedLoki(subLogger, ts.expectedLines, title) + err = feedLoki(ctx, subLogger, ts.expectedLines, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -525,7 +526,9 @@ query: > time.Sleep(time.Second * 2) - err = feedLoki(subLogger, 1, title) + ctx := context.Background() + + err = feedLoki(ctx, subLogger, 1, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 891eb3a8f4a..d801ff86f45 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "fmt" "net/http" @@ -45,8 +46,9 @@ func (l *LAPI) InsertAlertFromFile(t *testing.T, path string) *httptest.Response } func (l *LAPI) RecordResponse(t *testing.T, verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { + ctx := context.Background() w := httptest.NewRecorder() - req, err := http.NewRequest(verb, url, body) + req, err := http.NewRequestWithContext(ctx, verb, url, body) require.NoError(t, err) switch authType { @@ -74,8 +76,9 @@ func LoginToTestAPI(t *testing.T, router *gin.Engine, config csconfig.Config) mo body := CreateTestMachine(t, router, "") ValidateMachine(t, "test", 
config.API.Server.DbConfig) + ctx := context.Background() w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -95,13 +98,13 @@ func TestSimulatedAlert(t *testing.T) { lapi := SetupLAPITest(t) lapi.InsertAlertFromFile(t, "./tests/alert_minibulk+simul.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_minibulk+simul.json") - //exclude decision in simulation mode + // exclude decision in simulation mode w := lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", alertContent, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) - //include decision in simulation mode + // include decision in simulation mode w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", alertContent, "password") assert.Equal(t, 200, w.Code) @@ -122,7 +125,9 @@ func TestCreateAlert(t *testing.T) { w = lapi.RecordResponse(t, http.MethodPost, "/v1/alerts", alertContent, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) + assert.Equal(t, + `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, + w.Body.String()) // Create Valid Alert w = lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") @@ -162,163 +167,163 @@ func 
TestAlertListFilters(t *testing.T) { lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_ssh-bf.json") - //bad filter + // bad filter w := lapi.RecordResponse(t, "GET", "/v1/alerts?test=test", alertContent, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) - //get without filters + // get without filters w = lapi.RecordResponse(t, "GET", "/v1/alerts", emptyBody, "password") assert.Equal(t, 200, w.Code) - //check alert and decision + // check alert and decision assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test decision_type filter (ok) + // test decision_type filter (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test decision_type filter (bad value) + // test decision_type filter (bad value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test scope (ok) + // test scope (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test scope (bad value) + // test scope (bad value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") 
assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test scenario (ok) + // test scenario (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test scenario (bad value) + // test scenario (bad value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test ip (ok) + // test ip (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test ip (bad value) + // test ip (bad value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test ip (invalid value) + // test ip (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) - //test range (ok) + // test range (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test range + // test range w = lapi.RecordResponse(t, 
"GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test range (invalid value) + // test range (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=ratata", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) - //test since (ok) + // test since (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1h", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test since (ok but yields no results) + // test since (ok but yields no results) w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test since (invalid value) + // test since (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) - //test until (ok) + // test until (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test until (ok but no return) + // test until (ok but no return) w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1m", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test until (invalid value) + // test until (invalid value) w = lapi.RecordResponse(t, "GET", 
"/v1/alerts?until=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) - //test simulated (ok) + // test simulated (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test simulated (ok) + // test simulated (ok) w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test has active decision + // test has active decision w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test has active decision + // test has active decision w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test has active decision (invalid value) + // test has active decision (invalid value) w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") assert.Equal(t, 500, w.Code) @@ -327,7 +332,7 @@ func TestAlertListFilters(t *testing.T) { func TestAlertBulkInsert(t *testing.T) { lapi := SetupLAPITest(t) - //insert a bulk of 20 alerts to trigger bulk insert + // insert a bulk of 20 alerts to trigger bulk 
insert lapi.InsertAlertFromFile(t, "./tests/alert_bulk.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_bulk.json") @@ -355,17 +360,19 @@ func TestCreateAlertErrors(t *testing.T) { lapi := SetupLAPITest(t) alertContent := GetAlertReaderFromFile(t, "./tests/alert_sample.json") - //test invalid bearer + ctx := context.Background() + + // test invalid bearer w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/alerts", alertContent) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/alerts", alertContent) req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", "ratata")) lapi.router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - //test invalid bearer + // test invalid bearer w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/alerts", alertContent) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/alerts", alertContent) req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", lapi.loginResp.Token+"s")) lapi.router.ServeHTTP(w, req) @@ -376,9 +383,11 @@ func TestDeleteAlert(t *testing.T) { lapi := SetupLAPITest(t) lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + ctx := context.Background() + // Fail Delete Alert w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) @@ -387,7 +396,7 @@ func TestDeleteAlert(t *testing.T) { // Delete Alert w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" 
lapi.router.ServeHTTP(w, req) @@ -399,9 +408,11 @@ func TestDeleteAlertByID(t *testing.T) { lapi := SetupLAPITest(t) lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + ctx := context.Background() + // Fail Delete Alert w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) @@ -410,7 +421,7 @@ func TestDeleteAlertByID(t *testing.T) { // Delete Alert w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) @@ -439,9 +450,11 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { loginResp: loginResp, } + ctx := context.Background() + assertAlertDeleteFailedFromIP := func(ip string) { w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, loginResp) req.RemoteAddr = ip + ":1234" @@ -453,7 +466,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { assertAlertDeletedFromIP := func(ip string) { w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, loginResp) req.RemoteAddr = ip + ":1234" diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 883ff21298d..10e75ae47f1 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -1,6 +1,7 @@ package apiserver import ( + 
"context" "net/http" "net/http/httptest" "strings" @@ -12,11 +13,13 @@ import ( func TestAPIKey(t *testing.T) { router, config := NewAPITest(t) + ctx := context.Background() + APIKey := CreateTestBouncer(t, config.API.Server.DbConfig) // Login with empty token w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -25,7 +28,7 @@ func TestAPIKey(t *testing.T) { // Login with invalid token w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Api-Key", "a1b2c3d4e5f6") router.ServeHTTP(w, req) @@ -35,7 +38,7 @@ func TestAPIKey(t *testing.T) { // Login with valid token w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Api-Key", APIKey) router.ServeHTTP(w, req) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index f48791ebcb8..89c75f35d21 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -278,8 +278,10 @@ func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { body := string(b) + ctx := context.Background() + w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -323,8 +325,10 @@ func TestWithWrongFlushConfig(t 
*testing.T) { func TestUnknownPath(t *testing.T) { router, _ := NewAPITest(t) + ctx := context.Background() + w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test", nil) req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -380,8 +384,10 @@ func TestLoggingDebugToFileConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, api) + ctx := context.Background() + w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test42", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, 404, w.Code) @@ -430,8 +436,10 @@ func TestLoggingErrorToFileConfig(t *testing.T) { require.NoError(t, err) require.NotNil(t, api) + ctx := context.Background() + w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test42", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index aa6e84e416b..293cc38bd2c 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "net/http" "net/http/httptest" "strings" @@ -12,11 +13,13 @@ import ( func TestLogin(t *testing.T) { router, config := NewAPITest(t) + ctx := context.Background() + body := CreateTestMachine(t, router, "") // Login with machine not validated yet w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -25,7 +28,7 @@ func TestLogin(t *testing.T) { // Login 
with machine not exist w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1", "password": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1", "password": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -34,7 +37,7 @@ func TestLogin(t *testing.T) { // Login with invalid body w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader("test")) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader("test")) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -43,19 +46,19 @@ func TestLogin(t *testing.T) { // Login with invalid format w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) assert.Equal(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) - //Validate machine + // Validate machine ValidateMachine(t, "test", config.API.Server.DbConfig) // Login with invalid password w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -64,7 +67,7 @@ func TestLogin(t *testing.T) { // Login with valid machine w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", 
strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -74,7 +77,7 @@ func TestLogin(t *testing.T) { // Login with valid machine + scenarios w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test", "scenarios": ["crowdsecurity/test", "crowdsecurity/test2"]}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test", "scenarios": ["crowdsecurity/test", "crowdsecurity/test2"]}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 041a6bee528..44c370732c7 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "net/http" "net/http/httptest" @@ -16,9 +17,11 @@ import ( func TestCreateMachine(t *testing.T) { router, _ := NewAPITest(t) + ctx := context.Background() + // Create machine with invalid format w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader("test")) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader("test")) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -27,7 +30,7 @@ func TestCreateMachine(t *testing.T) { // Create machine with invalid input w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(`{"test": "test"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(`{"test": "test"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -41,7 +44,7 @@ func TestCreateMachine(t *testing.T) { body := string(b) w = 
httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -52,6 +55,9 @@ func TestCreateMachine(t *testing.T) { func TestCreateMachineWithForwardedFor(t *testing.T) { router, config := NewAPITestForwardedFor(t) router.TrustedPlatform = "X-Real-IP" + + ctx := context.Background() + // Create machine b, err := json.Marshal(MachineTest) require.NoError(t, err) @@ -59,7 +65,7 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Real-Ip", "1.1.1.1") router.ServeHTTP(w, req) @@ -75,6 +81,8 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { router, config := NewAPITest(t) + ctx := context.Background() + // Create machine b, err := json.Marshal(MachineTest) require.NoError(t, err) @@ -82,7 +90,7 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Real-IP", "1.1.1.1") router.ServeHTTP(w, req) @@ -92,14 +100,16 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) - //For some reason, the IP is empty when running tests - //if no forwarded-for headers are present + // For some reason, the IP is empty when running 
tests + // if no forwarded-for headers are present assert.Equal(t, "", ip) } func TestCreateMachineWithoutForwardedFor(t *testing.T) { router, config := NewAPITestForwardedFor(t) + ctx := context.Background() + // Create machine b, err := json.Marshal(MachineTest) require.NoError(t, err) @@ -107,7 +117,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -116,23 +126,25 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) - //For some reason, the IP is empty when running tests - //if no forwarded-for headers are present + // For some reason, the IP is empty when running tests + // if no forwarded-for headers are present assert.Equal(t, "", ip) } func TestCreateMachineAlreadyExist(t *testing.T) { router, _ := NewAPITest(t) + ctx := context.Background() + body := CreateTestMachine(t, router, "") w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -143,7 +155,9 @@ func TestCreateMachineAlreadyExist(t *testing.T) { func TestAutoRegistration(t *testing.T) { router, _ := NewAPITest(t) - //Invalid registration token / valid source IP + ctx := context.Background() + + // Invalid 
registration token / valid source IP regReq := MachineTest regReq.RegistrationToken = invalidRegistrationToken b, err := json.Marshal(regReq) @@ -152,14 +166,14 @@ func TestAutoRegistration(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnauthorized, w.Code) - //Invalid registration token / invalid source IP + // Invalid registration token / invalid source IP regReq = MachineTest regReq.RegistrationToken = invalidRegistrationToken b, err = json.Marshal(regReq) @@ -168,14 +182,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "42.42.42.42:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnauthorized, w.Code) - //valid registration token / invalid source IP + // valid registration token / invalid source IP regReq = MachineTest regReq.RegistrationToken = validRegistrationToken b, err = json.Marshal(regReq) @@ -184,14 +198,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "42.42.42.42:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnauthorized, w.Code) - //Valid registration token / valid source IP + // Valid registration token / valid source IP regReq = MachineTest 
regReq.RegistrationToken = validRegistrationToken b, err = json.Marshal(regReq) @@ -200,14 +214,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusAccepted, w.Code) - //No token / valid source IP + // No token / valid source IP regReq = MachineTest regReq.MachineID = ptr.Of("test2") b, err = json.Marshal(regReq) @@ -216,7 +230,7 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) From be391a3da8301cc8f3eef2c3740cdebc8aa295f8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:09:29 +0200 Subject: [PATCH 301/581] context propagation: pass context to NewAPIC() (#3231) --- cmd/crowdsec-cli/clipapi/papi.go | 12 +++++++----- pkg/apiserver/apic.go | 4 ++-- pkg/apiserver/apic_test.go | 4 +++- pkg/apiserver/apiserver.go | 2 +- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index 747b8c01b9b..c0f08157f31 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -59,7 +59,7 @@ func (cli *cliPapi) NewCommand() *cobra.Command { func (cli *cliPapi) Status(ctx context.Context, out io.Writer, db *database.Client) error { cfg := cli.cfg() - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, 
cfg.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(ctx, cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { return fmt.Errorf("unable to initialize API client: %w", err) } @@ -118,11 +118,11 @@ func (cli *cliPapi) newStatusCmd() *cobra.Command { return cmd } -func (cli *cliPapi) sync(out io.Writer, db *database.Client) error { +func (cli *cliPapi) sync(ctx context.Context, out io.Writer, db *database.Client) error { cfg := cli.cfg() t := tomb.Tomb{} - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + apic, err := apiserver.NewAPIC(ctx, cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) if err != nil { return fmt.Errorf("unable to initialize API client: %w", err) } @@ -159,12 +159,14 @@ func (cli *cliPapi) newSyncCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { return err } - return cli.sync(color.Output, db) + return cli.sync(ctx, color.Output, db) }, } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 73061637ad9..3ed2e12ea54 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -174,7 +174,7 @@ func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool) return signal } -func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, apicWhitelist *csconfig.CapiWhitelist) (*apic, error) { +func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, apicWhitelist *csconfig.CapiWhitelist) (*apic, error) { var err error ret := &apic{ @@ -237,7 +237,7 @@ func NewAPIC(config 
*csconfig.OnlineApiClientCfg, dbClient *database.Client, con return ret, fmt.Errorf("get scenario in db: %w", err) } - authResp, _, err := ret.apiClient.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + authResp, _, err := ret.apiClient.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &config.Credentials.Login, Password: &password, Scenarios: scenarios, diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 058e25079e0..105d295dd0d 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -230,6 +230,8 @@ func TestNewAPIC(t *testing.T) { }, } + ctx := context.Background() + for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { setConfig() @@ -246,7 +248,7 @@ func TestNewAPIC(t *testing.T) { ), )) tc.action() - _, err := NewAPIC(testConfig, tc.args.dbClient, tc.args.consoleConfig, nil) + _, err := NewAPIC(ctx, testConfig, tc.args.dbClient, tc.args.consoleConfig, nil) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 42dcb219379..8bf406e0a79 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -249,7 +249,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { if config.OnlineClient != nil && config.OnlineClient.Credentials != nil { log.Printf("Loading CAPI manager") - apiClient, err = NewAPIC(config.OnlineClient, dbClient, config.ConsoleConfig, config.CapiWhitelists) + apiClient, err = NewAPIC(ctx, config.OnlineClient, dbClient, config.ConsoleConfig, config.CapiWhitelists) if err != nil { return nil, err } From 2a60c9903692115cf847429aa7a2b332d339fe7b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:11:50 +0200 Subject: [PATCH 302/581] context propagation: pkg/database/flush (#3235) --- cmd/crowdsec-cli/clialert/alerts.go | 6 ++-- pkg/apiserver/apiserver.go | 2 +- pkg/database/flush.go | 43 
+++++++++++++++-------------- 3 files changed, 27 insertions(+), 24 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 0965e1e13d0..dbb7ca14db5 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -575,15 +575,17 @@ func (cli *cliAlerts) newFlushCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() + ctx := cmd.Context() + if err := require.LAPI(cfg); err != nil { return err } - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { return err } log.Info("Flushing alerts. !! This may take a long time !!") - err = db.FlushAlerts(maxAge, maxItems) + err = db.FlushAlerts(ctx, maxAge, maxItems) if err != nil { return fmt.Errorf("unable to flush alerts: %w", err) } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 8bf406e0a79..95d18ccb028 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -170,7 +170,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { } if config.DbConfig.Flush != nil { - flushScheduler, err = dbClient.StartFlushScheduler(config.DbConfig.Flush) + flushScheduler, err = dbClient.StartFlushScheduler(ctx, config.DbConfig.Flush) if err != nil { return nil, err } diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 5d53d10c942..46c8edfa308 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -1,6 +1,7 @@ package database import ( + "context" "errors" "fmt" "time" @@ -26,7 +27,7 @@ const ( flushInterval = 1 * time.Minute ) -func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { +func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { maxItems := 0 maxAge := "" @@ -45,7 +46,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) 
(*gocron.Sched // Init & Start cronjob every minute for alerts scheduler := gocron.NewScheduler(time.UTC) - job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems) + job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, ctx, maxAge, maxItems) if err != nil { return nil, fmt.Errorf("while starting FlushAlerts scheduler: %w", err) } @@ -100,14 +101,14 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched } } - baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) + baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, ctx, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) } baJob.SingletonMode() - metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, config.MetricsMaxAge) + metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, ctx, config.MetricsMaxAge) if err != nil { return nil, fmt.Errorf("while starting flushMetrics scheduler: %w", err) } @@ -120,7 +121,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched } // flushMetrics deletes metrics older than maxAge, regardless if they have been pushed to CAPI or not -func (c *Client) flushMetrics(maxAge *time.Duration) { +func (c *Client) flushMetrics(ctx context.Context, maxAge *time.Duration) { if maxAge == nil { maxAge = ptr.Of(defaultMetricsMaxAge) } @@ -129,7 +130,7 @@ func (c *Client) flushMetrics(maxAge *time.Duration) { deleted, err := c.Ent.Metric.Delete().Where( metric.ReceivedAtLTE(time.Now().UTC().Add(-*maxAge)), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while flushing metrics: %s", err) return @@ -140,10 +141,10 @@ func (c *Client) flushMetrics(maxAge *time.Duration) { } } -func (c *Client) FlushOrphans() { +func (c *Client) FlushOrphans(ctx context.Context) { /* While it has only been linked to some very corner-case 
bug : https://github.com/crowdsecurity/crowdsec/issues/778 */ /* We want to take care of orphaned events for which the parent alert/decision has been deleted */ - eventsCount, err := c.Ent.Event.Delete().Where(event.Not(event.HasOwner())).Exec(c.CTX) + eventsCount, err := c.Ent.Event.Delete().Where(event.Not(event.HasOwner())).Exec(ctx) if err != nil { c.Log.Warningf("error while deleting orphan events: %s", err) return @@ -154,7 +155,7 @@ func (c *Client) FlushOrphans() { } eventsCount, err = c.Ent.Decision.Delete().Where( - decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(c.CTX) + decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(ctx) if err != nil { c.Log.Warningf("error while deleting orphan decisions: %s", err) return @@ -165,7 +166,7 @@ func (c *Client) FlushOrphans() { } } -func (c *Client) flushBouncers(authType string, duration *time.Duration) { +func (c *Client) flushBouncers(ctx context.Context, authType string, duration *time.Duration) { if duration == nil { return } @@ -174,7 +175,7 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { bouncer.LastPullLTE(time.Now().UTC().Add(-*duration)), ).Where( bouncer.AuthTypeEQ(authType), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while auto-deleting expired bouncers (%s): %s", authType, err) return @@ -185,7 +186,7 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { } } -func (c *Client) flushAgents(authType string, duration *time.Duration) { +func (c *Client) flushAgents(ctx context.Context, authType string, duration *time.Duration) { if duration == nil { return } @@ -194,7 +195,7 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { machine.LastHeartbeatLTE(time.Now().UTC().Add(-*duration)), machine.Not(machine.HasAlerts()), machine.AuthTypeEQ(authType), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while auto-deleting expired machines (%s): 
%s", authType, err) return @@ -205,23 +206,23 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { } } -func (c *Client) FlushAgentsAndBouncers(agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error { +func (c *Client) FlushAgentsAndBouncers(ctx context.Context, agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error { log.Debug("starting FlushAgentsAndBouncers") if agentsCfg != nil { - c.flushAgents(types.TlsAuthType, agentsCfg.CertDuration) - c.flushAgents(types.PasswordAuthType, agentsCfg.LoginPasswordDuration) + c.flushAgents(ctx, types.TlsAuthType, agentsCfg.CertDuration) + c.flushAgents(ctx, types.PasswordAuthType, agentsCfg.LoginPasswordDuration) } if bouncersCfg != nil { - c.flushBouncers(types.TlsAuthType, bouncersCfg.CertDuration) - c.flushBouncers(types.ApiKeyAuthType, bouncersCfg.ApiDuration) + c.flushBouncers(ctx, types.TlsAuthType, bouncersCfg.CertDuration) + c.flushBouncers(ctx, types.ApiKeyAuthType, bouncersCfg.ApiDuration) } return nil } -func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { +func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) error { var ( deletedByAge int deletedByNbItem int @@ -235,7 +236,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { } c.Log.Debug("Flushing orphan alerts") - c.FlushOrphans() + c.FlushOrphans(ctx) c.Log.Debug("Done flushing orphan alerts") totalAlerts, err = c.TotalAlerts() @@ -287,7 +288,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { if maxid > 0 { // This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted - deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX) + deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(ctx) if err != nil { c.Log.Errorf("FlushAlerts: Could not delete alerts: %s", err) return fmt.Errorf("could not delete alerts: %w", err) From 
b4a2403fdfab37560a15fae9d6208f67b1f22b5e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 11:14:33 +0200 Subject: [PATCH 303/581] context propagation: bouncer list (#3236) --- cmd/crowdsec-cli/clibouncer/bouncers.go | 14 ++++++++------ cmd/crowdsec-cli/clisupport/support.go | 6 +++--- pkg/apiserver/apic_metrics.go | 16 ++++++++++------ pkg/apiserver/apic_test.go | 8 +++++--- pkg/database/bouncers.go | 4 ++-- 5 files changed, 28 insertions(+), 20 deletions(-) diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 0c0fc8851c9..89e91b63911 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -1,6 +1,7 @@ package clibouncer import ( + "context" "encoding/csv" "encoding/json" "errors" @@ -159,11 +160,11 @@ func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { return nil } -func (cli *cliBouncers) List(out io.Writer, db *database.Client) error { +func (cli *cliBouncers) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - bouncers, err := db.ListBouncers() + bouncers, err := db.ListBouncers(ctx) if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) } @@ -199,8 +200,8 @@ func (cli *cliBouncers) newListCmd() *cobra.Command { Example: `cscli bouncers list`, Args: cobra.ExactArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.List(color.Output, cli.db) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.List(cmd.Context(), color.Output, cli.db) }, } @@ -271,6 +272,7 @@ func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComp var err error cfg := cli.cfg() + ctx := cmd.Context() // need to load config and db because PersistentPreRunE is not called for 
completions @@ -279,13 +281,13 @@ func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComp return nil, cobra.ShellCompDirectiveNoFileComp } - cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + cli.db, err = require.DBClient(ctx, cfg.DbConfig) if err != nil { cobra.CompError("unable to list bouncers " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp } - bouncers, err := cli.db.ListBouncers() + bouncers, err := cli.db.ListBouncers(ctx) if err != nil { cobra.CompError("unable to list bouncers " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index e9837b03fe7..7e41518805a 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -189,7 +189,7 @@ func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub) error { return nil } -func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { +func (cli *cliSupport) dumpBouncers(ctx context.Context, zw *zip.Writer, db *database.Client) error { log.Info("Collecting bouncers") if db == nil { @@ -199,7 +199,7 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) cb := clibouncer.New(cli.cfg) - if err := cb.List(out, db); err != nil { + if err := cb.List(ctx, out, db); err != nil { return err } @@ -525,7 +525,7 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { log.Warnf("could not collect hub information: %s", err) } - if err = cli.dumpBouncers(zipWriter, db); err != nil { + if err = cli.dumpBouncers(ctx, zipWriter, db); err != nil { log.Warnf("could not collect bouncers information: %s", err) } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 5c6a550a6a0..91a0a8273f7 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -23,7 +23,7 @@ type dbPayload struct { Metrics 
[]*models.DetailedMetrics `json:"metrics"` } -func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { +func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, error) { allMetrics := &models.AllMetrics{} metricsIds := make([]int, 0) @@ -32,7 +32,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { return nil, nil, err } - bouncers, err := a.dbClient.ListBouncers() + bouncers, err := a.dbClient.ListBouncers(ctx) if err != nil { return nil, nil, err } @@ -185,7 +185,7 @@ func (a *apic) MarkUsageMetricsAsSent(ids []int) error { return a.dbClient.MarkUsageMetricsAsSent(ids) } -func (a *apic) GetMetrics() (*models.Metrics, error) { +func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { machines, err := a.dbClient.ListMachines() if err != nil { return nil, err @@ -202,7 +202,7 @@ func (a *apic) GetMetrics() (*models.Metrics, error) { } } - bouncers, err := a.dbClient.ListBouncers() + bouncers, err := a.dbClient.ListBouncers(ctx) if err != nil { return nil, err } @@ -254,6 +254,8 @@ func (a *apic) fetchMachineIDs() ([]string, error) { func (a *apic) SendMetrics(stop chan (bool)) { defer trace.CatchPanic("lapi/metricsToAPIC") + ctx := context.TODO() + // verify the list of machines every interval const checkInt = 20 * time.Second @@ -311,7 +313,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { case <-metTicker.C: metTicker.Stop() - metrics, err := a.GetMetrics() + metrics, err := a.GetMetrics(ctx) if err != nil { log.Errorf("unable to get metrics (%s)", err) } @@ -340,6 +342,8 @@ func (a *apic) SendMetrics(stop chan (bool)) { func (a *apic) SendUsageMetrics() { defer trace.CatchPanic("lapi/usageMetricsToAPIC") + ctx := context.TODO() + firstRun := true log.Debugf("Start sending usage metrics to CrowdSec Central API (interval: %s once, then %s)", a.usageMetricsIntervalFirst, a.usageMetricsInterval) @@ -358,7 +362,7 @@ func (a *apic) SendUsageMetrics() { ticker.Reset(a.usageMetricsInterval) } - 
metrics, metricsId, err := a.GetUsageMetrics() + metrics, metricsId, err := a.GetUsageMetrics(ctx) if err != nil { log.Errorf("unable to get usage metrics: %s", err) continue diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 105d295dd0d..182bf18532f 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -292,9 +292,11 @@ func TestAPICHandleDeletedDecisions(t *testing.T) { } func TestAPICGetMetrics(t *testing.T) { + ctx := context.Background() + cleanUp := func(api *apic) { - api.dbClient.Ent.Bouncer.Delete().ExecX(context.Background()) - api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) + api.dbClient.Ent.Bouncer.Delete().ExecX(ctx) + api.dbClient.Ent.Machine.Delete().ExecX(ctx) } tests := []struct { name string @@ -377,7 +379,7 @@ func TestAPICGetMetrics(t *testing.T) { ExecX(context.Background()) } - foundMetrics, err := apiClient.GetMetrics() + foundMetrics, err := apiClient.GetMetrics(ctx) require.NoError(t, err) assert.Equal(t, tc.expectedMetric.Bouncers, foundMetrics.Bouncers) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index a7378bbb203..6ff308ff786 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -59,8 +59,8 @@ func (c *Client) SelectBouncerByName(bouncerName string) (*ent.Bouncer, error) { return result, nil } -func (c *Client) ListBouncers() ([]*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().All(c.CTX) +func (c *Client) ListBouncers(ctx context.Context) ([]*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "listing bouncers: %s", err) } From eeb28014c6860a0f50e87ef1488fb641d09edbb9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 14:09:35 +0200 Subject: [PATCH 304/581] context propagation: pkg/database/config (#3246) --- cmd/crowdsec-cli/clipapi/papi.go | 2 +- pkg/apiserver/apic.go | 28 ++++++++++++++-------------- 
pkg/apiserver/apic_test.go | 24 +++++++++++++++--------- pkg/apiserver/apiserver.go | 18 ++++++++++-------- pkg/apiserver/papi.go | 8 ++++---- pkg/apiserver/papi_cmd.go | 7 +++++-- pkg/database/config.go | 17 ++++++++++------- 7 files changed, 59 insertions(+), 45 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index c0f08157f31..b8101a0fb34 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -74,7 +74,7 @@ func (cli *cliPapi) Status(ctx context.Context, out io.Writer, db *database.Clie return fmt.Errorf("unable to get PAPI permissions: %w", err) } - lastTimestampStr, err := db.GetConfigItem(apiserver.PapiPullKey) + lastTimestampStr, err := db.GetConfigItem(ctx, apiserver.PapiPullKey) if err != nil { lastTimestampStr = ptr.Of("never") } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 3ed2e12ea54..b5384c6cc5c 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -614,7 +614,7 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio // we receive a list of decisions and links for blocklist and we need to create a list of alerts : // one alert for "community blocklist" // one alert per list we're subscribed to -func (a *apic) PullTop(forcePull bool) error { +func (a *apic) PullTop(ctx context.Context, forcePull bool) error { var err error // A mutex with TryLock would be a bit simpler @@ -655,7 +655,7 @@ func (a *apic) PullTop(forcePull bool) error { log.Infof("Starting community-blocklist update") - data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup}) + data, _, err := a.apiClient.Decisions.GetStreamV3(ctx, apiclient.DecisionsStreamOpts{Startup: a.startup}) if err != nil { return fmt.Errorf("get stream: %w", err) } @@ -700,7 +700,7 @@ func (a *apic) PullTop(forcePull bool) error { } // update blocklists - if err := a.UpdateBlocklists(data.Links, addCounters, 
forcePull); err != nil { + if err := a.UpdateBlocklists(ctx, data.Links, addCounters, forcePull); err != nil { return fmt.Errorf("while updating blocklists: %w", err) } @@ -708,9 +708,9 @@ func (a *apic) PullTop(forcePull bool) error { } // we receive a link to a blocklist, we pull the content of the blocklist and we create one alert -func (a *apic) PullBlocklist(blocklist *modelscapi.BlocklistLink, forcePull bool) error { +func (a *apic) PullBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink, forcePull bool) error { addCounters, _ := makeAddAndDeleteCounters() - if err := a.UpdateBlocklists(&modelscapi.GetDecisionsStreamResponseLinks{ + if err := a.UpdateBlocklists(ctx, &modelscapi.GetDecisionsStreamResponseLinks{ Blocklists: []*modelscapi.BlocklistLink{blocklist}, }, addCounters, forcePull); err != nil { return fmt.Errorf("while pulling blocklist: %w", err) @@ -820,7 +820,7 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo return false, nil } -func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscapi.BlocklistLink, addCounters map[string]map[string]int, forcePull bool) error { +func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, blocklist *modelscapi.BlocklistLink, addCounters map[string]map[string]int, forcePull bool) error { if blocklist.Scope == nil { log.Warningf("blocklist has no scope") return nil @@ -848,13 +848,13 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap ) if !forcePull { - lastPullTimestamp, err = a.dbClient.GetConfigItem(blocklistConfigItemName) + lastPullTimestamp, err = a.dbClient.GetConfigItem(ctx, blocklistConfigItemName) if err != nil { return fmt.Errorf("while getting last pull timestamp for blocklist %s: %w", *blocklist.Name, err) } } - decisions, hasChanged, err := client.Decisions.GetDecisionsFromBlocklist(context.Background(), blocklist, lastPullTimestamp) + decisions, hasChanged, err := 
client.Decisions.GetDecisionsFromBlocklist(ctx, blocklist, lastPullTimestamp) if err != nil { return fmt.Errorf("while getting decisions from blocklist %s: %w", *blocklist.Name, err) } @@ -869,7 +869,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap return nil } - err = a.dbClient.SetConfigItem(blocklistConfigItemName, time.Now().UTC().Format(http.TimeFormat)) + err = a.dbClient.SetConfigItem(ctx, blocklistConfigItemName, time.Now().UTC().Format(http.TimeFormat)) if err != nil { return fmt.Errorf("while setting last pull timestamp for blocklist %s: %w", *blocklist.Name, err) } @@ -892,7 +892,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap return nil } -func (a *apic) UpdateBlocklists(links *modelscapi.GetDecisionsStreamResponseLinks, addCounters map[string]map[string]int, forcePull bool) error { +func (a *apic) UpdateBlocklists(ctx context.Context, links *modelscapi.GetDecisionsStreamResponseLinks, addCounters map[string]map[string]int, forcePull bool) error { if links == nil { return nil } @@ -908,7 +908,7 @@ func (a *apic) UpdateBlocklists(links *modelscapi.GetDecisionsStreamResponseLink } for _, blocklist := range links.Blocklists { - if err := a.updateBlocklist(defaultClient, blocklist, addCounters, forcePull); err != nil { + if err := a.updateBlocklist(ctx, defaultClient, blocklist, addCounters, forcePull); err != nil { return err } } @@ -931,7 +931,7 @@ func setAlertScenario(alert *models.Alert, addCounters map[string]map[string]int } } -func (a *apic) Pull() error { +func (a *apic) Pull(ctx context.Context) error { defer trace.CatchPanic("lapi/pullFromAPIC") toldOnce := false @@ -955,7 +955,7 @@ func (a *apic) Pull() error { time.Sleep(1 * time.Second) } - if err := a.PullTop(false); err != nil { + if err := a.PullTop(ctx, false); err != nil { log.Errorf("capi pull top: %s", err) } @@ -967,7 +967,7 @@ func (a *apic) Pull() error { case <-ticker.C: ticker.Reset(a.pullInterval) - if err 
:= a.PullTop(false); err != nil { + if err := a.PullTop(ctx, false); err != nil { log.Errorf("capi pull top: %s", err) continue } diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 182bf18532f..97943b495e5 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -550,6 +550,7 @@ func TestFillAlertsWithDecisions(t *testing.T) { } func TestAPICWhitelists(t *testing.T) { + ctx := context.Background() api := getAPIC(t) // one whitelist on IP, one on CIDR api.whitelists = &csconfig.CapiWhitelist{} @@ -685,7 +686,7 @@ func TestAPICWhitelists(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) assertTotalDecisionCount(t, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing @@ -736,6 +737,7 @@ func TestAPICWhitelists(t *testing.T) { } func TestAPICPullTop(t *testing.T) { + ctx := context.Background() api := getAPIC(t) api.dbClient.Ent.Decision.Create(). SetOrigin(types.CAPIOrigin). @@ -826,7 +828,7 @@ func TestAPICPullTop(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) assertTotalDecisionCount(t, api.dbClient, 5) @@ -860,6 +862,7 @@ func TestAPICPullTop(t *testing.T) { } func TestAPICPullTopBLCacheFirstCall(t *testing.T) { + ctx := context.Background() // no decision in db, no last modified parameter. 
api := getAPIC(t) @@ -913,11 +916,11 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) blocklistConfigItemName := "blocklist:blocklist1:last_pull" - lastPullTimestamp, err := api.dbClient.GetConfigItem(blocklistConfigItemName) + lastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) assert.NotEqual(t, "", *lastPullTimestamp) @@ -927,14 +930,15 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { return httpmock.NewStringResponse(304, ""), nil }) - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) - secondLastPullTimestamp, err := api.dbClient.GetConfigItem(blocklistConfigItemName) + secondLastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) assert.Equal(t, *lastPullTimestamp, *secondLastPullTimestamp) } func TestAPICPullTopBLCacheForceCall(t *testing.T) { + ctx := context.Background() api := getAPIC(t) httpmock.Activate() @@ -1005,11 +1009,12 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) } func TestAPICPullBlocklistCall(t *testing.T) { + ctx := context.Background() api := getAPIC(t) httpmock.Activate() @@ -1032,7 +1037,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { require.NoError(t, err) api.apiClient = apic - err = api.PullBlocklist(&modelscapi.BlocklistLink{ + err = api.PullBlocklist(ctx, &modelscapi.BlocklistLink{ URL: ptr.Of("http://api.crowdsec.net/blocklist1"), Name: ptr.Of("blocklist1"), Scope: ptr.Of("Ip"), @@ -1134,6 +1139,7 @@ func TestAPICPush(t *testing.T) { } func TestAPICPull(t *testing.T) { + ctx := context.Background() api := getAPIC(t) tests := []struct { name string @@ -1204,7 +1210,7 @@ func TestAPICPull(t *testing.T) { go func() { 
logrus.SetOutput(&buf) - if err := api.Pull(); err != nil { + if err := api.Pull(ctx); err != nil { panic(err) } }() diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 95d18ccb028..6b5d6803be9 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -310,8 +310,8 @@ func (s *APIServer) apicPush() error { return nil } -func (s *APIServer) apicPull() error { - if err := s.apic.Pull(); err != nil { +func (s *APIServer) apicPull(ctx context.Context) error { + if err := s.apic.Pull(ctx); err != nil { log.Errorf("capi pull: %s", err) return err } @@ -319,8 +319,8 @@ func (s *APIServer) apicPull() error { return nil } -func (s *APIServer) papiPull() error { - if err := s.papi.Pull(); err != nil { +func (s *APIServer) papiPull(ctx context.Context) error { + if err := s.papi.Pull(ctx); err != nil { log.Errorf("papi pull: %s", err) return err } @@ -337,16 +337,16 @@ func (s *APIServer) papiSync() error { return nil } -func (s *APIServer) initAPIC() { +func (s *APIServer) initAPIC(ctx context.Context) { s.apic.pushTomb.Go(s.apicPush) - s.apic.pullTomb.Go(s.apicPull) + s.apic.pullTomb.Go(func() error { return s.apicPull(ctx) }) // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios if s.apic.apiClient.IsEnrolled() { if s.consoleConfig.IsPAPIEnabled() { if s.papi.URL != "" { log.Info("Starting PAPI decision receiver") - s.papi.pullTomb.Go(s.papiPull) + s.papi.pullTomb.Go(func() error { return s.papiPull(ctx) }) s.papi.syncTomb.Go(s.papiSync) } else { log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the console. 
Run cscli console enable console_management to add it.") @@ -381,8 +381,10 @@ func (s *APIServer) Run(apiReady chan bool) error { TLSConfig: tlsCfg, } + ctx := context.TODO() + if s.apic != nil { - s.initAPIC() + s.initAPIC(ctx) } s.httpServerTomb.Go(func() error { diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 89ad93930a1..7dd6b346aa9 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -230,13 +230,13 @@ func (p *Papi) PullOnce(since time.Time, sync bool) error { } // PullPAPI is the long polling client for real-time decisions from PAPI -func (p *Papi) Pull() error { +func (p *Papi) Pull(ctx context.Context) error { defer trace.CatchPanic("lapi/PullPAPI") p.Logger.Infof("Starting Polling API Pull") lastTimestamp := time.Time{} - lastTimestampStr, err := p.DBClient.GetConfigItem(PapiPullKey) + lastTimestampStr, err := p.DBClient.GetConfigItem(ctx, PapiPullKey) if err != nil { p.Logger.Warningf("failed to get last timestamp for papi pull: %s", err) } @@ -248,7 +248,7 @@ func (p *Papi) Pull() error { return fmt.Errorf("failed to serialize last timestamp: %w", err) } - if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil { + if err := p.DBClient.SetConfigItem(ctx, PapiPullKey, string(binTime)); err != nil { p.Logger.Errorf("error setting papi pull last key: %s", err) } else { p.Logger.Debugf("config item '%s' set in database with value '%s'", PapiPullKey, string(binTime)) @@ -277,7 +277,7 @@ func (p *Papi) Pull() error { continue } - if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil { + if err := p.DBClient.SetConfigItem(ctx, PapiPullKey, string(binTime)); err != nil { return fmt.Errorf("failed to update last timestamp: %w", err) } diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index a1137161698..943eb4139de 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "fmt" "time" @@ 
-215,17 +216,19 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) } + ctx := context.TODO() + if forcePullMsg.Blocklist == nil { p.Logger.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") - err = p.apic.PullTop(true) + err = p.apic.PullTop(ctx, true) if err != nil { return fmt.Errorf("failed to force pull operation: %w", err) } } else { p.Logger.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) - err = p.apic.PullBlocklist(&modelscapi.BlocklistLink{ + err = p.apic.PullBlocklist(ctx, &modelscapi.BlocklistLink{ Name: &forcePullMsg.Blocklist.Name, URL: &forcePullMsg.Blocklist.Url, Remediation: &forcePullMsg.Blocklist.Remediation, diff --git a/pkg/database/config.go b/pkg/database/config.go index 8c3578ad596..89ccb1e1b28 100644 --- a/pkg/database/config.go +++ b/pkg/database/config.go @@ -1,17 +1,20 @@ package database import ( + "context" + "github.com/pkg/errors" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" ) -func (c *Client) GetConfigItem(key string) (*string, error) { - result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(c.CTX) +func (c *Client) GetConfigItem(ctx context.Context, key string) (*string, error) { + result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(ctx) if err != nil && ent.IsNotFound(err) { return nil, nil } + if err != nil { return nil, errors.Wrapf(QueryFail, "select config item: %s", err) } @@ -19,16 +22,16 @@ func (c *Client) GetConfigItem(key string) (*string, error) { return &result.Value, nil } -func (c *Client) SetConfigItem(key string, value string) error { - - nbUpdated, err := c.Ent.ConfigItem.Update().SetValue(value).Where(configitem.NameEQ(key)).Save(c.CTX) - if (err != nil && ent.IsNotFound(err)) || nbUpdated == 
0 { //not found, create - err := c.Ent.ConfigItem.Create().SetName(key).SetValue(value).Exec(c.CTX) +func (c *Client) SetConfigItem(ctx context.Context, key string, value string) error { + nbUpdated, err := c.Ent.ConfigItem.Update().SetValue(value).Where(configitem.NameEQ(key)).Save(ctx) + if (err != nil && ent.IsNotFound(err)) || nbUpdated == 0 { // not found, create + err := c.Ent.ConfigItem.Create().SetName(key).SetValue(value).Exec(ctx) if err != nil { return errors.Wrapf(QueryFail, "insert config item: %s", err) } } else if err != nil { return errors.Wrapf(QueryFail, "update config item: %s", err) } + return nil } From 29f3d2710d1f75662624a30027c939036f6133b6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 14:27:34 +0200 Subject: [PATCH 305/581] context propagation: pkg/database/metrics (#3247) --- pkg/apiserver/apic_metrics.go | 10 +++++----- pkg/apiserver/usage_metrics_test.go | 8 ++++++-- pkg/database/metrics.go | 12 ++++++------ 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 91a0a8273f7..e5821e4c1e2 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -38,7 +38,7 @@ func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, } for _, bouncer := range bouncers { - dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(bouncer.Name) + dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(ctx, bouncer.Name) if err != nil { log.Errorf("unable to get bouncer usage metrics: %s", err) continue @@ -81,7 +81,7 @@ func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, } for _, lp := range lps { - dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(lp.MachineId) + dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(ctx, lp.MachineId) if err != nil { log.Errorf("unable to get LP usage metrics: %s", err) continue @@ -181,8 +181,8 @@ func (a 
*apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, return allMetrics, metricsIds, nil } -func (a *apic) MarkUsageMetricsAsSent(ids []int) error { - return a.dbClient.MarkUsageMetricsAsSent(ids) +func (a *apic) MarkUsageMetricsAsSent(ctx context.Context, ids []int) error { + return a.dbClient.MarkUsageMetricsAsSent(ctx, ids) } func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { @@ -379,7 +379,7 @@ func (a *apic) SendUsageMetrics() { } } - err = a.MarkUsageMetricsAsSent(metricsId) + err = a.MarkUsageMetricsAsSent(ctx, metricsId) if err != nil { log.Errorf("unable to mark usage metrics as sent: %s", err) continue diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 41dd0ccdc2c..019de5fb970 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -13,6 +13,8 @@ import ( ) func TestLPMetrics(t *testing.T) { + ctx := context.Background() + tests := []struct { name string body string @@ -198,7 +200,7 @@ func TestLPMetrics(t *testing.T) { assert.Contains(t, w.Body.String(), tt.expectedResponse) machine, _ := dbClient.QueryMachineByID("test") - metrics, _ := dbClient.GetLPUsageMetricsByMachineID("test") + metrics, _ := dbClient.GetLPUsageMetricsByMachineID(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) assert.Equal(t, tt.expectedOSName, machine.Osname) @@ -214,6 +216,8 @@ func TestLPMetrics(t *testing.T) { } func TestRCMetrics(t *testing.T) { + ctx := context.Background() + tests := []struct { name string body string @@ -368,7 +372,7 @@ func TestRCMetrics(t *testing.T) { assert.Contains(t, w.Body.String(), tt.expectedResponse) bouncer, _ := dbClient.SelectBouncerByName("test") - metrics, _ := dbClient.GetBouncerUsageMetricsByName("test") + metrics, _ := dbClient.GetBouncerUsageMetricsByName(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) assert.Equal(t, tt.expectedOSName, bouncer.Osname) diff --git a/pkg/database/metrics.go 
b/pkg/database/metrics.go index 1619fcc923b..99ba90c80b8 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -25,14 +25,14 @@ func (c *Client) CreateMetric(ctx context.Context, generatedType metric.Generate return metric, nil } -func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, error) { +func (c *Client) GetLPUsageMetricsByMachineID(ctx context.Context, machineId string) ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( metric.GeneratedTypeEQ(metric.GeneratedTypeLP), metric.GeneratedByEQ(machineId), metric.PushedAtIsNil(), ). - All(c.CTX) + All(ctx) if err != nil { c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) return nil, fmt.Errorf("getting LP usage metrics by origin %s: %w", machineId, err) @@ -41,14 +41,14 @@ func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, return metrics, nil } -func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric, error) { +func (c *Client) GetBouncerUsageMetricsByName(ctx context.Context, bouncerName string) ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( metric.GeneratedTypeEQ(metric.GeneratedTypeRC), metric.GeneratedByEQ(bouncerName), metric.PushedAtIsNil(), ). - All(c.CTX) + All(ctx) if err != nil { c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) return nil, fmt.Errorf("getting bouncer usage metrics by name %s: %w", bouncerName, err) @@ -57,11 +57,11 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric return metrics, nil } -func (c *Client) MarkUsageMetricsAsSent(ids []int) error { +func (c *Client) MarkUsageMetricsAsSent(ctx context.Context, ids []int) error { _, err := c.Ent.Metric.Update(). Where(metric.IDIn(ids...)). SetPushedAt(time.Now().UTC()). 
- Save(c.CTX) + Save(ctx) if err != nil { c.Log.Warningf("MarkUsageMetricsAsSent: %s", err) return fmt.Errorf("marking usage metrics as sent: %w", err) From 00032d40a887e7847c5877d96fb5506408836b9c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Sep 2024 16:59:02 +0200 Subject: [PATCH 306/581] lint/nestif: reduce hubtest complexity (#3244) --- .golangci.yml | 2 +- cmd/crowdsec-cli/clihubtest/run.go | 66 +++++++++++++++++++----------- 2 files changed, 43 insertions(+), 25 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 34837437355..b76e2613be7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -27,7 +27,7 @@ linters-settings: nestif: # lower this after refactoring - min-complexity: 19 + min-complexity: 16 nlreturn: block-size: 5 diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 57956ce67dc..31cceb81884 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -56,6 +56,46 @@ func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost stri return nil } +func printParserFailures(test *hubtest.HubTestItem) { + if len(test.ParserAssert.Fails) == 0 { + return + } + + fmt.Println() + log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) + + for _, fail := range test.ParserAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + + fmt.Println() + } +} + +func printScenarioFailures(test *hubtest.HubTestItem) { + if len(test.ScenarioAssert.Fails) == 0 { + return + } + + fmt.Println() + log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) + + for _, fail := range test.ScenarioAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, 
fail.Expression) + fmt.Printf(" Actual expression values:\n") + + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + + fmt.Println() + } +} + func (cli *cliHubTest) newRunCmd() *cobra.Command { var ( noClean bool @@ -111,30 +151,8 @@ func (cli *cliHubTest) newRunCmd() *cobra.Command { success = false cleanTestEnv := false if cfg.Cscli.Output == "human" { - if len(test.ParserAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) - for _, fail := range test.ParserAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } - if len(test.ScenarioAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) - for _, fail := range test.ScenarioAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } + printParserFailures(test) + printScenarioFailures(test) if !forceClean && !noClean { prompt := &survey.Confirm{ Message: fmt.Sprintf("\nDo you want to remove runtime folder for test '%s'? 
(default: Yes)", test.Name), From e2196bdd66a4ed30b444997d1ad89bed8a601756 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 20 Sep 2024 10:34:53 +0200 Subject: [PATCH 307/581] remove dependency from pkg/cwversion to pkg/acquisition (#3242) * register built-in components without dependencies * package comment --- Makefile | 3 +- cmd/crowdsec-cli/main.go | 5 +-- cmd/crowdsec-cli/setup.go | 18 ++++++++++ cmd/crowdsec-cli/setup_stub.go | 9 +++++ pkg/acquisition/acquisition.go | 51 +++++++++++++++------------- pkg/cwversion/component/component.go | 34 +++++++++++++++++++ pkg/cwversion/version.go | 22 +++++++----- 7 files changed, 104 insertions(+), 38 deletions(-) create mode 100644 cmd/crowdsec-cli/setup.go create mode 100644 cmd/crowdsec-cli/setup_stub.go create mode 100644 pkg/cwversion/component/component.go diff --git a/Makefile b/Makefile index 6bd3cbb7980..a0b06dc2ea0 100644 --- a/Makefile +++ b/Makefile @@ -138,7 +138,8 @@ COMPONENTS := \ datasource_loki \ datasource_s3 \ datasource_syslog \ - datasource_wineventlog + datasource_wineventlog \ + cscli_setup comma := , space := $(empty) $(empty) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 01179cf93be..1cca03b1d3d 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -28,7 +28,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisimulation" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisupport" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -281,9 +280,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(cliitem.NewAppsecConfig(cli.cfg).NewCommand()) 
cmd.AddCommand(cliitem.NewAppsecRule(cli.cfg).NewCommand()) - if fflag.CscliSetup.IsEnabled() { - cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) - } + cli.addSetup(cmd) if len(os.Args) > 1 { cobra.OnInitialize( diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go new file mode 100644 index 00000000000..66c0d71e777 --- /dev/null +++ b/cmd/crowdsec-cli/setup.go @@ -0,0 +1,18 @@ +//go:build !no_cscli_setup +package main + +import ( + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" + "github.com/crowdsecurity/crowdsec/pkg/fflag" +) + +func (cli *cliRoot) addSetup(cmd *cobra.Command) { + if fflag.CscliSetup.IsEnabled() { + cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) + } + + component.Register("cscli_setup") +} diff --git a/cmd/crowdsec-cli/setup_stub.go b/cmd/crowdsec-cli/setup_stub.go new file mode 100644 index 00000000000..e001f93c797 --- /dev/null +++ b/cmd/crowdsec-cli/setup_stub.go @@ -0,0 +1,9 @@ +//go:build no_cscli_setup +package main + +import ( + "github.com/spf13/cobra" +) + +func (cli *cliRoot) addSetup(_ *cobra.Command) { +} diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index b2493bbb9b7..4a5226a2981 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -19,6 +19,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -54,44 +55,34 @@ type DataSource interface { var ( // We declare everything here so we can tell if they are unsupported, or excluded from the build - AcquisitionSources = map[string]func() DataSource{ - "appsec": nil, - "cloudwatch": nil, - "docker": nil, - "file": nil, - "journalctl": nil, - "k8s-audit": nil, - "kafka": 
nil, - "kinesis": nil, - "loki": nil, - "s3": nil, - "syslog": nil, - "wineventlog": nil, - } - transformRuntimes = map[string]*vm.Program{} + AcquisitionSources = map[string]func() DataSource{} + transformRuntimes = map[string]*vm.Program{} ) func GetDataSourceIface(dataSourceType string) (DataSource, error) { - source, ok := AcquisitionSources[dataSourceType] - if !ok { + source, registered := AcquisitionSources[dataSourceType] + if registered { + return source(), nil + } + + built, known := component.Built["datasource_"+dataSourceType] + + if !known { return nil, fmt.Errorf("unknown data source %s", dataSourceType) } - if source == nil { - return nil, fmt.Errorf("data source %s is not built in this version of crowdsec", dataSourceType) + if built { + panic("datasource " + dataSourceType + " is built but not registered") } - return source(), nil + return nil, fmt.Errorf("data source %s is not built in this version of crowdsec", dataSourceType) } // registerDataSource registers a datasource in the AcquisitionSources map. // It must be called in the init() function of the datasource package, and the datasource name // must be declared with a nil value in the map, to allow for conditional compilation. 
func registerDataSource(dataSourceType string, dsGetter func() DataSource) { - _, ok := AcquisitionSources[dataSourceType] - if !ok { - panic("datasource must be declared in the map: " + dataSourceType) - } + component.Register("datasource_" + dataSourceType) AcquisitionSources[dataSourceType] = dsGetter } @@ -214,9 +205,11 @@ func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { if prom == nil { return configuration.METRICS_FULL } + if !prom.Enabled { return configuration.METRICS_NONE } + if prom.Level == configuration.CFG_METRICS_AGGREGATE { return configuration.METRICS_AGGREGATE } @@ -224,6 +217,7 @@ func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { if prom.Level == configuration.CFG_METRICS_FULL { return configuration.METRICS_FULL } + return configuration.METRICS_FULL } @@ -232,15 +226,20 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig var sources []DataSource metrics_level := GetMetricsLevelFromPromCfg(prom) + for _, acquisFile := range config.AcquisitionFiles { log.Infof("loading acquisition file : %s", acquisFile) + yamlFile, err := os.Open(acquisFile) if err != nil { return nil, err } + dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) + idx := -1 + for { var sub configuration.DataSourceCommonCfg err = dec.Decode(&sub) @@ -249,7 +248,9 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig if !errors.Is(err, io.EOF) { return nil, fmt.Errorf("failed to yaml decode %s: %w", acquisFile, err) } + log.Tracef("End of yaml file") + break } @@ -263,11 +264,13 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig log.Debugf("skipping empty item in %s", acquisFile) continue } + if sub.Source != "docker" { // docker is the only source that can be empty return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) } } + if sub.Source == "" { return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", 
acquisFile, idx) } diff --git a/pkg/cwversion/component/component.go b/pkg/cwversion/component/component.go new file mode 100644 index 00000000000..4036b63cf00 --- /dev/null +++ b/pkg/cwversion/component/component.go @@ -0,0 +1,34 @@ +package component + +// Package component provides functionality for managing the registration of +// optional, compile-time components in the system. This is meant as a space +// saving measure, separate from feature flags (package pkg/fflag) which are +// only enabled/disabled at runtime. + +// Built is a map of all the known components, and whether they are built-in or not. +// This is populated as soon as possible by the respective init() functions +var Built = map[string]bool { + "datasource_appsec": false, + "datasource_cloudwatch": false, + "datasource_docker": false, + "datasource_file": false, + "datasource_journalctl": false, + "datasource_k8s-audit": false, + "datasource_kafka": false, + "datasource_kinesis": false, + "datasource_loki": false, + "datasource_s3": false, + "datasource_syslog": false, + "datasource_wineventlog":false, + "cscli_setup": false, +} + +func Register(name string) { + if _, ok := Built[name]; !ok { + // having a list of the disabled components is essential + // to debug users' issues + panic("cannot register unknown compile-time component: " + name) + } + + Built[name] = true +} diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 867098e7d5a..2cb7de13e18 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -7,8 +7,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/version" - "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" ) @@ -18,16 +18,16 @@ var ( ) func FullString() string { - dsBuilt := []string{} - dsExcluded := []string{} + 
dsBuilt := map[string]struct{}{} + dsExcluded := map[string]struct{}{} - for _, ds := range maptools.SortedKeys(acquisition.AcquisitionSources) { - if acquisition.AcquisitionSources[ds] != nil { - dsBuilt = append(dsBuilt, ds) + for ds, built := range component.Built { + if built { + dsBuilt[ds] = struct{}{} continue } - dsExcluded = append(dsExcluded, ds) + dsExcluded[ds] = struct{}{} } ret := fmt.Sprintf("version: %s\n", version.String()) @@ -42,12 +42,16 @@ func FullString() string { ret += fmt.Sprintf("Constraint_api: %s\n", constraint.API) ret += fmt.Sprintf("Constraint_acquis: %s\n", constraint.Acquis) + built := "(none)" + if len(dsBuilt) > 0 { - ret += fmt.Sprintf("Built data sources: %s\n", strings.Join(dsBuilt, ", ")) + built = strings.Join(maptools.SortedKeys(dsBuilt), ", ") } + ret += fmt.Sprintf("Built-in optional components: %s\n", built) + if len(dsExcluded) > 0 { - ret += fmt.Sprintf("Excluded data sources: %s\n", strings.Join(dsExcluded, ", ")) + ret += fmt.Sprintf("Excluded components: %s\n", strings.Join(maptools.SortedKeys(dsExcluded), ", ")) } return ret From fee3debdccc71b7b4848cea95e8da0ea276117df Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 20 Sep 2024 16:00:58 +0200 Subject: [PATCH 308/581] context propagation: pkg/database/machines (#3248) --- cmd/crowdsec-cli/climachine/machines.go | 56 ++++++++++++---------- cmd/crowdsec-cli/clisupport/support.go | 6 +-- pkg/apiserver/apic.go | 4 +- pkg/apiserver/apic_metrics.go | 10 ++-- pkg/apiserver/apiserver_test.go | 10 ++-- pkg/apiserver/controllers/v1/heartbeat.go | 4 +- pkg/apiserver/controllers/v1/machines.go | 4 +- pkg/apiserver/middlewares/v1/jwt.go | 15 +++--- pkg/apiserver/usage_metrics_test.go | 32 ++++++------- pkg/database/alerts.go | 4 +- pkg/database/machines.go | 58 +++++++++++------------ 11 files changed, 109 insertions(+), 94 deletions(-) diff --git a/cmd/crowdsec-cli/climachine/machines.go b/cmd/crowdsec-cli/climachine/machines.go index 
30948f43056..1fbedcf57fd 100644 --- a/cmd/crowdsec-cli/climachine/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -1,6 +1,7 @@ package climachine import ( + "context" "encoding/csv" "encoding/json" "errors" @@ -210,11 +211,11 @@ func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { return nil } -func (cli *cliMachines) List(out io.Writer, db *database.Client) error { +func (cli *cliMachines) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - machines, err := db.ListMachines() + machines, err := db.ListMachines(ctx) if err != nil { return fmt.Errorf("unable to list machines: %w", err) } @@ -251,8 +252,8 @@ func (cli *cliMachines) newListCmd() *cobra.Command { Example: `cscli machines list`, Args: cobra.NoArgs, DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.List(color.Output, cli.db) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.List(cmd.Context(), color.Output, cli.db) }, } @@ -278,8 +279,8 @@ func (cli *cliMachines) newAddCmd() *cobra.Command { cscli machines add MyTestMachine --auto cscli machines add MyTestMachine --password MyPassword cscli machines add -f- --auto > /tmp/mycreds.yaml`, - RunE: func(_ *cobra.Command, args []string) error { - return cli.add(args, string(password), dumpFile, apiURL, interactive, autoAdd, force) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.add(cmd.Context(), args, string(password), dumpFile, apiURL, interactive, autoAdd, force) }, } @@ -294,7 +295,7 @@ cscli machines add -f- --auto > /tmp/mycreds.yaml`, return cmd } -func (cli *cliMachines) add(args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { +func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword string, dumpFile 
string, apiURL string, interactive bool, autoAdd bool, force bool) error { var ( err error machineID string @@ -353,7 +354,7 @@ func (cli *cliMachines) add(args []string, machinePassword string, dumpFile stri password := strfmt.Password(machinePassword) - _, err = cli.db.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType) + _, err = cli.db.CreateMachine(ctx, &machineID, &password, "", true, force, types.PasswordAuthType) if err != nil { return fmt.Errorf("unable to create machine: %w", err) } @@ -399,6 +400,7 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp var err error cfg := cli.cfg() + ctx := cmd.Context() // need to load config and db because PersistentPreRunE is not called for completions @@ -407,13 +409,13 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp return nil, cobra.ShellCompDirectiveNoFileComp } - cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + cli.db, err = require.DBClient(ctx, cfg.DbConfig) if err != nil { cobra.CompError("unable to list machines " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp } - machines, err := cli.db.ListMachines() + machines, err := cli.db.ListMachines(ctx) if err != nil { cobra.CompError("unable to list machines " + err.Error()) return nil, cobra.ShellCompDirectiveNoFileComp @@ -430,9 +432,9 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli *cliMachines) delete(machines []string, ignoreMissing bool) error { +func (cli *cliMachines) delete(ctx context.Context, machines []string, ignoreMissing bool) error { for _, machineID := range machines { - if err := cli.db.DeleteWatcher(machineID); err != nil { + if err := cli.db.DeleteWatcher(ctx, machineID); err != nil { var notFoundErr *database.MachineNotFoundError if ignoreMissing && errors.As(err, ¬FoundErr) { return nil @@ -460,8 +462,8 @@ func (cli *cliMachines) 
newDeleteCmd() *cobra.Command { Aliases: []string{"remove"}, DisableAutoGenTag: true, ValidArgsFunction: cli.validMachineID, - RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args, ignoreMissing) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.delete(cmd.Context(), args, ignoreMissing) }, } @@ -471,7 +473,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { return cmd } -func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { +func (cli *cliMachines) prune(ctx context.Context, duration time.Duration, notValidOnly bool, force bool) error { if duration < 2*time.Minute && !notValidOnly { if yes, err := ask.YesNo( "The duration you provided is less than 2 minutes. "+ @@ -484,12 +486,12 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b } machines := []*ent.Machine{} - if pending, err := cli.db.QueryPendingMachine(); err == nil { + if pending, err := cli.db.QueryPendingMachine(ctx); err == nil { machines = append(machines, pending...) } if !notValidOnly { - if pending, err := cli.db.QueryMachinesInactiveSince(time.Now().UTC().Add(-duration)); err == nil { + if pending, err := cli.db.QueryMachinesInactiveSince(ctx, time.Now().UTC().Add(-duration)); err == nil { machines = append(machines, pending...) 
} } @@ -512,7 +514,7 @@ func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force b } } - deleted, err := cli.db.BulkDeleteWatchers(machines) + deleted, err := cli.db.BulkDeleteWatchers(ctx, machines) if err != nil { return fmt.Errorf("unable to prune machines: %w", err) } @@ -540,8 +542,8 @@ cscli machines prune --duration 1h cscli machines prune --not-validated-only --force`, Args: cobra.NoArgs, DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.prune(duration, notValidOnly, force) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.prune(cmd.Context(), duration, notValidOnly, force) }, } @@ -553,8 +555,8 @@ cscli machines prune --not-validated-only --force`, return cmd } -func (cli *cliMachines) validate(machineID string) error { - if err := cli.db.ValidateMachine(machineID); err != nil { +func (cli *cliMachines) validate(ctx context.Context, machineID string) error { + if err := cli.db.ValidateMachine(ctx, machineID); err != nil { return fmt.Errorf("unable to validate machine '%s': %w", machineID, err) } @@ -571,8 +573,8 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { Example: `cscli machines validate "machine_name"`, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.validate(args[0]) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(cmd.Context(), args[0]) }, } @@ -690,9 +692,11 @@ func (cli *cliMachines) newInspectCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, ValidArgsFunction: cli.validMachineID, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() machineID := args[0] - machine, err := cli.db.QueryMachineByID(machineID) + + machine, err := cli.db.QueryMachineByID(ctx, machineID) if err != nil { return fmt.Errorf("unable to read machine data '%s': %w", machineID, err) } 
diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 7e41518805a..4474f5c8f11 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -210,7 +210,7 @@ func (cli *cliSupport) dumpBouncers(ctx context.Context, zw *zip.Writer, db *dat return nil } -func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { +func (cli *cliSupport) dumpAgents(ctx context.Context, zw *zip.Writer, db *database.Client) error { log.Info("Collecting agents") if db == nil { @@ -220,7 +220,7 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { out := new(bytes.Buffer) cm := climachine.New(cli.cfg) - if err := cm.List(out, db); err != nil { + if err := cm.List(ctx, out, db); err != nil { return err } @@ -529,7 +529,7 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { log.Warnf("could not collect bouncers information: %s", err) } - if err = cli.dumpAgents(zipWriter, db); err != nil { + if err = cli.dumpAgents(ctx, zipWriter, db); err != nil { log.Warnf("could not collect agents information: %s", err) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index b5384c6cc5c..c79d5f88e3f 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -85,7 +85,9 @@ func randomDuration(d time.Duration, delta time.Duration) time.Duration { func (a *apic) FetchScenariosListFromDB() ([]string, error) { scenarios := make([]string, 0) - machines, err := a.dbClient.ListMachines() + ctx := context.TODO() + + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, fmt.Errorf("while listing machines: %w", err) } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index e5821e4c1e2..16b2328dbe9 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -27,7 +27,7 @@ func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, allMetrics := 
&models.AllMetrics{} metricsIds := make([]int, 0) - lps, err := a.dbClient.ListMachines() + lps, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, nil, err } @@ -186,7 +186,7 @@ func (a *apic) MarkUsageMetricsAsSent(ctx context.Context, ids []int) error { } func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { - machines, err := a.dbClient.ListMachines() + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, err } @@ -230,8 +230,8 @@ func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { }, nil } -func (a *apic) fetchMachineIDs() ([]string, error) { - machines, err := a.dbClient.ListMachines() +func (a *apic) fetchMachineIDs(ctx context.Context) ([]string, error) { + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, err } @@ -277,7 +277,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { machineIDs := []string{} reloadMachineIDs := func() { - ids, err := a.fetchMachineIDs() + ids, err := a.fetchMachineIDs(ctx) if err != nil { log.Debugf("unable to get machines (%s), will retry", err) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 89c75f35d21..0db1ee5dcdc 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -182,12 +182,12 @@ func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { } func ValidateMachine(t *testing.T, machineID string, config *csconfig.DatabaseCfg) { - ctx := context.Background() + ctx := context.TODO() dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) - err = dbClient.ValidateMachine(machineID) + err = dbClient.ValidateMachine(ctx, machineID) require.NoError(t, err) } @@ -197,7 +197,7 @@ func GetMachineIP(t *testing.T, machineID string, config *csconfig.DatabaseCfg) dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) - machines, err := dbClient.ListMachines() + machines, err := dbClient.ListMachines(ctx) 
require.NoError(t, err) for _, machine := range machines { @@ -332,7 +332,7 @@ func TestUnknownPath(t *testing.T) { req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 404, w.Code) + assert.Equal(t, http.StatusNotFound, w.Code) } /* @@ -390,7 +390,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) - assert.Equal(t, 404, w.Code) + assert.Equal(t, http.StatusNotFound, w.Code) // wait for the request to happen time.Sleep(500 * time.Millisecond) diff --git a/pkg/apiserver/controllers/v1/heartbeat.go b/pkg/apiserver/controllers/v1/heartbeat.go index e1231eaa9ec..799b736ccfe 100644 --- a/pkg/apiserver/controllers/v1/heartbeat.go +++ b/pkg/apiserver/controllers/v1/heartbeat.go @@ -9,7 +9,9 @@ import ( func (c *Controller) HeartBeat(gctx *gin.Context) { machineID, _ := getMachineIDFromContext(gctx) - if err := c.DBClient.UpdateMachineLastHeartBeat(machineID); err != nil { + ctx := gctx.Request.Context() + + if err := c.DBClient.UpdateMachineLastHeartBeat(ctx, machineID); err != nil { c.HandleDBErrors(gctx, err) return } diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go index 0030f7d3b39..ff59e389cb1 100644 --- a/pkg/apiserver/controllers/v1/machines.go +++ b/pkg/apiserver/controllers/v1/machines.go @@ -46,6 +46,8 @@ func (c *Controller) shouldAutoRegister(token string, gctx *gin.Context) (bool, } func (c *Controller) CreateMachine(gctx *gin.Context) { + ctx := gctx.Request.Context() + var input models.WatcherRegistrationRequest if err := gctx.ShouldBindJSON(&input); err != nil { @@ -66,7 +68,7 @@ func (c *Controller) CreateMachine(gctx *gin.Context) { return } - if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { + if _, err := c.DBClient.CreateMachine(ctx, 
input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { c.HandleDBErrors(gctx, err) return } diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 64406deff3e..17ca5b28359 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -55,6 +55,7 @@ type authInput struct { } func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { + ctx := c.Request.Context() ret := authInput{} if j.TlsAuth == nil { @@ -76,7 +77,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { ret.clientMachine, err = j.DbClient.Ent.Machine.Query(). Where(machine.MachineId(ret.machineID)). - First(j.DbClient.CTX) + First(ctx) if ent.IsNotFound(err) { // Machine was not found, let's create it logger.Infof("machine %s not found, create it", ret.machineID) @@ -91,7 +92,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { password := strfmt.Password(pwd) - ret.clientMachine, err = j.DbClient.CreateMachine(&ret.machineID, &password, "", true, true, types.TlsAuthType) + ret.clientMachine, err = j.DbClient.CreateMachine(ctx, &ret.machineID, &password, "", true, true, types.TlsAuthType) if err != nil { return nil, fmt.Errorf("while creating machine entry for %s: %w", ret.machineID, err) } @@ -175,6 +176,8 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { auth *authInput ) + ctx := c.Request.Context() + if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { auth, err = j.authTLS(c) if err != nil { @@ -198,7 +201,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { } } - err = j.DbClient.UpdateMachineScenarios(scenarios, auth.clientMachine.ID) + err = j.DbClient.UpdateMachineScenarios(ctx, scenarios, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update scenarios list for '%s': %s\n", auth.machineID, err) return nil, jwt.ErrFailedAuthentication @@ -208,7 +211,7 @@ func (j *JWT) 
Authenticator(c *gin.Context) (interface{}, error) { clientIP := c.ClientIP() if auth.clientMachine.IpAddress == "" { - err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(ctx, clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.machineID, err) return nil, jwt.ErrFailedAuthentication @@ -218,7 +221,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { if auth.clientMachine.IpAddress != clientIP && auth.clientMachine.IpAddress != "" { log.Warningf("new IP address detected for machine '%s': %s (old: %s)", auth.clientMachine.MachineId, clientIP, auth.clientMachine.IpAddress) - err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(ctx, clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.clientMachine.MachineId, err) return nil, jwt.ErrFailedAuthentication @@ -231,7 +234,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { return nil, jwt.ErrFailedAuthentication } - if err := j.DbClient.UpdateMachineVersion(useragent[1], auth.clientMachine.ID); err != nil { + if err := j.DbClient.UpdateMachineVersion(ctx, useragent[1], auth.clientMachine.ID); err != nil { log.Errorf("unable to update machine '%s' version '%s': %s", auth.clientMachine.MachineId, useragent[1], err) log.Errorf("bad user agent from : %s", clientIP) diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 019de5fb970..b231fb22ad8 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -30,7 +30,7 @@ func TestLPMetrics(t *testing.T) { name: "empty metrics for LP", body: `{ }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing log processor data", authType: PASSWORD, }, @@ -50,7 +50,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - 
expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -74,7 +74,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -98,7 +98,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing remediation component data", authType: APIKEY, }, @@ -117,7 +117,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedResponse: "", expectedMetricsCount: 1, expectedFeatureFlags: "a,b,c", @@ -138,7 +138,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "log_processors.0.datasources in body is required", authType: PASSWORD, }, @@ -157,7 +157,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedOSName: "foo", expectedOSVersion: "42", @@ -179,7 +179,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "log_processors.0.os.name in body is required", authType: PASSWORD, }, @@ -199,7 +199,7 @@ func TestLPMetrics(t *testing.T) { assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) - machine, _ := dbClient.QueryMachineByID("test") + machine, _ := dbClient.QueryMachineByID(ctx, "test") metrics, _ := dbClient.GetLPUsageMetricsByMachineID(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) @@ -233,7 +233,7 @@ func TestRCMetrics(t *testing.T) { name: "empty metrics for RC", body: `{ }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: 
"Missing remediation component data", authType: APIKEY, }, @@ -251,7 +251,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -273,7 +273,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -295,7 +295,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing log processor data", authType: PASSWORD, }, @@ -312,7 +312,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedResponse: "", expectedMetricsCount: 1, expectedFeatureFlags: "a,b,c", @@ -331,7 +331,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedOSName: "foo", expectedOSVersion: "42", @@ -351,7 +351,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "remediation_components.0.os.name in body is required", authType: APIKEY, }, diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 3dfb0dc8197..d2760a209f9 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -687,8 +687,10 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str err error ) + ctx := context.TODO() + if machineID != "" { - owner, err = c.QueryMachineByID(machineID) + owner, err = c.QueryMachineByID(ctx, machineID) if err != nil { if !errors.Is(err, UserNotExists) { return nil, fmt.Errorf("machine '%s': %w", machineID, err) diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 27d737e625e..d8c02825312 100644 --- a/pkg/database/machines.go +++ 
b/pkg/database/machines.go @@ -72,7 +72,7 @@ func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, return nil } -func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { +func (c *Client) CreateMachine(ctx context.Context, machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost) if err != nil { c.Log.Warningf("CreateMachine: %s", err) @@ -82,20 +82,20 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA machineExist, err := c.Ent.Machine. Query(). Where(machine.MachineIdEQ(*machineID)). - Select(machine.FieldMachineId).Strings(c.CTX) + Select(machine.FieldMachineId).Strings(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } if len(machineExist) > 0 { if force { - _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(c.CTX) + _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(ctx) if err != nil { c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(UpdateFail, "machine '%s'", *machineID) } - machine, err := c.QueryMachineByID(*machineID) + machine, err := c.QueryMachineByID(ctx, *machineID) if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } @@ -113,7 +113,7 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA SetIpAddress(ipAddress). SetIsValidated(isValidated). SetAuthType(authType). 
- Save(c.CTX) + Save(ctx) if err != nil { c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(InsertFail, "creating machine '%s'", *machineID) @@ -122,11 +122,11 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA return machine, nil } -func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) { +func (c *Client) QueryMachineByID(ctx context.Context, machineID string) (*ent.Machine, error) { machine, err := c.Ent.Machine. Query(). Where(machine.MachineIdEQ(machineID)). - Only(c.CTX) + Only(ctx) if err != nil { c.Log.Warningf("QueryMachineByID : %s", err) return &ent.Machine{}, errors.Wrapf(UserNotExists, "user '%s'", machineID) @@ -135,8 +135,8 @@ func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) { return machine, nil } -func (c *Client) ListMachines() ([]*ent.Machine, error) { - machines, err := c.Ent.Machine.Query().All(c.CTX) +func (c *Client) ListMachines(ctx context.Context) ([]*ent.Machine, error) { + machines, err := c.Ent.Machine.Query().All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "listing machines: %s", err) } @@ -144,8 +144,8 @@ func (c *Client) ListMachines() ([]*ent.Machine, error) { return machines, nil } -func (c *Client) ValidateMachine(machineID string) error { - rets, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetIsValidated(true).Save(c.CTX) +func (c *Client) ValidateMachine(ctx context.Context, machineID string) error { + rets, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetIsValidated(true).Save(ctx) if err != nil { return errors.Wrapf(UpdateFail, "validating machine: %s", err) } @@ -157,8 +157,8 @@ func (c *Client) ValidateMachine(machineID string) error { return nil } -func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { - machines, err := c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX) +func (c *Client) QueryPendingMachine(ctx context.Context) ([]*ent.Machine, 
error) { + machines, err := c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(ctx) if err != nil { c.Log.Warningf("QueryPendingMachine : %s", err) return nil, errors.Wrapf(QueryFail, "querying pending machines: %s", err) @@ -167,11 +167,11 @@ func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { return machines, nil } -func (c *Client) DeleteWatcher(name string) error { +func (c *Client) DeleteWatcher(ctx context.Context, name string) error { nbDeleted, err := c.Ent.Machine. Delete(). Where(machine.MachineIdEQ(name)). - Exec(c.CTX) + Exec(ctx) if err != nil { return err } @@ -183,13 +183,13 @@ func (c *Client) DeleteWatcher(name string) error { return nil } -func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { +func (c *Client) BulkDeleteWatchers(ctx context.Context, machines []*ent.Machine) (int, error) { ids := make([]int, len(machines)) for i, b := range machines { ids[i] = b.ID } - nbDeleted, err := c.Ent.Machine.Delete().Where(machine.IDIn(ids...)).Exec(c.CTX) + nbDeleted, err := c.Ent.Machine.Delete().Where(machine.IDIn(ids...)).Exec(ctx) if err != nil { return nbDeleted, err } @@ -197,8 +197,8 @@ func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { return nbDeleted, nil } -func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { - _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(c.CTX) +func (c *Client) UpdateMachineLastHeartBeat(ctx context.Context, machineID string) error { + _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(ctx) if err != nil { return errors.Wrapf(UpdateFail, "updating machine last_heartbeat: %s", err) } @@ -206,11 +206,11 @@ func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { return nil } -func (c *Client) UpdateMachineScenarios(scenarios string, id int) error { +func (c *Client) UpdateMachineScenarios(ctx 
context.Context, scenarios string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetUpdatedAt(time.Now().UTC()). SetScenarios(scenarios). - Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine in database: %w", err) } @@ -218,10 +218,10 @@ func (c *Client) UpdateMachineScenarios(scenarios string, id int) error { return nil } -func (c *Client) UpdateMachineIP(ipAddr string, id int) error { +func (c *Client) UpdateMachineIP(ctx context.Context, ipAddr string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetIpAddress(ipAddr). - Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine IP in database: %w", err) } @@ -229,10 +229,10 @@ func (c *Client) UpdateMachineIP(ipAddr string, id int) error { return nil } -func (c *Client) UpdateMachineVersion(ipAddr string, id int) error { +func (c *Client) UpdateMachineVersion(ctx context.Context, ipAddr string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetVersion(ipAddr). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine version in database: %w", err) } @@ -240,8 +240,8 @@ func (c *Client) UpdateMachineVersion(ipAddr string, id int) error { return nil } -func (c *Client) IsMachineRegistered(machineID string) (bool, error) { - exist, err := c.Ent.Machine.Query().Where().Select(machine.FieldMachineId).Strings(c.CTX) +func (c *Client) IsMachineRegistered(ctx context.Context, machineID string) (bool, error) { + exist, err := c.Ent.Machine.Query().Where().Select(machine.FieldMachineId).Strings(ctx) if err != nil { return false, err } @@ -257,11 +257,11 @@ func (c *Client) IsMachineRegistered(machineID string) (bool, error) { return false, nil } -func (c *Client) QueryMachinesInactiveSince(t time.Time) ([]*ent.Machine, error) { +func (c *Client) QueryMachinesInactiveSince(ctx context.Context, t time.Time) ([]*ent.Machine, error) { return c.Ent.Machine.Query().Where( machine.Or( machine.And(machine.LastHeartbeatLT(t), machine.IsValidatedEQ(true)), machine.And(machine.LastHeartbeatIsNil(), machine.CreatedAtLT(t)), ), - ).All(c.CTX) + ).All(ctx) } From 4a2a6632273c5db364a2acd8e02ba4184e05582a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 20 Sep 2024 20:45:21 +0200 Subject: [PATCH 309/581] context propagation: pkg/database/bouncers (#3249) --- cmd/crowdsec-cli/clibouncer/bouncers.go | 26 ++++++++-------- pkg/apiserver/apiserver_test.go | 2 +- pkg/apiserver/controllers/v1/decisions.go | 8 +++-- pkg/apiserver/middlewares/v1/api_key.go | 18 ++++++++---- pkg/apiserver/usage_metrics_test.go | 2 +- pkg/database/bouncers.go | 36 +++++++++++------------ 6 files changed, 51 insertions(+), 41 deletions(-) diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 89e91b63911..226fbb7e922 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -208,7 +208,7 @@ func (cli *cliBouncers) 
newListCmd() *cobra.Command { return cmd } -func (cli *cliBouncers) add(bouncerName string, key string) error { +func (cli *cliBouncers) add(ctx context.Context, bouncerName string, key string) error { var err error keyLength := 32 @@ -220,7 +220,7 @@ func (cli *cliBouncers) add(bouncerName string, key string) error { } } - _, err = cli.db.CreateBouncer(bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) + _, err = cli.db.CreateBouncer(ctx, bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) if err != nil { return fmt.Errorf("unable to create bouncer: %w", err) } @@ -254,8 +254,8 @@ func (cli *cliBouncers) newAddCmd() *cobra.Command { cscli bouncers add MyBouncerName --key `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.add(args[0], key) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.add(cmd.Context(), args[0], key) }, } @@ -304,9 +304,9 @@ func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli *cliBouncers) delete(bouncers []string, ignoreMissing bool) error { +func (cli *cliBouncers) delete(ctx context.Context, bouncers []string, ignoreMissing bool) error { for _, bouncerID := range bouncers { - if err := cli.db.DeleteBouncer(bouncerID); err != nil { + if err := cli.db.DeleteBouncer(ctx, bouncerID); err != nil { var notFoundErr *database.BouncerNotFoundError if ignoreMissing && errors.As(err, ¬FoundErr) { return nil @@ -332,8 +332,8 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { Aliases: []string{"remove"}, DisableAutoGenTag: true, ValidArgsFunction: cli.validBouncerID, - RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args, ignoreMissing) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.delete(cmd.Context(), args, ignoreMissing) }, } @@ -343,7 +343,7 @@ func (cli *cliBouncers) newDeleteCmd() 
*cobra.Command { return cmd } -func (cli *cliBouncers) prune(duration time.Duration, force bool) error { +func (cli *cliBouncers) prune(ctx context.Context, duration time.Duration, force bool) error { if duration < 2*time.Minute { if yes, err := ask.YesNo( "The duration you provided is less than 2 minutes. "+ @@ -355,7 +355,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { } } - bouncers, err := cli.db.QueryBouncersInactiveSince(time.Now().UTC().Add(-duration)) + bouncers, err := cli.db.QueryBouncersInactiveSince(ctx, time.Now().UTC().Add(-duration)) if err != nil { return fmt.Errorf("unable to query bouncers: %w", err) } @@ -378,7 +378,7 @@ func (cli *cliBouncers) prune(duration time.Duration, force bool) error { } } - deleted, err := cli.db.BulkDeleteBouncers(bouncers) + deleted, err := cli.db.BulkDeleteBouncers(ctx, bouncers) if err != nil { return fmt.Errorf("unable to prune bouncers: %w", err) } @@ -403,8 +403,8 @@ func (cli *cliBouncers) newPruneCmd() *cobra.Command { DisableAutoGenTag: true, Example: `cscli bouncers prune -d 45m cscli bouncers prune -d 45m --force`, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.prune(duration, force) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.prune(cmd.Context(), duration, force) }, } diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 0db1ee5dcdc..081fa23f15e 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -297,7 +297,7 @@ func CreateTestBouncer(t *testing.T, config *csconfig.DatabaseCfg) string { apiKey, err := middlewares.GenerateAPIKey(keyLength) require.NoError(t, err) - _, err = dbClient.CreateBouncer("test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType) + _, err = dbClient.CreateBouncer(ctx, "test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType) require.NoError(t, err) return apiKey diff --git a/pkg/apiserver/controllers/v1/decisions.go 
b/pkg/apiserver/controllers/v1/decisions.go index 54e9b0290cc..139280ab497 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -43,6 +43,8 @@ func (c *Controller) GetDecision(gctx *gin.Context) { data []*ent.Decision ) + ctx := gctx.Request.Context() + bouncerInfo, err := getBouncerFromContext(gctx) if err != nil { gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"}) @@ -73,7 +75,7 @@ func (c *Controller) GetDecision(gctx *gin.Context) { } if bouncerInfo.LastPull == nil || time.Now().UTC().Sub(*bouncerInfo.LastPull) >= time.Minute { - if err := c.DBClient.UpdateBouncerLastPull(time.Now().UTC(), bouncerInfo.ID); err != nil { + if err := c.DBClient.UpdateBouncerLastPull(ctx, time.Now().UTC(), bouncerInfo.ID); err != nil { log.Errorf("failed to update bouncer last pull: %v", err) } } @@ -370,6 +372,8 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en func (c *Controller) StreamDecision(gctx *gin.Context) { var err error + ctx := gctx.Request.Context() + streamStartTime := time.Now().UTC() bouncerInfo, err := getBouncerFromContext(gctx) @@ -400,7 +404,7 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { if err == nil { //Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions - if err := c.DBClient.UpdateBouncerLastPull(streamStartTime, bouncerInfo.ID); err != nil { + if err := c.DBClient.UpdateBouncerLastPull(ctx, streamStartTime, bouncerInfo.ID); err != nil { log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err) } } diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index e822666db0f..d438c9b15a4 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -64,6 +64,8 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } + ctx := c.Request.Context() + extractedCN, err 
:= a.TlsAuth.ValidateCert(c) if err != nil { logger.Warn(err) @@ -73,7 +75,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger = logger.WithField("cn", extractedCN) bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) - bouncer, err := a.DbClient.SelectBouncerByName(bouncerName) + bouncer, err := a.DbClient.SelectBouncerByName(ctx, bouncerName) // This is likely not the proper way, but isNotFound does not seem to work if err != nil && strings.Contains(err.Error(), "bouncer not found") { @@ -87,7 +89,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger.Infof("Creating bouncer %s", bouncerName) - bouncer, err = a.DbClient.CreateBouncer(bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType) + bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType) if err != nil { logger.Errorf("while creating bouncer db entry: %s", err) return nil @@ -112,9 +114,11 @@ func (a *APIKey) authPlain(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } + ctx := c.Request.Context() + hashStr := HashSHA512(val[0]) - bouncer, err := a.DbClient.SelectBouncer(hashStr) + bouncer, err := a.DbClient.SelectBouncer(ctx, hashStr) if err != nil { logger.Errorf("while fetching bouncer info: %s", err) return nil @@ -132,6 +136,8 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return func(c *gin.Context) { var bouncer *ent.Bouncer + ctx := c.Request.Context() + clientIP := c.ClientIP() logger := log.WithField("ip", clientIP) @@ -153,7 +159,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { logger = logger.WithField("name", bouncer.Name) if bouncer.IPAddress == "" { - if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerIP(ctx, clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": 
"access forbidden"}) c.Abort() @@ -166,7 +172,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { if bouncer.IPAddress != clientIP && bouncer.IPAddress != "" && c.Request.Method != http.MethodHead { log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, clientIP, bouncer.IPAddress) - if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerIP(ctx, clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() @@ -182,7 +188,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } if bouncer.Version != useragent[1] || bouncer.Type != useragent[0] { - if err := a.DbClient.UpdateBouncerTypeAndVersion(useragent[0], useragent[1], bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerTypeAndVersion(ctx, useragent[0], useragent[1], bouncer.ID); err != nil { logger.Errorf("failed to update bouncer version and type: %s", err) c.JSON(http.StatusForbidden, gin.H{"message": "bad user agent"}) c.Abort() diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index b231fb22ad8..4d17b0d6ed8 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -371,7 +371,7 @@ func TestRCMetrics(t *testing.T) { assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) - bouncer, _ := dbClient.SelectBouncerByName("test") + bouncer, _ := dbClient.SelectBouncerByName(ctx, "test") metrics, _ := dbClient.GetBouncerUsageMetricsByName(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index 6ff308ff786..04ef830ae72 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -41,8 +41,8 @@ func (c *Client) BouncerUpdateBaseMetrics(ctx context.Context, bouncerName strin return nil } 
-func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(c.CTX) +func (c *Client) SelectBouncer(ctx context.Context, apiKeyHash string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(ctx) if err != nil { return nil, err } @@ -50,8 +50,8 @@ func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { return result, nil } -func (c *Client) SelectBouncerByName(bouncerName string) (*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().Where(bouncer.NameEQ(bouncerName)).First(c.CTX) +func (c *Client) SelectBouncerByName(ctx context.Context, bouncerName string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.NameEQ(bouncerName)).First(ctx) if err != nil { return nil, err } @@ -68,14 +68,14 @@ func (c *Client) ListBouncers(ctx context.Context) ([]*ent.Bouncer, error) { return result, nil } -func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authType string) (*ent.Bouncer, error) { +func (c *Client) CreateBouncer(ctx context.Context, name string, ipAddr string, apiKey string, authType string) (*ent.Bouncer, error) { bouncer, err := c.Ent.Bouncer. Create(). SetName(name). SetAPIKey(apiKey). SetRevoked(false). SetAuthType(authType). - Save(c.CTX) + Save(ctx) if err != nil { if ent.IsConstraintError(err) { return nil, fmt.Errorf("bouncer %s already exists", name) @@ -87,11 +87,11 @@ func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authTy return bouncer, nil } -func (c *Client) DeleteBouncer(name string) error { +func (c *Client) DeleteBouncer(ctx context.Context, name string) error { nbDeleted, err := c.Ent.Bouncer. Delete(). Where(bouncer.NameEQ(name)). 
- Exec(c.CTX) + Exec(ctx) if err != nil { return err } @@ -103,13 +103,13 @@ func (c *Client) DeleteBouncer(name string) error { return nil } -func (c *Client) BulkDeleteBouncers(bouncers []*ent.Bouncer) (int, error) { +func (c *Client) BulkDeleteBouncers(ctx context.Context, bouncers []*ent.Bouncer) (int, error) { ids := make([]int, len(bouncers)) for i, b := range bouncers { ids[i] = b.ID } - nbDeleted, err := c.Ent.Bouncer.Delete().Where(bouncer.IDIn(ids...)).Exec(c.CTX) + nbDeleted, err := c.Ent.Bouncer.Delete().Where(bouncer.IDIn(ids...)).Exec(ctx) if err != nil { return nbDeleted, fmt.Errorf("unable to delete bouncers: %w", err) } @@ -117,10 +117,10 @@ func (c *Client) BulkDeleteBouncers(bouncers []*ent.Bouncer) (int, error) { return nbDeleted, nil } -func (c *Client) UpdateBouncerLastPull(lastPull time.Time, id int) error { +func (c *Client) UpdateBouncerLastPull(ctx context.Context, lastPull time.Time, id int) error { _, err := c.Ent.Bouncer.UpdateOneID(id). SetLastPull(lastPull). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine last pull in database: %w", err) } @@ -128,8 +128,8 @@ func (c *Client) UpdateBouncerLastPull(lastPull time.Time, id int) error { return nil } -func (c *Client) UpdateBouncerIP(ipAddr string, id int) error { - _, err := c.Ent.Bouncer.UpdateOneID(id).SetIPAddress(ipAddr).Save(c.CTX) +func (c *Client) UpdateBouncerIP(ctx context.Context, ipAddr string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetIPAddress(ipAddr).Save(ctx) if err != nil { return fmt.Errorf("unable to update bouncer ip address in database: %w", err) } @@ -137,8 +137,8 @@ func (c *Client) UpdateBouncerIP(ipAddr string, id int) error { return nil } -func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id int) error { - _, err := c.Ent.Bouncer.UpdateOneID(id).SetVersion(version).SetType(bType).Save(c.CTX) +func (c *Client) UpdateBouncerTypeAndVersion(ctx context.Context, bType string, version string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetVersion(version).SetType(bType).Save(ctx) if err != nil { return fmt.Errorf("unable to update bouncer type and version in database: %w", err) } @@ -146,7 +146,7 @@ func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id in return nil } -func (c *Client) QueryBouncersInactiveSince(t time.Time) ([]*ent.Bouncer, error) { +func (c *Client) QueryBouncersInactiveSince(ctx context.Context, t time.Time) ([]*ent.Bouncer, error) { return c.Ent.Bouncer.Query().Where( // poor man's coalesce bouncer.Or( @@ -156,5 +156,5 @@ func (c *Client) QueryBouncersInactiveSince(t time.Time) ([]*ent.Bouncer, error) bouncer.CreatedAtLT(t), ), ), - ).All(c.CTX) + ).All(ctx) } From 1133afe58ddfb190b8397bc6604ba70217130d35 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 23 Sep 2024 17:33:46 +0200 Subject: [PATCH 310/581] context propagation: pkg/database/{lock,decision} (#3251) * context 
propagation: pkg/database/lock * QueryAllDecisionsWithFilters(ctx...), QueryExpiredDecisionsWithFilters(ctx...) * more Query...Decision...(ctx..) * rest of decisions * lint --- pkg/apiserver/apic.go | 11 +-- pkg/apiserver/controllers/v1/decisions.go | 82 ++++++++++++++--------- pkg/apiserver/papi_cmd.go | 8 ++- pkg/database/decisions.go | 79 +++++++++------------- pkg/database/lock.go | 36 ++++++---- pkg/exprhelpers/helpers.go | 17 +++-- 6 files changed, 133 insertions(+), 100 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index c79d5f88e3f..8b09e3e5f9d 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -426,6 +426,7 @@ func (a *apic) CAPIPullIsOld() (bool, error) { } func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, deleteCounters map[string]map[string]int) (int, error) { + ctx := context.TODO() nbDeleted := 0 for _, decision := range deletedDecisions { @@ -438,7 +439,7 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet filter["scopes"] = []string{*decision.Scope} } - dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(filter) + dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return 0, fmt.Errorf("expiring decisions error: %w", err) } @@ -458,6 +459,8 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { var nbDeleted int + ctx := context.TODO() + for _, decisions := range deletedDecisions { scope := decisions.Scope @@ -470,7 +473,7 @@ func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisi filter["scopes"] = []string{*scope} } - dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(filter) + dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return 0, 
fmt.Errorf("expiring decisions error: %w", err) } @@ -640,7 +643,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { log.Debug("Acquiring lock for pullCAPI") - err = a.dbClient.AcquirePullCAPILock() + err = a.dbClient.AcquirePullCAPILock(ctx) if a.dbClient.IsLocked(err) { log.Info("PullCAPI is already running, skipping") return nil @@ -650,7 +653,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { defer func() { log.Debug("Releasing lock for pullCAPI") - if err := a.dbClient.ReleasePullCAPILock(); err != nil { + if err := a.dbClient.ReleasePullCAPILock(ctx); err != nil { log.Errorf("while releasing lock: %v", err) } }() diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 139280ab497..d1aa79bbf07 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -1,8 +1,8 @@ package v1 import ( + "context" "encoding/json" - "fmt" "net/http" "strconv" "time" @@ -52,7 +52,7 @@ func (c *Controller) GetDecision(gctx *gin.Context) { return } - data, err = c.DBClient.QueryDecisionWithFilter(gctx.Request.URL.Query()) + data, err = c.DBClient.QueryDecisionWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) @@ -93,7 +93,9 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { return } - nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionByID(decisionID) + ctx := gctx.Request.Context() + + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionByID(ctx, decisionID) if err != nil { c.HandleDBErrors(gctx, err) @@ -115,7 +117,9 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { } func (c *Controller) DeleteDecisions(gctx *gin.Context) { - nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionsWithFilter(gctx.Request.URL.Query()) + ctx := gctx.Request.Context() + + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionsWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { 
c.HandleDBErrors(gctx, err) @@ -136,32 +140,37 @@ func (c *Controller) DeleteDecisions(gctx *gin.Context) { gctx.JSON(http.StatusOK, deleteDecisionResp) } -func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFunc func(map[string][]string) ([]*ent.Decision, error)) error { +func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFunc func(context.Context, map[string][]string) ([]*ent.Decision, error)) error { // respBuffer := bytes.NewBuffer([]byte{}) - limit := 30000 //FIXME : make it configurable + limit := 30000 // FIXME : make it configurable needComma := false lastId := 0 - limitStr := fmt.Sprintf("%d", limit) + ctx := gctx.Request.Context() + + limitStr := strconv.Itoa(limit) filters["limit"] = []string{limitStr} + for { if lastId > 0 { - lastIdStr := fmt.Sprintf("%d", lastId) + lastIdStr := strconv.Itoa(lastId) filters["id_gt"] = []string{lastIdStr} } - data, err := dbFunc(filters) + data, err := dbFunc(ctx, filters) if err != nil { return err } + if len(data) > 0 { lastId = data[len(data)-1].ID + results := FormatDecisions(data) for _, decision := range results { decisionJSON, _ := json.Marshal(decision) if needComma { - //respBuffer.Write([]byte(",")) + // respBuffer.Write([]byte(",")) gctx.Writer.WriteString(",") } else { needComma = true @@ -174,10 +183,12 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun return err } - //respBuffer.Reset() + // respBuffer.Reset() } } + log.Debugf("startup: %d decisions returned (limit: %d, lastid: %d)", len(data), limit, lastId) + if len(data) < limit { gctx.Writer.Flush() @@ -188,32 +199,37 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun return nil } -func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull *time.Time, dbFunc func(*time.Time, map[string][]string) ([]*ent.Decision, error)) error { - //respBuffer := bytes.NewBuffer([]byte{}) - limit := 30000 //FIXME : make it 
configurable +func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull *time.Time, dbFunc func(context.Context, *time.Time, map[string][]string) ([]*ent.Decision, error)) error { + // respBuffer := bytes.NewBuffer([]byte{}) + limit := 30000 // FIXME : make it configurable needComma := false lastId := 0 - limitStr := fmt.Sprintf("%d", limit) + ctx := gctx.Request.Context() + + limitStr := strconv.Itoa(limit) filters["limit"] = []string{limitStr} + for { if lastId > 0 { - lastIdStr := fmt.Sprintf("%d", lastId) + lastIdStr := strconv.Itoa(lastId) filters["id_gt"] = []string{lastIdStr} } - data, err := dbFunc(lastPull, filters) + data, err := dbFunc(ctx, lastPull, filters) if err != nil { return err } + if len(data) > 0 { lastId = data[len(data)-1].ID + results := FormatDecisions(data) for _, decision := range results { decisionJSON, _ := json.Marshal(decision) if needComma { - //respBuffer.Write([]byte(",")) + // respBuffer.Write([]byte(",")) gctx.Writer.WriteString(",") } else { needComma = true @@ -226,10 +242,12 @@ func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPul return err } - //respBuffer.Reset() + // respBuffer.Reset() } } + log.Debugf("startup: %d decisions returned (limit: %d, lastid: %d)", len(data), limit, lastId) + if len(data) < limit { gctx.Writer.Flush() @@ -261,7 +279,7 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B } gctx.Writer.WriteString(`], "deleted": [`) - //Expired decisions + // Expired decisions err = writeStartupDecisions(gctx, filters, c.DBClient.QueryExpiredDecisionsWithFilters) if err != nil { log.Errorf("failed sending expired decisions for startup: %v", err) @@ -302,8 +320,12 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B } func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *ent.Bouncer, streamStartTime time.Time, filters map[string][]string) error { - var data []*ent.Decision - var 
err error + var ( + data []*ent.Decision + err error + ) + + ctx := gctx.Request.Context() ret := make(map[string][]*models.Decision, 0) ret["new"] = []*models.Decision{} @@ -311,7 +333,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en if val, ok := gctx.Request.URL.Query()["startup"]; ok { if val[0] == "true" { - data, err = c.DBClient.QueryAllDecisionsWithFilters(filters) + data, err = c.DBClient.QueryAllDecisionsWithFilters(ctx, filters) if err != nil { log.Errorf("failed querying decisions: %v", err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) @@ -322,7 +344,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en ret["new"] = FormatDecisions(data) // getting expired decisions - data, err = c.DBClient.QueryExpiredDecisionsWithFilters(filters) + data, err = c.DBClient.QueryExpiredDecisionsWithFilters(ctx, filters) if err != nil { log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) @@ -339,14 +361,14 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en } // getting new decisions - data, err = c.DBClient.QueryNewDecisionsSinceWithFilters(bouncerInfo.LastPull, filters) + data, err = c.DBClient.QueryNewDecisionsSinceWithFilters(ctx, bouncerInfo.LastPull, filters) if err != nil { log.Errorf("unable to query new decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return err } - //data = KeepLongestDecision(data) + // data = KeepLongestDecision(data) ret["new"] = FormatDecisions(data) since := time.Time{} @@ -355,7 +377,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en } // getting expired decisions - data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(&since, filters) // do we want to give exactly lastPull time ? 
+ data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(ctx, &since, filters) // do we want to give exactly lastPull time ? if err != nil { log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) @@ -384,8 +406,8 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { } if gctx.Request.Method == http.MethodHead { - //For HEAD, just return as the bouncer won't get a body anyway, so no need to query the db - //We also don't update the last pull time, as it would mess with the delta sent on the next request (if done without startup=true) + // For HEAD, just return as the bouncer won't get a body anyway, so no need to query the db + // We also don't update the last pull time, as it would mess with the delta sent on the next request (if done without startup=true) gctx.String(http.StatusOK, "") return @@ -403,7 +425,7 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { } if err == nil { - //Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions + // Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions if err := c.DBClient.UpdateBouncerLastPull(ctx, streamStartTime, bouncerInfo.ID); err != nil { log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err) } diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index 943eb4139de..18b19b03418 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -43,6 +43,8 @@ type listUnsubscribe struct { } func DecisionCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + switch message.Header.OperationCmd { case "delete": data, err := json.Marshal(message.Data) @@ -65,7 +67,7 @@ func DecisionCmd(message *Message, p *Papi, sync bool) error { filter := make(map[string][]string) filter["uuid"] = UUIDs - _, deletedDecisions, err := 
p.DBClient.ExpireDecisionsWithFilter(filter) + _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return fmt.Errorf("unable to expire decisions %+v: %w", UUIDs, err) } @@ -168,6 +170,8 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { } func ManagementCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + if sync { p.Logger.Infof("Ignoring management command from PAPI in sync mode") return nil @@ -195,7 +199,7 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { filter["origin"] = []string{types.ListOrigin} filter["scenario"] = []string{unsubscribeMsg.Name} - _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(filter) + _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return fmt.Errorf("unable to expire decisions for list %s : %w", unsubscribeMsg.Name, err) } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 5fd4757c883..8547990c25f 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -121,7 +121,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] return query, nil } -func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryAllDecisionsWithFilters(ctx context.Context, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -138,7 +138,7 @@ func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*e query = query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") @@ -147,7 +147,7 @@ func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*e return data, nil } 
-func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryExpiredDecisionsWithFilters(ctx context.Context, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), ) @@ -165,7 +165,7 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( return []*ent.Decision{}, errors.Wrap(QueryFail, "get expired decisions with filters") } - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions") @@ -196,7 +196,7 @@ func (c *Client) QueryDecisionCountByScenario(ctx context.Context) ([]*Decisions return r, nil } -func (c *Client) QueryDecisionWithFilter(filter map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryDecisionWithFilter(ctx context.Context, filter map[string][]string) ([]*ent.Decision, error) { var data []*ent.Decision var err error @@ -218,7 +218,7 @@ func (c *Client) QueryDecisionWithFilter(filter map[string][]string) ([]*ent.Dec decision.FieldValue, decision.FieldScope, decision.FieldOrigin, - ).Scan(c.CTX, &data) + ).Scan(ctx, &data) if err != nil { c.Log.Warningf("QueryDecisionWithFilter : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "query decision failed") @@ -255,7 +255,7 @@ func longestDecisionForScopeTypeValue(s *sql.Selector) { ) } -func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryExpiredDecisionsSinceWithFilters(ctx context.Context, since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), ) @@ -277,7 +277,7 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters query = 
query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryExpiredDecisionsSinceWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions with filters") @@ -286,7 +286,7 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters return data, nil } -func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryNewDecisionsSinceWithFilters(ctx context.Context, since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -308,7 +308,7 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map query = query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String()) @@ -317,20 +317,7 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map return data, nil } -func (c *Client) DeleteDecisionById(decisionID int) ([]*ent.Decision, error) { - toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) - if err != nil { - c.Log.Warningf("DeleteDecisionById : %s", err) - return nil, errors.Wrapf(DeleteFail, "decision with id '%d' doesn't exist", decisionID) - } - - count, err := c.DeleteDecisions(toDelete) - c.Log.Debugf("deleted %d decisions", count) - - return toDelete, err -} - -func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { +func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int 
@@ -433,13 +420,13 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } - toDelete, err := decisions.All(c.CTX) + toDelete, err := decisions.All(ctx) if err != nil { c.Log.Warningf("DeleteDecisionsWithFilter : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } - count, err := c.DeleteDecisions(toDelete) + count, err := c.DeleteDecisions(ctx, toDelete) if err != nil { c.Log.Warningf("While deleting decisions : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") @@ -449,7 +436,7 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, } // ExpireDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items -func (c *Client) ExpireDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { +func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int @@ -558,13 +545,13 @@ func (c *Client) ExpireDecisionsWithFilter(filter map[string][]string) (string, return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } - DecisionsToDelete, err := decisions.All(c.CTX) + DecisionsToDelete, err := decisions.All(ctx) if err != nil { c.Log.Warningf("ExpireDecisionsWithFilter : %s", err) return "0", nil, errors.Wrap(DeleteFail, "expire decisions with provided filter") } - count, err := c.ExpireDecisions(DecisionsToDelete) + count, err := c.ExpireDecisions(ctx, DecisionsToDelete) if err != nil { return "0", nil, errors.Wrapf(DeleteFail, "expire decisions with provided filter : %s", err) } @@ -583,13 +570,13 @@ func decisionIDs(decisions []*ent.Decision) []int { // ExpireDecisions sets the expiration of a list of decisions to now() // It returns the 
number of impacted decisions for the CAPI/PAPI -func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { +func (c *Client) ExpireDecisions(ctx context.Context, decisions []*ent.Decision) (int, error) { if len(decisions) <= decisionDeleteBulkSize { ids := decisionIDs(decisions) rows, err := c.Ent.Decision.Update().Where( decision.IDIn(ids...), - ).SetUntil(time.Now().UTC()).Save(c.CTX) + ).SetUntil(time.Now().UTC()).Save(ctx) if err != nil { return 0, fmt.Errorf("expire decisions with provided filter: %w", err) } @@ -602,7 +589,7 @@ func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { total := 0 for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { - rows, err := c.ExpireDecisions(chunk) + rows, err := c.ExpireDecisions(ctx, chunk) if err != nil { return total, err } @@ -615,13 +602,13 @@ func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { // DeleteDecisions removes a list of decisions from the database // It returns the number of impacted decisions for the CAPI/PAPI -func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { +func (c *Client) DeleteDecisions(ctx context.Context, decisions []*ent.Decision) (int, error) { if len(decisions) < decisionDeleteBulkSize { ids := decisionIDs(decisions) rows, err := c.Ent.Decision.Delete().Where( decision.IDIn(ids...), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { return 0, fmt.Errorf("hard delete decisions with provided filter: %w", err) } @@ -634,7 +621,7 @@ func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { tot := 0 for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { - rows, err := c.DeleteDecisions(chunk) + rows, err := c.DeleteDecisions(ctx, chunk) if err != nil { return tot, err } @@ -646,8 +633,8 @@ func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { } // ExpireDecision set the expiration of a decision to now() -func (c *Client) 
ExpireDecisionByID(decisionID int) (int, []*ent.Decision, error) { - toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) +func (c *Client) ExpireDecisionByID(ctx context.Context, decisionID int) (int, []*ent.Decision, error) { + toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(ctx) // XXX: do we want 500 or 404 here? if err != nil || len(toUpdate) == 0 { @@ -659,12 +646,12 @@ func (c *Client) ExpireDecisionByID(decisionID int) (int, []*ent.Decision, error return 0, nil, ItemNotFound } - count, err := c.ExpireDecisions(toUpdate) + count, err := c.ExpireDecisions(ctx, toUpdate) return count, toUpdate, err } -func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { +func (c *Client) CountDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int @@ -682,7 +669,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") } - count, err = decisions.Count(c.CTX) + count, err = decisions.Count(ctx) if err != nil { return 0, errors.Wrapf(err, "fail to count decisions") } @@ -690,7 +677,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { return count, nil } -func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) { +func (c *Client) CountActiveDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int @@ -710,7 +697,7 @@ func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) decisions = decisions.Where(decision.UntilGT(time.Now().UTC())) - count, err = decisions.Count(c.CTX) + count, err = decisions.Count(ctx) if err != nil { return 0, fmt.Errorf("fail to count decisions: %w", err) } @@ -718,7 +705,7 @@ func (c *Client) 
CountActiveDecisionsByValue(decisionValue string) (int, error) return count, nil } -func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.Duration, error) { +func (c *Client) GetActiveDecisionsTimeLeftByValue(ctx context.Context, decisionValue string) (time.Duration, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int @@ -740,7 +727,7 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D decisions = decisions.Order(ent.Desc(decision.FieldUntil)) - decision, err := decisions.First(c.CTX) + decision, err := decisions.First(ctx) if err != nil && !ent.IsNotFound(err) { return 0, fmt.Errorf("fail to get decision: %w", err) } @@ -752,7 +739,7 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D return decision.Until.Sub(time.Now().UTC()), nil } -func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Time) (int, error) { +func (c *Client) CountDecisionsSinceByValue(ctx context.Context, decisionValue string, since time.Time) (int, error) { ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) if err != nil { return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) @@ -768,7 +755,7 @@ func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Tim return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") } - count, err := decisions.Count(c.CTX) + count, err := decisions.Count(ctx) if err != nil { return 0, errors.Wrapf(err, "fail to count decisions") } diff --git a/pkg/database/lock.go b/pkg/database/lock.go index d25b71870f0..474228a069c 100644 --- a/pkg/database/lock.go +++ b/pkg/database/lock.go @@ -1,6 +1,7 @@ package database import ( + "context" "time" "github.com/pkg/errors" @@ -16,40 +17,45 @@ const ( CapiPullLockName = "pullCAPI" ) -func (c *Client) AcquireLock(name string) error { +func (c *Client) AcquireLock(ctx context.Context, name 
string) error { log.Debugf("acquiring lock %s", name) _, err := c.Ent.Lock.Create(). SetName(name). SetCreatedAt(types.UtcNow()). - Save(c.CTX) + Save(ctx) + if ent.IsConstraintError(err) { return err } + if err != nil { return errors.Wrapf(InsertFail, "insert lock: %s", err) } + return nil } -func (c *Client) ReleaseLock(name string) error { +func (c *Client) ReleaseLock(ctx context.Context, name string) error { log.Debugf("releasing lock %s", name) - _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(c.CTX) + _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } + return nil } -func (c *Client) ReleaseLockWithTimeout(name string, timeout int) error { +func (c *Client) ReleaseLockWithTimeout(ctx context.Context, name string, timeout int) error { log.Debugf("releasing lock %s with timeout of %d minutes", name, timeout) + _, err := c.Ent.Lock.Delete().Where( lock.NameEQ(name), lock.CreatedAtLT(time.Now().UTC().Add(-time.Duration(timeout)*time.Minute)), - ).Exec(c.CTX) - + ).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } + return nil } @@ -57,23 +63,25 @@ func (c *Client) IsLocked(err error) bool { return ent.IsConstraintError(err) } -func (c *Client) AcquirePullCAPILock() error { - - /*delete orphan "old" lock if present*/ - err := c.ReleaseLockWithTimeout(CapiPullLockName, CAPIPullLockTimeout) +func (c *Client) AcquirePullCAPILock(ctx context.Context) error { + // delete orphan "old" lock if present + err := c.ReleaseLockWithTimeout(ctx, CapiPullLockName, CAPIPullLockTimeout) if err != nil { log.Errorf("unable to release pullCAPI lock: %s", err) } - return c.AcquireLock(CapiPullLockName) + + return c.AcquireLock(ctx, CapiPullLockName) } -func (c *Client) ReleasePullCAPILock() error { +func (c *Client) ReleasePullCAPILock(ctx context.Context) error { log.Debugf("deleting lock %s", CapiPullLockName) + _, err := c.Ent.Lock.Delete().Where( 
lock.NameEQ(CapiPullLockName), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } + return nil } diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 2ca7d0be79a..6b7eb0840e9 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -2,6 +2,7 @@ package exprhelpers import ( "bufio" + "context" "encoding/base64" "errors" "fmt" @@ -592,7 +593,10 @@ func GetDecisionsCount(params ...any) (any, error) { return 0, nil } - count, err := dbClient.CountDecisionsByValue(value) + + ctx := context.TODO() + + count, err := dbClient.CountDecisionsByValue(ctx, value) if err != nil { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility @@ -613,8 +617,11 @@ func GetDecisionsSinceCount(params ...any) (any, error) { log.Errorf("Failed to parse since parameter '%s' : %s", since, err) return 0, nil } + + ctx := context.TODO() sinceTime := time.Now().UTC().Add(-sinceDuration) - count, err := dbClient.CountDecisionsSinceByValue(value, sinceTime) + + count, err := dbClient.CountDecisionsSinceByValue(ctx, value, sinceTime) if err != nil { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility @@ -628,7 +635,8 @@ func GetActiveDecisionsCount(params ...any) (any, error) { log.Error("No database config to call GetActiveDecisionsCount()") return 0, nil } - count, err := dbClient.CountActiveDecisionsByValue(value) + ctx := context.TODO() + count, err := dbClient.CountActiveDecisionsByValue(ctx, value) if err != nil { log.Errorf("Failed to get active decisions count from value '%s'", value) return 0, err @@ -642,7 +650,8 @@ func GetActiveDecisionsTimeLeft(params ...any) (any, 
error) { log.Error("No database config to call GetActiveDecisionsTimeLeft()") return 0, nil } - timeLeft, err := dbClient.GetActiveDecisionsTimeLeftByValue(value) + ctx := context.TODO() + timeLeft, err := dbClient.GetActiveDecisionsTimeLeftByValue(ctx, value) if err != nil { log.Errorf("Failed to get active decisions time left from value '%s'", value) return 0, err From 3945a991bd265b765e9ab80e28aff70c21c707c9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 24 Sep 2024 14:13:45 +0200 Subject: [PATCH 311/581] context propagation: pkg/database/alerts (#3252) * alerts * drop CTX from dbclient * lint * pkg/database/alerts: context.TODO() * cscli: context.Background() -> cmd.Context() --- .golangci.yml | 4 +- cmd/crowdsec-cli/clialert/alerts.go | 20 ++-- cmd/crowdsec-cli/cliconsole/console.go | 8 +- cmd/crowdsec-cli/clidecision/decisions.go | 22 ++--- cmd/crowdsec-cli/clilapi/lapi.go | 2 +- .../clinotifications/notifications.go | 8 +- pkg/apiserver/apic.go | 10 +- pkg/apiserver/apic_test.go | 10 +- pkg/apiserver/controllers/v1/alerts.go | 18 +++- pkg/apiserver/controllers/v1/decisions.go | 4 +- pkg/apiserver/controllers/v1/metrics.go | 15 ++- pkg/apiserver/decisions_test.go | 10 +- pkg/apiserver/middlewares/v1/cache.go | 2 +- pkg/apiserver/middlewares/v1/crl.go | 10 +- pkg/apiserver/middlewares/v1/jwt.go | 4 +- pkg/apiserver/papi_cmd.go | 4 +- pkg/database/alerts.go | 97 +++++++++---------- pkg/database/database.go | 2 - pkg/database/decisions.go | 6 +- pkg/database/flush.go | 6 +- pkg/database/metrics.go | 2 +- 21 files changed, 141 insertions(+), 123 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index b76e2613be7..54c0acb0644 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -20,7 +20,7 @@ linters-settings: maintidx: # raise this after refactoring - under: 16 + under: 15 misspell: locale: US @@ -118,7 +118,7 @@ linters-settings: arguments: [6] - name: function-length # lower this after refactoring - arguments: [110, 
235] + arguments: [110, 237] - name: get-return disabled: true - name: increment-decrement diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index dbb7ca14db5..75454e945f2 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -235,7 +235,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { return cmd } -func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { +func (cli *cliAlerts) list(ctx context.Context, alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { var err error *alertListFilter.ScopeEquals, err = SanitizeScope(*alertListFilter.ScopeEquals, *alertListFilter.IPEquals, *alertListFilter.RangeEquals) @@ -311,7 +311,7 @@ func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, alertListFilter.Contains = new(bool) } - alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) + alerts, _, err := cli.client.Alerts.List(ctx, alertListFilter) if err != nil { return fmt.Errorf("unable to list alerts: %w", err) } @@ -354,7 +354,7 @@ cscli alerts list --type ban`, Long: `List alerts with optional filters`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.list(alertListFilter, limit, contained, printMachine) + return cli.list(cmd.Context(), alertListFilter, limit, contained, printMachine) }, } @@ -377,7 +377,7 @@ cscli alerts list --type ban`, return cmd } -func (cli *cliAlerts) delete(delFilter apiclient.AlertsDeleteOpts, activeDecision *bool, deleteAll bool, delAlertByID string, contained *bool) error { +func (cli *cliAlerts) delete(ctx context.Context, delFilter apiclient.AlertsDeleteOpts, activeDecision *bool, deleteAll bool, delAlertByID string, contained *bool) error { var err error if !deleteAll { @@ -423,12 +423,12 @@ func (cli *cliAlerts) delete(delFilter apiclient.AlertsDeleteOpts, 
activeDecisio var alerts *models.DeleteAlertsResponse if delAlertByID == "" { - alerts, _, err = cli.client.Alerts.Delete(context.Background(), delFilter) + alerts, _, err = cli.client.Alerts.Delete(ctx, delFilter) if err != nil { return fmt.Errorf("unable to delete alerts: %w", err) } } else { - alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) + alerts, _, err = cli.client.Alerts.DeleteOne(ctx, delAlertByID) if err != nil { return fmt.Errorf("unable to delete alert: %w", err) } @@ -480,7 +480,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.delete(delFilter, activeDecision, deleteAll, delAlertByID, contained) + return cli.delete(cmd.Context(), delFilter, activeDecision, deleteAll, delAlertByID, contained) }, } @@ -498,7 +498,7 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return cmd } -func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { +func (cli *cliAlerts) inspect(ctx context.Context, details bool, alertIDs ...string) error { cfg := cli.cfg() for _, alertID := range alertIDs { @@ -507,7 +507,7 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { return fmt.Errorf("bad alert id %s", alertID) } - alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) + alert, _, err := cli.client.Alerts.GetByID(ctx, id) if err != nil { return fmt.Errorf("can't find alert with id %s: %w", alertID, err) } @@ -551,7 +551,7 @@ func (cli *cliAlerts) newInspectCmd() *cobra.Command { _ = cmd.Help() return errors.New("missing alert_id") } - return cli.inspect(details, args...) + return cli.inspect(cmd.Context(), details, args...) 
}, } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index af1ba316c2d..448ddcee7fa 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -66,7 +66,7 @@ func (cli *cliConsole) NewCommand() *cobra.Command { return cmd } -func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []string, opts []string) error { +func (cli *cliConsole) enroll(ctx context.Context, key string, name string, overwrite bool, tags []string, opts []string) error { cfg := cli.cfg() password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) @@ -127,7 +127,7 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st VersionPrefix: "v3", }) - resp, err := c.Auth.EnrollWatcher(context.Background(), key, name, tags, overwrite) + resp, err := c.Auth.EnrollWatcher(ctx, key, name, tags, overwrite) if err != nil { return fmt.Errorf("could not enroll instance: %w", err) } @@ -173,8 +173,8 @@ After running this command your will need to validate the enrollment in the weba valid options are : %s,all (see 'cscli console status' for details)`, strings.Join(csconfig.CONSOLE_CONFIGS, ",")), Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.enroll(args[0], name, overwrite, tags, opts) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.enroll(cmd.Context(), args[0], name, overwrite, tags, opts) }, } diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index b82ebe3086e..1f8781a3716 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -170,7 +170,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { return cmd } -func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { +func (cli *cliDecisions) list(ctx 
context.Context, filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { var err error *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, *filter.IPEquals, *filter.RangeEquals) @@ -249,7 +249,7 @@ func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, con filter.Contains = new(bool) } - alerts, _, err := cli.client.Alerts.List(context.Background(), filter) + alerts, _, err := cli.client.Alerts.List(ctx, filter) if err != nil { return fmt.Errorf("unable to retrieve decisions: %w", err) } @@ -293,7 +293,7 @@ cscli decisions list --origin lists --scenario list_name Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.list(filter, NoSimu, contained, printMachine) + return cli.list(cmd.Context(), filter, NoSimu, contained, printMachine) }, } @@ -317,7 +317,7 @@ cscli decisions list --origin lists --scenario list_name return cmd } -func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { +func (cli *cliDecisions) add(ctx context.Context, addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { alerts := models.AddAlertsRequest{} origin := types.CscliOrigin capacity := int32(0) @@ -386,7 +386,7 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a } alerts = append(alerts, &alert) - _, _, err = cli.client.Alerts.Add(context.Background(), alerts) + _, _, err = cli.client.Alerts.Add(ctx, alerts) if err != nil { return err } @@ -419,7 +419,7 @@ cscli decisions add --scope username --value foobar Args: cobra.ExactArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.add(addIP, addRange, addDuration, addValue, addScope, addReason, addType) + return cli.add(cmd.Context(), addIP, addRange, addDuration, addValue, addScope, addReason, addType) }, } @@ -436,7 +436,7 @@ cscli decisions add 
--scope username --value foobar return cmd } -func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDecisionID string, contained *bool) error { +func (cli *cliDecisions) delete(ctx context.Context, delFilter apiclient.DecisionsDeleteOpts, delDecisionID string, contained *bool) error { var err error /*take care of shorthand options*/ @@ -480,7 +480,7 @@ func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDeci var decisions *models.DeleteDecisionResponse if delDecisionID == "" { - decisions, _, err = cli.client.Decisions.Delete(context.Background(), delFilter) + decisions, _, err = cli.client.Decisions.Delete(ctx, delFilter) if err != nil { return fmt.Errorf("unable to delete decisions: %w", err) } @@ -489,7 +489,7 @@ func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDeci return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) } - decisions, _, err = cli.client.Decisions.DeleteOne(context.Background(), delDecisionID) + decisions, _, err = cli.client.Decisions.DeleteOne(ctx, delDecisionID) if err != nil { return fmt.Errorf("unable to delete decision: %w", err) } @@ -543,8 +543,8 @@ cscli decisions delete --origin lists --scenario list_name return nil }, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.delete(delFilter, delDecisionID, contained) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.delete(cmd.Context(), delFilter, delDecisionID, contained) }, } diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index 75fdc5c239d..bb721eefe03 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -68,7 +68,7 @@ func queryLAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login Scenarios: itemsForAPI, } - _, _, err = client.Auth.AuthenticateWatcher(context.Background(), t) + _, _, err = client.Auth.AuthenticateWatcher(ctx, t) if err != nil { return false, err } diff --git 
a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 314f97db23e..0641dd1a7d4 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -368,9 +368,9 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(_ *cobra.Command, args []string) error { + PreRunE: func(cmd *cobra.Command, args []string) error { var err error - alert, err = cli.fetchAlertFromArgString(args[0]) + alert, err = cli.fetchAlertFromArgString(cmd.Context(), args[0]) if err != nil { return err } @@ -447,7 +447,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return cmd } -func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Alert, error) { +func (cli *cliNotifications) fetchAlertFromArgString(ctx context.Context, toParse string) (*models.Alert, error) { cfg := cli.cfg() id, err := strconv.Atoi(toParse) @@ -470,7 +470,7 @@ func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Al return nil, fmt.Errorf("error creating the client for the API: %w", err) } - alert, _, err := client.Alerts.GetByID(context.Background(), id) + alert, _, err := client.Alerts.GetByID(ctx, id) if err != nil { return nil, fmt.Errorf("can't find alert with id %d: %w", id, err) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 8b09e3e5f9d..9b56fef6549 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -406,13 +406,13 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { } } -func (a *apic) CAPIPullIsOld() (bool, error) { +func (a *apic) CAPIPullIsOld(ctx context.Context) (bool, error) { /*only pull community blocklist if it's older than 1h30 */ alerts := a.dbClient.Ent.Alert.Query() alerts = alerts.Where(alert.HasDecisionsWith(decision.OriginEQ(database.CapiMachineID))) alerts = 
alerts.Where(alert.CreatedAtGTE(time.Now().UTC().Add(-time.Duration(1*time.Hour + 30*time.Minute)))) //nolint:unconvert - count, err := alerts.Count(a.dbClient.CTX) + count, err := alerts.Count(ctx) if err != nil { return false, fmt.Errorf("while looking for CAPI alert: %w", err) } @@ -634,7 +634,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { } if !forcePull { - if lastPullIsOld, err := a.CAPIPullIsOld(); err != nil { + if lastPullIsOld, err := a.CAPIPullIsOld(ctx); err != nil { return err } else if !lastPullIsOld { return nil @@ -769,6 +769,8 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis } func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) error { + ctx := context.TODO() + for _, alert := range alertsFromCapi { setAlertScenario(alert, addCounters, deleteCounters) log.Debugf("%s has %d decisions", *alert.Source.Scope, len(alert.Decisions)) @@ -777,7 +779,7 @@ func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string log.Warningf("sqlite is not using WAL mode, LAPI might become unresponsive when inserting the community blocklist") } - alertID, inserted, deleted, err := a.dbClient.UpdateCommunityBlocklist(alert) + alertID, inserted, deleted, err := a.dbClient.UpdateCommunityBlocklist(ctx, alert) if err != nil { return fmt.Errorf("while saving alert from %s: %w", *alert.Source.Scope, err) } diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 97943b495e5..3bb158acf35 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -113,7 +113,9 @@ func assertTotalAlertCount(t *testing.T, dbClient *database.Client, count int) { func TestAPICCAPIPullIsOld(t *testing.T) { api := getAPIC(t) - isOld, err := api.CAPIPullIsOld() + ctx := context.Background() + + isOld, err := api.CAPIPullIsOld(ctx) require.NoError(t, err) assert.True(t, isOld) @@ -124,7 +126,7 @@ func 
TestAPICCAPIPullIsOld(t *testing.T) { SetScope("Country"). SetValue("Blah"). SetOrigin(types.CAPIOrigin). - SaveX(context.Background()) + SaveX(ctx) api.dbClient.Ent.Alert.Create(). SetCreatedAt(time.Now()). @@ -132,9 +134,9 @@ func TestAPICCAPIPullIsOld(t *testing.T) { AddDecisions( decision, ). - SaveX(context.Background()) + SaveX(ctx) - isOld, err = api.CAPIPullIsOld() + isOld, err = api.CAPIPullIsOld(ctx) require.NoError(t, err) assert.False(t, isOld) diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 84b3094865c..d1f93228512 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -127,6 +127,7 @@ func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uin func (c *Controller) CreateAlert(gctx *gin.Context) { var input models.AddAlertsRequest + ctx := gctx.Request.Context() machineID, _ := getMachineIDFromContext(gctx) if err := gctx.ShouldBindJSON(&input); err != nil { @@ -239,7 +240,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { c.DBClient.CanFlush = false } - alerts, err := c.DBClient.CreateAlert(machineID, input) + alerts, err := c.DBClient.CreateAlert(ctx, machineID, input) c.DBClient.CanFlush = true if err != nil { @@ -261,7 +262,9 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { // FindAlerts: returns alerts from the database based on the specified filter func (c *Controller) FindAlerts(gctx *gin.Context) { - result, err := c.DBClient.QueryAlertWithFilter(gctx.Request.URL.Query()) + ctx := gctx.Request.Context() + + result, err := c.DBClient.QueryAlertWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) return @@ -279,6 +282,7 @@ func (c *Controller) FindAlerts(gctx *gin.Context) { // FindAlertByID returns the alert associated with the ID func (c *Controller) FindAlertByID(gctx *gin.Context) { + ctx := gctx.Request.Context() alertIDStr := gctx.Param("alert_id") alertID, err := 
strconv.Atoi(alertIDStr) @@ -287,7 +291,7 @@ func (c *Controller) FindAlertByID(gctx *gin.Context) { return } - result, err := c.DBClient.GetAlertByID(alertID) + result, err := c.DBClient.GetAlertByID(ctx, alertID) if err != nil { c.HandleDBErrors(gctx, err) return @@ -307,6 +311,8 @@ func (c *Controller) FindAlertByID(gctx *gin.Context) { func (c *Controller) DeleteAlertByID(gctx *gin.Context) { var err error + ctx := gctx.Request.Context() + incomingIP := gctx.ClientIP() if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) @@ -321,7 +327,7 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { return } - err = c.DBClient.DeleteAlertByID(decisionID) + err = c.DBClient.DeleteAlertByID(ctx, decisionID) if err != nil { c.HandleDBErrors(gctx, err) return @@ -334,13 +340,15 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { // DeleteAlerts deletes alerts from the database based on the specified filter func (c *Controller) DeleteAlerts(gctx *gin.Context) { + ctx := gctx.Request.Context() + incomingIP := gctx.ClientIP() if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) return } - nbDeleted, err := c.DBClient.DeleteAlertWithFilter(gctx.Request.URL.Query()) + nbDeleted, err := c.DBClient.DeleteAlertWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) return diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index d1aa79bbf07..ffefffc226b 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -264,7 +264,7 @@ func (c *Controller) StreamDecisionChunked(gctx 
*gin.Context, bouncerInfo *ent.B gctx.Writer.Header().Set("Content-Type", "application/json") gctx.Writer.Header().Set("Transfer-Encoding", "chunked") gctx.Writer.WriteHeader(http.StatusOK) - gctx.Writer.WriteString(`{"new": [`) //No need to check for errors, the doc says it always returns nil + gctx.Writer.WriteString(`{"new": [`) // No need to check for errors, the doc says it always returns nil // if the blocker just started, return all decisions if val, ok := gctx.Request.URL.Query()["startup"]; ok && val[0] == "true" { @@ -340,7 +340,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en return err } - //data = KeepLongestDecision(data) + // data = KeepLongestDecision(data) ret["new"] = FormatDecisions(data) // getting expired decisions diff --git a/pkg/apiserver/controllers/v1/metrics.go b/pkg/apiserver/controllers/v1/metrics.go index ddb38512a11..4f6ee0986eb 100644 --- a/pkg/apiserver/controllers/v1/metrics.go +++ b/pkg/apiserver/controllers/v1/metrics.go @@ -68,7 +68,8 @@ func PrometheusBouncersHasEmptyDecision(c *gin.Context) { bouncer, _ := getBouncerFromContext(c) if bouncer != nil { LapiNilDecisions.With(prometheus.Labels{ - "bouncer": bouncer.Name}).Inc() + "bouncer": bouncer.Name, + }).Inc() } } @@ -76,7 +77,8 @@ func PrometheusBouncersHasNonEmptyDecision(c *gin.Context) { bouncer, _ := getBouncerFromContext(c) if bouncer != nil { LapiNonNilDecisions.With(prometheus.Labels{ - "bouncer": bouncer.Name}).Inc() + "bouncer": bouncer.Name, + }).Inc() } } @@ -87,7 +89,8 @@ func PrometheusMachinesMiddleware() gin.HandlerFunc { LapiMachineHits.With(prometheus.Labels{ "machine": machineID, "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": c.Request.Method, + }).Inc() } c.Next() @@ -101,7 +104,8 @@ func PrometheusBouncersMiddleware() gin.HandlerFunc { LapiBouncerHits.With(prometheus.Labels{ "bouncer": bouncer.Name, "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": 
c.Request.Method, + }).Inc() } c.Next() @@ -114,7 +118,8 @@ func PrometheusMiddleware() gin.HandlerFunc { LapiRouteHits.With(prometheus.Labels{ "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": c.Request.Method, + }).Inc() c.Next() elapsed := time.Since(startTime) diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index e4c9dda47ce..1c70c495a3a 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -191,7 +191,7 @@ func TestDeleteDecisionByID(t *testing.T) { // Create Valid Alert lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") - //Have one alerts + // Have one alert w := lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code := readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) @@ -210,7 +210,7 @@ func TestDeleteDecisionByID(t *testing.T) { errResp, _ = readDecisionsErrorResp(t, w) assert.Equal(t, "decision with id '100' doesn't exist: unable to delete", errResp["message"]) - //Have one alerts + // Have one alert w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) @@ -223,7 +223,7 @@ func TestDeleteDecisionByID(t *testing.T) { resp, _ := readDecisionsDeleteResp(t, w) assert.Equal(t, "1", resp.NbDeleted) - //Have one alert (because we delete an alert that has dup targets) + // Have one alert (because we delete an alert that has dup targets) w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) @@ -251,7 +251,7 @@ func TestDeleteDecision(t *testing.T) { } func TestStreamStartDecisionDedup(t *testing.T) { - //Ensure that at stream startup we only get the longest decision + // Ensure that at stream startup we only get the longest decision lapi := SetupLAPITest(t) // Create Valid Alert : 3 decisions for 
127.0.0.1, longest has id=3 @@ -299,7 +299,7 @@ func TestStreamStartDecisionDedup(t *testing.T) { w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - //and now we only get a deleted decision + // and now we only get a deleted decision w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) diff --git a/pkg/apiserver/middlewares/v1/cache.go b/pkg/apiserver/middlewares/v1/cache.go index a058ec40393..b0037bc4fa4 100644 --- a/pkg/apiserver/middlewares/v1/cache.go +++ b/pkg/apiserver/middlewares/v1/cache.go @@ -9,7 +9,7 @@ import ( ) type cacheEntry struct { - err error // if nil, the certificate is not revocated + err error // if nil, the certificate is not revocated timestamp time.Time } diff --git a/pkg/apiserver/middlewares/v1/crl.go b/pkg/apiserver/middlewares/v1/crl.go index f85a410998e..64d7d3f0d96 100644 --- a/pkg/apiserver/middlewares/v1/crl.go +++ b/pkg/apiserver/middlewares/v1/crl.go @@ -12,13 +12,13 @@ import ( ) type CRLChecker struct { - path string // path to the CRL file - fileInfo os.FileInfo // last stat of the CRL file - crls []*x509.RevocationList // parsed CRLs + path string // path to the CRL file + fileInfo os.FileInfo // last stat of the CRL file + crls []*x509.RevocationList // parsed CRLs logger *log.Entry mu sync.RWMutex - lastLoad time.Time // time when the CRL file was last read successfully - onLoad func() // called when the CRL file changes (and is read successfully) + lastLoad time.Time // time when the CRL file was last read successfully + onLoad func() // called when the CRL file changes (and is read successfully) } func NewCRLChecker(crlPath string, onLoad func(), logger *log.Entry) (*CRLChecker, error) { diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 17ca5b28359..9171e9fce06 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ 
b/pkg/apiserver/middlewares/v1/jwt.go @@ -128,6 +128,8 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) { err error ) + ctx := c.Request.Context() + ret := authInput{} if err = c.ShouldBindJSON(&loginInput); err != nil { @@ -144,7 +146,7 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) { ret.clientMachine, err = j.DbClient.Ent.Machine.Query(). Where(machine.MachineId(ret.machineID)). - First(j.DbClient.CTX) + First(ctx) if err != nil { log.Infof("Error machine login for %s : %+v ", ret.machineID, err) return nil, err diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index 18b19b03418..78f5dc9b0fe 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -97,6 +97,8 @@ func DecisionCmd(message *Message, p *Papi, sync bool) error { } func AlertCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + switch message.Header.OperationCmd { case "add": data, err := json.Marshal(message.Data) @@ -155,7 +157,7 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { } // use a different method: alert and/or decision might already be partially present in the database - _, err = p.DBClient.CreateOrUpdateAlert("", alert) + _, err = p.DBClient.CreateOrUpdateAlert(ctx, "", alert) if err != nil { log.Errorf("Failed to create alerts in DB: %s", err) } else { diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index d2760a209f9..ede9c89fe9a 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -35,12 +35,12 @@ const ( // CreateOrUpdateAlert is specific to PAPI : It checks if alert already exists, otherwise inserts it // if alert already exists, it checks it associated decisions already exists // if some associated decisions are missing (ie. 
previous insert ended up in error) it inserts them -func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) (string, error) { +func (c *Client) CreateOrUpdateAlert(ctx context.Context, machineID string, alertItem *models.Alert) (string, error) { if alertItem.UUID == "" { return "", errors.New("alert UUID is empty") } - alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(c.CTX) + alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(ctx) if err != nil && !ent.IsNotFound(err) { return "", fmt.Errorf("unable to query alerts for uuid %s: %w", alertItem.UUID, err) @@ -48,7 +48,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) // alert wasn't found, insert it (expected hotpath) if ent.IsNotFound(err) || len(alerts) == 0 { - alertIDs, err := c.CreateAlert(machineID, []*models.Alert{alertItem}) + alertIDs, err := c.CreateAlert(ctx, machineID, []*models.Alert{alertItem}) if err != nil { return "", fmt.Errorf("unable to create alert: %w", err) } @@ -165,7 +165,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) builderChunks := slicetools.Chunks(decisionBuilders, c.decisionBulkSize) for _, builderChunk := range builderChunks { - decisionsCreateRet, err := c.Ent.Decision.CreateBulk(builderChunk...).Save(c.CTX) + decisionsCreateRet, err := c.Ent.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { return "", fmt.Errorf("creating alert decisions: %w", err) } @@ -178,7 +178,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) decisionChunks := slicetools.Chunks(decisions, c.decisionBulkSize) for _, decisionChunk := range decisionChunks { - err = c.Ent.Alert.Update().Where(alert.UUID(alertItem.UUID)).AddDecisions(decisionChunk...).Exec(c.CTX) + err = c.Ent.Alert.Update().Where(alert.UUID(alertItem.UUID)).AddDecisions(decisionChunk...).Exec(ctx) if err != nil { return "", 
fmt.Errorf("updating alert %s: %w", alertItem.UUID, err) } @@ -191,7 +191,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) // it takes care of creating the new alert with the associated decisions, and it will as well deleted the "older" overlapping decisions: // 1st pull, you get decisions [1,2,3]. it inserts [1,2,3] // 2nd pull, you get decisions [1,2,3,4]. it inserts [1,2,3,4] and will try to delete [1,2,3,4] with a different alert ID and same origin -func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, int, error) { +func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models.Alert) (int, int, int, error) { if alertItem == nil { return 0, 0, 0, errors.New("nil alert") } @@ -244,7 +244,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in SetScenarioHash(*alertItem.ScenarioHash). SetRemediation(true) // it's from CAPI, we always have decisions - alertRef, err := alertB.Save(c.CTX) + alertRef, err := alertB.Save(ctx) if err != nil { return 0, 0, 0, errors.Wrapf(BulkError, "error creating alert : %s", err) } @@ -253,7 +253,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in return alertRef.ID, 0, 0, nil } - txClient, err := c.Ent.Tx(c.CTX) + txClient, err := c.Ent.Tx(ctx) if err != nil { return 0, 0, 0, errors.Wrapf(BulkError, "error creating transaction : %s", err) } @@ -347,7 +347,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in decision.OriginEQ(DecOrigin), decision.Not(decision.HasOwnerWith(alert.IDEQ(alertRef.ID))), decision.ValueIn(deleteChunk...), - )).Exec(c.CTX) + )).Exec(ctx) if err != nil { rollbackErr := txClient.Rollback() if rollbackErr != nil { @@ -363,7 +363,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in builderChunks := slicetools.Chunks(decisionBuilders, c.decisionBulkSize) for _, builderChunk := range builderChunks { - 
insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(c.CTX) + insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { rollbackErr := txClient.Rollback() if rollbackErr != nil { @@ -391,7 +391,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in return alertRef.ID, inserted, deleted, nil } -func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decisions []*models.Decision) ([]*ent.Decision, error) { +func (c *Client) createDecisionChunk(ctx context.Context, simulated bool, stopAtTime time.Time, decisions []*models.Decision) ([]*ent.Decision, error) { decisionCreate := []*ent.DecisionCreate{} for _, decisionItem := range decisions { @@ -436,7 +436,7 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis return nil, nil } - ret, err := c.Ent.Decision.CreateBulk(decisionCreate...).Save(c.CTX) + ret, err := c.Ent.Decision.CreateBulk(decisionCreate...).Save(ctx) if err != nil { return nil, err } @@ -444,7 +444,7 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis return ret, nil } -func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) { +func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) { alertBuilders := []*ent.AlertCreate{} alertDecisions := [][]*ent.Decision{} @@ -540,7 +540,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ c.Log.Warningf("dropped 'serialized' field (machine %s / scenario %s)", machineID, *alertItem.Scenario) } - events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(c.CTX) + events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(ctx) if err != nil { return nil, errors.Wrapf(BulkError, "creating alert events: %s", err) } @@ -554,12 +554,14 @@ func (c *Client) createAlertChunk(machineID string, 
owner *ent.Machine, alerts [ value := metaItem.Value if len(metaItem.Value) > 4095 { - c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) + c.Log.Warningf("truncated meta %s: value too long", metaItem.Key) + value = value[:4095] } if len(metaItem.Key) > 255 { - c.Log.Warningf("truncated meta %s : key too long", metaItem.Key) + c.Log.Warningf("truncated meta %s: key too long", metaItem.Key) + key = key[:255] } @@ -568,7 +570,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ SetValue(value) } - metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX) + metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(ctx) if err != nil { c.Log.Warningf("error creating alert meta: %s", err) } @@ -578,7 +580,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ decisionChunks := slicetools.Chunks(alertItem.Decisions, c.decisionBulkSize) for _, decisionChunk := range decisionChunks { - decisionRet, err := c.createDecisionChunk(*alertItem.Simulated, stopAtTime, decisionChunk) + decisionRet, err := c.createDecisionChunk(ctx, *alertItem.Simulated, stopAtTime, decisionChunk) if err != nil { return nil, fmt.Errorf("creating alert decisions: %w", err) } @@ -636,7 +638,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ return nil, nil } - alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(c.CTX) + alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(ctx) if err != nil { return nil, errors.Wrapf(BulkError, "bulk creating alert : %s", err) } @@ -653,7 +655,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ for retry < maxLockRetries { // so much for the happy path... 
but sqlite3 errors work differently - _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(c.CTX) + _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(ctx) if err == nil { break } @@ -678,17 +680,16 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ } } } + return ret, nil } -func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) { +func (c *Client) CreateAlert(ctx context.Context, machineID string, alertList []*models.Alert) ([]string, error) { var ( owner *ent.Machine err error ) - ctx := context.TODO() - if machineID != "" { owner, err = c.QueryMachineByID(ctx, machineID) if err != nil { @@ -708,7 +709,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str alertIDs := []string{} for _, alertChunk := range alertChunks { - ids, err := c.createAlertChunk(machineID, owner, alertChunk) + ids, err := c.createAlertChunk(ctx, machineID, owner, alertChunk) if err != nil { return nil, fmt.Errorf("machine '%s': %w", machineID, err) } @@ -717,7 +718,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str } if owner != nil { - err = owner.Update().SetLastPush(time.Now().UTC()).Exec(c.CTX) + err = owner.Update().SetLastPush(time.Now().UTC()).Exec(ctx) if err != nil { return nil, fmt.Errorf("machine '%s': %w", machineID, err) } @@ -919,7 +920,6 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e case "since", "created_before", "until": if err := handleTimeFilters(param, value[0], &predicates); err != nil { return nil, err - } case "decision_type": predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) @@ -954,7 +954,6 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { return nil, err - } 
return predicates, nil @@ -996,11 +995,11 @@ func (c *Client) AlertsCountPerScenario(ctx context.Context, filters map[string] return counts, nil } -func (c *Client) TotalAlerts() (int, error) { - return c.Ent.Alert.Query().Count(c.CTX) +func (c *Client) TotalAlerts(ctx context.Context) (int, error) { + return c.Ent.Alert.Query().Count(ctx) } -func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, error) { +func (c *Client) QueryAlertWithFilter(ctx context.Context, filter map[string][]string) ([]*ent.Alert, error) { sort := "DESC" // we sort by desc by default if val, ok := filter["sort"]; ok { @@ -1047,7 +1046,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, WithOwner() if limit == 0 { - limit, err = alerts.Count(c.CTX) + limit, err = alerts.Count(ctx) if err != nil { return nil, fmt.Errorf("unable to count nb alerts: %w", err) } @@ -1059,7 +1058,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, alerts = alerts.Order(ent.Desc(alert.FieldCreatedAt), ent.Desc(alert.FieldID)) } - result, err := alerts.Limit(paginationSize).Offset(offset).All(c.CTX) + result, err := alerts.Limit(paginationSize).Offset(offset).All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "pagination size: %d, offset: %d: %s", paginationSize, offset, err) } @@ -1088,35 +1087,35 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, return ret, nil } -func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) { +func (c *Client) DeleteAlertGraphBatch(ctx context.Context, alertItems []*ent.Alert) (int, error) { idList := make([]int, 0) for _, alert := range alertItems { idList = append(idList, alert.ID) } _, err := c.Ent.Event.Delete(). 
- Where(event.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(event.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch events") } _, err = c.Ent.Meta.Delete(). - Where(meta.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(meta.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch meta") } _, err = c.Ent.Decision.Delete(). - Where(decision.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(decision.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch decisions") } deleted, err := c.Ent.Alert.Delete(). - Where(alert.IDIn(idList...)).Exec(c.CTX) + Where(alert.IDIn(idList...)).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return deleted, errors.Wrapf(DeleteFail, "alert graph delete batch") @@ -1127,10 +1126,10 @@ func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) { return deleted, nil } -func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { +func (c *Client) DeleteAlertGraph(ctx context.Context, alertItem *ent.Alert) error { // delete the associated events _, err := c.Ent.Event.Delete(). - Where(event.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(event.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "event with alert ID '%d'", alertItem.ID) @@ -1138,7 +1137,7 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { // delete the associated meta _, err = c.Ent.Meta.Delete(). 
- Where(meta.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(meta.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "meta with alert ID '%d'", alertItem.ID) @@ -1146,14 +1145,14 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { // delete the associated decisions _, err = c.Ent.Decision.Delete(). - Where(decision.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(decision.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "decision with alert ID '%d'", alertItem.ID) } // delete the alert - err = c.Ent.Alert.DeleteOne(alertItem).Exec(c.CTX) + err = c.Ent.Alert.DeleteOne(alertItem).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "alert with ID '%d'", alertItem.ID) @@ -1162,26 +1161,26 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { return nil } -func (c *Client) DeleteAlertByID(id int) error { - alertItem, err := c.Ent.Alert.Query().Where(alert.IDEQ(id)).Only(c.CTX) +func (c *Client) DeleteAlertByID(ctx context.Context, id int) error { + alertItem, err := c.Ent.Alert.Query().Where(alert.IDEQ(id)).Only(ctx) if err != nil { return err } - return c.DeleteAlertGraph(alertItem) + return c.DeleteAlertGraph(ctx, alertItem) } -func (c *Client) DeleteAlertWithFilter(filter map[string][]string) (int, error) { +func (c *Client) DeleteAlertWithFilter(ctx context.Context, filter map[string][]string) (int, error) { preds, err := AlertPredicatesFromFilter(filter) if err != nil { return 0, err } - return c.Ent.Alert.Delete().Where(preds...).Exec(c.CTX) + return c.Ent.Alert.Delete().Where(preds...).Exec(ctx) } -func (c *Client) GetAlertByID(alertID int) (*ent.Alert, error) { - alert, err := 
c.Ent.Alert.Query().Where(alert.IDEQ(alertID)).WithDecisions().WithEvents().WithMetas().WithOwner().First(c.CTX) +func (c *Client) GetAlertByID(ctx context.Context, alertID int) (*ent.Alert, error) { + alert, err := c.Ent.Alert.Query().Where(alert.IDEQ(alertID)).WithDecisions().WithEvents().WithMetas().WithOwner().First(ctx) if err != nil { /*record not found, 404*/ if ent.IsNotFound(err) { diff --git a/pkg/database/database.go b/pkg/database/database.go index e513459199f..bb41dd3b645 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -21,7 +21,6 @@ import ( type Client struct { Ent *ent.Client - CTX context.Context Log *log.Logger CanFlush bool Type string @@ -106,7 +105,6 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro return &Client{ Ent: client, - CTX: ctx, Log: clog, CanFlush: true, Type: config.Type, diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 8547990c25f..7522a272799 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -31,7 +31,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ @@ -321,7 +321,7 @@ func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[strin var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer) */ @@ -440,7 +440,7 @@ func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[strin var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var 
contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ decisions := c.Ent.Decision.Query().Where(decision.UntilGT(time.Now().UTC())) diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 46c8edfa308..8f646ddc961 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -239,7 +239,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e c.FlushOrphans(ctx) c.Log.Debug("Done flushing orphan alerts") - totalAlerts, err = c.TotalAlerts() + totalAlerts, err = c.TotalAlerts(ctx) if err != nil { c.Log.Warningf("FlushAlerts (max items count): %s", err) return fmt.Errorf("unable to get alerts count: %w", err) @@ -252,7 +252,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e "created_before": {MaxAge}, } - nbDeleted, err := c.DeleteAlertWithFilter(filter) + nbDeleted, err := c.DeleteAlertWithFilter(ctx, filter) if err != nil { c.Log.Warningf("FlushAlerts (max age): %s", err) return fmt.Errorf("unable to flush alerts with filter until=%s: %w", MaxAge, err) @@ -268,7 +268,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e // This gives us the oldest alert that we want to keep // We then delete all the alerts with an id lower than this one // We can do this because the id is auto-increment, and the database won't reuse the same id twice - lastAlert, err := c.QueryAlertWithFilter(map[string][]string{ + lastAlert, err := c.QueryAlertWithFilter(ctx, map[string][]string{ "sort": {"DESC"}, "limit": {"1"}, // we do not care about fetching the edges, we just want the id diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 99ba90c80b8..eb4c472821e 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -17,7 +17,7 @@ func (c *Client) CreateMetric(ctx context.Context, generatedType 
metric.Generate SetReceivedAt(receivedAt). SetPayload(payload). Save(ctx) - if err != nil { + if err != nil { c.Log.Warningf("CreateMetric: %s", err) return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, receivedAt, InsertFail) } From 897613e8377db824d47b32a5d4b27e599280fb9d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 25 Sep 2024 09:02:53 +0200 Subject: [PATCH 312/581] CI: check generated code in test workflow (#3261) --- .github/workflows/go-tests.yml | 12 ++++++++++++ pkg/database/ent/machine.go | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index d4e3a3d843a..a6c72a91af6 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -128,6 +128,18 @@ jobs: with: go-version: "1.22" + - name: Run "make generate" and check for changes + run: | + set -e + make generate 2>/dev/null + if [[ $(git status --porcelain) ]]; then + echo "Error: Uncommitted changes found after running 'make generate'. Please commit all generated code." + git diff + exit 1 + else + echo "No changes detected after running 'make generate'." 
+ fi + - name: Create localstack streams run: | aws --endpoint-url=http://127.0.0.1:4566 --region us-east-1 kinesis create-stream --stream-name stream-1-shard --shard-count 1 diff --git a/pkg/database/ent/machine.go b/pkg/database/ent/machine.go index 1b8122060d1..76127065791 100644 --- a/pkg/database/ent/machine.go +++ b/pkg/database/ent/machine.go @@ -202,7 +202,7 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field hubstate", values[i]) } else if value != nil && len(*value) > 0 { if err := json.Unmarshal(*value, &m.Hubstate); err != nil { - return fmt.Errorf("parsing field hubstate: %w", err) + return fmt.Errorf("unmarshal field hubstate: %w", err) } } case machine.FieldDatasources: @@ -210,7 +210,7 @@ func (m *Machine) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field datasources", values[i]) } else if value != nil && len(*value) > 0 { if err := json.Unmarshal(*value, &m.Datasources); err != nil { - return fmt.Errorf("parsing field datasources: %w", err) + return fmt.Errorf("unmarshal field datasources: %w", err) } } default: From 27451a5ee6b88cb36be97634dbbe6c5f16e27cb2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 2 Oct 2024 13:39:07 +0200 Subject: [PATCH 313/581] context propagation: pass ctx to UpdateScenario() (#3258) * context propagation: pass ctx to UpdateScenario() * context propagation: SendMetrics, SendUsageMetrics, plugin config --- cmd/crowdsec-cli/clicapi/capi.go | 2 +- cmd/crowdsec/lapiclient.go | 2 +- pkg/apiclient/auth_jwt.go | 7 +++++-- pkg/apiclient/config.go | 3 ++- pkg/apiserver/apic.go | 10 ++++------ pkg/apiserver/apic_metrics.go | 12 ++++-------- pkg/apiserver/apic_metrics_test.go | 12 +++++++----- pkg/apiserver/apic_test.go | 6 ++++-- pkg/apiserver/apiserver.go | 4 ++-- pkg/csplugin/notifier.go | 4 +--- pkg/protobufs/plugin_interface.go | 4 ++-- 11 files changed, 33 
insertions(+), 33 deletions(-) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 24c3ba054a9..cba66f11104 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -170,7 +170,7 @@ func queryCAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login // I don't believe papi is neede to check enrollement // PapiURL: papiURL, VersionPrefix: "v3", - UpdateScenario: func() ([]string, error) { + UpdateScenario: func(_ context.Context) ([]string, error) { return itemsForAPI, nil }, }) diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index 4556306825c..eed517f9df9 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -36,7 +36,7 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", - UpdateScenario: func() ([]string, error) { + UpdateScenario: func(_ context.Context) ([]string, error) { return itemsForAPI, nil }, }) diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index b202e382842..193486ff065 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -2,6 +2,7 @@ package apiclient import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -30,15 +31,17 @@ type JWTTransport struct { // Transport is the underlying HTTP transport to use when making requests. // It will default to http.DefaultTransport if nil. 
Transport http.RoundTripper - UpdateScenario func() ([]string, error) + UpdateScenario func(context.Context) ([]string, error) refreshTokenMutex sync.Mutex } func (t *JWTTransport) refreshJwtToken() error { var err error + ctx := context.TODO() + if t.UpdateScenario != nil { - t.Scenarios, err = t.UpdateScenario() + t.Scenarios, err = t.UpdateScenario(ctx) if err != nil { return fmt.Errorf("can't update scenario list: %w", err) } diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go index b08452e74e0..29a8acf185e 100644 --- a/pkg/apiclient/config.go +++ b/pkg/apiclient/config.go @@ -1,6 +1,7 @@ package apiclient import ( + "context" "net/url" "github.com/go-openapi/strfmt" @@ -15,5 +16,5 @@ type Config struct { VersionPrefix string UserAgent string RegistrationToken string - UpdateScenario func() ([]string, error) + UpdateScenario func(context.Context) ([]string, error) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 9b56fef6549..c8768e71b0a 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -82,11 +82,9 @@ func randomDuration(d time.Duration, delta time.Duration) time.Duration { return ret } -func (a *apic) FetchScenariosListFromDB() ([]string, error) { +func (a *apic) FetchScenariosListFromDB(ctx context.Context) ([]string, error) { scenarios := make([]string, 0) - ctx := context.TODO() - machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, fmt.Errorf("while listing machines: %w", err) @@ -214,7 +212,7 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient return nil, fmt.Errorf("while parsing '%s': %w", config.Credentials.PapiURL, err) } - ret.scenarioList, err = ret.FetchScenariosListFromDB() + ret.scenarioList, err = ret.FetchScenariosListFromDB(ctx) if err != nil { return nil, fmt.Errorf("while fetching scenarios from db: %w", err) } @@ -234,7 +232,7 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient // The watcher will be authenticated 
by the RoundTripper the first time it will call CAPI // Explicit authentication will provoke a useless supplementary call to CAPI - scenarios, err := ret.FetchScenariosListFromDB() + scenarios, err := ret.FetchScenariosListFromDB(ctx) if err != nil { return ret, fmt.Errorf("get scenario in db: %w", err) } @@ -944,7 +942,7 @@ func (a *apic) Pull(ctx context.Context) error { toldOnce := false for { - scenario, err := a.FetchScenariosListFromDB() + scenario, err := a.FetchScenariosListFromDB(ctx) if err != nil { log.Errorf("unable to fetch scenarios from db: %s", err) } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 16b2328dbe9..3d9e7b28a79 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -251,11 +251,9 @@ func (a *apic) fetchMachineIDs(ctx context.Context) ([]string, error) { // Metrics are sent at start, then at the randomized metricsIntervalFirst, // then at regular metricsInterval. If a change is detected in the list // of machines, the next metrics are sent immediately. 
-func (a *apic) SendMetrics(stop chan (bool)) { +func (a *apic) SendMetrics(ctx context.Context, stop chan (bool)) { defer trace.CatchPanic("lapi/metricsToAPIC") - ctx := context.TODO() - // verify the list of machines every interval const checkInt = 20 * time.Second @@ -321,7 +319,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { if metrics != nil { log.Info("capi metrics: sending") - _, _, err = a.apiClient.Metrics.Add(context.Background(), metrics) + _, _, err = a.apiClient.Metrics.Add(ctx, metrics) if err != nil { log.Errorf("capi metrics: failed: %s", err) } @@ -339,11 +337,9 @@ func (a *apic) SendMetrics(stop chan (bool)) { } } -func (a *apic) SendUsageMetrics() { +func (a *apic) SendUsageMetrics(ctx context.Context) { defer trace.CatchPanic("lapi/usageMetricsToAPIC") - ctx := context.TODO() - firstRun := true log.Debugf("Start sending usage metrics to CrowdSec Central API (interval: %s once, then %s)", a.usageMetricsIntervalFirst, a.usageMetricsInterval) @@ -368,7 +364,7 @@ func (a *apic) SendUsageMetrics() { continue } - _, resp, err := a.apiClient.UsageMetrics.Add(context.Background(), metrics) + _, resp, err := a.apiClient.UsageMetrics.Add(ctx, metrics) if err != nil { log.Errorf("unable to send usage metrics: %s", err) diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 78b16f9c8b7..13a24668f26 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -14,6 +14,8 @@ import ( ) func TestAPICSendMetrics(t *testing.T) { + ctx := context.Background() + tests := []struct { name string duration time.Duration @@ -34,7 +36,7 @@ func TestAPICSendMetrics(t *testing.T) { metricsInterval: time.Millisecond * 20, expectedCalls: 5, setUp: func(api *apic) { - api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) + api.dbClient.Ent.Machine.Delete().ExecX(ctx) api.dbClient.Ent.Machine.Create(). SetMachineId("1234"). SetPassword(testPassword.String()). 
@@ -42,16 +44,16 @@ func TestAPICSendMetrics(t *testing.T) { SetScenarios("crowdsecurity/test"). SetLastPush(time.Time{}). SetUpdatedAt(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) - api.dbClient.Ent.Bouncer.Delete().ExecX(context.Background()) + api.dbClient.Ent.Bouncer.Delete().ExecX(ctx) api.dbClient.Ent.Bouncer.Create(). SetIPAddress("1.2.3.6"). SetName("someBouncer"). SetAPIKey("foobar"). SetRevoked(false). SetLastPull(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) }, }, } @@ -86,7 +88,7 @@ func TestAPICSendMetrics(t *testing.T) { httpmock.ZeroCallCounters() - go api.SendMetrics(stop) + go api.SendMetrics(ctx, stop) time.Sleep(tc.duration) stop <- true diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 3bb158acf35..a215edb2fbd 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -143,6 +143,8 @@ func TestAPICCAPIPullIsOld(t *testing.T) { } func TestAPICFetchScenariosListFromDB(t *testing.T) { + ctx := context.Background() + tests := []struct { name string machineIDsWithScenarios map[string]string @@ -174,10 +176,10 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { SetPassword(testPassword.String()). SetIpAddress("1.2.3.4"). SetScenarios(scenarios). 
- ExecX(context.Background()) + ExecX(ctx) } - scenarios, err := api.FetchScenariosListFromDB() + scenarios, err := api.FetchScenariosListFromDB(ctx) require.NoError(t, err) for machineID := range tc.machineIDsWithScenarios { diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 6b5d6803be9..2b2b453348a 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -357,12 +357,12 @@ func (s *APIServer) initAPIC(ctx context.Context) { } s.apic.metricsTomb.Go(func() error { - s.apic.SendMetrics(make(chan bool)) + s.apic.SendMetrics(ctx, make(chan bool)) return nil }) s.apic.metricsTomb.Go(func() error { - s.apic.SendUsageMetrics() + s.apic.SendUsageMetrics(ctx) return nil }) } diff --git a/pkg/csplugin/notifier.go b/pkg/csplugin/notifier.go index 2b5d57fbcff..ed4a4cc4149 100644 --- a/pkg/csplugin/notifier.go +++ b/pkg/csplugin/notifier.go @@ -40,9 +40,7 @@ func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notific } func (m *GRPCClient) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { - _, err := m.client.Configure( - context.Background(), config, - ) + _, err := m.client.Configure(ctx, config) return &protobufs.Empty{}, err } diff --git a/pkg/protobufs/plugin_interface.go b/pkg/protobufs/plugin_interface.go index fc89b2fa009..baa76c8941c 100644 --- a/pkg/protobufs/plugin_interface.go +++ b/pkg/protobufs/plugin_interface.go @@ -24,12 +24,12 @@ type NotifierPlugin struct { type GRPCClient struct{ client NotifierClient } func (m *GRPCClient) Notify(ctx context.Context, notification *Notification) (*Empty, error) { - _, err := m.client.Notify(context.Background(), notification) + _, err := m.client.Notify(ctx, notification) return &Empty{}, err } func (m *GRPCClient) Configure(ctx context.Context, config *Config) (*Empty, error) { - _, err := m.client.Configure(context.Background(), config) + _, err := m.client.Configure(ctx, config) return &Empty{}, err } From 
56c9c7a80486910bb010c0630a5f258625b02ece Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 2 Oct 2024 16:44:13 +0200 Subject: [PATCH 314/581] Re-generate capi models (#3260) * add centralapi_swagger.yaml * remove nullable to avoid generate error * re-generate pkg/modelscapi * update go-swagger to 0.31.0 --- Makefile | 1 + pkg/models/generate.go | 2 +- pkg/modelscapi/add_signals_request.go | 5 + pkg/modelscapi/add_signals_request_item.go | 9 + .../add_signals_request_item_decisions.go | 5 + ...add_signals_request_item_decisions_item.go | 3 + pkg/modelscapi/centralapi_swagger.yaml | 875 ++++++++++++++++++ pkg/modelscapi/decisions_delete_request.go | 5 + pkg/modelscapi/decisions_sync_request.go | 5 + pkg/modelscapi/decisions_sync_request_item.go | 1 + .../decisions_sync_request_item_decisions.go | 5 + pkg/modelscapi/generate.go | 4 + .../get_decisions_stream_response.go | 5 + .../get_decisions_stream_response_deleted.go | 5 + .../get_decisions_stream_response_links.go | 5 + .../get_decisions_stream_response_new.go | 5 + .../get_decisions_stream_response_new_item.go | 5 + pkg/modelscapi/metrics_request.go | 10 + 18 files changed, 954 insertions(+), 1 deletion(-) create mode 100644 pkg/modelscapi/centralapi_swagger.yaml create mode 100644 pkg/modelscapi/generate.go diff --git a/Makefile b/Makefile index a0b06dc2ea0..cb0d5ebaf60 100644 --- a/Makefile +++ b/Makefile @@ -267,6 +267,7 @@ crowdsec: ## Build crowdsec generate: ## Generate code for the database and APIs $(GO) generate ./pkg/database/ent $(GO) generate ./pkg/models + $(GO) generate ./pkg/modelscapi .PHONY: testclean testclean: bats-clean ## Remove test artifacts diff --git a/pkg/models/generate.go b/pkg/models/generate.go index ccacc409ab5..502d6f3d2cf 100644 --- a/pkg/models/generate.go +++ b/pkg/models/generate.go @@ -1,4 +1,4 @@ package models -//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 generate model 
--spec=./localapi_swagger.yaml --target=../ +//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate model --spec=./localapi_swagger.yaml --target=../ diff --git a/pkg/modelscapi/add_signals_request.go b/pkg/modelscapi/add_signals_request.go index 62fe590cb79..7bfe6ae80e0 100644 --- a/pkg/modelscapi/add_signals_request.go +++ b/pkg/modelscapi/add_signals_request.go @@ -56,6 +56,11 @@ func (m AddSignalsRequest) ContextValidate(ctx context.Context, formats strfmt.R for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/add_signals_request_item.go b/pkg/modelscapi/add_signals_request_item.go index f9c865b4c68..5f63b542d5a 100644 --- a/pkg/modelscapi/add_signals_request_item.go +++ b/pkg/modelscapi/add_signals_request_item.go @@ -65,6 +65,9 @@ type AddSignalsRequestItem struct { // stop at // Required: true StopAt *string `json:"stop_at"` + + // UUID of the alert + UUID string `json:"uuid,omitempty"` } // Validate validates this add signals request item @@ -257,6 +260,11 @@ func (m *AddSignalsRequestItem) contextValidateContext(ctx context.Context, form for i := 0; i < len(m.Context); i++ { if m.Context[i] != nil { + + if swag.IsZero(m.Context[i]) { // not required + return nil + } + if err := m.Context[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("context" + "." 
+ strconv.Itoa(i)) @@ -289,6 +297,7 @@ func (m *AddSignalsRequestItem) contextValidateDecisions(ctx context.Context, fo func (m *AddSignalsRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("source") diff --git a/pkg/modelscapi/add_signals_request_item_decisions.go b/pkg/modelscapi/add_signals_request_item_decisions.go index 54e123ab3f8..11ed27a496d 100644 --- a/pkg/modelscapi/add_signals_request_item_decisions.go +++ b/pkg/modelscapi/add_signals_request_item_decisions.go @@ -54,6 +54,11 @@ func (m AddSignalsRequestItemDecisions) ContextValidate(ctx context.Context, for for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/add_signals_request_item_decisions_item.go b/pkg/modelscapi/add_signals_request_item_decisions_item.go index 34dfeb5bce5..797c517e33f 100644 --- a/pkg/modelscapi/add_signals_request_item_decisions_item.go +++ b/pkg/modelscapi/add_signals_request_item_decisions_item.go @@ -49,6 +49,9 @@ type AddSignalsRequestItemDecisionsItem struct { // until Until string `json:"until,omitempty"` + // UUID of the decision + UUID string `json:"uuid,omitempty"` + // the value of the decision scope : an IP, a range, a username, etc // Required: true Value *string `json:"value"` diff --git a/pkg/modelscapi/centralapi_swagger.yaml b/pkg/modelscapi/centralapi_swagger.yaml new file mode 100644 index 00000000000..bd695894f2b --- /dev/null +++ b/pkg/modelscapi/centralapi_swagger.yaml @@ -0,0 +1,875 @@ +swagger: "2.0" +info: + description: + "API to manage machines using [crowdsec](https://github.com/crowdsecurity/crowdsec)\ + \ and bouncers.\n" + version: 
"2023-01-23T11:16:39Z" + title: "prod-capi-v3" + contact: + name: "Crowdsec team" + url: "https://github.com/crowdsecurity/crowdsec" + email: "support@crowdsec.net" +host: "api.crowdsec.net" +basePath: "/v3" +tags: + - name: "watchers" + description: "Operations about watchers: crowdsec & cscli" + - name: "bouncers" + description: "Operations about decisions : bans, captcha, rate-limit etc." +schemes: + - "https" +paths: + /decisions/delete: + post: + tags: + - "watchers" + summary: "delete decisions" + description: "delete provided decisions" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "DecisionsDeleteRequest" + required: true + schema: + $ref: "#/definitions/DecisionsDeleteRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /decisions/stream: + get: + tags: + - "bouncers" + - "watchers" + summary: "returns list of top decisions" + description: "returns list of top decisions to add or delete" + produces: + - "application/json" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/GetDecisionsStreamResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "404": + description: "404 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + options: + consumes: + - "application/json" + produces: + - "application/json" + responses: + "200": + description: "200 response" + headers: + Access-Control-Allow-Origin: + type: "string" + Access-Control-Allow-Methods: + type: "string" + Access-Control-Allow-Headers: + type: "string" + /decisions/sync: + post: + tags: + - "watchers" + summary: "sync decisions" + description: "sync 
provided decisions" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "DecisionsSyncRequest" + required: true + schema: + $ref: "#/definitions/DecisionsSyncRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /metrics: + post: + tags: + - "watchers" + summary: "receive metrics about enrolled machines and bouncers in APIL" + description: "receive metrics about enrolled machines and bouncers in APIL" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "MetricsRequest" + required: true + schema: + $ref: "#/definitions/MetricsRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /signals: + post: + tags: + - "watchers" + summary: "Push signals" + description: "to push signals" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "AddSignalsRequest" + required: true + schema: + $ref: "#/definitions/AddSignalsRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /watchers: + post: + tags: + - "watchers" + summary: "Register watcher" + description: "Register a watcher" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "RegisterRequest" + 
required: true + schema: + $ref: "#/definitions/RegisterRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + /watchers/enroll: + post: + tags: + - "watchers" + summary: "watcher enrollment" + description: "watcher enrollment : enroll watcher to crowdsec backoffice account" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "EnrollRequest" + required: true + schema: + $ref: "#/definitions/EnrollRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "403": + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /watchers/login: + post: + tags: + - "watchers" + summary: "watcher login" + description: "Sign-in to get a valid token" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "LoginRequest" + required: true + schema: + $ref: "#/definitions/LoginRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/LoginResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "403": + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + /watchers/reset: + post: + tags: + - "watchers" + summary: "Reset Password" + description: "to reset a watcher password" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: 
"ResetPasswordRequest" + required: true + schema: + $ref: "#/definitions/ResetPasswordRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + "403": + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + "404": + description: "404 response" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + options: + consumes: + - "application/json" + produces: + - "application/json" + responses: + "200": + description: "200 response" + headers: + Access-Control-Allow-Origin: + type: "string" + Access-Control-Allow-Methods: + type: "string" + Access-Control-Allow-Headers: + type: "string" +securityDefinitions: + UserPoolAuthorizer: + type: "apiKey" + name: "Authorization" + in: "header" + x-amazon-apigateway-authtype: "cognito_user_pools" +definitions: + DecisionsDeleteRequest: + title: "delete decisions" + type: "array" + description: "delete decision model" + items: + $ref: "#/definitions/DecisionsDeleteRequestItem" + DecisionsSyncRequestItem: + type: "object" + required: + - "message" + - "scenario" + - "scenario_hash" + - "scenario_version" + - "source" + - "start_at" + - "stop_at" + properties: + scenario_trust: + type: "string" + scenario_hash: + type: "string" + scenario: + type: "string" + alert_id: + type: "integer" + created_at: + type: "string" + machine_id: + type: "string" + decisions: + $ref: "#/definitions/DecisionsSyncRequestItemDecisions" + source: + $ref: "#/definitions/DecisionsSyncRequestItemSource" + scenario_version: + type: "string" + message: + type: "string" + description: "a human 
readable message" + start_at: + type: "string" + stop_at: + type: "string" + title: "Signal" + AddSignalsRequestItem: + type: "object" + required: + - "message" + - "scenario" + - "scenario_hash" + - "scenario_version" + - "source" + - "start_at" + - "stop_at" + properties: + created_at: + type: "string" + machine_id: + type: "string" + source: + $ref: "#/definitions/AddSignalsRequestItemSource" + scenario_version: + type: "string" + message: + type: "string" + description: "a human readable message" + uuid: + type: "string" + description: "UUID of the alert" + start_at: + type: "string" + scenario_trust: + type: "string" + scenario_hash: + type: "string" + scenario: + type: "string" + alert_id: + type: "integer" + context: + type: "array" + items: + type: "object" + properties: + value: + type: "string" + key: + type: "string" + decisions: + $ref: "#/definitions/AddSignalsRequestItemDecisions" + stop_at: + type: "string" + title: "Signal" + DecisionsSyncRequest: + title: "sync decisions request" + type: "array" + description: "sync decision model" + items: + $ref: "#/definitions/DecisionsSyncRequestItem" + LoginRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + minLength: 48 + maxLength: 48 + pattern: "^[a-zA-Z0-9]+$" + scenarios: + type: "array" + description: "all scenarios installed" + items: + type: "string" + title: "login request" + description: "Login request model" + GetDecisionsStreamResponseNewItem: + type: "object" + required: + - "scenario" + - "scope" + - "decisions" + properties: + scenario: + type: "string" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + decisions: + type: array + items: + type: object + required: + - value + - duration + 
properties: + duration: + type: "string" + value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "New Decisions" + GetDecisionsStreamResponseDeletedItem: + type: object + required: + - scope + - decisions + properties: + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + decisions: + type: array + items: + type: string + BlocklistLink: + type: object + required: + - name + - url + - remediation + - scope + - duration + properties: + name: + type: string + description: "the name of the blocklist" + url: + type: string + description: "the url from which the blocklist content can be downloaded" + remediation: + type: string + description: "the remediation that should be used for the blocklist" + scope: + type: string + description: "the scope of decisions in the blocklist" + duration: + type: string + AddSignalsRequestItemDecisionsItem: + type: "object" + required: + - "duration" + - "id" + - "origin" + - "scenario" + - "scope" + - "type" + - "value" + properties: + duration: + type: "string" + uuid: + type: "string" + description: "UUID of the decision" + scenario: + type: "string" + origin: + type: "string" + description: "the origin of the decision : cscli, crowdsec" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + simulated: + type: "boolean" + until: + type: "string" + id: + type: "integer" + description: "(only relevant for GET ops) the unique id" + type: + type: "string" + description: + "the type of decision, might be 'ban', 'captcha' or something\ + \ custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL." 
+ value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "Decision" + EnrollRequest: + type: "object" + required: + - "attachment_key" + properties: + name: + type: "string" + description: "The name that will be display in the console for the instance" + overwrite: + type: "boolean" + description: "To force enroll the instance" + attachment_key: + type: "string" + description: + "attachment_key is generated in your crowdsec backoffice account\ + \ and allows you to enroll your machines to your BO account" + pattern: "^[a-zA-Z0-9]+$" + tags: + type: "array" + description: "Tags to apply on the console for the instance" + items: + type: "string" + title: "enroll request" + description: "enroll request model" + ResetPasswordRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + minLength: 48 + maxLength: 48 + pattern: "^[a-zA-Z0-9]+$" + title: "resetPassword" + description: "ResetPassword request model" + MetricsRequestBouncersItem: + type: "object" + properties: + last_pull: + type: "string" + description: "last bouncer pull date" + custom_name: + type: "string" + description: "bouncer name" + name: + type: "string" + description: "bouncer type (firewall, php...)" + version: + type: "string" + description: "bouncer version" + title: "MetricsBouncerInfo" + AddSignalsRequestItemSource: + type: "object" + required: + - "scope" + - "value" + properties: + scope: + type: "string" + description: "the scope of a source : ip,range,username,etc" + ip: + type: "string" + description: "provided as a convenience when the source is an IP" + latitude: + type: "number" + format: "float" + as_number: + type: "string" + description: "provided as a convenience when the source is an IP" + 
range: + type: "string" + description: "provided as a convenience when the source is an IP" + cn: + type: "string" + value: + type: "string" + description: "the value of a source : the ip, the range, the username,etc" + as_name: + type: "string" + description: "provided as a convenience when the source is an IP" + longitude: + type: "number" + format: "float" + title: "Source" + DecisionsSyncRequestItemDecisions: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/DecisionsSyncRequestItemDecisionsItem" + RegisterRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + pattern: "^[a-zA-Z0-9]+$" + title: "register request" + description: "Register request model" + SuccessResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "message" + title: "success response" + description: "success response return by the API" + LoginResponse: + type: "object" + properties: + code: + type: "integer" + expire: + type: "string" + token: + type: "string" + title: "login response" + description: "Login request model" + DecisionsSyncRequestItemDecisionsItem: + type: "object" + required: + - "duration" + - "id" + - "origin" + - "scenario" + - "scope" + - "type" + - "value" + properties: + duration: + type: "string" + scenario: + type: "string" + origin: + type: "string" + description: "the origin of the decision : cscli, crowdsec" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + simulated: + type: "boolean" + until: + type: "string" + id: + type: "integer" + description: "(only relevant for GET ops) the unique id" + type: + type: "string" + description: + "the type of decision, might be 'ban', 'captcha' or 
something\ + \ custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL." + value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "Decision" + GetDecisionsStreamResponse: + type: "object" + properties: + new: + $ref: "#/definitions/GetDecisionsStreamResponseNew" + deleted: + $ref: "#/definitions/GetDecisionsStreamResponseDeleted" + links: + $ref: "#/definitions/GetDecisionsStreamResponseLinks" + title: "get decisions stream response" + description: "get decision response model" + DecisionsSyncRequestItemSource: + type: "object" + required: + - "scope" + - "value" + properties: + scope: + type: "string" + description: "the scope of a source : ip,range,username,etc" + ip: + type: "string" + description: "provided as a convenience when the source is an IP" + latitude: + type: "number" + format: "float" + as_number: + type: "string" + description: "provided as a convenience when the source is an IP" + range: + type: "string" + description: "provided as a convenience when the source is an IP" + cn: + type: "string" + value: + type: "string" + description: "the value of a source : the ip, the range, the username,etc" + as_name: + type: "string" + description: "provided as a convenience when the source is an IP" + longitude: + type: "number" + format: "float" + title: "Source" + AddSignalsRequestItemDecisions: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/AddSignalsRequestItemDecisionsItem" + MetricsRequestMachinesItem: + type: "object" + properties: + last_update: + type: "string" + description: "last agent update date" + name: + type: "string" + description: "agent name" + last_push: + type: "string" + description: "last agent push date" + version: + type: "string" + description: "agent version" + title: "MetricsAgentInfo" + MetricsRequest: + type: "object" + required: + - "bouncers" + - "machines" + properties: + bouncers: + type: "array" + items: + $ref: 
"#/definitions/MetricsRequestBouncersItem" + machines: + type: "array" + items: + $ref: "#/definitions/MetricsRequestMachinesItem" + title: "metrics" + description: "push metrics model" + ErrorResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "Error message" + errors: + type: "string" + description: "more detail on individual errors" + title: "error response" + description: "error response return by the API" + AddSignalsRequest: + title: "add signals request" + type: "array" + description: "All signals request model" + items: + $ref: "#/definitions/AddSignalsRequestItem" + DecisionsDeleteRequestItem: + type: "string" + title: "decisionsIDs" + GetDecisionsStreamResponseNew: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/GetDecisionsStreamResponseNewItem" + GetDecisionsStreamResponseDeleted: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/GetDecisionsStreamResponseDeletedItem" + GetDecisionsStreamResponseLinks: + title: "Decisions list" + type: "object" + properties: + blocklists: + type: array + items: + $ref: "#/definitions/BlocklistLink" + diff --git a/pkg/modelscapi/decisions_delete_request.go b/pkg/modelscapi/decisions_delete_request.go index e8718835027..0c93558adf1 100644 --- a/pkg/modelscapi/decisions_delete_request.go +++ b/pkg/modelscapi/decisions_delete_request.go @@ -11,6 +11,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" ) // DecisionsDeleteRequest delete decisions @@ -49,6 +50,10 @@ func (m DecisionsDeleteRequest) ContextValidate(ctx context.Context, formats str for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/decisions_sync_request.go 
b/pkg/modelscapi/decisions_sync_request.go index e3a95162519..c087d39ff62 100644 --- a/pkg/modelscapi/decisions_sync_request.go +++ b/pkg/modelscapi/decisions_sync_request.go @@ -56,6 +56,11 @@ func (m DecisionsSyncRequest) ContextValidate(ctx context.Context, formats strfm for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/decisions_sync_request_item.go b/pkg/modelscapi/decisions_sync_request_item.go index 5139ea2de4b..460fe4d430e 100644 --- a/pkg/modelscapi/decisions_sync_request_item.go +++ b/pkg/modelscapi/decisions_sync_request_item.go @@ -231,6 +231,7 @@ func (m *DecisionsSyncRequestItem) contextValidateDecisions(ctx context.Context, func (m *DecisionsSyncRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("source") diff --git a/pkg/modelscapi/decisions_sync_request_item_decisions.go b/pkg/modelscapi/decisions_sync_request_item_decisions.go index 76316e43c5e..bdc8e77e2b6 100644 --- a/pkg/modelscapi/decisions_sync_request_item_decisions.go +++ b/pkg/modelscapi/decisions_sync_request_item_decisions.go @@ -54,6 +54,11 @@ func (m DecisionsSyncRequestItemDecisions) ContextValidate(ctx context.Context, for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/generate.go b/pkg/modelscapi/generate.go new file mode 100644 index 00000000000..66dc2a34b7e --- /dev/null +++ b/pkg/modelscapi/generate.go @@ -0,0 +1,4 @@ +package modelscapi + 
+//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate model --spec=./centralapi_swagger.yaml --target=../ --model-package=modelscapi + diff --git a/pkg/modelscapi/get_decisions_stream_response.go b/pkg/modelscapi/get_decisions_stream_response.go index af19b85c4d3..5ebf29c5d93 100644 --- a/pkg/modelscapi/get_decisions_stream_response.go +++ b/pkg/modelscapi/get_decisions_stream_response.go @@ -144,6 +144,11 @@ func (m *GetDecisionsStreamResponse) contextValidateDeleted(ctx context.Context, func (m *GetDecisionsStreamResponse) contextValidateLinks(ctx context.Context, formats strfmt.Registry) error { if m.Links != nil { + + if swag.IsZero(m.Links) { // not required + return nil + } + if err := m.Links.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("links") diff --git a/pkg/modelscapi/get_decisions_stream_response_deleted.go b/pkg/modelscapi/get_decisions_stream_response_deleted.go index d218bf87e4e..78292860f22 100644 --- a/pkg/modelscapi/get_decisions_stream_response_deleted.go +++ b/pkg/modelscapi/get_decisions_stream_response_deleted.go @@ -54,6 +54,11 @@ func (m GetDecisionsStreamResponseDeleted) ContextValidate(ctx context.Context, for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_links.go b/pkg/modelscapi/get_decisions_stream_response_links.go index 85cc9af9b48..6b9054574f1 100644 --- a/pkg/modelscapi/get_decisions_stream_response_links.go +++ b/pkg/modelscapi/get_decisions_stream_response_links.go @@ -82,6 +82,11 @@ func (m *GetDecisionsStreamResponseLinks) contextValidateBlocklists(ctx context. 
for i := 0; i < len(m.Blocklists); i++ { if m.Blocklists[i] != nil { + + if swag.IsZero(m.Blocklists[i]) { // not required + return nil + } + if err := m.Blocklists[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("blocklists" + "." + strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_new.go b/pkg/modelscapi/get_decisions_stream_response_new.go index e9525bf6fa7..8e09f1b20e7 100644 --- a/pkg/modelscapi/get_decisions_stream_response_new.go +++ b/pkg/modelscapi/get_decisions_stream_response_new.go @@ -54,6 +54,11 @@ func (m GetDecisionsStreamResponseNew) ContextValidate(ctx context.Context, form for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_new_item.go b/pkg/modelscapi/get_decisions_stream_response_new_item.go index a3592d0ab61..77cc06732ce 100644 --- a/pkg/modelscapi/get_decisions_stream_response_new_item.go +++ b/pkg/modelscapi/get_decisions_stream_response_new_item.go @@ -119,6 +119,11 @@ func (m *GetDecisionsStreamResponseNewItem) contextValidateDecisions(ctx context for i := 0; i < len(m.Decisions); i++ { if m.Decisions[i] != nil { + + if swag.IsZero(m.Decisions[i]) { // not required + return nil + } + if err := m.Decisions[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("decisions" + "." 
+ strconv.Itoa(i)) diff --git a/pkg/modelscapi/metrics_request.go b/pkg/modelscapi/metrics_request.go index d5b7d058fc1..5d663cf1750 100644 --- a/pkg/modelscapi/metrics_request.go +++ b/pkg/modelscapi/metrics_request.go @@ -126,6 +126,11 @@ func (m *MetricsRequest) contextValidateBouncers(ctx context.Context, formats st for i := 0; i < len(m.Bouncers); i++ { if m.Bouncers[i] != nil { + + if swag.IsZero(m.Bouncers[i]) { // not required + return nil + } + if err := m.Bouncers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("bouncers" + "." + strconv.Itoa(i)) @@ -146,6 +151,11 @@ func (m *MetricsRequest) contextValidateMachines(ctx context.Context, formats st for i := 0; i < len(m.Machines); i++ { if m.Machines[i] != nil { + + if swag.IsZero(m.Machines[i]) { // not required + return nil + } + if err := m.Machines[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("machines" + "." 
+ strconv.Itoa(i)) From af3116d1a7094273afd9c571c7356d46a0b250ac Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 3 Oct 2024 11:06:39 +0200 Subject: [PATCH 315/581] context propagation: don't store ctx in api controller (#3268) --- pkg/apiserver/apiserver.go | 1 - pkg/apiserver/controllers/controller.go | 3 --- pkg/apiserver/controllers/v1/controller.go | 4 ---- 3 files changed, 8 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 2b2b453348a..8fe500c7f52 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -229,7 +229,6 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { controller := &controllers.Controller{ DBClient: dbClient, - Ectx: ctx, Router: router, Profiles: config.Profiles, Log: clog, diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 29f02723b70..719bb231006 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -1,7 +1,6 @@ package controllers import ( - "context" "net" "net/http" "strings" @@ -18,7 +17,6 @@ import ( ) type Controller struct { - Ectx context.Context DBClient *database.Client Router *gin.Engine Profiles []*csconfig.ProfileCfg @@ -83,7 +81,6 @@ func (c *Controller) NewV1() error { v1Config := v1.ControllerV1Config{ DbClient: c.DBClient, - Ctx: c.Ectx, ProfilesCfg: c.Profiles, DecisionDeleteChan: c.DecisionDeleteChan, AlertsAddChan: c.AlertsAddChan, diff --git a/pkg/apiserver/controllers/v1/controller.go b/pkg/apiserver/controllers/v1/controller.go index 6de4abe3b3b..f8b6aa76ea5 100644 --- a/pkg/apiserver/controllers/v1/controller.go +++ b/pkg/apiserver/controllers/v1/controller.go @@ -1,7 +1,6 @@ package v1 import ( - "context" "fmt" "net" @@ -14,7 +13,6 @@ import ( ) type Controller struct { - Ectx context.Context DBClient *database.Client APIKeyHeader string Middlewares *middlewares.Middlewares @@ -31,7 +29,6 @@ type 
Controller struct { type ControllerV1Config struct { DbClient *database.Client - Ctx context.Context ProfilesCfg []*csconfig.ProfileCfg AlertsAddChan chan []*models.Alert @@ -52,7 +49,6 @@ func New(cfg *ControllerV1Config) (*Controller, error) { } v1 := &Controller{ - Ectx: cfg.Ctx, DBClient: cfg.DbClient, APIKeyHeader: middlewares.APIKeyHeader, Profiles: profiles, From 06adbe031d9928d239cca5b31857471f173b769f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 3 Oct 2024 14:51:53 +0200 Subject: [PATCH 316/581] context propagation: apic, unit tests (#3271) * context propagation: apic * context propagation: unit tests --- .golangci.yml | 6 ++ pkg/apiserver/alerts_test.go | 144 ++++++++++++++-------------- pkg/apiserver/api_key_test.go | 2 +- pkg/apiserver/apic.go | 22 ++--- pkg/apiserver/apic_metrics_test.go | 2 +- pkg/apiserver/apic_test.go | 67 +++++++------ pkg/apiserver/apiserver_test.go | 4 +- pkg/apiserver/decisions_test.go | 101 ++++++++++--------- pkg/apiserver/heartbeat_test.go | 8 +- pkg/apiserver/usage_metrics_test.go | 12 +-- 10 files changed, 189 insertions(+), 179 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 54c0acb0644..a9d962610df 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -487,6 +487,12 @@ issues: path: "pkg/(.+)_test.go" text: "deep-exit: .*" + # we use t,ctx instead of ctx,t in tests + - linters: + - revive + path: "pkg/(.+)_test.go" + text: "context-as-argument: context.Context should be the first parameter of a function" + # tolerate deep exit in cobra's OnInitialize, for now - linters: - revive diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index d801ff86f45..0e89ddb2137 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -26,11 +26,11 @@ type LAPI struct { DBConfig *csconfig.DatabaseCfg } -func SetupLAPITest(t *testing.T) LAPI { +func SetupLAPITest(t *testing.T, ctx context.Context) LAPI { t.Helper() - router, loginResp, config 
:= InitMachineTest(t) + router, loginResp, config := InitMachineTest(t, ctx) - APIKey := CreateTestBouncer(t, config.API.Server.DbConfig) + APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) return LAPI{ router: router, @@ -40,13 +40,12 @@ func SetupLAPITest(t *testing.T) LAPI { } } -func (l *LAPI) InsertAlertFromFile(t *testing.T, path string) *httptest.ResponseRecorder { +func (l *LAPI) InsertAlertFromFile(t *testing.T, ctx context.Context, path string) *httptest.ResponseRecorder { alertReader := GetAlertReaderFromFile(t, path) - return l.RecordResponse(t, http.MethodPost, "/v1/alerts", alertReader, "password") + return l.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertReader, "password") } -func (l *LAPI) RecordResponse(t *testing.T, verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { - ctx := context.Background() +func (l *LAPI) RecordResponse(t *testing.T, ctx context.Context, verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { w := httptest.NewRecorder() req, err := http.NewRequestWithContext(ctx, verb, url, body) require.NoError(t, err) @@ -65,18 +64,17 @@ func (l *LAPI) RecordResponse(t *testing.T, verb string, url string, body *strin return w } -func InitMachineTest(t *testing.T) (*gin.Engine, models.WatcherAuthResponse, csconfig.Config) { +func InitMachineTest(t *testing.T, ctx context.Context) (*gin.Engine, models.WatcherAuthResponse, csconfig.Config) { router, config := NewAPITest(t) - loginResp := LoginToTestAPI(t, router, config) + loginResp := LoginToTestAPI(t, ctx, router, config) return router, loginResp, config } -func LoginToTestAPI(t *testing.T, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { +func LoginToTestAPI(t *testing.T, ctx context.Context, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { body := CreateTestMachine(t, router, "") ValidateMachine(t, "test", config.API.Server.DbConfig) - 
ctx := context.Background() w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) @@ -95,52 +93,55 @@ func AddAuthHeaders(request *http.Request, authResponse models.WatcherAuthRespon } func TestSimulatedAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk+simul.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk+simul.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_minibulk+simul.json") // exclude decision in simulation mode - w := lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", alertContent, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) // include decision in simulation mode - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", alertContent, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", alertContent, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) } func TestCreateAlert(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Alert with invalid format - w := lapi.RecordResponse(t, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") 
assert.Equal(t, 400, w.Code) assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create Alert with invalid input alertContent := GetAlertReaderFromFile(t, "./tests/invalidAlert_sample.json") - w = lapi.RecordResponse(t, http.MethodPost, "/v1/alerts", alertContent, "password") + w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) // Create Valid Alert - w = lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + w = lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assert.Equal(t, 201, w.Code) assert.Equal(t, `["1"]`, w.Body.String()) } func TestCreateAlertChannels(t *testing.T) { + ctx := context.Background() apiServer, config := NewAPIServer(t) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) apiServer.InitController() - loginResp := LoginToTestAPI(t, apiServer.router, config) + loginResp := LoginToTestAPI(t, ctx, apiServer.router, config) lapi := LAPI{router: apiServer.router, loginResp: loginResp} var ( @@ -156,26 +157,27 @@ func TestCreateAlertChannels(t *testing.T) { wg.Done() }() - lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_ssh-bf.json") wg.Wait() assert.Len(t, pd.Alert.Decisions, 1) apiServer.Close() } func TestAlertListFilters(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_ssh-bf.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_ssh-bf.json") // bad filter - w := lapi.RecordResponse(t, "GET", "/v1/alerts?test=test", 
alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", alertContent, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // get without filters - w = lapi.RecordResponse(t, "GET", "/v1/alerts", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") assert.Equal(t, 200, w.Code) // check alert and decision assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") @@ -183,184 +185,185 @@ func TestAlertListFilters(t *testing.T) { // test decision_type filter (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test decision_type filter (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test scope (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scope (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, 
"GET", "/v1/alerts?scope=rarara", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test scenario (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scenario (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test ip (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) // test range (ok) 
- w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test range - w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test range (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=ratata", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=ratata", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) // test since (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1h", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1h", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test since (ok but yields no results) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1ns", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test since (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, 
"GET", "/v1/alerts?since=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test until (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1ns", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test until (ok but no return) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1m", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1m", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test until (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test simulated (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test simulated (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") 
assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision - w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision - w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) // test has active decision (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) } func TestAlertBulkInsert(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // insert a bulk of 20 alerts to trigger bulk insert - lapi.InsertAlertFromFile(t, "./tests/alert_bulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_bulk.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_bulk.json") - w := lapi.RecordResponse(t, "GET", "/v1/alerts", alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", alertContent, "password") assert.Equal(t, 200, w.Code) } func TestListAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, 
"./tests/alert_sample.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // List Alert with invalid filter - w := lapi.RecordResponse(t, "GET", "/v1/alerts?test=test", emptyBody, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // List Alert - w = lapi.RecordResponse(t, "GET", "/v1/alerts", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "crowdsecurity/test") } func TestCreateAlertErrors(t *testing.T) { - lapi := SetupLAPITest(t) - alertContent := GetAlertReaderFromFile(t, "./tests/alert_sample.json") - ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + alertContent := GetAlertReaderFromFile(t, "./tests/alert_sample.json") // test invalid bearer w := httptest.NewRecorder() @@ -380,10 +383,9 @@ func TestCreateAlertErrors(t *testing.T) { } func TestDeleteAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") - ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Fail Delete Alert w := httptest.NewRecorder() @@ -405,10 +407,9 @@ func TestDeleteAlert(t *testing.T) { } func TestDeleteAlertByID(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") - ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Fail Delete Alert w := httptest.NewRecorder() @@ -430,6 +431,7 @@ func TestDeleteAlertByID(t *testing.T) { } func TestDeleteAlertTrustedIPS(t *testing.T) { + ctx := context.Background() cfg := LoadTestConfig(t) // IPv6 mocking doesn't 
seem to work. // cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24", "::"} @@ -444,14 +446,12 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { router, err := server.Router() require.NoError(t, err) - loginResp := LoginToTestAPI(t, router, cfg) + loginResp := LoginToTestAPI(t, ctx, router, cfg) lapi := LAPI{ router: router, loginResp: loginResp, } - ctx := context.Background() - assertAlertDeleteFailedFromIP := func(ip string) { w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) @@ -475,17 +475,17 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) } - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeleteFailedFromIP("4.3.2.1") assertAlertDeletedFromIP("1.2.3.4") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("1.2.4.0") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("1.2.4.1") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("1.2.4.255") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("127.0.0.1") } diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 10e75ae47f1..014f255b892 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -15,7 +15,7 @@ func TestAPIKey(t *testing.T) { ctx := context.Background() - APIKey := CreateTestBouncer(t, config.API.Server.DbConfig) + APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) // Login with empty token w := httptest.NewRecorder() diff --git 
a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index c8768e71b0a..e62bc663c16 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -454,11 +454,9 @@ func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, delet return nbDeleted, nil } -func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { +func (a *apic) HandleDeletedDecisionsV3(ctx context.Context, deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { var nbDeleted int - ctx := context.TODO() - for _, decisions := range deletedDecisions { scope := decisions.Scope @@ -676,7 +674,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { addCounters, deleteCounters := makeAddAndDeleteCounters() // process deleted decisions - nbDeleted, err := a.HandleDeletedDecisionsV3(data.Deleted, deleteCounters) + nbDeleted, err := a.HandleDeletedDecisionsV3(ctx, data.Deleted, deleteCounters) if err != nil { return err } @@ -697,7 +695,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { alertsFromCapi := []*models.Alert{alert} alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) - err = a.SaveAlerts(alertsFromCapi, addCounters, deleteCounters) + err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, deleteCounters) if err != nil { return fmt.Errorf("while saving alerts: %w", err) } @@ -766,9 +764,7 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis return decisions[:outIdx] } -func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) error { - ctx := context.TODO() - +func (a *apic) SaveAlerts(ctx context.Context, alertsFromCapi []*models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) error { for _, 
alert := range alertsFromCapi { setAlertScenario(alert, addCounters, deleteCounters) log.Debugf("%s has %d decisions", *alert.Source.Scope, len(alert.Decisions)) @@ -788,13 +784,13 @@ func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string return nil } -func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bool, error) { +func (a *apic) ShouldForcePullBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink) (bool, error) { // we should force pull if the blocklist decisions are about to expire or there's no decision in the db alertQuery := a.dbClient.Ent.Alert.Query() alertQuery.Where(alert.SourceScopeEQ(fmt.Sprintf("%s:%s", types.ListOrigin, *blocklist.Name))) alertQuery.Order(ent.Desc(alert.FieldCreatedAt)) - alertInstance, err := alertQuery.First(context.Background()) + alertInstance, err := alertQuery.First(ctx) if err != nil { if ent.IsNotFound(err) { log.Debugf("no alert found for %s, force refresh", *blocklist.Name) @@ -807,7 +803,7 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo decisionQuery := a.dbClient.Ent.Decision.Query() decisionQuery.Where(decision.HasOwnerWith(alert.IDEQ(alertInstance.ID))) - firstDecision, err := decisionQuery.First(context.Background()) + firstDecision, err := decisionQuery.First(ctx) if err != nil { if ent.IsNotFound(err) { log.Debugf("no decision found for %s, force refresh", *blocklist.Name) @@ -837,7 +833,7 @@ func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, } if !forcePull { - _forcePull, err := a.ShouldForcePullBlocklist(blocklist) + _forcePull, err := a.ShouldForcePullBlocklist(ctx, blocklist) if err != nil { return fmt.Errorf("while checking if we should force pull blocklist %s: %w", *blocklist.Name, err) } @@ -889,7 +885,7 @@ func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, alertsFromCapi := []*models.Alert{alert} alertsFromCapi = 
fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) - err = a.SaveAlerts(alertsFromCapi, addCounters, nil) + err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, nil) if err != nil { return fmt.Errorf("while saving alert from blocklist %s: %w", *blocklist.Name, err) } diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index 13a24668f26..d81af03f710 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -76,7 +76,7 @@ func TestAPICSendMetrics(t *testing.T) { ) require.NoError(t, err) - api := getAPIC(t) + api := getAPIC(t, ctx) api.pushInterval = time.Millisecond api.pushIntervalFirst = time.Millisecond api.apiClient = apiClient diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index a215edb2fbd..51b1f43c707 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -34,11 +34,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func getDBClient(t *testing.T) *database.Client { +func getDBClient(t *testing.T, ctx context.Context) *database.Client { t.Helper() - ctx := context.Background() - dbPath, err := os.CreateTemp("", "*sqlite") require.NoError(t, err) dbClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ @@ -51,9 +49,9 @@ func getDBClient(t *testing.T) *database.Client { return dbClient } -func getAPIC(t *testing.T) *apic { +func getAPIC(t *testing.T, ctx context.Context) *apic { t.Helper() - dbClient := getDBClient(t) + dbClient := getDBClient(t, ctx) return &apic{ AlertsAddChan: make(chan []*models.Alert), @@ -84,8 +82,8 @@ func absDiff(a int, b int) int { return c } -func assertTotalDecisionCount(t *testing.T, dbClient *database.Client, count int) { - d := dbClient.Ent.Decision.Query().AllX(context.Background()) +func assertTotalDecisionCount(t *testing.T, ctx context.Context, dbClient *database.Client, count int) { + d := dbClient.Ent.Decision.Query().AllX(ctx) assert.Len(t, d, count) } @@ -111,9 +109,8 @@ func 
assertTotalAlertCount(t *testing.T, dbClient *database.Client, count int) { } func TestAPICCAPIPullIsOld(t *testing.T) { - api := getAPIC(t) - ctx := context.Background() + api := getAPIC(t, ctx) isOld, err := api.CAPIPullIsOld(ctx) require.NoError(t, err) @@ -169,7 +166,7 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - api := getAPIC(t) + api := getAPIC(t, ctx) for machineID, scenarios := range tc.machineIDsWithScenarios { api.dbClient.Ent.Machine.Create(). SetMachineId(machineID). @@ -183,7 +180,7 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { require.NoError(t, err) for machineID := range tc.machineIDsWithScenarios { - api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(context.Background()) + api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(ctx) } assert.ElementsMatch(t, tc.expectedScenarios, scenarios) @@ -192,6 +189,8 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { } func TestNewAPIC(t *testing.T) { + ctx := context.Background() + var testConfig *csconfig.OnlineApiClientCfg setConfig := func() { @@ -219,7 +218,7 @@ func TestNewAPIC(t *testing.T) { name: "simple", action: func() {}, args: args{ - dbClient: getDBClient(t), + dbClient: getDBClient(t, ctx), consoleConfig: LoadTestConfig(t).API.Server.ConsoleConfig, }, }, @@ -227,15 +226,13 @@ func TestNewAPIC(t *testing.T) { name: "error in parsing URL", action: func() { testConfig.Credentials.URL = "foobar http://" }, args: args{ - dbClient: getDBClient(t), + dbClient: getDBClient(t, ctx), consoleConfig: LoadTestConfig(t).API.Server.ConsoleConfig, }, expectedErr: "first path segment in URL cannot contain colon", }, } - ctx := context.Background() - for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { setConfig() @@ -259,7 +256,8 @@ func TestNewAPIC(t *testing.T) { } func TestAPICHandleDeletedDecisions(t *testing.T) { - api := getAPIC(t) + ctx := 
context.Background() + api := getAPIC(t, ctx) _, deleteCounters := makeAddAndDeleteCounters() decision1 := api.dbClient.Ent.Decision.Create(). @@ -280,7 +278,7 @@ func TestAPICHandleDeletedDecisions(t *testing.T) { SetOrigin(types.CAPIOrigin). SaveX(context.Background()) - assertTotalDecisionCount(t, api.dbClient, 2) + assertTotalDecisionCount(t, ctx, api.dbClient, 2) nbDeleted, err := api.HandleDeletedDecisions([]*models.Decision{{ Value: ptr.Of("1.2.3.4"), @@ -359,7 +357,7 @@ func TestAPICGetMetrics(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - apiClient := getAPIC(t) + apiClient := getAPIC(t, ctx) cleanUp(apiClient) for i, machineID := range tc.machineIDs { @@ -370,7 +368,7 @@ func TestAPICGetMetrics(t *testing.T) { SetScenarios("crowdsecurity/test"). SetLastPush(time.Time{}). SetUpdatedAt(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) } for i, bouncerName := range tc.bouncers { @@ -380,7 +378,7 @@ func TestAPICGetMetrics(t *testing.T) { SetAPIKey("foobar"). SetRevoked(false). SetLastPull(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) } foundMetrics, err := apiClient.GetMetrics(ctx) @@ -555,7 +553,7 @@ func TestFillAlertsWithDecisions(t *testing.T) { func TestAPICWhitelists(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) // one whitelist on IP, one on CIDR api.whitelists = &csconfig.CapiWhitelist{} api.whitelists.Ips = append(api.whitelists.Ips, net.ParseIP("9.2.3.4"), net.ParseIP("7.2.3.4")) @@ -578,7 +576,7 @@ func TestAPICWhitelists(t *testing.T) { SetScenario("crowdsecurity/ssh-bf"). SetUntil(time.Now().Add(time.Hour)). 
ExecX(context.Background()) - assertTotalDecisionCount(t, api.dbClient, 1) + assertTotalDecisionCount(t, ctx, api.dbClient, 1) assertTotalValidDecisionCount(t, api.dbClient, 1) httpmock.Activate() @@ -693,7 +691,7 @@ func TestAPICWhitelists(t *testing.T) { err = api.PullTop(ctx, false) require.NoError(t, err) - assertTotalDecisionCount(t, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing + assertTotalDecisionCount(t, ctx, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) @@ -742,7 +740,7 @@ func TestAPICWhitelists(t *testing.T) { func TestAPICPullTop(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) api.dbClient.Ent.Decision.Create(). SetOrigin(types.CAPIOrigin). SetType("ban"). @@ -750,8 +748,8 @@ func TestAPICPullTop(t *testing.T) { SetScope("Ip"). SetScenario("crowdsecurity/ssh-bf"). SetUntil(time.Now().Add(time.Hour)). - ExecX(context.Background()) - assertTotalDecisionCount(t, api.dbClient, 1) + ExecX(ctx) + assertTotalDecisionCount(t, ctx, api.dbClient, 1) assertTotalValidDecisionCount(t, api.dbClient, 1) httpmock.Activate() @@ -835,7 +833,7 @@ func TestAPICPullTop(t *testing.T) { err = api.PullTop(ctx, false) require.NoError(t, err) - assertTotalDecisionCount(t, api.dbClient, 5) + assertTotalDecisionCount(t, ctx, api.dbClient, 5) assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) @@ -868,7 +866,7 @@ func TestAPICPullTop(t *testing.T) { func TestAPICPullTopBLCacheFirstCall(t *testing.T) { ctx := context.Background() // no decision in db, no last modified parameter. 
- api := getAPIC(t) + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -943,7 +941,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { func TestAPICPullTopBLCacheForceCall(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -1019,7 +1017,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { func TestAPICPullBlocklistCall(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -1052,6 +1050,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { } func TestAPICPush(t *testing.T) { + ctx := context.Background() tests := []struct { name string alerts []*models.Alert @@ -1105,7 +1104,7 @@ func TestAPICPush(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - api := getAPIC(t) + api := getAPIC(t, ctx) api.pushInterval = time.Millisecond api.pushIntervalFirst = time.Millisecond url, err := url.ParseRequestURI("http://api.crowdsec.net/") @@ -1144,7 +1143,7 @@ func TestAPICPush(t *testing.T) { func TestAPICPull(t *testing.T) { ctx := context.Background() - api := getAPIC(t) + api := getAPIC(t, ctx) tests := []struct { name string setUp func() @@ -1172,7 +1171,7 @@ func TestAPICPull(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - api = getAPIC(t) + api = getAPIC(t, ctx) api.pullInterval = time.Millisecond api.pullIntervalFirst = time.Millisecond url, err := url.ParseRequestURI("http://api.crowdsec.net/") @@ -1223,7 +1222,7 @@ func TestAPICPull(t *testing.T) { time.Sleep(time.Millisecond * 500) logrus.SetOutput(os.Stderr) assert.Contains(t, buf.String(), tc.logContains) - assertTotalDecisionCount(t, api.dbClient, tc.expectedDecisionCount) + assertTotalDecisionCount(t, ctx, api.dbClient, tc.expectedDecisionCount) }) } } diff --git a/pkg/apiserver/apiserver_test.go 
b/pkg/apiserver/apiserver_test.go index 081fa23f15e..c3f69c5c365 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -288,9 +288,7 @@ func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { return body } -func CreateTestBouncer(t *testing.T, config *csconfig.DatabaseCfg) string { - ctx := context.Background() - +func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.DatabaseCfg) string { dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index 1c70c495a3a..a0af6956443 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -12,82 +13,86 @@ const ( ) func TestDeleteDecisionRange(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by ip wrong - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by range - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) // delete by range : ensure it was already deleted - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", 
"/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) } func TestDeleteDecisionFilter(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by ip wrong - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by ip good - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) // delete by scope/value - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteDecisionFilterByScenario(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by wrong scenario - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by scenario good - 
w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) } func TestGetDecisionFilters(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // Get Decision - w := lapi.RecordResponse(t, "GET", "/v1/decisions", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code := readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -101,7 +106,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : type filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?type=ban", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?type=ban", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -118,7 +123,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : scope/value - w = lapi.RecordResponse(t, "GET", "/v1/decisions?scopes=Ip&value=91.121.79.179", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?scopes=Ip&value=91.121.79.179", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -132,7 +137,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : ip filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?ip=91.121.79.179", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?ip=91.121.79.179", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ 
-145,7 +150,7 @@ func TestGetDecisionFilters(t *testing.T) { // assert.NotContains(t, w.Body.String(), `"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.178"`) // Get decision : by range - w = lapi.RecordResponse(t, "GET", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -155,13 +160,14 @@ func TestGetDecisionFilters(t *testing.T) { } func TestGetDecision(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Get Decision - w := lapi.RecordResponse(t, "GET", "/v1/decisions", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code := readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -180,51 +186,52 @@ func TestGetDecision(t *testing.T) { assert.Equal(t, int64(3), decisions[2].ID) // Get Decision with invalid filter. 
It should ignore this filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?test=test", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?test=test", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) assert.Len(t, decisions, 3) } func TestDeleteDecisionByID(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Have one alert - w := lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code := readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) assert.Len(t, decisions["new"], 1) // Delete alert with Invalid ID - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/test", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/test", emptyBody, PASSWORD) assert.Equal(t, 400, w.Code) errResp, _ := readDecisionsErrorResp(t, w) assert.Equal(t, "decision_id must be valid integer", errResp["message"]) // Delete alert with ID that not exist - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/100", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/100", emptyBody, PASSWORD) assert.Equal(t, 500, w.Code) errResp, _ = readDecisionsErrorResp(t, w) assert.Equal(t, "decision with id '100' doesn't exist: unable to delete", errResp["message"]) // Have one alert - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) assert.Len(t, decisions["new"], 1) // Delete alert with valid ID - w 
= lapi.RecordResponse(t, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) resp, _ := readDecisionsDeleteResp(t, w) assert.Equal(t, "1", resp.NbDeleted) // Have one alert (because we delete an alert that has dup targets) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -232,33 +239,35 @@ func TestDeleteDecisionByID(t *testing.T) { } func TestDeleteDecision(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Delete alert with Invalid filter - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?test=test", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?test=test", emptyBody, PASSWORD) assert.Equal(t, 500, w.Code) errResp, _ := readDecisionsErrorResp(t, w) assert.Equal(t, "'test' doesn't exist: invalid filter", errResp["message"]) // Delete all alert - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) resp, _ := readDecisionsDeleteResp(t, w) assert.Equal(t, "3", resp.NbDeleted) } func TestStreamStartDecisionDedup(t *testing.T) { + ctx := context.Background() // Ensure that at stream startup we only get the longest decision - lapi := SetupLAPITest(t) + lapi := SetupLAPITest(t, ctx) // Create Valid Alert : 3 decisions for 127.0.0.1, longest has id=3 - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, 
"./tests/alert_sample.json") // Get Stream, we only get one decision (the longest one) - w := lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code := readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -268,11 +277,11 @@ func TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // id=3 decision is deleted, this won't affect `deleted`, because there are decisions on the same ip - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/3", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/3", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) // Get Stream, we only get one decision (the longest one, id=2) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -282,11 +291,11 @@ func TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // We delete another decision, yet don't receive it in stream, since there's another decision on same IP - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/2", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/2", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) // And get the remaining decision (1) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -296,11 +305,11 @@ func 
TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // We delete the last decision, we receive the delete order - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) // and now we only get a deleted decision - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Len(t, decisions["deleted"], 1) diff --git a/pkg/apiserver/heartbeat_test.go b/pkg/apiserver/heartbeat_test.go index fbf01c7fb8e..db051566f75 100644 --- a/pkg/apiserver/heartbeat_test.go +++ b/pkg/apiserver/heartbeat_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "net/http" "testing" @@ -8,11 +9,12 @@ import ( ) func TestHeartBeat(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) - w := lapi.RecordResponse(t, http.MethodGet, "/v1/heartbeat", emptyBody, "password") + w := lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/heartbeat", emptyBody, "password") assert.Equal(t, 200, w.Code) - w = lapi.RecordResponse(t, "POST", "/v1/heartbeat", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "POST", "/v1/heartbeat", emptyBody, "password") assert.Equal(t, 405, w.Code) } diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 4d17b0d6ed8..32aeb7d9a5a 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -187,14 +187,14 @@ func TestLPMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lapi := SetupLAPITest(t) + lapi := SetupLAPITest(t, ctx) - dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + dbClient, err := 
database.NewClient(ctx, lapi.DBConfig) if err != nil { t.Fatalf("unable to create database client: %s", err) } - w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) @@ -359,14 +359,14 @@ func TestRCMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lapi := SetupLAPITest(t) + lapi := SetupLAPITest(t, ctx) - dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + dbClient, err := database.NewClient(ctx, lapi.DBConfig) if err != nil { t.Fatalf("unable to create database client: %s", err) } - w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) From 4e3495dbba38d65705c79f946b8da8fb91ac6252 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 3 Oct 2024 15:27:48 +0200 Subject: [PATCH 317/581] lint/revive: check tags on non-exported struct fields (#3257) * lint/revive: check tags on non-exported struct fields * update .golangci.yaml; remove full list of enabled linters * lint --- .golangci.yml | 69 +-------------------------- cmd/crowdsec-cli/climetrics/show.go | 3 +- cmd/crowdsec-cli/dashboard.go | 3 +- pkg/appsec/appsec_rules_collection.go | 10 ++-- pkg/leakybucket/manager_load.go | 22 ++++----- 5 files changed, 20 insertions(+), 87 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a9d962610df..786bb18d8e7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -135,14 +135,10 @@ linters-settings: arguments: [7] - name: 
max-public-structs disabled: true - - name: optimize-operands-order - disabled: true - name: nested-structs disabled: true - name: package-comments disabled: true - - name: struct-tag - disabled: true - name: redundant-import-alias disabled: true - name: time-equal @@ -217,6 +213,7 @@ linters: # - execinquery - exportloopref + - gomnd # # Redundant @@ -232,69 +229,6 @@ linters: - intrange # intrange is a linter to find places where for loops could make use of an integer range. - # - # Enabled - # - - # - asasalint # check for pass []any as any in variadic func(...any) - # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name - # - bidichk # Checks for dangerous unicode character sequences - # - bodyclose # checks whether HTTP response body is closed successfully - # - copyloopvar # copyloopvar is a linter detects places where loop variables are copied - # - decorder # check declaration order and count of types, constants, variables and functions - # - depguard # Go linter that checks if package imports are in a list of acceptable packages - # - dupword # checks for duplicate words in the source code - # - durationcheck # check for two durations multiplied together - # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases - # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - # - ginkgolinter # enforces standards of using ginkgo and gomega - # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. - # - gochecknoinits # Checks that no init functions are present in Go code - # - gochecksumtype # Run exhaustiveness checks on Go "sum types" - # - gocritic # Provides diagnostics that check for bugs, performance and style issues. 
- # - goheader # Checks is file header matches to pattern - # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. - # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. - # - goprintffuncname # Checks that printf-like functions are named with `f` at the end - # - gosimple # (megacheck): Linter for Go source code that specializes in simplifying code - # - gosmopolitan # Report certain i18n/l10n anti-patterns in your Go codebase - # - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs. It is roughly the same as 'go vet' and uses its passes. - # - grouper # Analyze expression groups. - # - importas # Enforces consistent import aliases - # - ineffassign # Detects when assignments to existing variables are not used - # - interfacebloat # A linter that checks the number of methods inside an interface. - # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). - # - logrlint # Check logr arguments. - # - maintidx # maintidx measures the maintainability index of each function. - # - makezero # Finds slice declarations with non-zero initial length - # - mirror # reports wrong mirror patterns of bytes/strings usage - # - misspell # Finds commonly misspelled English words - # - nakedret # Checks that functions with naked returns are not longer than a maximum size (can be zero). - # - nestif # Reports deeply nested if statements - # - nilerr # Finds the code that returns nil even if it checks that the error is not nil. - # - nolintlint # Reports ill-formed or insufficient nolint directives - # - nonamedreturns # Reports all named returns - # - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL. 
- # - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative. - # - predeclared # find code that shadows one of Go's predeclared identifiers - # - reassign # Checks that package variables are not reassigned - # - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - # - rowserrcheck # checks whether Rows.Err of rows is checked successfully - # - sloglint # ensure consistent code style when using log/slog - # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. - # - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed. - # - staticcheck # (megacheck): It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint. - # - stylecheck # Stylecheck is a replacement for golint - # - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 - # - testableexamples # linter checks if examples are testable (have an expected output) - # - testifylint # Checks usage of github.com/stretchr/testify. - # - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes - # - unconvert # Remove unnecessary type conversions - # - unused # (megacheck): Checks Go code for unused constants, variables, functions and types - # - usestdlibvars # A linter that detect the possibility to use variables/constants from the Go standard library. - # - wastedassign # Finds wasted assignment statements - # - zerologlint # Detects the wrong usage of `zerolog` that a user forgets to dispatch with `Send` or `Msg` - # # Recommended? 
(easy) # @@ -322,7 +256,6 @@ linters: - containedctx # containedctx is a linter that detects struct contained context.Context field - contextcheck # check whether the function uses a non-inherited context - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - - gomnd # An analyzer to detect magic numbers. - ireturn # Accept Interfaces, Return Concrete Types - mnd # An analyzer to detect magic numbers. - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. diff --git a/cmd/crowdsec-cli/climetrics/show.go b/cmd/crowdsec-cli/climetrics/show.go index 7559463b66b..045959048f6 100644 --- a/cmd/crowdsec-cli/climetrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -5,9 +5,8 @@ import ( "errors" "fmt" - log "github.com/sirupsen/logrus" - "github.com/fatih/color" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 13cebe3dbd5..41db9e6cbf2 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -20,10 +20,11 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" - "github.com/crowdsecurity/go-cs-lib/version" ) var ( diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index 09c1670de70..d283f95cb19 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -29,11 +29,11 @@ type AppsecCollectionConfig struct { SecLangRules []string `yaml:"seclang_rules"` Rules []appsec_rule.CustomRule `yaml:"rules"` - Labels map[string]interface{} `yaml:"labels"` //Labels is K:V list aiming at providing context 
the overflow + Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow - Data interface{} `yaml:"data"` //Ignore it - hash string `yaml:"-"` - version string `yaml:"-"` + Data interface{} `yaml:"data"` // Ignore it + hash string + version string } type RulesDetails struct { @@ -108,7 +108,7 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro logger.Debugf("Adding rule %s", strRule) appsecCol.Rules = append(appsecCol.Rules, strRule) - //We only take the first id, as it's the one of the "main" rule + // We only take the first id, as it's the one of the "main" rule if _, ok := AppsecRulesDetails[int(rulesId[0])]; !ok { AppsecRulesDetails[int(rulesId[0])] = RulesDetails{ LogLevel: log.InfoLevel, diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 1b62b29dc3c..b8310b8cb17 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -45,12 +45,12 @@ type BucketFactory struct { Debug bool `yaml:"debug"` // Debug, when set to true, will enable debugging for _this_ scenario specifically Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow Blackhole string `yaml:"blackhole,omitempty"` // Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration - logger *log.Entry `yaml:"-"` // logger is bucket-specific logger (used by Debug as well) - Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain - CacheSize int `yaml:"cache_size"` // CacheSize, if > 0, limits the size of in-memory cache of the bucket - Profiling bool `yaml:"profiling"` // Profiling, if true, will make the bucket record pours/overflows/etc. 
- OverflowFilter string `yaml:"overflow_filter"` // OverflowFilter if present, is a filter that must return true for the overflow to go through - ConditionalOverflow string `yaml:"condition"` // condition if present, is an expression that must return true for the bucket to overflow + logger *log.Entry // logger is bucket-specific logger (used by Debug as well) + Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain + CacheSize int `yaml:"cache_size"` // CacheSize, if > 0, limits the size of in-memory cache of the bucket + Profiling bool `yaml:"profiling"` // Profiling, if true, will make the bucket record pours/overflows/etc. + OverflowFilter string `yaml:"overflow_filter"` // OverflowFilter if present, is a filter that must return true for the overflow to go through + ConditionalOverflow string `yaml:"condition"` // condition if present, is an expression that must return true for the bucket to overflow BayesianPrior float32 `yaml:"bayesian_prior"` BayesianThreshold float32 `yaml:"bayesian_threshold"` BayesianConditions []RawBayesianCondition `yaml:"bayesian_conditions"` // conditions for the bayesian bucket @@ -68,11 +68,11 @@ type BucketFactory struct { processors []Processor // processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) output bool // ?? 
ScenarioVersion string `yaml:"version,omitempty"` - hash string `yaml:"-"` - Simulated bool `yaml:"simulated"` // Set to true if the scenario instantiating the bucket was in the exclusion list - tomb *tomb.Tomb `yaml:"-"` - wgPour *sync.WaitGroup `yaml:"-"` - wgDumpState *sync.WaitGroup `yaml:"-"` + hash string + Simulated bool `yaml:"simulated"` // Set to true if the scenario instantiating the bucket was in the exclusion list + tomb *tomb.Tomb + wgPour *sync.WaitGroup + wgDumpState *sync.WaitGroup orderEvent bool } From f6af791b835991d9c77deee6817eb544700016fc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 4 Oct 2024 11:28:18 +0200 Subject: [PATCH 318/581] CI: update test dependencies (#3267) --- docker/test/Pipfile.lock | 331 ++++++++++++++++++++------------------- 1 file changed, 174 insertions(+), 157 deletions(-) diff --git a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock index 2cb587b6b88..99184d9f2a2 100644 --- a/docker/test/Pipfile.lock +++ b/docker/test/Pipfile.lock @@ -18,69 +18,84 @@ "default": { "certifi": { "hashes": [ - "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", - "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90" + "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", + "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" ], "markers": "python_version >= '3.6'", - "version": "==2024.7.4" + "version": "==2024.8.30" }, "cffi": { "hashes": [ - "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", - "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", - "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", - "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", - "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", - "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", - 
"sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", - "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", - "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", - "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", - "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", - "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", - "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", - "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", - "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", - "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", - "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", - "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", - "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", - "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", - "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", - "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", - "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", - "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", - "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", - "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", - "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", - "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", - "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", - "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", - "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", - "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", 
- "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", - "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", - "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", - "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", - "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", - "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", - "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", - "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", - "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", - "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", - "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", - "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", - "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", - "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", - "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", - "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", - "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", - "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", - "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", - "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" + "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", + "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", + "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1", + "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", + "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", + 
"sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", + "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", + "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", + "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", + "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", + "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc", + "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", + "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", + "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", + "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", + "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", + "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", + "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", + "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", + "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b", + "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", + "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", + "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c", + "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", + "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", + "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", + "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8", + "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1", + "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", + "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", + "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", 
+ "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", + "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", + "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", + "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", + "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", + "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", + "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", + "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", + "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", + "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", + "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", + "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", + "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964", + "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", + "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", + "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", + "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", + "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", + "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", + "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", + "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", + "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", + "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", + "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", + "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", + 
"sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", + "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9", + "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", + "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", + "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", + "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", + "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", + "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", + "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", + "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", + "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b" ], "markers": "platform_python_implementation != 'PyPy'", - "version": "==1.16.0" + "version": "==1.17.1" }, "charset-normalizer": { "hashes": [ @@ -180,36 +195,36 @@ }, "cryptography": { "hashes": [ - "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709", - "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069", - "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2", - "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b", - "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e", - "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70", - "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778", - "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22", - "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895", - "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf", - "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431", - "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f", - 
"sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947", - "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74", - "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc", - "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66", - "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66", - "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf", - "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f", - "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5", - "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e", - "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f", - "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55", - "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1", - "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47", - "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5", - "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0" + "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494", + "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806", + "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d", + "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062", + "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2", + "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4", + "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1", + "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85", + "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84", + "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042", + "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d", 
+ "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962", + "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2", + "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa", + "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d", + "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365", + "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96", + "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47", + "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d", + "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d", + "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c", + "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb", + "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277", + "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172", + "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034", + "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a", + "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289" ], "markers": "python_version >= '3.7'", - "version": "==43.0.0" + "version": "==43.0.1" }, "docker": { "hashes": [ @@ -229,11 +244,11 @@ }, "idna": { "hashes": [ - "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", - "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" ], - "markers": "python_version >= '3.5'", - "version": "==3.7" + "markers": "python_version >= '3.6'", + "version": "==3.10" }, "iniconfig": { "hashes": [ @@ -292,11 +307,11 @@ }, "pytest": { "hashes": [ - "sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6", - 
"sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c" + "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181", + "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2" ], "markers": "python_version >= '3.8'", - "version": "==8.3.1" + "version": "==8.3.3" }, "pytest-cs": { "git": "https://github.com/crowdsecurity/pytest-cs.git", @@ -337,60 +352,62 @@ }, "pyyaml": { "hashes": [ - "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", - "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", - "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", - "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", - "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", - "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", - "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", - "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", - "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", - "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", - "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", - "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", - "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", - "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", - "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", - "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", - "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", - "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", - "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", - "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", - 
"sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", - "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", - "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", - "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", - "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", - "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", - "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", - "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", - "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", - "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", - "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", - "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", - "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", - "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", - "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", - "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", - "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", - "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", - "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", - "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", - "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", - "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", - "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", - "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", - "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", - "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", 
- "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", - "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", - "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", - "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", - "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" + "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", + "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", + "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", + "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", + "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", + "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", + "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", + "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", + "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", + "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", + "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", + "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", + "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", + "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", + "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", + "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", + "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", + "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", + "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", + "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", + 
"sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", + "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", + "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", + "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", + "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", + "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", + "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", + "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", + "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", + "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", + "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", + "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", + "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", + "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", + "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", + "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", + "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", + "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", + "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", + "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", + "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", + "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", + "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", + "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", + "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", + "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", 
+ "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", + "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", + "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", + "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", + "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", + "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", + "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" ], - "markers": "python_version >= '3.6'", - "version": "==6.0.1" + "markers": "python_version >= '3.8'", + "version": "==6.0.2" }, "requests": { "hashes": [ @@ -410,11 +427,11 @@ }, "urllib3": { "hashes": [ - "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", - "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" ], "markers": "python_version >= '3.8'", - "version": "==2.2.2" + "version": "==2.2.3" } }, "develop": { @@ -435,11 +452,11 @@ }, "executing": { "hashes": [ - "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147", - "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc" + "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", + "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab" ], - "markers": "python_version >= '3.5'", - "version": "==2.0.1" + "markers": "python_version >= '3.8'", + "version": "==2.1.0" }, "gnureadline": { "hashes": [ @@ -485,11 +502,11 @@ }, "ipython": { "hashes": [ - "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c", - "sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff" + "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a", + 
"sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35" ], "markers": "python_version >= '3.11'", - "version": "==8.26.0" + "version": "==8.28.0" }, "jedi": { "hashes": [ @@ -525,11 +542,11 @@ }, "prompt-toolkit": { "hashes": [ - "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", - "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360" + "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90", + "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e" ], "markers": "python_full_version >= '3.7.0'", - "version": "==3.0.47" + "version": "==3.0.48" }, "ptyprocess": { "hashes": [ From 40021b6bcf40cb724c4444c79239223c0419ec71 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 4 Oct 2024 13:15:16 +0200 Subject: [PATCH 319/581] CI: update coverage ignore list for generated code (#3262) * CI: update coverage ignore list for generated code * CI: generate codecov.yml automatically --- .github/codecov-ignore-generated.sh | 17 +++ .github/codecov.yml | 139 --------------------- .github/workflows/bats-sqlite-coverage.yml | 4 + .github/workflows/go-tests-windows.yml | 4 + .github/workflows/go-tests.yml | 4 + .gitignore | 3 + 6 files changed, 32 insertions(+), 139 deletions(-) delete mode 100644 .github/codecov.yml diff --git a/.github/codecov-ignore-generated.sh b/.github/codecov-ignore-generated.sh index 3c896d47be7..0f59b1de4a0 100755 --- a/.github/codecov-ignore-generated.sh +++ b/.github/codecov-ignore-generated.sh @@ -4,6 +4,23 @@ # # .github/codecov-ignore-generated.sh >> .github/codecov.yml +cat <>>>> $file"; cat $file; echo; done if: ${{ always() }} + - name: Ignore-list of generated files for codecov + run: | + .github/codecov-ignore-generated.sh >> .github/codecov.yml + - name: Upload bats coverage to codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/go-tests-windows.yml 
b/.github/workflows/go-tests-windows.yml index a31e42cf702..488756d0059 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -47,6 +47,10 @@ jobs: if(!$?) { cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter; Exit 1 } cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + - name: Ignore-list of generated files for codecov + run: | + .github/codecov-ignore-generated.sh >> .github/codecov.yml + - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index a6c72a91af6..52de434ad9b 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -165,6 +165,10 @@ jobs: set -o pipefail make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + - name: Ignore-list of generated files for codecov + run: | + .github/codecov-ignore-generated.sh >> .github/codecov.yml + - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.gitignore b/.gitignore index 6e6624fd282..d76efcbfc48 100644 --- a/.gitignore +++ b/.gitignore @@ -60,3 +60,6 @@ msi __pycache__ *.py[cod] *.egg-info + +# automatically generated before running codecov +.github/codecov.yml From b9bccfa56f3393dccf19ca97b4a2673efc0feaff Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 9 Oct 2024 13:06:03 +0200 Subject: [PATCH 320/581] context propagation: pkg/apiserver (#3272) * context propagation: apic.Push() * context propagation: NewServer() * lint --- .golangci.yml | 2 +- cmd/crowdsec-cli/clipapi/papi.go | 2 +- cmd/crowdsec/api.go | 10 ++++++-- cmd/crowdsec/serve.go | 10 ++++---- pkg/apiserver/alerts_test.go | 6 ++--- pkg/apiserver/api_key_test.go | 3 +-- pkg/apiserver/apic.go | 12 +++++----- pkg/apiserver/apic_test.go | 2 +- pkg/apiserver/apiserver.go | 10 ++++---- pkg/apiserver/apiserver_test.go | 40 
++++++++++++++++---------------- pkg/apiserver/jwt_test.go | 3 +-- pkg/apiserver/machines_test.go | 20 ++++++---------- 12 files changed, 59 insertions(+), 61 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 786bb18d8e7..4909d3e60c0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -321,7 +321,7 @@ issues: # `err` is often shadowed, we may continue to do it - linters: - govet - text: "shadow: declaration of \"err\" shadows declaration" + text: "shadow: declaration of \"(err|ctx)\" shadows declaration" - linters: - errcheck diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index b8101a0fb34..461215c3a39 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -127,7 +127,7 @@ func (cli *cliPapi) sync(ctx context.Context, out io.Writer, db *database.Client return fmt.Errorf("unable to initialize API client: %w", err) } - t.Go(apic.Push) + t.Go(func() error { return apic.Push(ctx) }) papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) if err != nil { diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index c57b8d87cff..6ab41def16f 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -1,6 +1,7 @@ package main import ( + "context" "errors" "fmt" "runtime" @@ -14,12 +15,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { +func initAPIServer(ctx context.Context, cConfig *csconfig.Config) (*apiserver.APIServer, error) { if cConfig.API.Server.OnlineClient == nil || cConfig.API.Server.OnlineClient.Credentials == nil { log.Info("push and pull to Central API disabled") } - apiServer, err := apiserver.NewServer(cConfig.API.Server) + apiServer, err := apiserver.NewServer(ctx, cConfig.API.Server) if err != nil { return nil, fmt.Errorf("unable to run local API: %w", err) } @@ -58,11 +59,14 @@ func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { 
func serveAPIServer(apiServer *apiserver.APIServer) { apiReady := make(chan bool, 1) + apiTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveAPIServer") + go func() { defer trace.CatchPanic("crowdsec/runAPIServer") log.Debugf("serving API after %s ms", time.Since(crowdsecT0)) + if err := apiServer.Run(apiReady); err != nil { log.Fatal(err) } @@ -76,6 +80,7 @@ func serveAPIServer(apiServer *apiserver.APIServer) { <-apiTomb.Dying() // lock until go routine is dying pluginTomb.Kill(nil) log.Infof("serve: shutting down api server") + return apiServer.Shutdown() }) <-apiReady @@ -87,5 +92,6 @@ func hasPlugins(profiles []*csconfig.ProfileCfg) bool { return true } } + return false } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index f1a658e9512..14602c425fe 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -52,6 +52,8 @@ func debugHandler(sig os.Signal, cConfig *csconfig.Config) error { func reloadHandler(sig os.Signal) (*csconfig.Config, error) { var tmpFile string + ctx := context.TODO() + // re-initialize tombs acquisTomb = tomb.Tomb{} parsersTomb = tomb.Tomb{} @@ -74,7 +76,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { cConfig.API.Server.OnlineClient = nil } - apiServer, err := initAPIServer(cConfig) + apiServer, err := initAPIServer(ctx, cConfig) if err != nil { return nil, fmt.Errorf("unable to init api server: %w", err) } @@ -88,7 +90,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { return nil, err } - if err := hub.Load(); err != nil { + if err = hub.Load(); err != nil { return nil, err } @@ -374,7 +376,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { cConfig.API.Server.OnlineClient = nil } - apiServer, err := initAPIServer(cConfig) + apiServer, err := initAPIServer(ctx, cConfig) if err != nil { return fmt.Errorf("api server init: %w", err) } @@ -390,7 +392,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { return err } - if err := hub.Load(); 
err != nil { + if err = hub.Load(); err != nil { return err } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 0e89ddb2137..cd981f76542 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -65,7 +65,7 @@ func (l *LAPI) RecordResponse(t *testing.T, ctx context.Context, verb string, ur } func InitMachineTest(t *testing.T, ctx context.Context) (*gin.Engine, models.WatcherAuthResponse, csconfig.Config) { - router, config := NewAPITest(t) + router, config := NewAPITest(t, ctx) loginResp := LoginToTestAPI(t, ctx, router, config) return router, loginResp, config @@ -137,7 +137,7 @@ func TestCreateAlert(t *testing.T) { func TestCreateAlertChannels(t *testing.T) { ctx := context.Background() - apiServer, config := NewAPIServer(t) + apiServer, config := NewAPIServer(t, ctx) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) apiServer.InitController() @@ -437,7 +437,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { // cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24", "::"} cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24"} cfg.API.Server.ListenURI = "::8080" - server, err := NewServer(cfg.API.Server) + server, err := NewServer(ctx, cfg.API.Server) require.NoError(t, err) err = server.InitController() diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 014f255b892..e6ed68a6e0d 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -11,9 +11,8 @@ import ( ) func TestAPIKey(t *testing.T) { - router, config := NewAPITest(t) - ctx := context.Background() + router, config := NewAPITest(t, ctx) APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index e62bc663c16..a2fb0e85749 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -256,7 +256,7 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient } // keep track of all 
alerts in cache and push it to CAPI every PushInterval. -func (a *apic) Push() error { +func (a *apic) Push(ctx context.Context) error { defer trace.CatchPanic("lapi/pushToAPIC") var cache models.AddSignalsRequest @@ -276,7 +276,7 @@ func (a *apic) Push() error { return nil } - go a.Send(&cache) + go a.Send(ctx, &cache) return nil case <-ticker.C: @@ -289,7 +289,7 @@ func (a *apic) Push() error { a.mu.Unlock() log.Infof("Signal push: %d signals to push", len(cacheCopy)) - go a.Send(&cacheCopy) + go a.Send(ctx, &cacheCopy) } case alerts := <-a.AlertsAddChan: var signals []*models.AddSignalsRequestItem @@ -351,7 +351,7 @@ func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig return true } -func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { +func (a *apic) Send(ctx context.Context, cacheOrig *models.AddSignalsRequest) { /*we do have a problem with this : The apic.Push background routine reads from alertToPush chan. This chan is filled by Controller.CreateAlert @@ -375,7 +375,7 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { for { if pageEnd >= len(cache) { send = cache[pageStart:] - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -389,7 +389,7 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { } send = cache[pageStart:pageEnd] - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 51b1f43c707..b52dc9e44cc 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -1134,7 +1134,7 @@ func TestAPICPush(t *testing.T) { api.Shutdown() }() - err = api.Push() + err = api.Push(ctx) require.NoError(t, err) assert.Equal(t, tc.expectedCalls, httpmock.GetTotalCallCount()) }) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go 
index 8fe500c7f52..bdf2d4148cc 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -159,11 +159,9 @@ func newGinLogger(config *csconfig.LocalApiServerCfg) (*log.Logger, string, erro // NewServer creates a LAPI server. // It sets up a gin router, a database client, and a controller. -func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { +func NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APIServer, error) { var flushScheduler *gocron.Scheduler - ctx := context.TODO() - dbClient, err := database.NewClient(ctx, config.DbConfig) if err != nil { return nil, fmt.Errorf("unable to init database client: %w", err) @@ -300,8 +298,8 @@ func (s *APIServer) Router() (*gin.Engine, error) { return s.router, nil } -func (s *APIServer) apicPush() error { - if err := s.apic.Push(); err != nil { +func (s *APIServer) apicPush(ctx context.Context) error { + if err := s.apic.Push(ctx); err != nil { log.Errorf("capi push: %s", err) return err } @@ -337,7 +335,7 @@ func (s *APIServer) papiSync() error { } func (s *APIServer) initAPIC(ctx context.Context) { - s.apic.pushTomb.Go(s.apicPush) + s.apic.pushTomb.Go(func() error { return s.apicPush(ctx) }) s.apic.pullTomb.Go(func() error { return s.apicPull(ctx) }) // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index c3f69c5c365..b04ad687e4e 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -3,7 +3,6 @@ package apiserver import ( "context" "encoding/json" - "fmt" "net/http" "net/http/httptest" "os" @@ -41,7 +40,7 @@ var ( MachineID: &testMachineID, Password: &testPassword, } - UserAgent = fmt.Sprintf("crowdsec-test/%s", version.Version) + UserAgent = "crowdsec-test/" + version.Version emptyBody = strings.NewReader("") ) @@ -135,12 +134,12 @@ func LoadTestConfigForwardedFor(t *testing.T) csconfig.Config { return config } -func NewAPIServer(t *testing.T) 
(*APIServer, csconfig.Config) { +func NewAPIServer(t *testing.T, ctx context.Context) (*APIServer, csconfig.Config) { config := LoadTestConfig(t) os.Remove("./ent") - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) require.NoError(t, err) log.Printf("Creating new API server") @@ -149,8 +148,8 @@ func NewAPIServer(t *testing.T) (*APIServer, csconfig.Config) { return apiServer, config } -func NewAPITest(t *testing.T) (*gin.Engine, csconfig.Config) { - apiServer, config := NewAPIServer(t) +func NewAPITest(t *testing.T, ctx context.Context) (*gin.Engine, csconfig.Config) { + apiServer, config := NewAPIServer(t, ctx) err := apiServer.InitController() require.NoError(t, err) @@ -161,12 +160,12 @@ func NewAPITest(t *testing.T) (*gin.Engine, csconfig.Config) { return router, config } -func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { +func NewAPITestForwardedFor(t *testing.T, ctx context.Context) (*gin.Engine, csconfig.Config) { config := LoadTestConfigForwardedFor(t) os.Remove("./ent") - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) require.NoError(t, err) err = apiServer.InitController() @@ -302,28 +301,29 @@ func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.Datab } func TestWithWrongDBConfig(t *testing.T) { + ctx := context.Background() config := LoadTestConfig(t) config.API.Server.DbConfig.Type = "test" - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) cstest.RequireErrorContains(t, err, "unable to init database client: unknown database type 'test'") assert.Nil(t, apiServer) } func TestWithWrongFlushConfig(t *testing.T) { + ctx := context.Background() config := LoadTestConfig(t) maxItems := -1 config.API.Server.DbConfig.Flush.MaxItems = &maxItems - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) 
cstest.RequireErrorContains(t, err, "max_items can't be zero or negative") assert.Nil(t, apiServer) } func TestUnknownPath(t *testing.T) { - router, _ := NewAPITest(t) - ctx := context.Background() + router, _ := NewAPITest(t, ctx) w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test", nil) @@ -349,6 +349,8 @@ ListenURI string `yaml:"listen_uri,omitempty"` //127.0 */ func TestLoggingDebugToFileConfig(t *testing.T) { + ctx := context.Background() + /*declare settings*/ maxAge := "1h" flushConfig := csconfig.FlushDBCfg{ @@ -370,7 +372,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { LogDir: tempDir, DbConfig: &dbconfig, } - expectedFile := fmt.Sprintf("%s/crowdsec_api.log", tempDir) + expectedFile := filepath.Join(tempDir, "crowdsec_api.log") expectedLines := []string{"/test42"} cfg.LogLevel = ptr.Of(log.DebugLevel) @@ -378,12 +380,10 @@ func TestLoggingDebugToFileConfig(t *testing.T) { err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) require.NoError(t, err) - api, err := NewServer(&cfg) + api, err := NewServer(ctx, &cfg) require.NoError(t, err) require.NotNil(t, api) - ctx := context.Background() - w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) @@ -402,6 +402,8 @@ func TestLoggingDebugToFileConfig(t *testing.T) { } func TestLoggingErrorToFileConfig(t *testing.T) { + ctx := context.Background() + /*declare settings*/ maxAge := "1h" flushConfig := csconfig.FlushDBCfg{ @@ -423,19 +425,17 @@ func TestLoggingErrorToFileConfig(t *testing.T) { LogDir: tempDir, DbConfig: &dbconfig, } - expectedFile := fmt.Sprintf("%s/crowdsec_api.log", tempDir) + expectedFile := filepath.Join(tempDir, "crowdsec_api.log") cfg.LogLevel = ptr.Of(log.ErrorLevel) // Configure logging err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, 
*cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) require.NoError(t, err) - api, err := NewServer(&cfg) + api, err := NewServer(ctx, &cfg) require.NoError(t, err) require.NotNil(t, api) - ctx := context.Background() - w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index 293cc38bd2c..710cf82ad00 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -11,9 +11,8 @@ import ( ) func TestLogin(t *testing.T) { - router, config := NewAPITest(t) - ctx := context.Background() + router, config := NewAPITest(t, ctx) body := CreateTestMachine(t, router, "") diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 44c370732c7..e60cec30e54 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -15,9 +15,8 @@ import ( ) func TestCreateMachine(t *testing.T) { - router, _ := NewAPITest(t) - ctx := context.Background() + router, _ := NewAPITest(t, ctx) // Create machine with invalid format w := httptest.NewRecorder() @@ -53,10 +52,9 @@ func TestCreateMachine(t *testing.T) { } func TestCreateMachineWithForwardedFor(t *testing.T) { - router, config := NewAPITestForwardedFor(t) - router.TrustedPlatform = "X-Real-IP" - ctx := context.Background() + router, config := NewAPITestForwardedFor(t, ctx) + router.TrustedPlatform = "X-Real-IP" // Create machine b, err := json.Marshal(MachineTest) @@ -79,9 +77,8 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { } func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { - router, config := NewAPITest(t) - ctx := context.Background() + router, config := NewAPITest(t, ctx) // Create machine b, err := json.Marshal(MachineTest) @@ -106,9 +103,8 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { } func TestCreateMachineWithoutForwardedFor(t *testing.T) { 
- router, config := NewAPITestForwardedFor(t) - ctx := context.Background() + router, config := NewAPITestForwardedFor(t, ctx) // Create machine b, err := json.Marshal(MachineTest) @@ -132,9 +128,8 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { } func TestCreateMachineAlreadyExist(t *testing.T) { - router, _ := NewAPITest(t) - ctx := context.Background() + router, _ := NewAPITest(t, ctx) body := CreateTestMachine(t, router, "") @@ -153,9 +148,8 @@ func TestCreateMachineAlreadyExist(t *testing.T) { } func TestAutoRegistration(t *testing.T) { - router, _ := NewAPITest(t) - ctx := context.Background() + router, _ := NewAPITest(t, ctx) // Invalid registration token / valid source IP regReq := MachineTest From 4ea0537d0bda7b56de2b0d7712cf1611965ad213 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:09:26 +0200 Subject: [PATCH 321/581] CI: generate codecov.yml before tests (#3280) --- ...odecov-ignore-generated.sh => generate-codecov-yml.sh} | 2 +- .github/workflows/bats-sqlite-coverage.yml | 8 ++++---- .github/workflows/go-tests-windows.yml | 8 ++++---- .github/workflows/go-tests.yml | 8 ++++++-- 4 files changed, 15 insertions(+), 11 deletions(-) rename .github/{codecov-ignore-generated.sh => generate-codecov-yml.sh} (89%) diff --git a/.github/codecov-ignore-generated.sh b/.github/generate-codecov-yml.sh similarity index 89% rename from .github/codecov-ignore-generated.sh rename to .github/generate-codecov-yml.sh index 0f59b1de4a0..cc2d652e339 100755 --- a/.github/codecov-ignore-generated.sh +++ b/.github/generate-codecov-yml.sh @@ -2,7 +2,7 @@ # Run this from the repository root: # -# .github/codecov-ignore-generated.sh >> .github/codecov.yml +# .github/generate-codecov-yml.sh >> .github/codecov.yml cat <> .github/codecov.yml + - name: "Run tests" run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter @@ -79,10 +83,6 @@ jobs: run: for file in $(find ./test/local/var/log -type 
f); do echo ">>>>> $file"; cat $file; echo; done if: ${{ always() }} - - name: Ignore-list of generated files for codecov - run: | - .github/codecov-ignore-generated.sh >> .github/codecov.yml - - name: Upload bats coverage to codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 488756d0059..ba283f3890a 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -40,6 +40,10 @@ jobs: run: | make build BUILD_RE2_WASM=1 + - name: Generate codecov configuration + run: | + .github/generate-codecov-yml.sh >> .github/codecov.yml + - name: Run tests run: | go install github.com/kyoh86/richgo@v0.3.10 @@ -47,10 +51,6 @@ jobs: if(!$?) { cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter; Exit 1 } cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - - name: Ignore-list of generated files for codecov - run: | - .github/codecov-ignore-generated.sh >> .github/codecov.yml - - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 52de434ad9b..225c1b57628 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -145,6 +145,10 @@ jobs: aws --endpoint-url=http://127.0.0.1:4566 --region us-east-1 kinesis create-stream --stream-name stream-1-shard --shard-count 1 aws --endpoint-url=http://127.0.0.1:4566 --region us-east-1 kinesis create-stream --stream-name stream-2-shards --shard-count 2 + - name: Generate codecov configuration + run: | + .github/generate-codecov-yml.sh >> .github/codecov.yml + - name: Build and run tests, static run: | sudo apt -qq -y -o=Dpkg::Use-Pty=0 install build-essential libre2-dev @@ -165,9 +169,9 @@ jobs: set -o pipefail make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - - name: Ignore-list of generated files for codecov + - 
name: Generate codecov configuration run: | - .github/codecov-ignore-generated.sh >> .github/codecov.yml + .github/generate-codecov-yml.sh >> .github/codecov.yml - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 From 50d115b9146beeaae9788d81f5e2c5fae0520de3 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 10 Oct 2024 15:54:25 +0200 Subject: [PATCH 322/581] Update protobufs (#3276) * deps: update protobufs; add pkg/protobufs/generate.go * generate protobuf in CI * make: remove generate target * pin protoc --- .github/workflows/go-tests.yml | 15 ++- Makefile | 6 -- cmd/notification-dummy/main.go | 4 +- cmd/notification-email/main.go | 4 +- cmd/notification-file/main.go | 4 +- cmd/notification-http/main.go | 4 +- cmd/notification-sentinel/main.go | 4 +- cmd/notification-slack/main.go | 4 +- cmd/notification-splunk/main.go | 4 +- go.mod | 24 ++--- go.sum | 48 ++++----- pkg/csplugin/broker.go | 8 +- pkg/csplugin/notifier.go | 14 ++- pkg/protobufs/generate.go | 14 +++ pkg/protobufs/notifier.pb.go | 132 ++----------------------- pkg/protobufs/notifier_grpc.pb.go | 159 ++++++++++++++++++++++++++++++ pkg/protobufs/plugin_interface.go | 47 --------- 17 files changed, 259 insertions(+), 236 deletions(-) create mode 100644 pkg/protobufs/generate.go create mode 100644 pkg/protobufs/notifier_grpc.pb.go delete mode 100644 pkg/protobufs/plugin_interface.go diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 225c1b57628..bc718414677 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -128,10 +128,21 @@ jobs: with: go-version: "1.22" - - name: Run "make generate" and check for changes + - name: Run "go generate" and check for changes run: | set -e - make generate 2>/dev/null + # ensure the version of 'protoc' matches the one that generated the files + PROTOBUF_VERSION="21.12" + # don't pollute the repo + pushd $HOME + curl -OL 
https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d $HOME/.protoc + popd + export PATH="$HOME/.protoc/bin:$PATH" + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 + go generate ./... + protoc --version if [[ $(git status --porcelain) ]]; then echo "Error: Uncommitted changes found after running 'make generate'. Please commit all generated code." git diff diff --git a/Makefile b/Makefile index cb0d5ebaf60..bbfa4bbee94 100644 --- a/Makefile +++ b/Makefile @@ -263,12 +263,6 @@ cscli: ## Build cscli crowdsec: ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) -.PHONY: generate -generate: ## Generate code for the database and APIs - $(GO) generate ./pkg/database/ent - $(GO) generate ./pkg/models - $(GO) generate ./pkg/modelscapi - .PHONY: testclean testclean: bats-clean ## Remove test artifacts @$(RM) pkg/apiserver/ent $(WIN_IGNORE_ERR) diff --git a/cmd/notification-dummy/main.go b/cmd/notification-dummy/main.go index 024a1eb81ba..7fbb10d4fca 100644 --- a/cmd/notification-dummy/main.go +++ b/cmd/notification-dummy/main.go @@ -9,6 +9,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -19,6 +20,7 @@ type PluginConfig struct { } type DummyPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -84,7 +86,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "dummy": &protobufs.NotifierPlugin{ + "dummy": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 2707b7fe1af..5fc02cdd1d7 100644 --- a/cmd/notification-email/main.go +++ 
b/cmd/notification-email/main.go @@ -12,6 +12,7 @@ import ( mail "github.com/xhit/go-simple-mail/v2" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -55,6 +56,7 @@ type PluginConfig struct { } type EmailPlugin struct { + protobufs.UnimplementedNotifierServer ConfigByName map[string]PluginConfig } @@ -170,7 +172,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "email": &protobufs.NotifierPlugin{ + "email": &csplugin.NotifierPlugin{ Impl: &EmailPlugin{ConfigByName: make(map[string]PluginConfig)}, }, }, diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go index f6649b1f395..a4dbb8ee5db 100644 --- a/cmd/notification-file/main.go +++ b/cmd/notification-file/main.go @@ -15,6 +15,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -52,6 +53,7 @@ type LogRotate struct { } type FilePlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -241,7 +243,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "file": &protobufs.NotifierPlugin{ + "file": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-http/main.go b/cmd/notification-http/main.go index 6b11a78ef86..3f84984315b 100644 --- a/cmd/notification-http/main.go +++ b/cmd/notification-http/main.go @@ -16,6 +16,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -34,6 +35,7 @@ type PluginConfig struct { } type HTTPPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -190,7 +192,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ 
HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "http": &protobufs.NotifierPlugin{ + "http": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-sentinel/main.go b/cmd/notification-sentinel/main.go index a29e941f80c..0293d45b0a4 100644 --- a/cmd/notification-sentinel/main.go +++ b/cmd/notification-sentinel/main.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -27,6 +28,7 @@ type PluginConfig struct { } type SentinelPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -122,7 +124,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "sentinel": &protobufs.NotifierPlugin{ + "sentinel": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-slack/main.go b/cmd/notification-slack/main.go index fba1b33e334..34c7c0df361 100644 --- a/cmd/notification-slack/main.go +++ b/cmd/notification-slack/main.go @@ -10,6 +10,7 @@ import ( "github.com/slack-go/slack" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -23,6 +24,7 @@ type PluginConfig struct { LogLevel *string `yaml:"log_level"` } type Notify struct { + protobufs.UnimplementedNotifierServer ConfigByName map[string]PluginConfig } @@ -84,7 +86,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "slack": &protobufs.NotifierPlugin{ + "slack": &csplugin.NotifierPlugin{ Impl: &Notify{ConfigByName: make(map[string]PluginConfig)}, }, }, diff --git a/cmd/notification-splunk/main.go b/cmd/notification-splunk/main.go index 26190c58a89..e18f416c14a 100644 --- a/cmd/notification-splunk/main.go +++ b/cmd/notification-splunk/main.go @@ -14,6 +14,7 @@ import ( plugin 
"github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -32,6 +33,7 @@ type PluginConfig struct { } type Splunk struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig Client http.Client } @@ -117,7 +119,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "splunk": &protobufs.NotifierPlugin{ + "splunk": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/go.mod b/go.mod index ec8566db84a..b02d3b76840 100644 --- a/go.mod +++ b/go.mod @@ -21,7 +21,7 @@ require ( github.com/bluele/gcache v0.0.2 github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.8 - github.com/cespare/xxhash/v2 v2.2.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 @@ -82,12 +82,12 @@ require ( github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.22.0 - golang.org/x/mod v0.15.0 + golang.org/x/crypto v0.26.0 + golang.org/x/mod v0.17.0 golang.org/x/sys v0.24.0 - golang.org/x/text v0.14.0 - google.golang.org/grpc v1.56.3 - google.golang.org/protobuf v1.33.0 + golang.org/x/text v0.17.0 + google.golang.org/grpc v1.67.1 + google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 @@ -128,7 +128,7 @@ require ( github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.6.0 // indirect 
github.com/google/gofuzz v1.2.0 // indirect @@ -201,14 +201,14 @@ require ( go.mongodb.org/mongo-driver v1.9.4 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.19.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/term v0.23.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.18.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gotest.tools/v3 v3.5.0 // indirect diff --git a/go.sum b/go.sum index ff73dc56332..7aaea1587b8 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZF github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x 
v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= @@ -294,8 +294,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -763,8 +763,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod 
v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -772,8 +772,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -797,8 +797,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -808,8 +808,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -854,8 +854,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -868,8 +868,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -893,8 +893,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -906,14 +906,14 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index f6629b2609e..31d7ac82fb2 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -45,7 +45,7 @@ type PluginBroker struct { pluginConfigByName map[string]PluginConfig pluginMap map[string]plugin.Plugin notificationConfigsByPluginType map[string][][]byte // "slack" -> []{config1, config2} - notificationPluginByName map[string]Notifier + notificationPluginByName map[string]protobufs.NotifierServer watcher PluginWatcher pluginKillMethods []func() pluginProcConfig *csconfig.PluginCfg @@ -75,7 +75,7 @@ type ProfileAlert struct { func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { pb.PluginChannel = make(chan ProfileAlert) pb.notificationConfigsByPluginType = make(map[string][][]byte) - pb.notificationPluginByName = make(map[string]Notifier) + pb.notificationPluginByName = make(map[string]protobufs.NotifierServer) pb.pluginMap = make(map[string]plugin.Plugin) pb.pluginConfigByName = make(map[string]PluginConfig) pb.alertsByPluginName = make(map[string][]*models.Alert) @@ -276,7 +276,7 @@ func (pb *PluginBroker) loadPlugins(path string) error { return pb.verifyPluginBinaryWithProfile() } -func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (Notifier, error) { +func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (protobufs.NotifierServer, error) { handshake, err := getHandshake() if err != nil { @@ -313,7 +313,7 @@ func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) ( return nil, err } pb.pluginKillMethods = append(pb.pluginKillMethods, c.Kill) - return 
raw.(Notifier), nil + return raw.(protobufs.NotifierServer), nil } func (pb *PluginBroker) pushNotificationsToPlugin(pluginName string, alerts []*models.Alert) error { diff --git a/pkg/csplugin/notifier.go b/pkg/csplugin/notifier.go index ed4a4cc4149..615322ac0c3 100644 --- a/pkg/csplugin/notifier.go +++ b/pkg/csplugin/notifier.go @@ -10,17 +10,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) -type Notifier interface { - Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) - Configure(ctx context.Context, cfg *protobufs.Config) (*protobufs.Empty, error) -} - type NotifierPlugin struct { plugin.Plugin - Impl Notifier + Impl protobufs.NotifierServer } -type GRPCClient struct{ client protobufs.NotifierClient } +type GRPCClient struct{ + protobufs.UnimplementedNotifierServer + client protobufs.NotifierClient +} func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { done := make(chan error) @@ -45,7 +43,7 @@ func (m *GRPCClient) Configure(ctx context.Context, config *protobufs.Config) (* } type GRPCServer struct { - Impl Notifier + Impl protobufs.NotifierServer } func (p *NotifierPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { diff --git a/pkg/protobufs/generate.go b/pkg/protobufs/generate.go new file mode 100644 index 00000000000..0e90d65b643 --- /dev/null +++ b/pkg/protobufs/generate.go @@ -0,0 +1,14 @@ +package protobufs + +// Dependencies: +// +// apt install protobuf-compiler +// +// keep this in sync with go.mod +// go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 +// +// Not the same versions as google.golang.org/grpc +// go list -m -versions google.golang.org/grpc/cmd/protoc-gen-go-grpc +// go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative notifier.proto diff --git a/pkg/protobufs/notifier.pb.go b/pkg/protobufs/notifier.pb.go index b5dc8113568..8c4754da773 100644 --- a/pkg/protobufs/notifier.pb.go +++ b/pkg/protobufs/notifier.pb.go @@ -1,16 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.12.4 +// protoc-gen-go v1.34.2 +// protoc v3.21.12 // source: notifier.proto package protobufs import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -198,7 +194,7 @@ func file_notifier_proto_rawDescGZIP() []byte { } var file_notifier_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_notifier_proto_goTypes = []interface{}{ +var file_notifier_proto_goTypes = []any{ (*Notification)(nil), // 0: proto.Notification (*Config)(nil), // 1: proto.Config (*Empty)(nil), // 2: proto.Empty @@ -221,7 +217,7 @@ func file_notifier_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_notifier_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_notifier_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Notification); i { case 0: return &v.state @@ -233,7 +229,7 @@ func file_notifier_proto_init() { return nil } } - file_notifier_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_notifier_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Config); i { case 0: return &v.state @@ -245,7 +241,7 @@ func file_notifier_proto_init() { return nil } } - file_notifier_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_notifier_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state @@ -277,119 +273,3 @@ func file_notifier_proto_init() { 
file_notifier_proto_goTypes = nil file_notifier_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// NotifierClient is the client API for Notifier service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type NotifierClient interface { - Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) - Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) -} - -type notifierClient struct { - cc grpc.ClientConnInterface -} - -func NewNotifierClient(cc grpc.ClientConnInterface) NotifierClient { - return ¬ifierClient{cc} -} - -func (c *notifierClient) Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/proto.Notifier/Notify", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notifierClient) Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/proto.Notifier/Configure", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// NotifierServer is the server API for Notifier service. -type NotifierServer interface { - Notify(context.Context, *Notification) (*Empty, error) - Configure(context.Context, *Config) (*Empty, error) -} - -// UnimplementedNotifierServer can be embedded to have forward compatible implementations. 
-type UnimplementedNotifierServer struct { -} - -func (*UnimplementedNotifierServer) Notify(context.Context, *Notification) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Notify not implemented") -} -func (*UnimplementedNotifierServer) Configure(context.Context, *Config) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") -} - -func RegisterNotifierServer(s *grpc.Server, srv NotifierServer) { - s.RegisterService(&_Notifier_serviceDesc, srv) -} - -func _Notifier_Notify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Notification) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotifierServer).Notify(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Notifier/Notify", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotifierServer).Notify(ctx, req.(*Notification)) - } - return interceptor(ctx, in, info, handler) -} - -func _Notifier_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Config) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotifierServer).Configure(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Notifier/Configure", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotifierServer).Configure(ctx, req.(*Config)) - } - return interceptor(ctx, in, info, handler) -} - -var _Notifier_serviceDesc = grpc.ServiceDesc{ - ServiceName: "proto.Notifier", - HandlerType: (*NotifierServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Notify", - Handler: _Notifier_Notify_Handler, - }, - { - MethodName: "Configure", - 
Handler: _Notifier_Configure_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "notifier.proto", -} diff --git a/pkg/protobufs/notifier_grpc.pb.go b/pkg/protobufs/notifier_grpc.pb.go new file mode 100644 index 00000000000..5141e83f98b --- /dev/null +++ b/pkg/protobufs/notifier_grpc.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.21.12 +// source: notifier.proto + +package protobufs + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Notifier_Notify_FullMethodName = "/proto.Notifier/Notify" + Notifier_Configure_FullMethodName = "/proto.Notifier/Configure" +) + +// NotifierClient is the client API for Notifier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type NotifierClient interface { + Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) + Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) +} + +type notifierClient struct { + cc grpc.ClientConnInterface +} + +func NewNotifierClient(cc grpc.ClientConnInterface) NotifierClient { + return ¬ifierClient{cc} +} + +func (c *notifierClient) Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Empty) + err := c.cc.Invoke(ctx, Notifier_Notify_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notifierClient) Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Empty) + err := c.cc.Invoke(ctx, Notifier_Configure_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotifierServer is the server API for Notifier service. +// All implementations must embed UnimplementedNotifierServer +// for forward compatibility. +type NotifierServer interface { + Notify(context.Context, *Notification) (*Empty, error) + Configure(context.Context, *Config) (*Empty, error) + mustEmbedUnimplementedNotifierServer() +} + +// UnimplementedNotifierServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedNotifierServer struct{} + +func (UnimplementedNotifierServer) Notify(context.Context, *Notification) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Notify not implemented") +} +func (UnimplementedNotifierServer) Configure(context.Context, *Config) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} +func (UnimplementedNotifierServer) mustEmbedUnimplementedNotifierServer() {} +func (UnimplementedNotifierServer) testEmbeddedByValue() {} + +// UnsafeNotifierServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to NotifierServer will +// result in compilation errors. 
+type UnsafeNotifierServer interface { + mustEmbedUnimplementedNotifierServer() +} + +func RegisterNotifierServer(s grpc.ServiceRegistrar, srv NotifierServer) { + // If the following call pancis, it indicates UnimplementedNotifierServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Notifier_ServiceDesc, srv) +} + +func _Notifier_Notify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Notification) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotifierServer).Notify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Notifier_Notify_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotifierServer).Notify(ctx, req.(*Notification)) + } + return interceptor(ctx, in, info, handler) +} + +func _Notifier_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Config) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotifierServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Notifier_Configure_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotifierServer).Configure(ctx, req.(*Config)) + } + return interceptor(ctx, in, info, handler) +} + +// Notifier_ServiceDesc is the grpc.ServiceDesc for Notifier service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Notifier_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Notifier", + HandlerType: (*NotifierServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Notify", + Handler: _Notifier_Notify_Handler, + }, + { + MethodName: "Configure", + Handler: _Notifier_Configure_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "notifier.proto", +} diff --git a/pkg/protobufs/plugin_interface.go b/pkg/protobufs/plugin_interface.go deleted file mode 100644 index baa76c8941c..00000000000 --- a/pkg/protobufs/plugin_interface.go +++ /dev/null @@ -1,47 +0,0 @@ -package protobufs - -import ( - "context" - - plugin "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" -) - -type Notifier interface { - Notify(ctx context.Context, notification *Notification) (*Empty, error) - Configure(ctx context.Context, config *Config) (*Empty, error) -} - -// This is the implementation of plugin.NotifierPlugin so we can serve/consume this. -type NotifierPlugin struct { - // GRPCPlugin must still implement the Plugin interface - plugin.Plugin - // Concrete implementation, written in Go. This is only used for plugins - // that are written in Go. 
- Impl Notifier -} - -type GRPCClient struct{ client NotifierClient } - -func (m *GRPCClient) Notify(ctx context.Context, notification *Notification) (*Empty, error) { - _, err := m.client.Notify(ctx, notification) - return &Empty{}, err -} - -func (m *GRPCClient) Configure(ctx context.Context, config *Config) (*Empty, error) { - _, err := m.client.Configure(ctx, config) - return &Empty{}, err -} - -type GRPCServer struct { - Impl Notifier -} - -func (p *NotifierPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - RegisterNotifierServer(s, p.Impl) - return nil -} - -func (p *NotifierPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCClient{client: NewNotifierClient(c)}, nil -} From 8ff58ee74eb9a471c9d1eb4d5de5f20ca68cbbf0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:18:59 +0200 Subject: [PATCH 323/581] context propagation: pkg/csplugin (#3273) --- .../clinotifications/notifications.go | 10 ++++---- cmd/crowdsec/api.go | 2 +- pkg/apiserver/alerts_test.go | 4 ++-- pkg/apiserver/apiserver_test.go | 8 ++----- pkg/apiserver/jwt_test.go | 4 ++-- pkg/apiserver/machines_test.go | 2 +- pkg/csplugin/broker.go | 8 +++---- pkg/csplugin/broker_suite_test.go | 12 ++++++++-- pkg/csplugin/broker_test.go | 24 +++++++++++++------ pkg/csplugin/broker_win_test.go | 7 ++++-- pkg/csplugin/watcher_test.go | 18 ++++++++++++-- 11 files changed, 66 insertions(+), 33 deletions(-) diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 0641dd1a7d4..5489faa37c8 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -275,7 +275,8 @@ func (cli cliNotifications) newTestCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, ValidArgsFunction: cli.notificationConfigFilter, - PreRunE: func(_ 
*cobra.Command, args []string) error { + PreRunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() cfg := cli.cfg() pconfigs, err := cli.getPluginConfigs() if err != nil { @@ -286,7 +287,7 @@ func (cli cliNotifications) newTestCmd() *cobra.Command { return fmt.Errorf("plugin name: '%s' does not exist", args[0]) } // Create a single profile with plugin name as notification name - return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{ + return pluginBroker.Init(ctx, cfg.PluginConfig, []*csconfig.ProfileCfg{ { Notifications: []string{ pcfg.Name, @@ -377,12 +378,13 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return nil }, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb ) + ctx := cmd.Context() cfg := cli.cfg() if alertOverride != "" { @@ -391,7 +393,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not } } - err := pluginBroker.Init(cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) + err := pluginBroker.Init(ctx, cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) if err != nil { return fmt.Errorf("can't initialize plugins: %w", err) } diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index 6ab41def16f..ccb0acf0209 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -40,7 +40,7 @@ func initAPIServer(ctx context.Context, cConfig *csconfig.Config) (*apiserver.AP return nil, errors.New("plugins are enabled, but config_paths.plugin_dir is not defined") } - err = pluginBroker.Init(cConfig.PluginConfig, cConfig.API.Server.Profiles, cConfig.ConfigPaths) + err = pluginBroker.Init(ctx, cConfig.PluginConfig, cConfig.API.Server.Profiles, cConfig.ConfigPaths) if err != nil { return nil, fmt.Errorf("unable to run plugin broker: %w", err) } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index cd981f76542..4cc215c344f 100644 
--- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -72,8 +72,8 @@ func InitMachineTest(t *testing.T, ctx context.Context) (*gin.Engine, models.Wat } func LoginToTestAPI(t *testing.T, ctx context.Context, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { - body := CreateTestMachine(t, router, "") - ValidateMachine(t, "test", config.API.Server.DbConfig) + body := CreateTestMachine(t, ctx, router, "") + ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index b04ad687e4e..cdf99462c35 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -180,9 +180,7 @@ func NewAPITestForwardedFor(t *testing.T, ctx context.Context) (*gin.Engine, csc return router, config } -func ValidateMachine(t *testing.T, machineID string, config *csconfig.DatabaseCfg) { - ctx := context.TODO() - +func ValidateMachine(t *testing.T, ctx context.Context, machineID string, config *csconfig.DatabaseCfg) { dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) @@ -269,7 +267,7 @@ func readDecisionsStreamResp(t *testing.T, resp *httptest.ResponseRecorder) (map return response, resp.Code } -func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { +func CreateTestMachine(t *testing.T, ctx context.Context, router *gin.Engine, token string) string { regReq := MachineTest regReq.RegistrationToken = token b, err := json.Marshal(regReq) @@ -277,8 +275,6 @@ func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { body := string(b) - ctx := context.Background() - w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Set("User-Agent", UserAgent) diff --git 
a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index 710cf82ad00..f6f51763975 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -14,7 +14,7 @@ func TestLogin(t *testing.T) { ctx := context.Background() router, config := NewAPITest(t, ctx) - body := CreateTestMachine(t, router, "") + body := CreateTestMachine(t, ctx, router, "") // Login with machine not validated yet w := httptest.NewRecorder() @@ -53,7 +53,7 @@ func TestLogin(t *testing.T) { assert.Equal(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) // Validate machine - ValidateMachine(t, "test", config.API.Server.DbConfig) + ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) // Login with invalid password w = httptest.NewRecorder() diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index e60cec30e54..969f75707d6 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -131,7 +131,7 @@ func TestCreateMachineAlreadyExist(t *testing.T) { ctx := context.Background() router, _ := NewAPITest(t, ctx) - body := CreateTestMachine(t, router, "") + body := CreateTestMachine(t, ctx, router, "") w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index 31d7ac82fb2..e996fa9b68c 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -72,7 +72,7 @@ type ProfileAlert struct { Alert *models.Alert } -func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { +func (pb *PluginBroker) Init(ctx context.Context, pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { pb.PluginChannel = make(chan ProfileAlert) pb.notificationConfigsByPluginType = make(map[string][][]byte) 
pb.notificationPluginByName = make(map[string]protobufs.NotifierServer) @@ -85,7 +85,7 @@ func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*cs if err := pb.loadConfig(configPaths.NotificationDir); err != nil { return fmt.Errorf("while loading plugin config: %w", err) } - if err := pb.loadPlugins(configPaths.PluginDir); err != nil { + if err := pb.loadPlugins(ctx, configPaths.PluginDir); err != nil { return fmt.Errorf("while loading plugin: %w", err) } pb.watcher = PluginWatcher{} @@ -230,7 +230,7 @@ func (pb *PluginBroker) verifyPluginBinaryWithProfile() error { return nil } -func (pb *PluginBroker) loadPlugins(path string) error { +func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { binaryPaths, err := listFilesAtPath(path) if err != nil { return err @@ -265,7 +265,7 @@ func (pb *PluginBroker) loadPlugins(path string) error { return err } data = []byte(csstring.StrictExpand(string(data), os.LookupEnv)) - _, err = pluginClient.Configure(context.Background(), &protobufs.Config{Config: data}) + _, err = pluginClient.Configure(ctx, &protobufs.Config{Config: data}) if err != nil { return fmt.Errorf("while configuring %s: %w", pc.Name, err) } diff --git a/pkg/csplugin/broker_suite_test.go b/pkg/csplugin/broker_suite_test.go index 778bb2dfe2e..1210c67058a 100644 --- a/pkg/csplugin/broker_suite_test.go +++ b/pkg/csplugin/broker_suite_test.go @@ -1,6 +1,7 @@ package csplugin import ( + "context" "io" "os" "os/exec" @@ -96,6 +97,7 @@ func (s *PluginSuite) TearDownTest() { func (s *PluginSuite) SetupSubTest() { var err error + t := s.T() s.runDir, err = os.MkdirTemp("", "cs_plugin_test") @@ -127,6 +129,7 @@ func (s *PluginSuite) SetupSubTest() { func (s *PluginSuite) TearDownSubTest() { t := s.T() + if s.pluginBroker != nil { s.pluginBroker.Kill() s.pluginBroker = nil @@ -140,19 +143,24 @@ func (s *PluginSuite) TearDownSubTest() { os.Remove("./out") } -func (s *PluginSuite) InitBroker(procCfg *csconfig.PluginCfg) 
(*PluginBroker, error) { +func (s *PluginSuite) InitBroker(ctx context.Context, procCfg *csconfig.PluginCfg) (*PluginBroker, error) { pb := PluginBroker{} + if procCfg == nil { procCfg = &csconfig.PluginCfg{} } + profiles := csconfig.NewDefaultConfig().API.Server.Profiles profiles = append(profiles, &csconfig.ProfileCfg{ Notifications: []string{"dummy_default"}, }) - err := pb.Init(procCfg, profiles, &csconfig.ConfigurationPaths{ + + err := pb.Init(ctx, procCfg, profiles, &csconfig.ConfigurationPaths{ PluginDir: s.pluginDir, NotificationDir: s.notifDir, }) + s.pluginBroker = &pb + return s.pluginBroker, err } diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index 48f5a71f773..ae5a615b489 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -4,6 +4,7 @@ package csplugin import ( "bytes" + "context" "encoding/json" "io" "os" @@ -53,6 +54,7 @@ func (s *PluginSuite) writeconfig(config PluginConfig) { } func (s *PluginSuite) TestBrokerInit() { + ctx := context.Background() tests := []struct { name string action func(*testing.T) @@ -135,20 +137,22 @@ func (s *PluginSuite) TestBrokerInit() { tc.action(t) } - _, err := s.InitBroker(&tc.procCfg) + _, err := s.InitBroker(ctx, &tc.procCfg) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } } func (s *PluginSuite) TestBrokerNoThreshold() { + ctx := context.Background() + var alerts []models.Alert DefaultEmptyTicker = 50 * time.Millisecond t := s.T() - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -187,6 +191,8 @@ func (s *PluginSuite) TestBrokerNoThreshold() { } func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { + ctx := context.Background() + // test grouping by "time" DefaultEmptyTicker = 50 * time.Millisecond @@ -198,7 +204,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { cfg.GroupWait = 1 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, 
err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -224,6 +230,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { } func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() @@ -234,7 +241,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { cfg.GroupWait = 4 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -264,6 +271,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { } func (s *PluginSuite) TestBrokerRunGroupThreshold() { + ctx := context.Background() // test grouping by "size" DefaultEmptyTicker = 50 * time.Millisecond @@ -274,7 +282,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { cfg.GroupThreshold = 4 s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -318,6 +326,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { } func (s *PluginSuite) TestBrokerRunTimeThreshold() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() @@ -327,7 +336,7 @@ func (s *PluginSuite) TestBrokerRunTimeThreshold() { cfg.GroupWait = 1 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -353,11 +362,12 @@ func (s *PluginSuite) TestBrokerRunTimeThreshold() { } func (s *PluginSuite) TestBrokerRunSimple() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} diff --git a/pkg/csplugin/broker_win_test.go b/pkg/csplugin/broker_win_test.go index b7956bdcc0a..570f23e5015 100644 --- a/pkg/csplugin/broker_win_test.go +++ 
b/pkg/csplugin/broker_win_test.go @@ -4,6 +4,7 @@ package csplugin import ( "bytes" + "context" "encoding/json" "io" "os" @@ -26,6 +27,7 @@ not if it will actually reject plugins with invalid permissions */ func (s *PluginSuite) TestBrokerInit() { + ctx := context.Background() tests := []struct { name string action func(*testing.T) @@ -59,16 +61,17 @@ func (s *PluginSuite) TestBrokerInit() { if tc.action != nil { tc.action(t) } - _, err := s.InitBroker(&tc.procCfg) + _, err := s.InitBroker(ctx, &tc.procCfg) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } } func (s *PluginSuite) TestBrokerRun() { + ctx := context.Background() t := s.T() - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index b76c3c4eadd..84e63ec6493 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -15,11 +15,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -var ctx = context.Background() - func resetTestTomb(testTomb *tomb.Tomb, pw *PluginWatcher) { testTomb.Kill(nil) <-pw.PluginEvents + if err := testTomb.Wait(); err != nil { log.Fatal(err) } @@ -46,13 +45,17 @@ func listenChannelWithTimeout(ctx context.Context, channel chan string) error { case <-ctx.Done(): return ctx.Err() } + return nil } func TestPluginWatcherInterval(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows because timing is not reliable") } + pw := PluginWatcher{} alertsByPluginName := make(map[string][]*models.Alert) testTomb := tomb.Tomb{} @@ -66,6 +69,7 @@ func TestPluginWatcherInterval(t *testing.T) { ct, cancel := context.WithTimeout(ctx, time.Microsecond) defer cancel() + err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") resetTestTomb(&testTomb, &pw) @@ -74,6 +78,7 @@ func TestPluginWatcherInterval(t *testing.T) { 
ct, cancel = context.WithTimeout(ctx, time.Millisecond*5) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) resetTestTomb(&testTomb, &pw) @@ -81,9 +86,12 @@ func TestPluginWatcherInterval(t *testing.T) { } func TestPluginAlertCountWatcher(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows because timing is not reliable") } + pw := PluginWatcher{} alertsByPluginName := make(map[string][]*models.Alert) configs := map[string]PluginConfig{ @@ -92,28 +100,34 @@ func TestPluginAlertCountWatcher(t *testing.T) { }, } testTomb := tomb.Tomb{} + pw.Init(configs, alertsByPluginName) pw.Start(&testTomb) // Channel won't contain any events since threshold is not crossed. ct, cancel := context.WithTimeout(ctx, time.Second) defer cancel() + err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") // Channel won't contain any events since threshold is not crossed. resetWatcherAlertCounter(&pw) insertNAlertsToPlugin(&pw, 4, "testPlugin") + ct, cancel = context.WithTimeout(ctx, time.Second) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") // Channel will contain an event since threshold is crossed. 
resetWatcherAlertCounter(&pw) insertNAlertsToPlugin(&pw, 5, "testPlugin") + ct, cancel = context.WithTimeout(ctx, time.Second) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) resetTestTomb(&testTomb, &pw) From 9976616773313bb56d052996be3f0d5fcee99d4a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 11 Oct 2024 15:59:10 +0200 Subject: [PATCH 324/581] context propagation: StreamingAcquisition() (#3274) * context propagation: StreamingAcquisition() * lint * ship with codecov.yml --- .github/generate-codecov-yml.sh | 3 + .github/workflows/go-tests.yml | 4 -- cmd/crowdsec/crowdsec.go | 2 +- pkg/acquisition/acquisition.go | 56 ++++++++++++++----- pkg/acquisition/acquisition_test.go | 52 +++++++++-------- pkg/acquisition/modules/appsec/appsec.go | 12 ++-- .../modules/cloudwatch/cloudwatch.go | 46 ++++++++++----- .../modules/cloudwatch/cloudwatch_test.go | 13 +++-- pkg/acquisition/modules/docker/docker.go | 8 +-- pkg/acquisition/modules/docker/docker_test.go | 5 +- pkg/acquisition/modules/file/file.go | 3 +- pkg/acquisition/modules/file/file_test.go | 4 +- .../modules/journalctl/journalctl.go | 8 +-- .../modules/journalctl/journalctl_test.go | 4 +- pkg/acquisition/modules/kafka/kafka.go | 6 +- pkg/acquisition/modules/kafka/kafka_test.go | 14 +++-- pkg/acquisition/modules/kinesis/kinesis.go | 34 +++++------ .../modules/kinesis/kinesis_test.go | 26 +++++---- .../modules/kubernetesaudit/k8s_audit.go | 3 +- .../modules/kubernetesaudit/k8s_audit_test.go | 9 ++- pkg/acquisition/modules/loki/loki.go | 6 +- pkg/acquisition/modules/loki/loki_test.go | 9 ++- pkg/acquisition/modules/s3/s3.go | 21 ++++--- pkg/acquisition/modules/s3/s3_test.go | 6 +- pkg/acquisition/modules/syslog/syslog.go | 9 +-- pkg/acquisition/modules/syslog/syslog_test.go | 16 ++++-- .../modules/wineventlog/wineventlog.go | 3 +- .../modules/wineventlog/wineventlog_test.go | 4 +- .../wineventlog/wineventlog_windows.go | 3 +- 29 
files changed, 235 insertions(+), 154 deletions(-) diff --git a/.github/generate-codecov-yml.sh b/.github/generate-codecov-yml.sh index cc2d652e339..ddb60d0ce80 100755 --- a/.github/generate-codecov-yml.sh +++ b/.github/generate-codecov-yml.sh @@ -7,6 +7,9 @@ cat <> .github/codecov.yml - - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 460e8ab4328..c44d71d2093 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -169,7 +169,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H log.Info("Starting processing data") - if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil { + if err := acquisition.StartAcquisition(context.TODO(), dataSources, inputLineChan, &acquisTomb); err != nil { return fmt.Errorf("starting acquisition error: %w", err) } diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 4a5226a2981..4519ea7392b 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -1,6 +1,7 @@ package acquisition import ( + "context" "errors" "fmt" "io" @@ -39,17 +40,17 @@ func (e *DataSourceUnavailableError) Unwrap() error { // The interface each datasource must implement type DataSource interface { - GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module - GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality) - UnmarshalConfig([]byte) error // Decode and pre-validate the YAML datasource - anything that can be checked before runtime - Configure([]byte, *log.Entry, int) error // Complete the YAML datasource configuration and perform runtime checks. 
- ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource - GetMode() string // Get the mode (TAIL, CAT or SERVER) - GetName() string // Get the name of the module - OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file) - StreamingAcquisition(chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file) - CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense) - GetUuid() string // Get the unique identifier of the datasource + GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module + GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality) + UnmarshalConfig([]byte) error // Decode and pre-validate the YAML datasource - anything that can be checked before runtime + Configure([]byte, *log.Entry, int) error // Complete the YAML datasource configuration and perform runtime checks. 
+ ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource + GetMode() string // Get the mode (TAIL, CAT or SERVER) + GetName() string // Get the name of the module + OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file) + StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file) + CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense) + GetUuid() string // Get the unique identifier of the datasource Dump() interface{} } @@ -242,8 +243,10 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig for { var sub configuration.DataSourceCommonCfg - err = dec.Decode(&sub) + idx += 1 + + err = dec.Decode(&sub) if err != nil { if !errors.Is(err, io.EOF) { return nil, fmt.Errorf("failed to yaml decode %s: %w", acquisFile, err) @@ -283,6 +286,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig uniqueId := uuid.NewString() sub.UniqueId = uniqueId + src, err := DataSourceConfigure(sub, metrics_level) if err != nil { var dserr *DataSourceUnavailableError @@ -290,29 +294,36 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig log.Error(err) continue } + return nil, fmt.Errorf("while configuring datasource of type %s from %s (position: %d): %w", sub.Source, acquisFile, idx, err) } + if sub.TransformExpr != "" { vm, err := expr.Compile(sub.TransformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position: %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) } + transformRuntimes[uniqueId] = vm } + sources = append(sources, *src) } } + return sources, nil } func GetMetrics(sources []DataSource, aggregated bool) error { var metrics []prometheus.Collector + for i := range sources { if aggregated { metrics = sources[i].GetMetrics() } else { metrics = sources[i].GetAggregMetrics() } + for _, metric := range metrics { if err := prometheus.Register(metric); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { @@ -322,12 +333,14 @@ func GetMetrics(sources []DataSource, aggregated bool) error { } } } + return nil } func transform(transformChan chan types.Event, output chan types.Event, AcquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { defer trace.CatchPanic("crowdsec/acquis") logger.Infof("transformer started") + for { select { case <-AcquisTomb.Dying(): @@ -335,15 +348,18 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo return case evt := <-transformChan: logger.Tracef("Received event %s", evt.Line.Raw) + out, err := expr.Run(transformRuntime, map[string]interface{}{"evt": &evt}) if err != nil { logger.Errorf("while running transform expression: %s, sending event as-is", err) output <- evt } + if out == nil { logger.Errorf("transform expression returned nil, sending event as-is") output <- evt } + switch v := out.(type) { case string: logger.Tracef("transform expression returned %s", v) @@ -351,18 +367,22 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo output <- evt case []interface{}: logger.Tracef("transform expression returned %v", v) //nolint:asasalint // We actually want to log the slice content + for _, line := range v { l, ok := line.(string) if !ok { logger.Errorf("transform expression returned []interface{}, but cannot assert an 
element to string") output <- evt + continue } + evt.Line.Raw = l output <- evt } case []string: logger.Tracef("transform expression returned %v", v) + for _, line := range v { evt.Line.Raw = line output <- evt @@ -375,7 +395,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo } } -func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { +func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { // Don't wait if we have no sources, as it will hang forever if len(sources) == 0 { return nil @@ -387,32 +407,40 @@ func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb AcquisTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis") + var err error outChan := output + log.Debugf("datasource %s UUID: %s", subsrc.GetName(), subsrc.GetUuid()) + if transformRuntime, ok := transformRuntimes[subsrc.GetUuid()]; ok { log.Infof("transform expression found for datasource %s", subsrc.GetName()) + transformChan := make(chan types.Event) outChan = transformChan transformLogger := log.WithFields(log.Fields{ "component": "transform", "datasource": subsrc.GetName(), }) + AcquisTomb.Go(func() error { transform(outChan, output, AcquisTomb, transformRuntime, transformLogger) return nil }) } + if subsrc.GetMode() == configuration.TAIL_MODE { - err = subsrc.StreamingAcquisition(outChan, AcquisTomb) + err = subsrc.StreamingAcquisition(ctx, outChan, AcquisTomb) } else { err = subsrc.OneShotAcquisition(outChan, AcquisTomb) } + if err != nil { // if one of the acqusition returns an error, we kill the others to properly shutdown AcquisTomb.Kill(err) } + return nil }) } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index e39199f9cdb..e82b3df54c2 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -1,6 +1,7 @@ package acquisition import ( + 
"context" "errors" "fmt" "strings" @@ -56,14 +57,16 @@ func (f *MockSource) Configure(cfg []byte, logger *log.Entry, metricsLevel int) return nil } -func (f *MockSource) GetMode() string { return f.Mode } -func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSource) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSource) CanRun() error { return nil } -func (f *MockSource) GetMetrics() []prometheus.Collector { return nil } -func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockSource) Dump() interface{} { return f } -func (f *MockSource) GetName() string { return "mock" } +func (f *MockSource) GetMode() string { return f.Mode } +func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } +func (f *MockSource) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} +func (f *MockSource) CanRun() error { return nil } +func (f *MockSource) GetMetrics() []prometheus.Collector { return nil } +func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockSource) Dump() interface{} { return f } +func (f *MockSource) GetName() string { return "mock" } func (f *MockSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { return errors.New("not supported") } @@ -327,7 +330,7 @@ func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) erro return nil } -func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { +func (f *MockCat) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { return errors.New("can't run in tail") } func (f *MockCat) CanRun() error { return nil } @@ -366,7 +369,7 @@ func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) err return errors.New("can't run in cat mode") } -func (f *MockTail) StreamingAcquisition(out chan types.Event, t 
*tomb.Tomb) error { +func (f *MockTail) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" @@ -389,6 +392,7 @@ func (f *MockTail) GetUuid() string { return "" } // func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { func TestStartAcquisitionCat(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockCat{}, } @@ -396,7 +400,7 @@ func TestStartAcquisitionCat(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil { + if err := StartAcquisition(ctx, sources, out, &acquisTomb); err != nil { t.Errorf("unexpected error") } }() @@ -416,6 +420,7 @@ READLOOP: } func TestStartAcquisitionTail(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockTail{}, } @@ -423,7 +428,7 @@ func TestStartAcquisitionTail(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil { + if err := StartAcquisition(ctx, sources, out, &acquisTomb); err != nil { t.Errorf("unexpected error") } }() @@ -450,7 +455,7 @@ type MockTailError struct { MockTail } -func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (f *MockTailError) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" @@ -463,6 +468,7 @@ func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } func TestStartAcquisitionTailError(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockTailError{}, } @@ -470,7 +476,7 @@ func TestStartAcquisitionTailError(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil && err.Error() != "got error (tomb)" { + if err := StartAcquisition(ctx, sources, out, 
&acquisTomb); err != nil && err.Error() != "got error (tomb)" { t.Errorf("expected error, got '%s'", err) } }() @@ -501,14 +507,16 @@ func (f *MockSourceByDSN) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { return nil } -func (f *MockSourceByDSN) GetMode() string { return f.Mode } -func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSourceByDSN) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSourceByDSN) CanRun() error { return nil } -func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil } -func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockSourceByDSN) Dump() interface{} { return f } -func (f *MockSourceByDSN) GetName() string { return "mockdsn" } +func (f *MockSourceByDSN) GetMode() string { return f.Mode } +func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } +func (f *MockSourceByDSN) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} +func (f *MockSourceByDSN) CanRun() error { return nil } +func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil } +func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockSourceByDSN) Dump() interface{} { return f } +func (f *MockSourceByDSN) GetName() string { return "mockdsn" } func (f *MockSourceByDSN) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { dsn = strings.TrimPrefix(dsn, "mockdsn://") if dsn != "test_expect" { diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 8a93326c7e3..5161b631c33 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -59,7 +59,7 @@ type AppsecSource struct { 
AppsecConfigs map[string]appsec.AppsecConfig lapiURL string AuthCache AuthCache - AppsecRunners []AppsecRunner //one for each go-routine + AppsecRunners []AppsecRunner // one for each go-routine } // Struct to handle cache of authentication @@ -172,7 +172,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.InChan = make(chan appsec.ParsedRequest) appsecCfg := appsec.AppsecConfig{Logger: w.logger.WithField("component", "appsec_config")} - //let's load the associated appsec_config: + // let's load the associated appsec_config: if w.config.AppsecConfigPath != "" { err := appsecCfg.LoadByPath(w.config.AppsecConfigPath) if err != nil { @@ -201,7 +201,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe for nbRoutine := range w.config.Routines { appsecRunnerUUID := uuid.New().String() - //we copy AppsecRutime for each runner + // we copy AppsecRutime for each runner wrt := *w.AppsecRuntime wrt.Logger = w.logger.Dup().WithField("runner_uuid", appsecRunnerUUID) runner := AppsecRunner{ @@ -220,7 +220,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.logger.Infof("Created %d appsec runners", len(w.AppsecRunners)) - //We don´t use the wrapper provided by coraza because we want to fully control what happens when a rule match to send the information in crowdsec + // We don´t use the wrapper provided by coraza because we want to fully control what happens when a rule match to send the information in crowdsec w.mux.HandleFunc(w.config.Path, w.appsecHandler) return nil } @@ -241,7 +241,7 @@ func (w *AppsecSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er return errors.New("AppSec datasource does not support command line acquisition") } -func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { w.outChan = out t.Go(func() 
error { defer trace.CatchPanic("crowdsec/acquis/appsec/live") @@ -292,7 +292,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) }) <-t.Dying() w.logger.Info("Shutting down Appsec server") - //xx let's clean up the appsec runners :) + // xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) w.server.Shutdown(context.TODO()) return nil diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index d6f33b68050..e4b6c95d77f 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -159,6 +159,7 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr if err != nil { return err } + cw.metricsLevel = MetricsLevel cw.logger = logger.WithField("group", cw.Config.GroupName) @@ -175,16 +176,18 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr if *cw.Config.MaxStreamAge > *cw.Config.StreamReadTimeout { cw.logger.Warningf("max_stream_age > stream_read_timeout, stream might keep being opened/closed") } + cw.logger.Tracef("aws_config_dir set to %s", *cw.Config.AwsConfigDir) if *cw.Config.AwsConfigDir != "" { _, err := os.Stat(*cw.Config.AwsConfigDir) if err != nil { cw.logger.Errorf("can't read aws_config_dir '%s' got err %s", *cw.Config.AwsConfigDir, err) - return fmt.Errorf("can't read aws_config_dir %s got err %s ", *cw.Config.AwsConfigDir, err) + return fmt.Errorf("can't read aws_config_dir %s got err %w ", *cw.Config.AwsConfigDir, err) } + os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - //as aws sdk relies on $HOME, let's allow the user to override it :) + // as aws sdk relies on $HOME, let's allow the user to override it :) os.Setenv("AWS_CONFIG_FILE", fmt.Sprintf("%s/config", *cw.Config.AwsConfigDir)) os.Setenv("AWS_SHARED_CREDENTIALS_FILE", fmt.Sprintf("%s/credentials", *cw.Config.AwsConfigDir)) } else { @@ -192,25 +195,30 
@@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr cw.logger.Errorf("aws_region is not specified, specify it or aws_config_dir") return errors.New("aws_region is not specified, specify it or aws_config_dir") } + os.Setenv("AWS_REGION", *cw.Config.AwsRegion) } if err := cw.newClient(); err != nil { return err } + cw.streamIndexes = make(map[string]string) targetStream := "*" + if cw.Config.StreamRegexp != nil { if _, err := regexp.Compile(*cw.Config.StreamRegexp); err != nil { return fmt.Errorf("while compiling regexp '%s': %w", *cw.Config.StreamRegexp, err) } + targetStream = *cw.Config.StreamRegexp } else if cw.Config.StreamName != nil { targetStream = *cw.Config.StreamName } cw.logger.Infof("Adding cloudwatch group '%s' (stream:%s) to datasources", cw.Config.GroupName, targetStream) + return nil } @@ -231,24 +239,29 @@ func (cw *CloudwatchSource) newClient() error { if sess == nil { return errors.New("failed to create aws session") } + if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { cw.logger.Debugf("[testing] overloading endpoint with %s", v) cw.cwClient = cloudwatchlogs.New(sess, aws.NewConfig().WithEndpoint(v)) } else { cw.cwClient = cloudwatchlogs.New(sess) } + if cw.cwClient == nil { return errors.New("failed to create cloudwatch client") } + return nil } -func (cw *CloudwatchSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (cw *CloudwatchSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { cw.t = t monitChan := make(chan LogStreamTailConfig) + t.Go(func() error { return cw.LogStreamManager(monitChan, out) }) + return cw.WatchLogGroupForStreams(monitChan) } @@ -279,6 +292,7 @@ func (cw *CloudwatchSource) Dump() interface{} { func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig) error { cw.logger.Debugf("Starting to watch group (interval:%s)", cw.Config.PollNewStreamInterval) ticker := 
time.NewTicker(*cw.Config.PollNewStreamInterval) + var startFrom *string for { @@ -289,11 +303,12 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig case <-ticker.C: hasMoreStreams := true startFrom = nil + for hasMoreStreams { cw.logger.Tracef("doing the call to DescribeLogStreamsPagesWithContext") ctx := context.Background() - //there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime + // there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime err := cw.cwClient.DescribeLogStreamsPagesWithContext( ctx, &cloudwatchlogs.DescribeLogStreamsInput{ @@ -305,13 +320,14 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig }, func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool { cw.logger.Tracef("in helper of DescribeLogStreamsPagesWithContext") + for _, event := range page.LogStreams { startFrom = page.NextToken - //we check if the stream has been written to recently enough to be monitored + // we check if the stream has been written to recently enough to be monitored if event.LastIngestionTime != nil { - //aws uses millisecond since the epoch + // aws uses millisecond since the epoch oldest := time.Now().UTC().Add(-*cw.Config.MaxStreamAge) - //TBD : verify that this is correct : Unix 2nd arg expects Nanoseconds, and have a code that is more explicit. + // TBD : verify that this is correct : Unix 2nd arg expects Nanoseconds, and have a code that is more explicit. 
LastIngestionTime := time.Unix(0, *event.LastIngestionTime*int64(time.Millisecond)) if LastIngestionTime.Before(oldest) { cw.logger.Tracef("stop iteration, %s reached oldest age, stop (%s < %s)", *event.LogStreamName, LastIngestionTime, time.Now().UTC().Add(-*cw.Config.MaxStreamAge)) @@ -319,7 +335,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig return false } cw.logger.Tracef("stream %s is elligible for monitoring", *event.LogStreamName) - //the stream has been updated recently, check if we should monitor it + // the stream has been updated recently, check if we should monitor it var expectMode int if !cw.Config.UseTimeMachine { expectMode = types.LIVE @@ -383,7 +399,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha for idx, stream := range cw.monitoredStreams { if newStream.GroupName == stream.GroupName && newStream.StreamName == stream.StreamName { - //stream exists, but is dead, remove it from list + // stream exists, but is dead, remove it from list if !stream.t.Alive() { cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName) cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...) 
@@ -397,7 +413,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha } } - //let's start watching this stream + // let's start watching this stream if shouldCreate { if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() @@ -445,7 +461,7 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan var startFrom *string lastReadMessage := time.Now().UTC() ticker := time.NewTicker(cfg.PollStreamInterval) - //resume at existing index if we already had + // resume at existing index if we already had streamIndexMutex.Lock() v := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] streamIndexMutex.Unlock() @@ -566,7 +582,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'start_date'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton of date formats are supported strdate, startDate := parser.GenDateParse(v[0]) cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate) cw.Config.StartTime = &startDate @@ -574,7 +590,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'end_date'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton of date formats are supported strdate, endDate := parser.GenDateParse(v[0]) cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate) cw.Config.EndTime = &endDate @@ -582,7 +598,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'backlog'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton 
of date formats are supported duration, err := time.ParseDuration(v[0]) if err != nil { return fmt.Errorf("unable to parse '%s' as duration: %w", v[0], err) @@ -618,7 +634,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, } func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - //StreamName string, Start time.Time, End time.Time + // StreamName string, Start time.Time, End time.Time config := LogStreamTailConfig{ GroupName: cw.Config.GroupName, StreamName: *cw.Config.StreamName, diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index bab7593f26f..d62c3f6e3dd 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -1,6 +1,7 @@ package cloudwatchacquisition import ( + "context" "errors" "fmt" "net" @@ -34,6 +35,7 @@ func deleteAllLogGroups(t *testing.T, cw *CloudwatchSource) { input := &cloudwatchlogs.DescribeLogGroupsInput{} result, err := cw.cwClient.DescribeLogGroups(input) require.NoError(t, err) + for _, group := range result.LogGroups { _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ LogGroupName: group.LogGroupName, @@ -62,18 +64,22 @@ func TestMain(m *testing.M) { if runtime.GOOS == "windows" { os.Exit(0) } + if err := checkForLocalStackAvailability(); err != nil { log.Fatalf("local stack error : %s", err) } + def_PollNewStreamInterval = 1 * time.Second def_PollStreamInterval = 1 * time.Second def_StreamReadTimeout = 10 * time.Second def_MaxStreamAge = 5 * time.Second def_PollDeadStreamInterval = 5 * time.Second + os.Exit(m.Run()) } func TestWatchLogGroupForStreams(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -447,7 +453,7 @@ stream_name: test_stream`), dbgLogger.Infof("running StreamingAcquisition") actmb := tomb.Tomb{} actmb.Go(func() error { - 
err := cw.StreamingAcquisition(out, &actmb) + err := cw.StreamingAcquisition(ctx, out, &actmb) dbgLogger.Infof("acquis done") cstest.RequireErrorContains(t, err, tc.expectedStartErr) return nil @@ -503,7 +509,6 @@ stream_name: test_stream`), if len(res) != 0 { t.Fatalf("leftover unmatched results : %v", res) } - } if tc.teardown != nil { tc.teardown(t, &cw) @@ -513,6 +518,7 @@ stream_name: test_stream`), } func TestConfiguration(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -571,7 +577,7 @@ stream_name: test_stream`), switch cw.GetMode() { case "tail": - err = cw.StreamingAcquisition(out, &tmb) + err = cw.StreamingAcquisition(ctx, out, &tmb) case "cat": err = cw.OneShotAcquisition(out, &tmb) } @@ -798,7 +804,6 @@ func TestOneShotAcquisition(t *testing.T) { if len(res) != 0 { t.Fatalf("leftover unmatched results : %v", res) } - } if tc.teardown != nil { tc.teardown(t, &cw) diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 44fee0a99a2..874b1556fd5 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -518,7 +518,7 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha } } -func (d *DockerSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (d *DockerSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { d.t = t monitChan := make(chan *ContainerConfig) deleteChan := make(chan *ContainerConfig) @@ -589,11 +589,11 @@ func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types outChan <- evt d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) case <-readerTomb.Dying(): - //This case is to handle temporarily losing the connection to the docker socket - //The only known case currently is when using docker-socket-proxy (and maybe a docker daemon restart) + // This case is to handle 
temporarily losing the connection to the docker socket + // The only known case currently is when using docker-socket-proxy (and maybe a docker daemon restart) d.logger.Debugf("readerTomb dying for container %s, removing it from runningContainerState", container.Name) deleteChan <- container - //Also reset the Since to avoid re-reading logs + // Also reset the Since to avoid re-reading logs d.Config.Since = time.Now().UTC().Format(time.RFC3339) d.containerLogsOptions.Since = d.Config.Since return nil diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index e332569fb3a..e394c9cbe79 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -120,6 +120,7 @@ type mockDockerCli struct { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) log.Info("Test 'TestStreamingAcquisition'") @@ -185,7 +186,7 @@ container_name_regexp: readerTomb := &tomb.Tomb{} streamTomb := tomb.Tomb{} streamTomb.Go(func() error { - return dockerSource.StreamingAcquisition(out, &dockerTomb) + return dockerSource.StreamingAcquisition(ctx, out, &dockerTomb) }) readerTomb.Go(func() error { time.Sleep(1 * time.Second) @@ -245,7 +246,7 @@ func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, o for _, line := range data { startLineByte := make([]byte, 8) - binary.LittleEndian.PutUint32(startLineByte, 1) //stdout stream + binary.LittleEndian.PutUint32(startLineByte, 1) // stdout stream binary.BigEndian.PutUint32(startLineByte[4:], uint32(len(line))) ret += fmt.Sprintf("%s%s", startLineByte, line) } diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 85b4c1b5b32..2d2df3ff4d4 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -3,6 +3,7 @@ package fileacquisition import ( "bufio" "compress/gzip" + "context" 
"errors" "fmt" "io" @@ -320,7 +321,7 @@ func (f *FileSource) CanRun() error { return nil } -func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (f *FileSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { f.logger.Debug("Starting live acquisition") t.Go(func() error { return f.monitorNewFiles(out, t) diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 5d38552b3c5..3db0042ba2f 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -1,6 +1,7 @@ package fileacquisition_test import ( + "context" "fmt" "os" "runtime" @@ -243,6 +244,7 @@ filename: test_files/test_delete.log`, } func TestLiveAcquisition(t *testing.T) { + ctx := context.Background() permDeniedFile := "/etc/shadow" permDeniedError := "unable to read /etc/shadow : open /etc/shadow: permission denied" testPattern := "test_files/*.log" @@ -394,7 +396,7 @@ force_inotify: true`, testPattern), }() } - err = f.StreamingAcquisition(out, &tomb) + err = f.StreamingAcquisition(ctx, out, &tomb) cstest.RequireErrorContains(t, err, tc.expectedErr) if tc.expectedLines != 0 { diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 1336fac4578..b9cda54a472 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -113,7 +113,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err return readLine(stdoutscanner, stdoutChan, errChan) }) t.Go(func() error { - //looks like journalctl closes stderr quite early, so ignore its status (but not its output) + // looks like journalctl closes stderr quite early, so ignore its status (but not its output) return readLine(stderrScanner, stderrChan, nil) }) @@ -122,7 +122,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err 
case <-t.Dying(): logger.Infof("journalctl datasource %s stopping", j.src) cancel() - cmd.Wait() //avoid zombie process + cmd.Wait() // avoid zombie process return nil case stdoutLine := <-stdoutChan: l := types.Line{} @@ -217,7 +217,7 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, j.config.Labels = labels j.config.UniqueId = uuid - //format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2 + // format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2 if !strings.HasPrefix(dsn, "journalctl://") { return fmt.Errorf("invalid DSN %s for journalctl source, must start with journalctl://", dsn) } @@ -269,7 +269,7 @@ func (j *JournalCtlSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb return err } -func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (j *JournalCtlSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/journalctl/streaming") return j.runJournalCtl(out, t) diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 53e2d0802ad..c416bb5d23e 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -1,6 +1,7 @@ package journalctlacquisition import ( + "context" "os" "os/exec" "path/filepath" @@ -187,6 +188,7 @@ journalctl_filter: } func TestStreaming(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -250,7 +252,7 @@ journalctl_filter: }() } - err = j.StreamingAcquisition(out, &tomb) + err = j.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) if err != nil { diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index a0d7fc39bcc..9fd5fc2a035 100644 --- 
a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -23,9 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - dataSourceName = "kafka" -) +var dataSourceName = "kafka" var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -204,7 +202,7 @@ func (k *KafkaSource) RunReader(out chan types.Event, t *tomb.Tomb) error { } } -func (k *KafkaSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KafkaSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { k.logger.Infof("start reader on brokers '%+v' with topic '%s'", k.Config.Brokers, k.Config.Topic) t.Go(func() error { diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 7b467142cc9..d796166a6ca 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -80,9 +80,9 @@ group_id: crowdsec`, } } -func writeToKafka(w *kafka.Writer, logs []string) { +func writeToKafka(ctx context.Context, w *kafka.Writer, logs []string) { for idx, log := range logs { - err := w.WriteMessages(context.Background(), kafka.Message{ + err := w.WriteMessages(ctx, kafka.Message{ Key: []byte(strconv.Itoa(idx)), // create an arbitrary message payload for the value Value: []byte(log), @@ -128,6 +128,7 @@ func createTopic(topic string, broker string) { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -176,12 +177,12 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) tomb := tomb.Tomb{} out := make(chan types.Event) - err = k.StreamingAcquisition(out, &tomb) + err = k.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 - go writeToKafka(w, ts.logs) + go writeToKafka(ctx, w, ts.logs) READLOOP: for { select { @@ -199,6 +200,7 @@ topic: 
crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } func TestStreamingAcquisitionWithSSL(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -252,12 +254,12 @@ tls: tomb := tomb.Tomb{} out := make(chan types.Event) - err = k.StreamingAcquisition(out, &tomb) + err = k.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 - go writeToKafka(w2, ts.logs) + go writeToKafka(ctx, w2, ts.logs) READLOOP: for { select { diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 0e6c1980fa9..ca3a847dbfb 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -3,6 +3,7 @@ package kinesisacquisition import ( "bytes" "compress/gzip" + "context" "encoding/json" "errors" "fmt" @@ -29,7 +30,7 @@ type KinesisConfiguration struct { configuration.DataSourceCommonCfg `yaml:",inline"` StreamName string `yaml:"stream_name"` StreamARN string `yaml:"stream_arn"` - UseEnhancedFanOut bool `yaml:"use_enhanced_fanout"` //Use RegisterStreamConsumer and SubscribeToShard instead of GetRecords + UseEnhancedFanOut bool `yaml:"use_enhanced_fanout"` // Use RegisterStreamConsumer and SubscribeToShard instead of GetRecords AwsProfile *string `yaml:"aws_profile"` AwsRegion string `yaml:"aws_region"` AwsEndpoint string `yaml:"aws_endpoint"` @@ -114,8 +115,8 @@ func (k *KinesisSource) newClient() error { func (k *KinesisSource) GetMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, linesReadShards} - } + func (k *KinesisSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, linesReadShards} } @@ -188,7 +189,6 @@ func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) e func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { b := 
bytes.NewBuffer(record) r, err := gzip.NewReader(b) - if err != nil { k.logger.Error(err) return nil, err @@ -299,8 +299,8 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan var data []CloudwatchSubscriptionLogEvent var err error if k.Config.FromSubscription { - //The AWS docs says that the data is base64 encoded - //but apparently GetRecords decodes it for us ? + // The AWS docs says that the data is base64 encoded + // but apparently GetRecords decodes it for us ? data, err = k.decodeFromSubscription(record.Data) if err != nil { logger.Errorf("Cannot decode data: %s", err) @@ -335,9 +335,9 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEventStreamReader, out chan types.Event, shardId string, streamName string) error { logger := k.logger.WithField("shard_id", shardId) - //ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately - //and we won't be able to start a new one if this is the first one started by the tomb - //TODO: look into parent shards to see if a shard is closed before starting to read it ? + // ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately + // and we won't be able to start a new one if this is the first one started by the tomb + // TODO: look into parent shards to see if a shard is closed before starting to read it ? 
time.Sleep(time.Second) for { select { @@ -420,7 +420,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { case <-t.Dying(): k.logger.Infof("Kinesis source is dying") k.shardReaderTomb.Kill(nil) - _ = k.shardReaderTomb.Wait() //we don't care about the error as we kill the tomb ourselves + _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) @@ -431,7 +431,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { if k.shardReaderTomb.Err() != nil { return k.shardReaderTomb.Err() } - //All goroutines have exited without error, so a resharding event, start again + // All goroutines have exited without error, so a resharding event, start again k.logger.Debugf("All reader goroutines have exited, resharding event or periodic resubscribe") continue } @@ -441,15 +441,17 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { logger := k.logger.WithField("shard", shardId) logger.Debugf("Starting to read shard") - sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ShardId: aws.String(shardId), + sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ + ShardId: aws.String(shardId), StreamName: &k.Config.StreamName, - ShardIteratorType: aws.String(kinesis.ShardIteratorTypeLatest)}) + ShardIteratorType: aws.String(kinesis.ShardIteratorTypeLatest), + }) if err != nil { logger.Errorf("Cannot get shard iterator: %s", err) return fmt.Errorf("cannot get shard iterator: %w", err) } it := sharIt.ShardIterator - //AWS recommends to wait for a second between calls to GetRecords for a given shard + // AWS recommends to wait for a second between calls to GetRecords for a given shard ticker := time.NewTicker(time.Second) for { select { @@ 
-460,7 +462,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro switch err.(type) { case *kinesis.ProvisionedThroughputExceededException: logger.Warn("Provisioned throughput exceeded") - //TODO: implement exponential backoff + // TODO: implement exponential backoff continue case *kinesis.ExpiredIteratorException: logger.Warn("Expired iterator") @@ -506,7 +508,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error case <-t.Dying(): k.logger.Info("kinesis source is dying") k.shardReaderTomb.Kill(nil) - _ = k.shardReaderTomb.Wait() //we don't care about the error as we kill the tomb ourselves + _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves return nil case <-k.shardReaderTomb.Dying(): reason := k.shardReaderTomb.Err() @@ -520,7 +522,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error } } -func (k *KinesisSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KinesisSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming") if k.Config.UseEnhancedFanOut { diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 46e404aa49b..027cbde9240 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -3,6 +3,7 @@ package kinesisacquisition import ( "bytes" "compress/gzip" + "context" "encoding/json" "fmt" "net" @@ -60,8 +61,8 @@ func GenSubObject(i int) []byte { gz := gzip.NewWriter(&b) gz.Write(body) gz.Close() - //AWS actually base64 encodes the data, but it looks like kinesis automatically decodes it at some point - //localstack does not do it, so let's just write a raw gzipped stream + // AWS actually base64 encodes the data, but it looks like kinesis automatically decodes 
it at some point + // localstack does not do it, so let's just write a raw gzipped stream return b.Bytes() } @@ -99,10 +100,10 @@ func TestMain(m *testing.M) { os.Setenv("AWS_ACCESS_KEY_ID", "foobar") os.Setenv("AWS_SECRET_ACCESS_KEY", "foobar") - //delete_streams() - //create_streams() + // delete_streams() + // create_streams() code := m.Run() - //delete_streams() + // delete_streams() os.Exit(code) } @@ -149,6 +150,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } func TestReadFromStream(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -176,11 +178,11 @@ stream_name: stream-1-shard`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) for i := range test.count { @@ -193,6 +195,7 @@ stream_name: stream-1-shard`, } func TestReadFromMultipleShards(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -220,11 +223,11 @@ stream_name: stream-2-shards`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) c := 0 @@ -239,6 +242,7 @@ stream_name: stream-2-shards`, } func TestFromSubscription(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } 
@@ -267,11 +271,11 @@ from_subscription: true`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, true) for i := range test.count { diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 8ba5b2d06e0..f979b044dcc 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -135,7 +135,7 @@ func (ka *KubernetesAuditSource) OneShotAcquisition(out chan types.Event, t *tom return errors.New("k8s-audit datasource does not support one-shot acquisition") } -func (ka *KubernetesAuditSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { ka.outChan = out t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/k8s-audit/live") @@ -164,7 +164,6 @@ func (ka *KubernetesAuditSource) Dump() interface{} { } func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.Request) { - if ka.metricsLevel != configuration.METRICS_NONE { requestCount.WithLabelValues(ka.addr).Inc() } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index 020bd4c91a0..a086a756e4a 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -1,6 +1,7 @@ package kubernetesauditacquisition import ( + "context" "net/http/httptest" "strings" "testing" @@ -52,6 +53,7 @@ listen_addr: 0.0.0.0`, } func 
TestInvalidConfig(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -83,7 +85,7 @@ webhook_path: /k8s-audit`, err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) - f.StreamingAcquisition(out, tb) + f.StreamingAcquisition(ctx, out, tb) time.Sleep(1 * time.Second) tb.Kill(nil) @@ -98,6 +100,7 @@ webhook_path: /k8s-audit`, } func TestHandler(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -257,14 +260,14 @@ webhook_path: /k8s-audit`, req := httptest.NewRequest(test.method, "/k8s-audit", strings.NewReader(test.body)) w := httptest.NewRecorder() - f.StreamingAcquisition(out, tb) + f.StreamingAcquisition(ctx, out, tb) f.webhookHandler(w, req) res := w.Result() assert.Equal(t, test.expectedStatusCode, res.StatusCode) - //time.Sleep(1 * time.Second) + // time.Sleep(1 * time.Second) require.NoError(t, err) tb.Kill(nil) diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 15c454723ee..f867feeb84b 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -319,9 +319,9 @@ func (l *LokiSource) readOneEntry(entry lokiclient.Entry, labels map[string]stri } } -func (l *LokiSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (l *LokiSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { l.Client.SetTomb(t) - readyCtx, cancel := context.WithTimeout(context.Background(), l.Config.WaitForReady) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) defer cancel() err := l.Client.Ready(readyCtx) if err != nil { @@ -329,7 +329,7 @@ func (l *LokiSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er } ll := l.logger.WithField("websocket_url", l.lokiWebsocket) t.Go(func() error { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) 
defer cancel() respChan := l.Client.QueryRange(ctx, true) if err != nil { diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 2fd2b61e995..627200217f5 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -439,7 +439,7 @@ query: > t.Fatalf("Unexpected error : %s", err) } - err = lokiSource.StreamingAcquisition(out, &lokiTomb) + err = lokiSource.StreamingAcquisition(ctx, out, &lokiTomb) cstest.AssertErrorContains(t, err, ts.streamErr) if ts.streamErr != "" { @@ -449,7 +449,7 @@ query: > time.Sleep(time.Second * 2) // We need to give time to start reading from the WS readTomb := tomb.Tomb{} - readCtx, cancel := context.WithTimeout(context.Background(), time.Second*10) + readCtx, cancel := context.WithTimeout(ctx, time.Second*10) count := 0 readTomb.Go(func() error { @@ -492,6 +492,7 @@ query: > } func TestStopStreaming(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -519,15 +520,13 @@ query: > lokiTomb := &tomb.Tomb{} - err = lokiSource.StreamingAcquisition(out, lokiTomb) + err = lokiSource.StreamingAcquisition(ctx, out, lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } time.Sleep(time.Second * 2) - ctx := context.Background() - err = feedLoki(ctx, subLogger, 1, title) if err != nil { t.Fatalf("Unexpected error : %s", err) diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index a9835ab4974..ed1964edebf 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -38,7 +38,7 @@ type S3Configuration struct { AwsEndpoint string `yaml:"aws_endpoint"` BucketName string `yaml:"bucket_name"` Prefix string `yaml:"prefix"` - Key string `yaml:"-"` //Only for DSN acquisition + Key string `yaml:"-"` // Only for DSN acquisition PollingMethod string `yaml:"polling_method"` PollingInterval int `yaml:"polling_interval"` SQSName string 
`yaml:"sqs_name"` @@ -338,7 +338,7 @@ func (s *S3Source) sqsPoll() error { out, err := s.sqsClient.ReceiveMessageWithContext(s.ctx, &sqs.ReceiveMessageInput{ QueueUrl: aws.String(s.Config.SQSName), MaxNumberOfMessages: aws.Int64(10), - WaitTimeSeconds: aws.Int64(20), //Probably no need to make it configurable ? + WaitTimeSeconds: aws.Int64(20), // Probably no need to make it configurable ? }) if err != nil { logger.Errorf("Error while polling SQS: %s", err) @@ -353,7 +353,7 @@ func (s *S3Source) sqsPoll() error { bucket, key, err := s.extractBucketAndPrefix(message.Body) if err != nil { logger.Errorf("Error while parsing SQS message: %s", err) - //Always delete the message to avoid infinite loop + // Always delete the message to avoid infinite loop _, err = s.sqsClient.DeleteMessage(&sqs.DeleteMessageInput{ QueueUrl: aws.String(s.Config.SQSName), ReceiptHandle: message.ReceiptHandle, @@ -379,7 +379,7 @@ func (s *S3Source) sqsPoll() error { } func (s *S3Source) readFile(bucket string, key string) error { - //TODO: Handle SSE-C + // TODO: Handle SSE-C var scanner *bufio.Scanner logger := s.logger.WithFields(log.Fields{ @@ -392,14 +392,13 @@ func (s *S3Source) readFile(bucket string, key string) error { Bucket: aws.String(bucket), Key: aws.String(key), }) - if err != nil { return fmt.Errorf("failed to get object %s/%s: %w", bucket, key, err) } defer output.Body.Close() if strings.HasSuffix(key, ".gz") { - //This *might* be a gzipped file, but sometimes the SDK will decompress the data for us (it's not clear when it happens, only had the issue with cloudtrail logs) + // This *might* be a gzipped file, but sometimes the SDK will decompress the data for us (it's not clear when it happens, only had the issue with cloudtrail logs) header := make([]byte, 2) _, err := output.Body.Read(header) if err != nil { @@ -613,7 +612,7 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger * pathParts := strings.Split(args[0], "/") s.logger.Debugf("pathParts: 
%v", pathParts) - //FIXME: handle s3://bucket/ + // FIXME: handle s3://bucket/ if len(pathParts) == 1 { s.Config.BucketName = pathParts[0] s.Config.Prefix = "" @@ -656,7 +655,7 @@ func (s *S3Source) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error return err } } else { - //No key, get everything in the bucket based on the prefix + // No key, get everything in the bucket based on the prefix objects, err := s.getBucketContent() if err != nil { return err @@ -672,11 +671,11 @@ func (s *S3Source) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error return nil } -func (s *S3Source) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *S3Source) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { s.t = t s.out = out - s.readerChan = make(chan S3Object, 100) //FIXME: does this needs to be buffered? - s.ctx, s.cancel = context.WithCancel(context.Background()) + s.readerChan = make(chan S3Object, 100) // FIXME: does this needs to be buffered? 
+ s.ctx, s.cancel = context.WithCancel(ctx) s.logger.Infof("starting acquisition of %s/%s", s.Config.BucketName, s.Config.Prefix) t.Go(func() error { s.readManager() diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index 93e166dfec5..05a974517a0 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -272,6 +272,7 @@ func TestDSNAcquis(t *testing.T) { } func TestListPolling(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -331,7 +332,7 @@ prefix: foo/ } }() - err = f.StreamingAcquisition(out, &tb) + err = f.StreamingAcquisition(ctx, out, &tb) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -348,6 +349,7 @@ prefix: foo/ } func TestSQSPoll(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -411,7 +413,7 @@ sqs_name: test } }() - err = f.StreamingAcquisition(out, &tb) + err = f.StreamingAcquisition(ctx, out, &tb) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 06c32e62f77..5315096fb9b 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "context" "errors" "fmt" "net" @@ -105,7 +106,7 @@ func (s *SyslogSource) UnmarshalConfig(yamlConfig []byte) error { } if s.config.Addr == "" { - s.config.Addr = "127.0.0.1" //do we want a usable or secure default ? + s.config.Addr = "127.0.0.1" // do we want a usable or secure default ? 
} if s.config.Port == 0 { s.config.Port = 514 @@ -135,7 +136,7 @@ func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe return nil } -func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *SyslogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { c := make(chan syslogserver.SyslogMessage) s.server = &syslogserver.SyslogServer{Logger: s.logger.WithField("syslog", "internal"), MaxMessageLen: s.config.MaxMessageLen} s.server.SetChannel(c) @@ -152,7 +153,8 @@ func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } func (s *SyslogSource) buildLogFromSyslog(ts time.Time, hostname string, - appname string, pid string, msg string) string { + appname string, pid string, msg string, +) string { ret := "" if !ts.IsZero() { ret += ts.Format("Jan 2 15:04:05") @@ -178,7 +180,6 @@ func (s *SyslogSource) buildLogFromSyslog(ts time.Time, hostname string, ret += msg } return ret - } func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c chan syslogserver.SyslogMessage) error { diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 1750f375138..57fa3e8747b 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "context" "fmt" "net" "runtime" @@ -80,6 +81,7 @@ func writeToSyslog(logs []string) { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -100,8 +102,10 @@ listen_addr: 127.0.0.1`, listen_port: 4242 listen_addr: 127.0.0.1`, expectedLines: 2, - logs: []string{`<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, - `<13>1 2021-05-18T12:12:37.560695+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla2[foobar]`}, + 
logs: []string{ + `<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, + `<13>1 2021-05-18T12:12:37.560695+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla2[foobar]`, + }, }, { name: "RFC3164", @@ -109,10 +113,12 @@ listen_addr: 127.0.0.1`, listen_port: 4242 listen_addr: 127.0.0.1`, expectedLines: 3, - logs: []string{`<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`, + logs: []string{ + `<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`, `<13>May 18 12:37:56 mantis sshd[49340]: blabla2`, `<13>May 18 12:37:56 mantis sshd: blabla2`, - `<13>May 18 12:37:56 mantis sshd`}, + `<13>May 18 12:37:56 mantis sshd`, + }, }, } if runtime.GOOS != "windows" { @@ -139,7 +145,7 @@ listen_addr: 127.0.0.1`, } tomb := tomb.Tomb{} out := make(chan types.Event) - err = s.StreamingAcquisition(out, &tomb) + err = s.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) if ts.expectedErr != "" { return diff --git a/pkg/acquisition/modules/wineventlog/wineventlog.go b/pkg/acquisition/modules/wineventlog/wineventlog.go index 44035d0a708..6d522d8d8cb 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog.go @@ -3,6 +3,7 @@ package wineventlogacquisition import ( + "context" "errors" "github.com/prometheus/client_golang/prometheus" @@ -59,7 +60,7 @@ func (w *WinEventLogSource) CanRun() error { return errors.New("windows event log acquisition is only supported on Windows") } -func (w *WinEventLogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_test.go index 2ea0e365be5..ae6cb776909 100644 --- 
a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_test.go @@ -3,6 +3,7 @@ package wineventlogacquisition import ( + "context" "runtime" "testing" "time" @@ -129,6 +130,7 @@ event_level: bla`, } func TestLiveAcquisition(t *testing.T) { + ctx := context.Background() if runtime.GOOS != "windows" { t.Skip("Skipping test on non-windows OS") } @@ -190,7 +192,7 @@ event_ids: c := make(chan types.Event) f := WinEventLogSource{} f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - f.StreamingAcquisition(c, to) + f.StreamingAcquisition(ctx, c, to) time.Sleep(time.Second) lines := test.expectedLines go func() { diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 4f2384d71db..087c20eb70e 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -1,6 +1,7 @@ package wineventlogacquisition import ( + "context" "encoding/xml" "errors" "fmt" @@ -325,7 +326,7 @@ func (w *WinEventLogSource) CanRun() error { return nil } -func (w *WinEventLogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/wineventlog/streaming") return w.getEvents(out, t) From b2ac65bfb6fd80435a6e64f43e645892fa31ca50 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 16 Oct 2024 16:55:18 +0200 Subject: [PATCH 325/581] avoid deadlock when deleting decisions if PAPI is half configured (#3283) --- pkg/apiserver/apiserver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index bdf2d4148cc..35f9beaf635 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -255,7 +255,7 @@ func 
NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APISer controller.AlertsAddChan = apiClient.AlertsAddChan - if config.ConsoleConfig.IsPAPIEnabled() { + if config.ConsoleConfig.IsPAPIEnabled() && config.OnlineClient.Credentials.PapiURL != "" { if apiClient.apiClient.IsEnrolled() { log.Info("Machine is enrolled in the console, Loading PAPI Client") @@ -340,7 +340,7 @@ func (s *APIServer) initAPIC(ctx context.Context) { // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios if s.apic.apiClient.IsEnrolled() { - if s.consoleConfig.IsPAPIEnabled() { + if s.consoleConfig.IsPAPIEnabled() && s.papi != nil { if s.papi.URL != "" { log.Info("Starting PAPI decision receiver") s.papi.pullTomb.Go(func() error { return s.papiPull(ctx) }) From d8bc17b17009467945dd47c9b90d061bbf6de899 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 16 Oct 2024 16:55:32 +0200 Subject: [PATCH 326/581] wineventlog: add support for replaying evtx files (#3278) --- go.mod | 2 +- go.sum | 2 + .../modules/wineventlog/test_files/Setup.evtx | Bin 0 -> 69632 bytes .../wineventlog/wineventlog_windows.go | 160 ++++++++++++++++-- ...og_test.go => wineventlog_windows_test.go} | 94 ++++++++-- pkg/exprhelpers/helpers.go | 2 +- pkg/exprhelpers/xml.go | 100 ++++++++--- 7 files changed, 313 insertions(+), 47 deletions(-) create mode 100644 pkg/acquisition/modules/wineventlog/test_files/Setup.evtx rename pkg/acquisition/modules/wineventlog/{wineventlog_test.go => wineventlog_windows_test.go} (73%) diff --git a/go.mod b/go.mod index b02d3b76840..f28f21c6eb4 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/appleboy/gin-jwt/v2 v2.9.2 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 - github.com/beevik/etree v1.3.0 + github.com/beevik/etree v1.4.1 github.com/blackfireio/osinfo v1.0.5 github.com/bluele/gcache v0.0.2 github.com/buger/jsonparser v1.1.1 diff --git a/go.sum b/go.sum index 7aaea1587b8..b2bd77c9915 100644 --- a/go.sum +++ b/go.sum @@ -58,6 
+58,8 @@ github.com/aws/aws-sdk-go v1.52.0 h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU= github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= +github.com/beevik/etree v1.4.1 h1:PmQJDDYahBGNKDcpdX8uPy1xRCwoCGVUiW669MEirVI= +github.com/beevik/etree v1.4.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= diff --git a/pkg/acquisition/modules/wineventlog/test_files/Setup.evtx b/pkg/acquisition/modules/wineventlog/test_files/Setup.evtx new file mode 100644 index 0000000000000000000000000000000000000000..2c4f8b0f680d0cf86be8d25825fc6a94576a5fe2 GIT binary patch literal 69632 zcmeI43yf6N8OOhwogHQ$%J2&c39?Ah6Pcbrlt@+rTV43oH9TccCb? 
zZm3#g+G-n3Oi8MtPZMiss|_XAXlrX~2@un?#zNYrG-~=turW3EvHkzg+yl#H=q||Y zEb0GD?%vnA_k8#F{qJ{P7glz6wsdu8ESxS14h&))u_Cj|NI1&}+UMQgSyOb&2fB$a z76B0u0TB=Z5fA|p5CIVo0TB=Z5f~MLmEE1oH}`HqA3vL>2kY?uAdWx2#O$r5W@~pJ zxF^VV&MV&kdvo7>aQF>o#?6jQF#CHk%83m>V-`!8EyXaz*`|XdTyrnVWSWUGnf?*v z-xD0?vE4Y<8M5ap?7tV~S+fs?%IFF1#dA*u+t(s*3+}l;bRXs!o<0)XbNDfCKL;-7 znl8Wm%va8zZ?+Hj_Wr)|jz=1jt6q;(L?Tumxp;Ew32za%7bN%p@Ur)he)WrAX}#?M zJPC6xtFpVX$p>WGUtVl0ZM$Wx*9Nfvc`WhUgOkr!6x$B#v2L8~$BE)dy=_Le0o!U< z+7sqV9FNLidYr@~miQ@ZD z*^5SohF*oQyx;n6jN4kf3wIuHC7MN-4GrB2ySzt_?@U;$D}6i4>OihLakc?Vix=bS z$71#g7~6xBX33;Q;G;Y_y zfpiNIvA78DRBUswOdZchi?%z_0gTBr+Cm`1Uzx8DvFdsEhl41OVPw(v~R!-X)-Ozl8+`!t(3Oh zkBfXj*z-=AEqB>yN4K-YV{JpemX_cRFFjsr%W%g57*4ee;7POhO-`_iID-P+UjMTy zW6{|=@tMwW(L~B+Xs8Ee_#m?Hxe`3U4rll_S5Vfyt7r$jX|$#EHzk2)HzH#{et{JU zOT-yk>Dv376Ck`fVCw-SODs_osdRE;FlPE3B)S|q(-2axvztz{pUxw9$I&E zDaj0g%vOZst6ux&CSMV7JGr|nwd)nTcvt4H|1O6m49D;ajbc5@v|<+}=b^qBq7Pp)o^|jU;ffJlZbI1|?o`ITl>^Xj#-bDP@O0(8;4YV79nW6F zBJaeHFBjh(Muq6ah$WbRVUYinGEU}S3lpe0;74V4Ad>MKmIL?2 zVsJJd5+vw!uAg_Vy2$1X8uYy!)lFN-2W{}f$;zM()k+(Dvkgbn2qPKy-p#h^6IXjd z_9LHHcpD(nqrG6%hwB*R!sRsJZj0gZ)poVjp~|YmuNkE^Xqr?eTH{)$P{mt+thTn{uq}hfkCoQwFd^5sc|H-)ZIvXtX zdkS9P4)4?VP4NB*zIV=s)9L>N)_6x4K`z_XvG7>!`fk>`8Reyk*jn>nZ+k9kKVY z9{#6J8lVCG+MRCF&<0za>~GY#o=FFCHM^}^mzR2?-ZtWj4*a-9buAvPj@TYS!_TEp z&Hwgom3fOtx!R+?#ITMYM?5+Lv%F;m)zSY&dz5jdB+$W2zrE(``O`tJddZg<*1-Ti zXwn0F^iz*xJko(2HCUQ)MGcl( zr@k~4HA)?|A|9bZ>e6HH?%bO{9WBk#PWB~+b@U=S#%o}fx2&K#nl`T5$!FG4e&W$6 z$6Tl*wwJj^+I!cd-$vcxyL%M3H4k9uIX^OMw|~C-d@6>t4nJi+57Ra z)sZhTtfTud2sH(rcyF0>lp`*Mb(F2=IF{1@y=2#Hc{f8DY&m7yMk$N)JXP@@{MD;1N_SH}T=i;kFGv2JHI18O^g*UqBlfvB7Y}|XS=g7}I zT6(H=ex7ta6wST?3ynS6uQXhIRA}Xy`jwyk!N|k>b)QzAf?N9PC$QzmhatwD#Fc z^QWg={Ze0ISWhWTGpL%YD7L)InQcoB(cj=jX_s!0qj&j8%Ut(BC z_hBOFQ7qoFg6e3z#-*{D4?2hT5l7}ir8w6q3`x2&Lg8n1C_ zZ1i*%acNpsM%L21<+A+gC|6waC5Cl$4C5ScVeys~R7V#|T*}Y<;#tNe)^q;aR{_2ZW0m}m4QZrxiPH7-9MEKS(U_Bs^aib)}FSwZzQWn9fO@?T-_y9bUiHM+lx zF*?NW@x!C}9J}WjDrcfoSUAeWu~?29ai9P2kY7UZ>pbSA9+{V;PyCl89Es&O6dQ4T 
zgy@u=U-aWl{@>&F{ql)Fj;)@2iD5lW$AnNDKI-+BNlz!n^T)BCf@)4H5fA|p5CIVo z0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p z5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo z0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p a5CIVo0TB=Z5fA|p5CIVo0TCE?1pW&`k>URU literal 0 HcmV?d00001 diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 087c20eb70e..ca40363155b 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -5,7 +5,9 @@ import ( "encoding/xml" "errors" "fmt" + "net/url" "runtime" + "strconv" "strings" "syscall" "time" @@ -30,7 +32,7 @@ type WinEventLogConfiguration struct { EventLevel string `yaml:"event_level"` EventIDs []int `yaml:"event_ids"` XPathQuery string `yaml:"xpath_query"` - EventFile string `yaml:"event_file"` + EventFile string PrettyName string `yaml:"pretty_name"` } @@ -48,10 +50,13 @@ type QueryList struct { } type Select struct { - Path string `xml:"Path,attr"` + Path string `xml:"Path,attr,omitempty"` Query string `xml:",chardata"` } +// 0 identifies the local machine in windows APIs +const localMachine = 0 + var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_winevtlogsource_hits_total", @@ -212,20 +217,28 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error } } -func (w *WinEventLogSource) generateConfig(query string) (*winlog.SubscribeConfig, error) { +func (w *WinEventLogSource) generateConfig(query string, live bool) (*winlog.SubscribeConfig, error) { var config winlog.SubscribeConfig var err error - // Create a subscription signaler. - config.SignalEvent, err = windows.CreateEvent( - nil, // Default security descriptor. - 1, // Manual reset. 
- 1, // Initial state is signaled. - nil) // Optional name. - if err != nil { - return &config, fmt.Errorf("windows.CreateEvent failed: %v", err) + if live { + // Create a subscription signaler. + config.SignalEvent, err = windows.CreateEvent( + nil, // Default security descriptor. + 1, // Manual reset. + 1, // Initial state is signaled. + nil) // Optional name. + if err != nil { + return &config, fmt.Errorf("windows.CreateEvent failed: %v", err) + } + config.Flags = wevtapi.EvtSubscribeToFutureEvents + } else { + config.ChannelPath, err = syscall.UTF16PtrFromString(w.config.EventFile) + if err != nil { + return &config, fmt.Errorf("syscall.UTF16PtrFromString failed: %v", err) + } + config.Flags = wevtapi.EvtQueryFilePath | wevtapi.EvtQueryForwardDirection } - config.Flags = wevtapi.EvtSubscribeToFutureEvents config.Query, err = syscall.UTF16PtrFromString(query) if err != nil { return &config, fmt.Errorf("syscall.UTF16PtrFromString failed: %v", err) @@ -283,7 +296,7 @@ func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, Metr return err } - w.evtConfig, err = w.generateConfig(w.query) + w.evtConfig, err = w.generateConfig(w.query, true) if err != nil { return err } @@ -292,6 +305,78 @@ func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, Metr } func (w *WinEventLogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { + if !strings.HasPrefix(dsn, "wineventlog://") { + return fmt.Errorf("invalid DSN %s for wineventlog source, must start with wineventlog://", dsn) + } + + w.logger = logger + w.config = WinEventLogConfiguration{} + + dsn = strings.TrimPrefix(dsn, "wineventlog://") + + args := strings.Split(dsn, "?") + + if args[0] == "" { + return errors.New("empty wineventlog:// DSN") + } + + if len(args) > 2 { + return errors.New("too many arguments in DSN") + } + + w.config.EventFile = args[0] + + if len(args) == 2 && args[1] != "" { + params, err := url.ParseQuery(args[1]) 
+ if err != nil { + return fmt.Errorf("failed to parse DSN parameters: %w", err) + } + + for key, value := range params { + switch key { + case "log_level": + if len(value) != 1 { + return errors.New("log_level must be a single value") + } + lvl, err := log.ParseLevel(value[0]) + if err != nil { + return fmt.Errorf("failed to parse log_level: %s", err) + } + w.logger.Logger.SetLevel(lvl) + case "event_id": + for _, id := range value { + evtid, err := strconv.Atoi(id) + if err != nil { + return fmt.Errorf("failed to parse event_id: %s", err) + } + w.config.EventIDs = append(w.config.EventIDs, evtid) + } + case "event_level": + if len(value) != 1 { + return errors.New("event_level must be a single value") + } + w.config.EventLevel = value[0] + } + } + } + + var err error + + //FIXME: handle custom xpath query + w.query, err = w.buildXpathQuery() + + if err != nil { + return fmt.Errorf("buildXpathQuery failed: %w", err) + } + + w.logger.Debugf("query: %s\n", w.query) + + w.evtConfig, err = w.generateConfig(w.query, false) + + if err != nil { + return fmt.Errorf("generateConfig failed: %w", err) + } + return nil } @@ -300,10 +385,57 @@ func (w *WinEventLogSource) GetMode() string { } func (w *WinEventLogSource) SupportedModes() []string { - return []string{configuration.TAIL_MODE} + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} } func (w *WinEventLogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + + handle, err := wevtapi.EvtQuery(localMachine, w.evtConfig.ChannelPath, w.evtConfig.Query, w.evtConfig.Flags) + + if err != nil { + return fmt.Errorf("EvtQuery failed: %v", err) + } + + defer winlog.Close(handle) + + publisherCache := make(map[string]windows.Handle) + defer func() { + for _, h := range publisherCache { + winlog.Close(h) + } + }() + +OUTER_LOOP: + for { + select { + case <-t.Dying(): + w.logger.Infof("wineventlog is dying") + return nil + default: + evts, err := w.getXMLEvents(w.evtConfig, publisherCache, handle, 
500) + if err == windows.ERROR_NO_MORE_ITEMS { + log.Info("No more items") + break OUTER_LOOP + } else if err != nil { + return fmt.Errorf("getXMLEvents failed: %v", err) + } + w.logger.Debugf("Got %d events", len(evts)) + for _, evt := range evts { + w.logger.Tracef("Event: %s", evt) + if w.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": w.name}).Inc() + } + l := types.Line{} + l.Raw = evt + l.Module = w.GetName() + l.Labels = w.config.Labels + l.Time = time.Now() + l.Src = w.name + l.Process = true + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} + } + } + } return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go similarity index 73% rename from pkg/acquisition/modules/wineventlog/wineventlog_test.go rename to pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index ae6cb776909..9afef963669 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -4,7 +4,6 @@ package wineventlogacquisition import ( "context" - "runtime" "testing" "time" @@ -19,9 +18,8 @@ import ( ) func TestBadConfiguration(t *testing.T) { - if runtime.GOOS != "windows" { - t.Skip("Skipping test on non-windows OS") - } + exprhelpers.Init(nil) + tests := []struct { config string expectedErr string @@ -64,9 +62,8 @@ xpath_query: test`, } func TestQueryBuilder(t *testing.T) { - if runtime.GOOS != "windows" { - t.Skip("Skipping test on non-windows OS") - } + exprhelpers.Init(nil) + tests := []struct { config string expectedQuery string @@ -130,10 +127,8 @@ event_level: bla`, } func TestLiveAcquisition(t *testing.T) { + exprhelpers.Init(nil) ctx := context.Background() - if runtime.GOOS != "windows" { - t.Skip("Skipping test on non-windows OS") - } tests := []struct { config string @@ -227,3 +222,82 @@ event_ids: to.Wait() } } + 
+func TestOneShotAcquisition(t *testing.T) { + tests := []struct { + name string + dsn string + expectedCount int + expectedErr string + expectedConfigureErr string + }{ + { + name: "non-existing file", + dsn: `wineventlog://foo.evtx`, + expectedCount: 0, + expectedErr: "The system cannot find the file specified.", + }, + { + name: "empty DSN", + dsn: `wineventlog://`, + expectedCount: 0, + expectedConfigureErr: "empty wineventlog:// DSN", + }, + { + name: "existing file", + dsn: `wineventlog://test_files/Setup.evtx`, + expectedCount: 24, + expectedErr: "", + }, + { + name: "filter on event_id", + dsn: `wineventlog://test_files/Setup.evtx?event_id=2`, + expectedCount: 1, + }, + { + name: "filter on event_id", + dsn: `wineventlog://test_files/Setup.evtx?event_id=2&event_id=3`, + expectedCount: 24, + }, + } + + exprhelpers.Init(nil) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lineCount := 0 + to := &tomb.Tomb{} + c := make(chan types.Event) + f := WinEventLogSource{} + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + + if test.expectedConfigureErr != "" { + assert.Contains(t, err.Error(), test.expectedConfigureErr) + return + } + + require.NoError(t, err) + + go func() { + for { + select { + case <-c: + lineCount++ + case <-to.Dying(): + return + } + } + }() + + err = f.OneShotAcquisition(c, to) + if test.expectedErr != "" { + assert.Contains(t, err.Error(), test.expectedErr) + } else { + require.NoError(t, err) + + time.Sleep(2 * time.Second) + assert.Equal(t, test.expectedCount, lineCount) + } + }) + } +} diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 6b7eb0840e9..9bc991a8f2d 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -129,7 +129,7 @@ func Init(databaseClient *database.Client) error { dataFileRegex = make(map[string][]*regexp.Regexp) dataFileRe2 = make(map[string][]*re2.Regexp) dbClient = 
databaseClient - + XMLCacheInit() return nil } diff --git a/pkg/exprhelpers/xml.go b/pkg/exprhelpers/xml.go index 75758e18316..0b550bdb641 100644 --- a/pkg/exprhelpers/xml.go +++ b/pkg/exprhelpers/xml.go @@ -1,43 +1,103 @@ package exprhelpers import ( + "errors" + "sync" + "time" + "github.com/beevik/etree" + "github.com/bluele/gcache" + "github.com/cespare/xxhash/v2" log "github.com/sirupsen/logrus" ) -var pathCache = make(map[string]etree.Path) +var ( + pathCache = make(map[string]etree.Path) + rwMutex = sync.RWMutex{} + xmlDocumentCache gcache.Cache +) + +func compileOrGetPath(path string) (etree.Path, error) { + rwMutex.RLock() + compiledPath, ok := pathCache[path] + rwMutex.RUnlock() + + if !ok { + var err error + compiledPath, err = etree.CompilePath(path) + if err != nil { + return etree.Path{}, err + } + + rwMutex.Lock() + pathCache[path] = compiledPath + rwMutex.Unlock() + } + + return compiledPath, nil +} + +func getXMLDocumentFromCache(xmlString string) (*etree.Document, error) { + cacheKey := xxhash.Sum64String(xmlString) + cacheObj, err := xmlDocumentCache.Get(cacheKey) + + if err != nil && !errors.Is(err, gcache.KeyNotFoundError) { + return nil, err + } + + doc, ok := cacheObj.(*etree.Document) + if !ok || cacheObj == nil { + doc = etree.NewDocument() + if err := doc.ReadFromString(xmlString); err != nil { + return nil, err + } + if err := xmlDocumentCache.Set(cacheKey, doc); err != nil { + log.Warnf("Could not set XML document in cache: %s", err) + } + } + + return doc, nil +} + +func XMLCacheInit() { + gc := gcache.New(50) + // Short cache expiration because we each line we read is different, but we can call multiple times XML helpers on each of them + gc.Expiration(5 * time.Second) + gc = gc.LRU() + + xmlDocumentCache = gc.Build() +} // func XMLGetAttributeValue(xmlString string, path string, attributeName string) string { func XMLGetAttributeValue(params ...any) (any, error) { xmlString := params[0].(string) path := params[1].(string) 
attributeName := params[2].(string) - if _, ok := pathCache[path]; !ok { - compiledPath, err := etree.CompilePath(path) - if err != nil { - log.Errorf("Could not compile path %s: %s", path, err) - return "", nil - } - pathCache[path] = compiledPath + + compiledPath, err := compileOrGetPath(path) + if err != nil { + log.Errorf("Could not compile path %s: %s", path, err) + return "", nil } - compiledPath := pathCache[path] - doc := etree.NewDocument() - err := doc.ReadFromString(xmlString) + doc, err := getXMLDocumentFromCache(xmlString) if err != nil { log.Tracef("Could not parse XML: %s", err) return "", nil } + elem := doc.FindElementPath(compiledPath) if elem == nil { log.Debugf("Could not find element %s", path) return "", nil } + attr := elem.SelectAttr(attributeName) if attr == nil { log.Debugf("Could not find attribute %s", attributeName) return "", nil } + return attr.Value, nil } @@ -45,26 +105,24 @@ func XMLGetAttributeValue(params ...any) (any, error) { func XMLGetNodeValue(params ...any) (any, error) { xmlString := params[0].(string) path := params[1].(string) - if _, ok := pathCache[path]; !ok { - compiledPath, err := etree.CompilePath(path) - if err != nil { - log.Errorf("Could not compile path %s: %s", path, err) - return "", nil - } - pathCache[path] = compiledPath + + compiledPath, err := compileOrGetPath(path) + if err != nil { + log.Errorf("Could not compile path %s: %s", path, err) + return "", nil } - compiledPath := pathCache[path] - doc := etree.NewDocument() - err := doc.ReadFromString(xmlString) + doc, err := getXMLDocumentFromCache(xmlString) if err != nil { log.Tracef("Could not parse XML: %s", err) return "", nil } + elem := doc.FindElementPath(compiledPath) if elem == nil { log.Debugf("Could not find element %s", path) return "", nil } + return elem.Text(), nil } From 128328bda150c2f2c950b569a158fc902bf90403 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:03:11 +0200 Subject: 
[PATCH 327/581] require go 1.23 (#3298) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- Dockerfile | 2 +- Dockerfile.debian | 2 +- azure-pipelines.yml | 2 +- go.mod | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 473af9b1312..e631c3ebc71 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index 211d856bc34..a94e28b1f97 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index aec707f0c03..a1054463341 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index a089aa53532..ac685bf4e87 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + 
go-version: "1.23" - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index a37aa43e2d0..07e29071e05 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 2715c6590c3..4128cb435f9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index ba283f3890a..2966b999a4a 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3fdfb8a3e82..3f4aa67e139 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,7 +126,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: Run "go generate" and check for changes run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index eeefb801719..6a41c3fba53 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22" + go-version: "1.23" - name: Build the 
binaries run: | diff --git a/Dockerfile b/Dockerfile index 450ea69017f..880df88dc02 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22-alpine3.20 AS build +FROM golang:1.23-alpine3.20 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index 8bf2698c786..5d47f167e99 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22-bookworm AS build +FROM golang:1.23-bookworm AS build ARG BUILD_VERSION diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 6051ca67393..acbcabc20c5 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22' + version: '1.23' - pwsh: | choco install -y make diff --git a/go.mod b/go.mod index f28f21c6eb4..c889b62cb8c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.22 +go 1.23 // Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). 
From d00a6a687345f78add9c05e06bf7b75cbfb28007 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 25 Oct 2024 10:31:08 +0100 Subject: [PATCH 328/581] enhance: Check if resp is nil in capi metrics and continue (#3299) --- pkg/apiserver/apic_metrics.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 3d9e7b28a79..aa8db3f1c85 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -368,9 +368,10 @@ func (a *apic) SendUsageMetrics(ctx context.Context) { if err != nil { log.Errorf("unable to send usage metrics: %s", err) - if resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { + if resp == nil || resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { // In case of 422, mark the metrics as sent anyway, the API did not like what we sent, // and it's unlikely we'll be able to fix it + // also if resp is nil, we should'nt mark the metrics as sent could be network issue continue } } From 9d6ccb0f08b3afd067c9a858e6ff04212f03b7f1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 25 Oct 2024 15:43:03 +0200 Subject: [PATCH 329/581] context propagation: appsec, docker, kafka, k8s datasources (#3284) --- .github/codecov.yml | 164 ++++++++++++++++++ .gitignore | 3 - pkg/acquisition/modules/appsec/appsec.go | 2 +- pkg/acquisition/modules/docker/docker.go | 47 ++--- pkg/acquisition/modules/kafka/kafka.go | 12 +- .../modules/kubernetesaudit/k8s_audit.go | 2 +- 6 files changed, 196 insertions(+), 34 deletions(-) create mode 100644 .github/codecov.yml diff --git a/.github/codecov.yml b/.github/codecov.yml new file mode 100644 index 00000000000..e3a81070324 --- /dev/null +++ b/.github/codecov.yml @@ -0,0 +1,164 @@ +# we measure coverage but don't enforce it +# https://docs.codecov.com/docs/codecov-yaml +codecov: + 
require_ci_to_pass: false + +coverage: + status: + patch: + default: + target: 0% + project: + default: + target: 0% + +# if a directory is ignored, there is no way to un-ignore files like pkg/models/helpers.go +# so we make a full list +ignore: + - "./pkg/modelscapi/success_response.go" + - "./pkg/modelscapi/get_decisions_stream_response_deleted.go" + - "./pkg/modelscapi/login_request.go" + - "./pkg/modelscapi/get_decisions_stream_response_links.go" + - "./pkg/modelscapi/login_response.go" + - "./pkg/modelscapi/add_signals_request_item.go" + - "./pkg/modelscapi/blocklist_link.go" + - "./pkg/modelscapi/get_decisions_stream_response_deleted_item.go" + - "./pkg/modelscapi/decisions_sync_request.go" + - "./pkg/modelscapi/get_decisions_stream_response.go" + - "./pkg/modelscapi/metrics_request_machines_item.go" + - "./pkg/modelscapi/metrics_request.go" + - "./pkg/modelscapi/get_decisions_stream_response_new.go" + - "./pkg/modelscapi/add_signals_request_item_decisions_item.go" + - "./pkg/modelscapi/metrics_request_bouncers_item.go" + - "./pkg/modelscapi/decisions_sync_request_item_decisions_item.go" + - "./pkg/modelscapi/decisions_delete_request_item.go" + - "./pkg/modelscapi/get_decisions_stream_response_new_item.go" + - "./pkg/modelscapi/decisions_sync_request_item.go" + - "./pkg/modelscapi/add_signals_request.go" + - "./pkg/modelscapi/reset_password_request.go" + - "./pkg/modelscapi/add_signals_request_item_decisions.go" + - "./pkg/modelscapi/decisions_sync_request_item_source.go" + - "./pkg/modelscapi/error_response.go" + - "./pkg/modelscapi/decisions_delete_request.go" + - "./pkg/modelscapi/decisions_sync_request_item_decisions.go" + - "./pkg/modelscapi/enroll_request.go" + - "./pkg/modelscapi/register_request.go" + - "./pkg/modelscapi/add_signals_request_item_source.go" + - "./pkg/models/success_response.go" + - "./pkg/models/hub_items.go" + - "./pkg/models/alert.go" + - "./pkg/models/metrics_bouncer_info.go" + - "./pkg/models/add_signals_request_item.go" + - 
"./pkg/models/metrics_meta.go" + - "./pkg/models/metrics_detail_item.go" + - "./pkg/models/add_signals_request_item_decisions_item.go" + - "./pkg/models/hub_item.go" + - "./pkg/models/get_alerts_response.go" + - "./pkg/models/metrics_labels.go" + - "./pkg/models/watcher_auth_request.go" + - "./pkg/models/add_alerts_request.go" + - "./pkg/models/event.go" + - "./pkg/models/decisions_delete_request_item.go" + - "./pkg/models/meta.go" + - "./pkg/models/detailed_metrics.go" + - "./pkg/models/delete_alerts_response.go" + - "./pkg/models/remediation_components_metrics.go" + - "./pkg/models/console_options.go" + - "./pkg/models/topx_response.go" + - "./pkg/models/add_signals_request.go" + - "./pkg/models/delete_decision_response.go" + - "./pkg/models/get_decisions_response.go" + - "./pkg/models/add_signals_request_item_decisions.go" + - "./pkg/models/source.go" + - "./pkg/models/decisions_stream_response.go" + - "./pkg/models/error_response.go" + - "./pkg/models/all_metrics.go" + - "./pkg/models/o_sversion.go" + - "./pkg/models/decision.go" + - "./pkg/models/decisions_delete_request.go" + - "./pkg/models/flush_decision_response.go" + - "./pkg/models/watcher_auth_response.go" + - "./pkg/models/lapi_metrics.go" + - "./pkg/models/watcher_registration_request.go" + - "./pkg/models/metrics_agent_info.go" + - "./pkg/models/log_processors_metrics.go" + - "./pkg/models/add_signals_request_item_source.go" + - "./pkg/models/base_metrics.go" + - "./pkg/models/add_alerts_response.go" + - "./pkg/models/metrics.go" + - "./pkg/protobufs/notifier.pb.go" + - "./pkg/protobufs/notifier_grpc.pb.go" + - "./pkg/database/ent/metric_update.go" + - "./pkg/database/ent/machine_delete.go" + - "./pkg/database/ent/decision_query.go" + - "./pkg/database/ent/meta_query.go" + - "./pkg/database/ent/metric/where.go" + - "./pkg/database/ent/metric/metric.go" + - "./pkg/database/ent/machine_create.go" + - "./pkg/database/ent/alert.go" + - "./pkg/database/ent/event_update.go" + - 
"./pkg/database/ent/alert_create.go" + - "./pkg/database/ent/alert_query.go" + - "./pkg/database/ent/metric_delete.go" + - "./pkg/database/ent/lock_create.go" + - "./pkg/database/ent/bouncer_update.go" + - "./pkg/database/ent/meta_update.go" + - "./pkg/database/ent/decision_create.go" + - "./pkg/database/ent/configitem_update.go" + - "./pkg/database/ent/machine_query.go" + - "./pkg/database/ent/client.go" + - "./pkg/database/ent/predicate/predicate.go" + - "./pkg/database/ent/lock/where.go" + - "./pkg/database/ent/lock/lock.go" + - "./pkg/database/ent/mutation.go" + - "./pkg/database/ent/migrate/migrate.go" + - "./pkg/database/ent/migrate/schema.go" + - "./pkg/database/ent/configitem.go" + - "./pkg/database/ent/metric_query.go" + - "./pkg/database/ent/event.go" + - "./pkg/database/ent/event_query.go" + - "./pkg/database/ent/lock_update.go" + - "./pkg/database/ent/meta.go" + - "./pkg/database/ent/configitem_query.go" + - "./pkg/database/ent/bouncer.go" + - "./pkg/database/ent/alert_update.go" + - "./pkg/database/ent/meta/meta.go" + - "./pkg/database/ent/meta/where.go" + - "./pkg/database/ent/decision_update.go" + - "./pkg/database/ent/alert_delete.go" + - "./pkg/database/ent/lock.go" + - "./pkg/database/ent/runtime/runtime.go" + - "./pkg/database/ent/alert/alert.go" + - "./pkg/database/ent/alert/where.go" + - "./pkg/database/ent/runtime.go" + - "./pkg/database/ent/bouncer/bouncer.go" + - "./pkg/database/ent/bouncer/where.go" + - "./pkg/database/ent/hook/hook.go" + - "./pkg/database/ent/metric.go" + - "./pkg/database/ent/configitem_create.go" + - "./pkg/database/ent/configitem_delete.go" + - "./pkg/database/ent/tx.go" + - "./pkg/database/ent/decision.go" + - "./pkg/database/ent/lock_delete.go" + - "./pkg/database/ent/decision_delete.go" + - "./pkg/database/ent/machine/where.go" + - "./pkg/database/ent/machine/machine.go" + - "./pkg/database/ent/event_create.go" + - "./pkg/database/ent/metric_create.go" + - "./pkg/database/ent/decision/where.go" + - 
"./pkg/database/ent/decision/decision.go" + - "./pkg/database/ent/enttest/enttest.go" + - "./pkg/database/ent/lock_query.go" + - "./pkg/database/ent/bouncer_create.go" + - "./pkg/database/ent/event_delete.go" + - "./pkg/database/ent/bouncer_delete.go" + - "./pkg/database/ent/event/event.go" + - "./pkg/database/ent/event/where.go" + - "./pkg/database/ent/machine.go" + - "./pkg/database/ent/ent.go" + - "./pkg/database/ent/meta_create.go" + - "./pkg/database/ent/bouncer_query.go" + - "./pkg/database/ent/meta_delete.go" + - "./pkg/database/ent/machine_update.go" + - "./pkg/database/ent/configitem/configitem.go" + - "./pkg/database/ent/configitem/where.go" diff --git a/.gitignore b/.gitignore index d76efcbfc48..6e6624fd282 100644 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,3 @@ msi __pycache__ *.py[cod] *.egg-info - -# automatically generated before running codecov -.github/codecov.yml diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 5161b631c33..4ab980ee860 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -294,7 +294,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. 
w.logger.Info("Shutting down Appsec server") // xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) - w.server.Shutdown(context.TODO()) + w.server.Shutdown(ctx) return nil }) return nil diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 874b1556fd5..57ec7c7abda 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -287,8 +287,9 @@ func (d *DockerSource) SupportedModes() []string { // OneShotAcquisition reads a set of file and returns when done func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { + ctx := context.TODO() d.logger.Debug("In oneshot") - runningContainer, err := d.Client.ContainerList(context.Background(), dockerTypes.ContainerListOptions{}) + runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) if err != nil { return err } @@ -298,10 +299,10 @@ func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er d.logger.Debugf("container with id %s is already being read from", container.ID) continue } - if containerConfig := d.EvalContainer(container); containerConfig != nil { + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { d.logger.Infof("reading logs from container %s", containerConfig.Name) d.logger.Debugf("logs options: %+v", *d.containerLogsOptions) - dockerReader, err := d.Client.ContainerLogs(context.Background(), containerConfig.ID, *d.containerLogsOptions) + dockerReader, err := d.Client.ContainerLogs(ctx, containerConfig.ID, *d.containerLogsOptions) if err != nil { d.logger.Errorf("unable to read logs from container: %+v", err) return err @@ -372,26 +373,26 @@ func (d *DockerSource) CanRun() error { return nil } -func (d *DockerSource) getContainerTTY(containerId string) bool { - containerDetails, err := d.Client.ContainerInspect(context.Background(), containerId) +func (d 
*DockerSource) getContainerTTY(ctx context.Context, containerId string) bool { + containerDetails, err := d.Client.ContainerInspect(ctx, containerId) if err != nil { return false } return containerDetails.Config.Tty } -func (d *DockerSource) getContainerLabels(containerId string) map[string]interface{} { - containerDetails, err := d.Client.ContainerInspect(context.Background(), containerId) +func (d *DockerSource) getContainerLabels(ctx context.Context, containerId string) map[string]interface{} { + containerDetails, err := d.Client.ContainerInspect(ctx, containerId) if err != nil { return map[string]interface{}{} } return parseLabels(containerDetails.Config.Labels) } -func (d *DockerSource) EvalContainer(container dockerTypes.Container) *ContainerConfig { +func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes.Container) *ContainerConfig { for _, containerID := range d.Config.ContainerID { if containerID == container.ID { - return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } } @@ -401,27 +402,27 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) *Container name = name[1:] } if name == containerName { - return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } } } for _, cont := range d.compiledContainerID { if matched := cont.MatchString(container.ID); matched { - return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, 
container.ID)} } } for _, cont := range d.compiledContainerName { for _, name := range container.Names { if matched := cont.MatchString(name); matched { - return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } } } if d.Config.UseContainerLabels { - parsedLabels := d.getContainerLabels(container.ID) + parsedLabels := d.getContainerLabels(ctx, container.ID) if len(parsedLabels) == 0 { d.logger.Tracef("container has no 'crowdsec' labels set, ignoring container: %s", container.ID) return nil @@ -458,13 +459,13 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) *Container } d.logger.Errorf("label %s is not a string", k) } - return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(ctx, container.ID)} } return nil } -func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { +func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { ticker := time.NewTicker(d.CheckIntervalDuration) d.logger.Infof("Container watcher started, interval: %s", d.CheckIntervalDuration.String()) for { @@ -475,7 +476,7 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha case <-ticker.C: // to track for garbage collection runningContainersID := make(map[string]bool) - runningContainer, err := d.Client.ContainerList(context.Background(), dockerTypes.ContainerListOptions{}) + runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) if err != nil { if strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon 
at") { for idx, container := range d.runningContainerState { @@ -501,7 +502,7 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha if _, ok := d.runningContainerState[container.ID]; ok { continue } - if containerConfig := d.EvalContainer(container); containerConfig != nil { + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { monitChan <- containerConfig } } @@ -524,10 +525,10 @@ func (d *DockerSource) StreamingAcquisition(ctx context.Context, out chan types. deleteChan := make(chan *ContainerConfig) d.logger.Infof("Starting docker acquisition") t.Go(func() error { - return d.DockerManager(monitChan, deleteChan, out) + return d.DockerManager(ctx, monitChan, deleteChan, out) }) - return d.WatchContainer(monitChan, deleteChan) + return d.WatchContainer(ctx, monitChan, deleteChan) } func (d *DockerSource) Dump() interface{} { @@ -541,9 +542,9 @@ func ReadTailScanner(scanner *bufio.Scanner, out chan string, t *tomb.Tomb) erro return scanner.Err() } -func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types.Event, deleteChan chan *ContainerConfig) error { +func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfig, outChan chan types.Event, deleteChan chan *ContainerConfig) error { container.logger.Infof("start tail for container %s", container.Name) - dockerReader, err := d.Client.ContainerLogs(context.Background(), container.ID, *d.containerLogsOptions) + dockerReader, err := d.Client.ContainerLogs(ctx, container.ID, *d.containerLogsOptions) if err != nil { container.logger.Errorf("unable to read logs from container: %+v", err) return err @@ -601,7 +602,7 @@ func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types } } -func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan *ContainerConfig, outChan chan types.Event) error { +func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConfig, 
deleteChan chan *ContainerConfig, outChan chan types.Event) error { d.logger.Info("DockerSource Manager started") for { select { @@ -610,7 +611,7 @@ func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan * newContainer.t = &tomb.Tomb{} newContainer.logger = d.logger.WithField("container_name", newContainer.Name) newContainer.t.Go(func() error { - return d.TailDocker(newContainer, outChan, deleteChan) + return d.TailDocker(ctx, newContainer, outChan, deleteChan) }) d.runningContainerState[newContainer.ID] = newContainer } diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index 9fd5fc2a035..d08a0ae4e4d 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -147,12 +147,12 @@ func (k *KafkaSource) Dump() interface{} { return k } -func (k *KafkaSource) ReadMessage(out chan types.Event) error { +func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) error { // Start processing from latest Offset - k.Reader.SetOffsetAt(context.Background(), time.Now()) + k.Reader.SetOffsetAt(ctx, time.Now()) for { k.logger.Tracef("reading message from topic '%s'", k.Config.Topic) - m, err := k.Reader.ReadMessage(context.Background()) + m, err := k.Reader.ReadMessage(ctx) if err != nil { if errors.Is(err, io.EOF) { return nil @@ -184,10 +184,10 @@ func (k *KafkaSource) ReadMessage(out chan types.Event) error { } } -func (k *KafkaSource) RunReader(out chan types.Event, t *tomb.Tomb) error { +func (k *KafkaSource) RunReader(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { k.logger.Debugf("starting %s datasource reader goroutine with configuration %+v", dataSourceName, k.Config) t.Go(func() error { - return k.ReadMessage(out) + return k.ReadMessage(ctx, out) }) //nolint //fp for { @@ -207,7 +207,7 @@ func (k *KafkaSource) StreamingAcquisition(ctx context.Context, out chan types.E t.Go(func() error { defer 
trace.CatchPanic("crowdsec/acquis/kafka/live") - return k.RunReader(out, t) + return k.RunReader(ctx, out, t) }) return nil diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index f979b044dcc..0d64345a4a0 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -149,7 +149,7 @@ func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out c }) <-t.Dying() ka.logger.Infof("Stopping k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) - ka.server.Shutdown(context.TODO()) + ka.server.Shutdown(ctx) return nil }) return nil From 3ead7461ed54954191a98afa3525355d920ec7f2 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 28 Oct 2024 10:20:36 +0000 Subject: [PATCH 330/581] enhance: Remove if log check in one instance that was not needed as the logged items are not resource intensive (#3300) --- pkg/apiclient/client_http.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go index 0240618f535..eeca929ea6e 100644 --- a/pkg/apiclient/client_http.go +++ b/pkg/apiclient/client_http.go @@ -61,9 +61,7 @@ func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (* req.Header.Add("User-Agent", c.UserAgent) } - if log.GetLevel() >= log.DebugLevel { - log.Debugf("[URL] %s %s", req.Method, req.URL) - } + log.Debugf("[URL] %s %s", req.Method, req.URL) resp, err := c.client.Do(req) if resp != nil && resp.Body != nil { From 53f9bc562d7fef323ff199271cbbc3b6886832e2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:38:50 +0100 Subject: [PATCH 331/581] context propagation: OneShotAcquisition(); enable contextcheck linter (#3285) * context propagation: OneShotAcquisition(); enable contextcheck linter --- .golangci.yml | 1 - 
pkg/acquisition/acquisition.go | 4 ++-- pkg/acquisition/acquisition_test.go | 18 ++++++++++------ pkg/acquisition/modules/appsec/appsec.go | 2 +- .../modules/cloudwatch/cloudwatch.go | 21 ++++++++----------- .../modules/cloudwatch/cloudwatch_test.go | 6 ++++-- pkg/acquisition/modules/docker/docker.go | 3 +-- pkg/acquisition/modules/docker/docker_test.go | 4 +++- pkg/acquisition/modules/file/file.go | 2 +- pkg/acquisition/modules/file/file_test.go | 4 +++- .../modules/journalctl/journalctl.go | 10 ++++----- .../modules/journalctl/journalctl_test.go | 4 +++- pkg/acquisition/modules/kafka/kafka.go | 2 +- pkg/acquisition/modules/kinesis/kinesis.go | 2 +- .../modules/kubernetesaudit/k8s_audit.go | 2 +- pkg/acquisition/modules/loki/loki.go | 6 +++--- pkg/acquisition/modules/loki/loki_test.go | 6 +++--- pkg/acquisition/modules/s3/s3.go | 4 ++-- pkg/acquisition/modules/s3/s3_test.go | 3 ++- pkg/acquisition/modules/syslog/syslog.go | 2 +- .../modules/wineventlog/wineventlog.go | 2 +- .../wineventlog/wineventlog_windows.go | 9 ++++---- .../wineventlog/wineventlog_windows_test.go | 4 ++-- 23 files changed, 65 insertions(+), 56 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 4909d3e60c0..271e3a57d34 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -254,7 +254,6 @@ linters: # - containedctx # containedctx is a linter that detects struct contained context.Context field - - contextcheck # check whether the function uses a non-inherited context - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - ireturn # Accept Interfaces, Return Concrete Types - mnd # An analyzer to detect magic numbers. 
diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 4519ea7392b..1ad385105d3 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -47,7 +47,7 @@ type DataSource interface { ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource GetMode() string // Get the mode (TAIL, CAT or SERVER) GetName() string // Get the name of the module - OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file) + OneShotAcquisition(context.Context, chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file) CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense) GetUuid() string // Get the unique identifier of the datasource @@ -433,7 +433,7 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ if subsrc.GetMode() == configuration.TAIL_MODE { err = subsrc.StreamingAcquisition(ctx, outChan, AcquisTomb) } else { - err = subsrc.OneShotAcquisition(outChan, AcquisTomb) + err = subsrc.OneShotAcquisition(ctx, outChan, AcquisTomb) } if err != nil { diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index e82b3df54c2..dd70172cf62 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -57,8 +57,11 @@ func (f *MockSource) Configure(cfg []byte, logger *log.Entry, metricsLevel int) return nil } -func (f *MockSource) GetMode() string { return f.Mode } -func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } +func (f *MockSource) GetMode() string { return f.Mode } +func (f *MockSource) OneShotAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} + func (f *MockSource) StreamingAcquisition(context.Context, chan 
types.Event, *tomb.Tomb) error { return nil } @@ -320,7 +323,7 @@ func (f *MockCat) Configure(cfg []byte, logger *log.Entry, metricsLevel int) err func (f *MockCat) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockCat) GetName() string { return "mock_cat" } func (f *MockCat) GetMode() string { return "cat" } -func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { +func (f *MockCat) OneShotAcquisition(ctx context.Context, out chan types.Event, tomb *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" @@ -365,7 +368,7 @@ func (f *MockTail) Configure(cfg []byte, logger *log.Entry, metricsLevel int) er func (f *MockTail) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockTail) GetName() string { return "mock_tail" } func (f *MockTail) GetMode() string { return "tail" } -func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { +func (f *MockTail) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("can't run in cat mode") } @@ -507,8 +510,11 @@ func (f *MockSourceByDSN) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { return nil } -func (f *MockSourceByDSN) GetMode() string { return f.Mode } -func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } +func (f *MockSourceByDSN) GetMode() string { return f.Mode } +func (f *MockSourceByDSN) OneShotAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} + func (f *MockSourceByDSN) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { return nil } diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 4ab980ee860..a6dcffe89a2 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -237,7 +237,7 @@ func (w 
*AppsecSource) GetName() string { return "appsec" } -func (w *AppsecSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *AppsecSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("AppSec datasource does not support command line acquisition") } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index e4b6c95d77f..2df70b3312b 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -259,10 +259,10 @@ func (cw *CloudwatchSource) StreamingAcquisition(ctx context.Context, out chan t monitChan := make(chan LogStreamTailConfig) t.Go(func() error { - return cw.LogStreamManager(monitChan, out) + return cw.LogStreamManager(ctx, monitChan, out) }) - return cw.WatchLogGroupForStreams(monitChan) + return cw.WatchLogGroupForStreams(ctx, monitChan) } func (cw *CloudwatchSource) GetMetrics() []prometheus.Collector { @@ -289,7 +289,7 @@ func (cw *CloudwatchSource) Dump() interface{} { return cw } -func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig) error { +func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out chan LogStreamTailConfig) error { cw.logger.Debugf("Starting to watch group (interval:%s)", cw.Config.PollNewStreamInterval) ticker := time.NewTicker(*cw.Config.PollNewStreamInterval) @@ -307,7 +307,6 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig for hasMoreStreams { cw.logger.Tracef("doing the call to DescribeLogStreamsPagesWithContext") - ctx := context.Background() // there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime err := cw.cwClient.DescribeLogStreamsPagesWithContext( ctx, @@ -372,7 +371,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig } // LogStreamManager receives 
the potential streams to monitor, and starts a go routine when needed -func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outChan chan types.Event) error { +func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStreamTailConfig, outChan chan types.Event) error { cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName) pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval) @@ -422,7 +421,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha newStream.logger = cw.logger.WithField("stream", newStream.StreamName) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) newStream.t.Go(func() error { - return cw.TailLogStream(&newStream, outChan) + return cw.TailLogStream(ctx, &newStream, outChan) }) cw.monitoredStreams = append(cw.monitoredStreams, &newStream) } @@ -457,7 +456,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha } } -func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error { +func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string lastReadMessage := time.Now().UTC() ticker := time.NewTicker(cfg.PollStreamInterval) @@ -479,7 +478,6 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan for hasMorePages { /*for the first call, we only consume the last item*/ cfg.logger.Tracef("calling GetLogEventsPagesWithContext") - ctx := context.Background() err := cw.cwClient.GetLogEventsPagesWithContext(ctx, &cloudwatchlogs.GetLogEventsInput{ Limit: aws.Int64(cfg.GetLogEventsPagesLimit), @@ -633,7 +631,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, return nil } -func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (cw *CloudwatchSource) OneShotAcquisition(ctx 
context.Context, out chan types.Event, t *tomb.Tomb) error { // StreamName string, Start time.Time, End time.Time config := LogStreamTailConfig{ GroupName: cw.Config.GroupName, @@ -648,10 +646,10 @@ func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tom Labels: cw.Config.Labels, ExpectMode: types.TIMEMACHINE, } - return cw.CatLogStream(&config, out) + return cw.CatLogStream(ctx, &config, out) } -func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error { +func (cw *CloudwatchSource) CatLogStream(ctx context.Context, cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string head := true /*convert the times*/ @@ -667,7 +665,6 @@ func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan if startFrom != nil { cfg.logger.Tracef("next_token: %s", *startFrom) } - ctx := context.Background() err := cw.cwClient.GetLogEventsPagesWithContext(ctx, &cloudwatchlogs.GetLogEventsInput{ Limit: aws.Int64(10), diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index d62c3f6e3dd..3d638896537 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -579,7 +579,7 @@ stream_name: test_stream`), case "tail": err = cw.StreamingAcquisition(ctx, out, &tmb) case "cat": - err = cw.OneShotAcquisition(out, &tmb) + err = cw.OneShotAcquisition(ctx, out, &tmb) } cstest.RequireErrorContains(t, err, tc.expectedStartErr) @@ -637,6 +637,8 @@ func TestConfigureByDSN(t *testing.T) { } func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -768,7 +770,7 @@ func TestOneShotAcquisition(t *testing.T) { var rcvdEvts []types.Event dbgLogger.Infof("running StreamingAcquisition") - err = cw.OneShotAcquisition(out, &tmb) + err = cw.OneShotAcquisition(ctx, out, &tmb) 
dbgLogger.Infof("acquis done") cstest.RequireErrorContains(t, err, tc.expectedStartErr) close(out) diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 57ec7c7abda..2f79d4dcee6 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -286,8 +286,7 @@ func (d *DockerSource) SupportedModes() []string { } // OneShotAcquisition reads a set of file and returns when done -func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - ctx := context.TODO() +func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { d.logger.Debug("In oneshot") runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) if err != nil { diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index e394c9cbe79..5d8208637e8 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -267,6 +267,8 @@ func (cli *mockDockerCli) ContainerInspect(ctx context.Context, c string) (docke } func TestOneShot(t *testing.T) { + ctx := context.Background() + log.Infof("Test 'TestOneShot'") tests := []struct { @@ -321,7 +323,7 @@ func TestOneShot(t *testing.T) { dockerClient.Client = new(mockDockerCli) out := make(chan types.Event, 100) tomb := tomb.Tomb{} - err := dockerClient.OneShotAcquisition(out, &tomb) + err := dockerClient.OneShotAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) // else we do the check before actualLines is incremented ... 
diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 2d2df3ff4d4..f752d04aada 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -280,7 +280,7 @@ func (f *FileSource) SupportedModes() []string { } // OneShotAcquisition reads a set of file and returns when done -func (f *FileSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (f *FileSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { f.logger.Debug("In oneshot") for _, file := range f.files { diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 3db0042ba2f..a26e44cc9c7 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -101,6 +101,8 @@ func TestConfigureDSN(t *testing.T) { } func TestOneShot(t *testing.T) { + ctx := context.Background() + permDeniedFile := "/etc/shadow" permDeniedError := "failed opening /etc/shadow: open /etc/shadow: permission denied" @@ -224,7 +226,7 @@ filename: test_files/test_delete.log`, if tc.afterConfigure != nil { tc.afterConfigure() } - err = f.OneShotAcquisition(out, &tomb) + err = f.OneShotAcquisition(ctx, out, &tomb) actualLines := len(out) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index b9cda54a472..e7a35d5a3ba 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -65,8 +65,8 @@ func readLine(scanner *bufio.Scanner, out chan string, errChan chan error) error return nil } -func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) error { - ctx, cancel := context.WithCancel(context.Background()) +func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + ctx, cancel := 
context.WithCancel(ctx) cmd := exec.CommandContext(ctx, journalctlCmd, j.args...) stdout, err := cmd.StdoutPipe() @@ -262,9 +262,9 @@ func (j *JournalCtlSource) GetName() string { return "journalctl" } -func (j *JournalCtlSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (j *JournalCtlSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { defer trace.CatchPanic("crowdsec/acquis/journalctl/oneshot") - err := j.runJournalCtl(out, t) + err := j.runJournalCtl(ctx, out, t) j.logger.Debug("Oneshot journalctl acquisition is done") return err } @@ -272,7 +272,7 @@ func (j *JournalCtlSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb func (j *JournalCtlSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/journalctl/streaming") - return j.runJournalCtl(out, t) + return j.runJournalCtl(ctx, out, t) }) return nil } diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index c416bb5d23e..687067c1881 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -107,6 +107,8 @@ func TestConfigureDSN(t *testing.T) { } func TestOneShot(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -165,7 +167,7 @@ journalctl_filter: t.Fatalf("Unexpected error : %s", err) } - err = j.OneShotAcquisition(out, &tomb) + err = j.OneShotAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) if err != nil { diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index d08a0ae4e4d..a9a5e13e958 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -127,7 +127,7 @@ func (k *KafkaSource) GetName() string { return 
dataSourceName } -func (k *KafkaSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KafkaSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return fmt.Errorf("%s datasource does not support one-shot acquisition", dataSourceName) } diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index ca3a847dbfb..3cfc224aa25 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -182,7 +182,7 @@ func (k *KinesisSource) GetName() string { return "kinesis" } -func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KinesisSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("kinesis datasource does not support one-shot acquisition") } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 0d64345a4a0..30fc5c467ea 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -131,7 +131,7 @@ func (ka *KubernetesAuditSource) GetName() string { return "k8s-audit" } -func (ka *KubernetesAuditSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (ka *KubernetesAuditSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("k8s-audit datasource does not support one-shot acquisition") } diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index f867feeb84b..e39c76af22c 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -261,17 +261,17 @@ func (l *LokiSource) GetName() string { } // OneShotAcquisition reads a set of file and returns when done -func (l *LokiSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (l *LokiSource) 
OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { l.logger.Debug("Loki one shot acquisition") l.Client.SetTomb(t) - readyCtx, cancel := context.WithTimeout(context.Background(), l.Config.WaitForReady) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) defer cancel() err := l.Client.Ready(readyCtx) if err != nil { return fmt.Errorf("loki is not ready: %w", err) } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(ctx) c := l.Client.QueryRange(ctx, false) for { diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 627200217f5..cacdda32d80 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -312,6 +312,8 @@ func feedLoki(ctx context.Context, logger *log.Entry, n int, title string) error } func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -346,8 +348,6 @@ since: 1h t.Fatalf("Unexpected error : %s", err) } - ctx := context.Background() - err = feedLoki(ctx, subLogger, 20, title) if err != nil { t.Fatalf("Unexpected error : %s", err) @@ -366,7 +366,7 @@ since: 1h lokiTomb := tomb.Tomb{} - err = lokiSource.OneShotAcquisition(out, &lokiTomb) + err = lokiSource.OneShotAcquisition(ctx, out, &lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index ed1964edebf..acd78ceba8f 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -643,10 +643,10 @@ func (s *S3Source) GetName() string { return "s3" } -func (s *S3Source) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *S3Source) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { s.logger.Infof("starting acquisition of %s/%s/%s", s.Config.BucketName, 
s.Config.Prefix, s.Config.Key) s.out = out - s.ctx, s.cancel = context.WithCancel(context.Background()) + s.ctx, s.cancel = context.WithCancel(ctx) s.Config.UseTimeMachine = true s.t = t if s.Config.Key != "" { diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index 05a974517a0..367048aa33a 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -208,6 +208,7 @@ func (msqs mockSQSClientNotif) DeleteMessage(input *sqs.DeleteMessageInput) (*sq } func TestDSNAcquis(t *testing.T) { + ctx := context.Background() tests := []struct { name string dsn string @@ -260,7 +261,7 @@ func TestDSNAcquis(t *testing.T) { f.s3Client = mockS3Client{} tmb := tomb.Tomb{} - err = f.OneShotAcquisition(out, &tmb) + err = f.OneShotAcquisition(ctx, out, &tmb) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 5315096fb9b..33a2f1542db 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -84,7 +84,7 @@ func (s *SyslogSource) ConfigureByDSN(dsn string, labels map[string]string, logg return errors.New("syslog datasource does not support one shot acquisition") } -func (s *SyslogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *SyslogSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("syslog datasource does not support one shot acquisition") } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog.go b/pkg/acquisition/modules/wineventlog/wineventlog.go index 6d522d8d8cb..3023a371576 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog.go @@ -40,7 +40,7 @@ func (w *WinEventLogSource) SupportedModes() []string { return []string{configuration.TAIL_MODE, configuration.CAT_MODE} } -func (w 
*WinEventLogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index ca40363155b..887be8b7dd3 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -83,7 +83,7 @@ func logLevelToInt(logLevel string) ([]string, error) { // This is lifted from winops/winlog, but we only want to render the basic XML string, we don't need the extra fluff func (w *WinEventLogSource) getXMLEvents(config *winlog.SubscribeConfig, publisherCache map[string]windows.Handle, resultSet windows.Handle, maxEvents int) ([]string, error) { - var events = make([]windows.Handle, maxEvents) + events := make([]windows.Handle, maxEvents) var returned uint32 // Get handles to events from the result set. 
@@ -362,7 +362,7 @@ func (w *WinEventLogSource) ConfigureByDSN(dsn string, labels map[string]string, var err error - //FIXME: handle custom xpath query + // FIXME: handle custom xpath query w.query, err = w.buildXpathQuery() if err != nil { @@ -388,10 +388,8 @@ func (w *WinEventLogSource) SupportedModes() []string { return []string{configuration.TAIL_MODE, configuration.CAT_MODE} } -func (w *WinEventLogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - +func (w *WinEventLogSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { handle, err := wevtapi.EvtQuery(localMachine, w.evtConfig.ChannelPath, w.evtConfig.Query, w.evtConfig.Flags) - if err != nil { return fmt.Errorf("EvtQuery failed: %v", err) } @@ -436,6 +434,7 @@ OUTER_LOOP: } } } + return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index 9afef963669..2f6fe15450f 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -177,7 +177,6 @@ event_ids: subLogger := log.WithField("type", "windowseventlog") evthandler, err := eventlog.Open("Application") - if err != nil { t.Fatalf("failed to open event log: %s", err) } @@ -224,6 +223,7 @@ event_ids: } func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() tests := []struct { name string dsn string @@ -289,7 +289,7 @@ func TestOneShotAcquisition(t *testing.T) { } }() - err = f.OneShotAcquisition(c, to) + err = f.OneShotAcquisition(ctx, c, to) if test.expectedErr != "" { assert.Contains(t, err.Error(), test.expectedErr) } else { From 96d4da76b944b0883e297e88f4c6f3f7778f3a2d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:33:44 +0100 Subject: [PATCH 332/581] remove unused code: HandleDeletedDecisions() (#3301) --- pkg/apiserver/apic.go | 31 
------------------------------- pkg/apiserver/apic_test.go | 38 -------------------------------------- 2 files changed, 69 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index a2fb0e85749..fff0ebcacbf 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -423,37 +423,6 @@ func (a *apic) CAPIPullIsOld(ctx context.Context) (bool, error) { return true, nil } -func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, deleteCounters map[string]map[string]int) (int, error) { - ctx := context.TODO() - nbDeleted := 0 - - for _, decision := range deletedDecisions { - filter := map[string][]string{ - "value": {*decision.Value}, - "origin": {*decision.Origin}, - } - if strings.ToLower(*decision.Scope) != "ip" { - filter["type"] = []string{*decision.Type} - filter["scopes"] = []string{*decision.Scope} - } - - dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(ctx, filter) - if err != nil { - return 0, fmt.Errorf("expiring decisions error: %w", err) - } - - dbCliDel, err := strconv.Atoi(dbCliRet) - if err != nil { - return 0, fmt.Errorf("converting db ret %d: %w", dbCliDel, err) - } - - updateCounterForDecision(deleteCounters, decision.Origin, decision.Scenario, dbCliDel) - nbDeleted += dbCliDel - } - - return nbDeleted, nil -} - func (a *apic) HandleDeletedDecisionsV3(ctx context.Context, deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { var nbDeleted int diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index b52dc9e44cc..99fee6e32bf 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -255,44 +255,6 @@ func TestNewAPIC(t *testing.T) { } } -func TestAPICHandleDeletedDecisions(t *testing.T) { - ctx := context.Background() - api := getAPIC(t, ctx) - _, deleteCounters := makeAddAndDeleteCounters() - - decision1 := api.dbClient.Ent.Decision.Create(). - SetUntil(time.Now().Add(time.Hour)). 
- SetScenario("crowdsec/test"). - SetType("ban"). - SetScope("IP"). - SetValue("1.2.3.4"). - SetOrigin(types.CAPIOrigin). - SaveX(context.Background()) - - api.dbClient.Ent.Decision.Create(). - SetUntil(time.Now().Add(time.Hour)). - SetScenario("crowdsec/test"). - SetType("ban"). - SetScope("IP"). - SetValue("1.2.3.4"). - SetOrigin(types.CAPIOrigin). - SaveX(context.Background()) - - assertTotalDecisionCount(t, ctx, api.dbClient, 2) - - nbDeleted, err := api.HandleDeletedDecisions([]*models.Decision{{ - Value: ptr.Of("1.2.3.4"), - Origin: ptr.Of(types.CAPIOrigin), - Type: &decision1.Type, - Scenario: ptr.Of("crowdsec/test"), - Scope: ptr.Of("IP"), - }}, deleteCounters) - - require.NoError(t, err) - assert.Equal(t, 2, nbDeleted) - assert.Equal(t, 2, deleteCounters[types.CAPIOrigin]["all"]) -} - func TestAPICGetMetrics(t *testing.T) { ctx := context.Background() From 97c1f60a681c1d8fa7b28f762a9aa68194052408 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 29 Oct 2024 09:56:51 +0100 Subject: [PATCH 333/581] make: remove obsolete/redundant parameters (#3304) * makefile: remove -w from build flags (implied by -s) * remove reference to obsolete build param --- Makefile | 2 +- debian/rules | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index bbfa4bbee94..29a84d5b066 100644 --- a/Makefile +++ b/Makefile @@ -111,7 +111,7 @@ ifeq ($(call bool,$(DEBUG)),1) STRIP_SYMBOLS := DISABLE_OPTIMIZATION := -gcflags "-N -l" else -STRIP_SYMBOLS := -s -w +STRIP_SYMBOLS := -s DISABLE_OPTIMIZATION := endif diff --git a/debian/rules b/debian/rules index c11771282ea..5b8d6fc51f8 100755 --- a/debian/rules +++ b/debian/rules @@ -13,7 +13,7 @@ override_dh_auto_build: override_dh_auto_install: # just use the prebuilt binaries, otherwise: - # make build BUILD_RE_WASM=0 BUILD_STATIC=1 + # make build BUILD_STATIC=1 mkdir -p debian/crowdsec/usr/bin mkdir -p debian/crowdsec/etc/crowdsec From 
92662ed4615e6c48d211e016ed34dfffd845cc95 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 30 Oct 2024 10:59:20 +0100 Subject: [PATCH 334/581] refact cscli: decisions, lapi, bouncers, machines (#3306) * refact "cscli decisions import" * cobra.ExactArgs(0) -> cobra.NoArgs * refact cscli bouncers * refact cscli machines * refact "cscli lapi" * lint --- cmd/crowdsec-cli/clialert/alerts.go | 4 +- cmd/crowdsec-cli/clibouncer/add.go | 72 +++ cmd/crowdsec-cli/clibouncer/bouncers.go | 364 ----------- cmd/crowdsec-cli/clibouncer/delete.go | 51 ++ cmd/crowdsec-cli/clibouncer/inspect.go | 98 +++ cmd/crowdsec-cli/clibouncer/list.go | 117 ++++ cmd/crowdsec-cli/clibouncer/prune.go | 85 +++ cmd/crowdsec-cli/clidecision/decisions.go | 4 +- .../{decisions_import.go => import.go} | 116 ++-- .../{decisions_table.go => table.go} | 3 +- cmd/crowdsec-cli/cliexplain/explain.go | 4 +- cmd/crowdsec-cli/clihub/hub.go | 10 +- cmd/crowdsec-cli/clihubtest/hubtest.go | 2 +- cmd/crowdsec-cli/clilapi/context.go | 395 ++++++++++++ cmd/crowdsec-cli/clilapi/lapi.go | 594 ------------------ cmd/crowdsec-cli/clilapi/register.go | 117 ++++ cmd/crowdsec-cli/clilapi/status.go | 115 ++++ .../clilapi/{lapi_test.go => status_test.go} | 0 cmd/crowdsec-cli/climachine/add.go | 152 +++++ cmd/crowdsec-cli/climachine/delete.go | 52 ++ cmd/crowdsec-cli/climachine/inspect.go | 184 ++++++ cmd/crowdsec-cli/climachine/list.go | 137 ++++ cmd/crowdsec-cli/climachine/machines.go | 585 ----------------- cmd/crowdsec-cli/climachine/prune.go | 96 +++ cmd/crowdsec-cli/climachine/validate.go | 35 ++ cmd/crowdsec-cli/climetrics/list.go | 2 +- cmd/crowdsec-cli/climetrics/metrics.go | 2 +- .../clinotifications/notifications.go | 2 +- cmd/crowdsec-cli/config.go | 2 +- cmd/crowdsec-cli/config_feature_flags.go | 2 +- cmd/crowdsec-cli/config_show.go | 2 +- cmd/crowdsec-cli/config_showyaml.go | 2 +- cmd/crowdsec-cli/dashboard.go | 10 +- test/bats/90_decisions.bats | 8 +- 34 files changed, 1781 
insertions(+), 1643 deletions(-) create mode 100644 cmd/crowdsec-cli/clibouncer/add.go create mode 100644 cmd/crowdsec-cli/clibouncer/delete.go create mode 100644 cmd/crowdsec-cli/clibouncer/inspect.go create mode 100644 cmd/crowdsec-cli/clibouncer/list.go create mode 100644 cmd/crowdsec-cli/clibouncer/prune.go rename cmd/crowdsec-cli/clidecision/{decisions_import.go => import.go} (71%) rename cmd/crowdsec-cli/clidecision/{decisions_table.go => table.go} (93%) create mode 100644 cmd/crowdsec-cli/clilapi/context.go create mode 100644 cmd/crowdsec-cli/clilapi/register.go create mode 100644 cmd/crowdsec-cli/clilapi/status.go rename cmd/crowdsec-cli/clilapi/{lapi_test.go => status_test.go} (100%) create mode 100644 cmd/crowdsec-cli/climachine/add.go create mode 100644 cmd/crowdsec-cli/climachine/delete.go create mode 100644 cmd/crowdsec-cli/climachine/inspect.go create mode 100644 cmd/crowdsec-cli/climachine/list.go create mode 100644 cmd/crowdsec-cli/climachine/prune.go create mode 100644 cmd/crowdsec-cli/climachine/validate.go diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 75454e945f2..5907d4a0fa8 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -36,7 +36,7 @@ func decisionsFromAlert(alert *models.Alert) string { for _, decision := range alert.Decisions { k := *decision.Type if *decision.Simulated { - k = fmt.Sprintf("(simul)%s", k) + k = "(simul)" + k } v := decMap[k] @@ -465,7 +465,7 @@ cscli alerts delete --range 1.2.3.0/24 cscli alerts delete -s crowdsecurity/ssh-bf"`, DisableAutoGenTag: true, Aliases: []string{"remove"}, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, PreRunE: func(cmd *cobra.Command, _ []string) error { if deleteAll { return nil diff --git a/cmd/crowdsec-cli/clibouncer/add.go b/cmd/crowdsec-cli/clibouncer/add.go new file mode 100644 index 00000000000..8c40507a996 --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/add.go @@ -0,0 +1,72 @@ +package clibouncer 
+ +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/spf13/cobra" + + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func (cli *cliBouncers) add(ctx context.Context, bouncerName string, key string) error { + var err error + + keyLength := 32 + + if key == "" { + key, err = middlewares.GenerateAPIKey(keyLength) + if err != nil { + return fmt.Errorf("unable to generate api key: %w", err) + } + } + + _, err = cli.db.CreateBouncer(ctx, bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) + if err != nil { + return fmt.Errorf("unable to create bouncer: %w", err) + } + + switch cli.cfg().Cscli.Output { + case "human": + fmt.Printf("API key for '%s':\n\n", bouncerName) + fmt.Printf(" %s\n\n", key) + fmt.Print("Please keep this key since you will not be able to retrieve it!\n") + case "raw": + fmt.Print(key) + case "json": + j, err := json.Marshal(key) + if err != nil { + return errors.New("unable to serialize api key") + } + + fmt.Print(string(j)) + } + + return nil +} + +func (cli *cliBouncers) newAddCmd() *cobra.Command { + var key string + + cmd := &cobra.Command{ + Use: "add MyBouncerName", + Short: "add a single bouncer to the database", + Example: `cscli bouncers add MyBouncerName +cscli bouncers add MyBouncerName --key `, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.add(cmd.Context(), args[0], key) + }, + } + + flags := cmd.Flags() + flags.StringP("length", "l", "", "length of the api key") + _ = flags.MarkDeprecated("length", "use --key instead") + flags.StringVarP(&key, "key", "k", "", "api key for the bouncer") + + return cmd +} diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 226fbb7e922..876b613be53 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -1,33 +1,17 
@@ package clibouncer import ( - "context" - "encoding/csv" - "encoding/json" - "errors" - "fmt" - "io" - "os" "slices" "strings" "time" - "github.com/fatih/color" - "github.com/jedib0t/go-pretty/v6/table" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" - "github.com/crowdsecurity/crowdsec/pkg/emoji" - "github.com/crowdsecurity/crowdsec/pkg/types" ) type configGetter = func() *csconfig.Config @@ -80,27 +64,6 @@ Note: This command requires database direct access, so is intended to be run on return cmd } -func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { - t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer - t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) - - for _, b := range bouncers { - revoked := emoji.CheckMark - if b.Revoked { - revoked = emoji.Prohibited - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) - } - - io.WriteString(out, t.Render()+"\n") -} - // bouncerInfo contains only the data we want for inspect/list type bouncerInfo struct { CreatedAt time.Time `json:"created_at"` @@ -132,141 +95,6 @@ func newBouncerInfo(b *ent.Bouncer) bouncerInfo { } } -func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { - csvwriter := csv.NewWriter(out) - - if err := 
csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { - return fmt.Errorf("failed to write raw header: %w", err) - } - - for _, b := range bouncers { - valid := "validated" - if b.Revoked { - valid = "pending" - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { - return fmt.Errorf("failed to write raw: %w", err) - } - } - - csvwriter.Flush() - - return nil -} - -func (cli *cliBouncers) List(ctx context.Context, out io.Writer, db *database.Client) error { - // XXX: must use the provided db object, the one in the struct might be nil - // (calling List directly skips the PersistentPreRunE) - - bouncers, err := db.ListBouncers(ctx) - if err != nil { - return fmt.Errorf("unable to list bouncers: %w", err) - } - - switch cli.cfg().Cscli.Output { - case "human": - cli.listHuman(out, bouncers) - case "json": - info := make([]bouncerInfo, 0, len(bouncers)) - for _, b := range bouncers { - info = append(info, newBouncerInfo(b)) - } - - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(info); err != nil { - return errors.New("failed to serialize") - } - - return nil - case "raw": - return cli.listCSV(out, bouncers) - } - - return nil -} - -func (cli *cliBouncers) newListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "list all bouncers within the database", - Example: `cscli bouncers list`, - Args: cobra.ExactArgs(0), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, _ []string) error { - return cli.List(cmd.Context(), color.Output, cli.db) - }, - } - - return cmd -} - -func (cli *cliBouncers) add(ctx context.Context, bouncerName string, key string) error { - var err error - - keyLength := 32 - - if key == "" { - key, err = middlewares.GenerateAPIKey(keyLength) - if err != nil { - return 
fmt.Errorf("unable to generate api key: %w", err) - } - } - - _, err = cli.db.CreateBouncer(ctx, bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) - if err != nil { - return fmt.Errorf("unable to create bouncer: %w", err) - } - - switch cli.cfg().Cscli.Output { - case "human": - fmt.Printf("API key for '%s':\n\n", bouncerName) - fmt.Printf(" %s\n\n", key) - fmt.Print("Please keep this key since you will not be able to retrieve it!\n") - case "raw": - fmt.Print(key) - case "json": - j, err := json.Marshal(key) - if err != nil { - return errors.New("unable to serialize api key") - } - - fmt.Print(string(j)) - } - - return nil -} - -func (cli *cliBouncers) newAddCmd() *cobra.Command { - var key string - - cmd := &cobra.Command{ - Use: "add MyBouncerName", - Short: "add a single bouncer to the database", - Example: `cscli bouncers add MyBouncerName -cscli bouncers add MyBouncerName --key `, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.add(cmd.Context(), args[0], key) - }, - } - - flags := cmd.Flags() - flags.StringP("length", "l", "", "length of the api key") - _ = flags.MarkDeprecated("length", "use --key instead") - flags.StringVarP(&key, "key", "k", "", "api key for the bouncer") - - return cmd -} - // validBouncerID returns a list of bouncer IDs for command completion func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var err error @@ -303,195 +131,3 @@ func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } - -func (cli *cliBouncers) delete(ctx context.Context, bouncers []string, ignoreMissing bool) error { - for _, bouncerID := range bouncers { - if err := cli.db.DeleteBouncer(ctx, bouncerID); err != nil { - var notFoundErr *database.BouncerNotFoundError - if ignoreMissing && errors.As(err, ¬FoundErr) { - return nil 
- } - - return fmt.Errorf("unable to delete bouncer: %w", err) - } - - log.Infof("bouncer '%s' deleted successfully", bouncerID) - } - - return nil -} - -func (cli *cliBouncers) newDeleteCmd() *cobra.Command { - var ignoreMissing bool - - cmd := &cobra.Command{ - Use: "delete MyBouncerName", - Short: "delete bouncer(s) from the database", - Example: `cscli bouncers delete "bouncer1" "bouncer2"`, - Args: cobra.MinimumNArgs(1), - Aliases: []string{"remove"}, - DisableAutoGenTag: true, - ValidArgsFunction: cli.validBouncerID, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.delete(cmd.Context(), args, ignoreMissing) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more bouncers don't exist") - - return cmd -} - -func (cli *cliBouncers) prune(ctx context.Context, duration time.Duration, force bool) error { - if duration < 2*time.Minute { - if yes, err := ask.YesNo( - "The duration you provided is less than 2 minutes. "+ - "This may remove active bouncers. Continue?", false); err != nil { - return err - } else if !yes { - fmt.Println("User aborted prune. No changes were made.") - return nil - } - } - - bouncers, err := cli.db.QueryBouncersInactiveSince(ctx, time.Now().UTC().Add(-duration)) - if err != nil { - return fmt.Errorf("unable to query bouncers: %w", err) - } - - if len(bouncers) == 0 { - fmt.Println("No bouncers to prune.") - return nil - } - - cli.listHuman(color.Output, bouncers) - - if !force { - if yes, err := ask.YesNo( - "You are about to PERMANENTLY remove the above bouncers from the database. "+ - "These will NOT be recoverable. Continue?", false); err != nil { - return err - } else if !yes { - fmt.Println("User aborted prune. 
No changes were made.") - return nil - } - } - - deleted, err := cli.db.BulkDeleteBouncers(ctx, bouncers) - if err != nil { - return fmt.Errorf("unable to prune bouncers: %w", err) - } - - fmt.Fprintf(os.Stderr, "Successfully deleted %d bouncers\n", deleted) - - return nil -} - -func (cli *cliBouncers) newPruneCmd() *cobra.Command { - var ( - duration time.Duration - force bool - ) - - const defaultDuration = 60 * time.Minute - - cmd := &cobra.Command{ - Use: "prune", - Short: "prune multiple bouncers from the database", - Args: cobra.NoArgs, - DisableAutoGenTag: true, - Example: `cscli bouncers prune -d 45m -cscli bouncers prune -d 45m --force`, - RunE: func(cmd *cobra.Command, _ []string) error { - return cli.prune(cmd.Context(), duration, force) - }, - } - - flags := cmd.Flags() - flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since last pull") - flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") - - return cmd -} - -func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { - t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer - - t.SetTitle("Bouncer: " + bouncer.Name) - - t.SetColumnConfigs([]table.ColumnConfig{ - {Number: 1, AutoMerge: true}, - }) - - lastPull := "" - if bouncer.LastPull != nil { - lastPull = bouncer.LastPull.String() - } - - t.AppendRows([]table.Row{ - {"Created At", bouncer.CreatedAt}, - {"Last Update", bouncer.UpdatedAt}, - {"Revoked?", bouncer.Revoked}, - {"IP Address", bouncer.IPAddress}, - {"Type", bouncer.Type}, - {"Version", bouncer.Version}, - {"Last Pull", lastPull}, - {"Auth type", bouncer.AuthType}, - {"OS", clientinfo.GetOSNameAndVersion(bouncer)}, - }) - - for _, ff := range clientinfo.GetFeatureFlagList(bouncer) { - t.AppendRow(table.Row{"Feature Flags", ff}) - } - - io.WriteString(out, t.Render()+"\n") -} - -func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { - out := color.Output - outputFormat := cli.cfg().Cscli.Output - - 
switch outputFormat { - case "human": - cli.inspectHuman(out, bouncer) - case "json": - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(newBouncerInfo(bouncer)); err != nil { - return errors.New("failed to serialize") - } - - return nil - default: - return fmt.Errorf("output format '%s' not supported for this command", outputFormat) - } - - return nil -} - -func (cli *cliBouncers) newInspectCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "inspect [bouncer_name]", - Short: "inspect a bouncer by name", - Example: `cscli bouncers inspect "bouncer1"`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: cli.validBouncerID, - RunE: func(cmd *cobra.Command, args []string) error { - bouncerName := args[0] - - b, err := cli.db.Ent.Bouncer.Query(). - Where(bouncer.Name(bouncerName)). - Only(cmd.Context()) - if err != nil { - return fmt.Errorf("unable to read bouncer data '%s': %w", bouncerName, err) - } - - return cli.inspect(b) - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/clibouncer/delete.go b/cmd/crowdsec-cli/clibouncer/delete.go new file mode 100644 index 00000000000..6e2f312d4af --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/delete.go @@ -0,0 +1,51 @@ +package clibouncer + +import ( + "context" + "errors" + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/database" +) + +func (cli *cliBouncers) delete(ctx context.Context, bouncers []string, ignoreMissing bool) error { + for _, bouncerID := range bouncers { + if err := cli.db.DeleteBouncer(ctx, bouncerID); err != nil { + var notFoundErr *database.BouncerNotFoundError + if ignoreMissing && errors.As(err, ¬FoundErr) { + return nil + } + + return fmt.Errorf("unable to delete bouncer: %w", err) + } + + log.Infof("bouncer '%s' deleted successfully", bouncerID) + } + + return nil +} + +func (cli *cliBouncers) newDeleteCmd() *cobra.Command { + var ignoreMissing bool + + cmd := &cobra.Command{ 
+ Use: "delete MyBouncerName", + Short: "delete bouncer(s) from the database", + Example: `cscli bouncers delete "bouncer1" "bouncer2"`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"remove"}, + DisableAutoGenTag: true, + ValidArgsFunction: cli.validBouncerID, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.delete(cmd.Context(), args, ignoreMissing) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more bouncers don't exist") + + return cmd +} diff --git a/cmd/crowdsec-cli/clibouncer/inspect.go b/cmd/crowdsec-cli/clibouncer/inspect.go new file mode 100644 index 00000000000..6dac386b888 --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/inspect.go @@ -0,0 +1,98 @@ +package clibouncer + +import ( + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" +) + +func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + + t.SetTitle("Bouncer: " + bouncer.Name) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + lastPull := "" + if bouncer.LastPull != nil { + lastPull = bouncer.LastPull.String() + } + + t.AppendRows([]table.Row{ + {"Created At", bouncer.CreatedAt}, + {"Last Update", bouncer.UpdatedAt}, + {"Revoked?", bouncer.Revoked}, + {"IP Address", bouncer.IPAddress}, + {"Type", bouncer.Type}, + {"Version", bouncer.Version}, + {"Last Pull", lastPull}, + {"Auth type", bouncer.AuthType}, + {"OS", clientinfo.GetOSNameAndVersion(bouncer)}, + }) + + for _, ff := range clientinfo.GetFeatureFlagList(bouncer) { + t.AppendRow(table.Row{"Feature 
Flags", ff}) + } + + io.WriteString(out, t.Render()+"\n") +} + +func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { + out := color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, bouncer) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newBouncerInfo(bouncer)); err != nil { + return errors.New("failed to serialize") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + + return nil +} + +func (cli *cliBouncers) newInspectCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "inspect [bouncer_name]", + Short: "inspect a bouncer by name", + Example: `cscli bouncers inspect "bouncer1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validBouncerID, + RunE: func(cmd *cobra.Command, args []string) error { + bouncerName := args[0] + + b, err := cli.db.Ent.Bouncer.Query(). + Where(bouncer.Name(bouncerName)). 
+ Only(cmd.Context()) + if err != nil { + return fmt.Errorf("unable to read bouncer data '%s': %w", bouncerName, err) + } + + return cli.inspect(b) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clibouncer/list.go b/cmd/crowdsec-cli/clibouncer/list.go new file mode 100644 index 00000000000..a13ca994e1e --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/list.go @@ -0,0 +1,117 @@ +package clibouncer + +import ( + "context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "time" + + "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) + + for _, b := range bouncers { + revoked := emoji.CheckMark + if b.Revoked { + revoked = emoji.Prohibited + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) + } + + io.WriteString(out, t.Render()+"\n") +} + +func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { + csvwriter := csv.NewWriter(out) + + if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { + return fmt.Errorf("failed to write raw header: %w", err) + } + + for _, b := range bouncers { + valid := "validated" + if b.Revoked { + valid = "pending" + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, 
b.Version, b.AuthType}); err != nil { + return fmt.Errorf("failed to write raw: %w", err) + } + } + + csvwriter.Flush() + + return nil +} + +func (cli *cliBouncers) List(ctx context.Context, out io.Writer, db *database.Client) error { + // XXX: must use the provided db object, the one in the struct might be nil + // (calling List directly skips the PersistentPreRunE) + + bouncers, err := db.ListBouncers(ctx) + if err != nil { + return fmt.Errorf("unable to list bouncers: %w", err) + } + + switch cli.cfg().Cscli.Output { + case "human": + cli.listHuman(out, bouncers) + case "json": + info := make([]bouncerInfo, 0, len(bouncers)) + for _, b := range bouncers { + info = append(info, newBouncerInfo(b)) + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(info); err != nil { + return errors.New("failed to serialize") + } + + return nil + case "raw": + return cli.listCSV(out, bouncers) + } + + return nil +} + +func (cli *cliBouncers) newListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "list all bouncers within the database", + Example: `cscli bouncers list`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.List(cmd.Context(), color.Output, cli.db) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clibouncer/prune.go b/cmd/crowdsec-cli/clibouncer/prune.go new file mode 100644 index 00000000000..754e0898a3b --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/prune.go @@ -0,0 +1,85 @@ +package clibouncer + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" +) + +func (cli *cliBouncers) prune(ctx context.Context, duration time.Duration, force bool) error { + if duration < 2*time.Minute { + if yes, err := ask.YesNo( + "The duration you provided is less than 2 minutes. "+ + "This may remove active bouncers. 
Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. No changes were made.") + return nil + } + } + + bouncers, err := cli.db.QueryBouncersInactiveSince(ctx, time.Now().UTC().Add(-duration)) + if err != nil { + return fmt.Errorf("unable to query bouncers: %w", err) + } + + if len(bouncers) == 0 { + fmt.Println("No bouncers to prune.") + return nil + } + + cli.listHuman(color.Output, bouncers) + + if !force { + if yes, err := ask.YesNo( + "You are about to PERMANENTLY remove the above bouncers from the database. "+ + "These will NOT be recoverable. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. No changes were made.") + return nil + } + } + + deleted, err := cli.db.BulkDeleteBouncers(ctx, bouncers) + if err != nil { + return fmt.Errorf("unable to prune bouncers: %w", err) + } + + fmt.Fprintf(os.Stderr, "Successfully deleted %d bouncers\n", deleted) + + return nil +} + +func (cli *cliBouncers) newPruneCmd() *cobra.Command { + var ( + duration time.Duration + force bool + ) + + const defaultDuration = 60 * time.Minute + + cmd := &cobra.Command{ + Use: "prune", + Short: "prune multiple bouncers from the database", + Args: cobra.NoArgs, + DisableAutoGenTag: true, + Example: `cscli bouncers prune -d 45m +cscli bouncers prune -d 45m --force`, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.prune(cmd.Context(), duration, force) + }, + } + + flags := cmd.Flags() + flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since last pull") + flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") + + return cmd +} diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index 1f8781a3716..307cabffe51 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -290,7 +290,7 @@ cscli decisions list -r 1.2.3.0/24 cscli 
decisions list -s crowdsecurity/ssh-bf cscli decisions list --origin lists --scenario list_name `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.list(cmd.Context(), filter, NoSimu, contained, printMachine) @@ -416,7 +416,7 @@ cscli decisions add --ip 1.2.3.4 --duration 24h --type captcha cscli decisions add --scope username --value foobar `, /*TBD : fix long and example*/ - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.add(cmd.Context(), addIP, addRange, addDuration, addValue, addScope, addReason, addType) diff --git a/cmd/crowdsec-cli/clidecision/decisions_import.go b/cmd/crowdsec-cli/clidecision/import.go similarity index 71% rename from cmd/crowdsec-cli/clidecision/decisions_import.go rename to cmd/crowdsec-cli/clidecision/import.go index 10d92f88876..5b34b74a250 100644 --- a/cmd/crowdsec-cli/clidecision/decisions_import.go +++ b/cmd/crowdsec-cli/clidecision/import.go @@ -67,65 +67,29 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { return ret, nil } -func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - input, err := flags.GetString("input") - if err != nil { - return err - } - - defaultDuration, err := flags.GetString("duration") - if err != nil { - return err - } - - if defaultDuration == "" { - return errors.New("--duration cannot be empty") - } - - defaultScope, err := flags.GetString("scope") - if err != nil { - return err - } - - if defaultScope == "" { - return errors.New("--scope cannot be empty") - } - - defaultReason, err := flags.GetString("reason") - if err != nil { - return err - } - - if defaultReason == "" { - return errors.New("--reason cannot be empty") - } +func (cli *cliDecisions) import_(ctx context.Context, input string, duration string, scope string, reason string, type_ string, 
batch int, format string) error { + var ( + content []byte + fin *os.File + err error + ) - defaultType, err := flags.GetString("type") - if err != nil { - return err + if duration == "" { + return errors.New("default duration cannot be empty") } - if defaultType == "" { - return errors.New("--type cannot be empty") + if scope == "" { + return errors.New("default scope cannot be empty") } - batchSize, err := flags.GetInt("batch") - if err != nil { - return err + if reason == "" { + return errors.New("default reason cannot be empty") } - format, err := flags.GetString("format") - if err != nil { - return err + if type_ == "" { + return errors.New("default type cannot be empty") } - var ( - content []byte - fin *os.File - ) - // set format if the file has a json or csv extension if format == "" { if strings.HasSuffix(input, ".json") { @@ -167,23 +131,23 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if d.Duration == "" { - d.Duration = defaultDuration - log.Debugf("item %d: missing 'duration', using default '%s'", i, defaultDuration) + d.Duration = duration + log.Debugf("item %d: missing 'duration', using default '%s'", i, duration) } if d.Scenario == "" { - d.Scenario = defaultReason - log.Debugf("item %d: missing 'reason', using default '%s'", i, defaultReason) + d.Scenario = reason + log.Debugf("item %d: missing 'reason', using default '%s'", i, reason) } if d.Type == "" { - d.Type = defaultType - log.Debugf("item %d: missing 'type', using default '%s'", i, defaultType) + d.Type = type_ + log.Debugf("item %d: missing 'type', using default '%s'", i, type_) } if d.Scope == "" { - d.Scope = defaultScope - log.Debugf("item %d: missing 'scope', using default '%s'", i, defaultScope) + d.Scope = scope + log.Debugf("item %d: missing 'scope', using default '%s'", i, scope) } decisions[i] = &models.Decision{ @@ -201,7 +165,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { log.Infof("You are about to add %d 
decisions, this may take a while", len(decisions)) } - for _, chunk := range slicetools.Chunks(decisions, batchSize) { + for _, chunk := range slicetools.Chunks(decisions, batch) { log.Debugf("Processing chunk of %d decisions", len(chunk)) importAlert := models.Alert{ CreatedAt: time.Now().UTC().Format(time.RFC3339), @@ -224,7 +188,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { Decisions: chunk, } - _, _, err = cli.client.Alerts.Add(context.Background(), models.AddAlertsRequest{&importAlert}) + _, _, err = cli.client.Alerts.Add(ctx, models.AddAlertsRequest{&importAlert}) if err != nil { return err } @@ -236,12 +200,22 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } func (cli *cliDecisions) newImportCmd() *cobra.Command { + var ( + input string + duration string + scope string + reason string + decisionType string + batch int + format string + ) + cmd := &cobra.Command{ Use: "import [options]", Short: "Import decisions from a file or pipe", Long: "expected format:\n" + "csv : any of duration,reason,scope,type,value, with a header line\n" + - "json :" + "`{" + `"duration" : "24h", "reason" : "my_scenario", "scope" : "ip", "type" : "ban", "value" : "x.y.z.z"` + "}`", + "json :" + "`{" + `"duration": "24h", "reason": "my_scenario", "scope": "ip", "type": "ban", "value": "x.y.z.z"` + "}`", Args: cobra.NoArgs, DisableAutoGenTag: true, Example: `decisions.csv: @@ -251,7 +225,7 @@ duration,scope,value $ cscli decisions import -i decisions.csv decisions.json: -[{"duration" : "4h", "scope" : "ip", "type" : "ban", "value" : "1.2.3.4"}] +[{"duration": "4h", "scope": "ip", "type": "ban", "value": "1.2.3.4"}] The file format is detected from the extension, but can be forced with the --format option which is required when reading from standard input. 
@@ -260,18 +234,20 @@ Raw values, standard input: $ echo "1.2.3.4" | cscli decisions import -i - --format values `, - RunE: cli.runImport, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.import_(cmd.Context(), input, duration, scope, reason, decisionType, batch, format) + }, } flags := cmd.Flags() flags.SortFlags = false - flags.StringP("input", "i", "", "Input file") - flags.StringP("duration", "d", "4h", "Decision duration: 1h,4h,30m") - flags.String("scope", types.Ip, "Decision scope: ip,range,username") - flags.StringP("reason", "R", "manual", "Decision reason: ") - flags.StringP("type", "t", "ban", "Decision type: ban,captcha,throttle") - flags.Int("batch", 0, "Split import in batches of N decisions") - flags.String("format", "", "Input format: 'json', 'csv' or 'values' (each line is a value, no headers)") + flags.StringVarP(&input, "input", "i", "", "Input file") + flags.StringVarP(&duration, "duration", "d", "4h", "Decision duration: 1h,4h,30m") + flags.StringVar(&scope, "scope", types.Ip, "Decision scope: ip,range,username") + flags.StringVarP(&reason, "reason", "R", "manual", "Decision reason: ") + flags.StringVarP(&decisionType, "type", "t", "ban", "Decision type: ban,captcha,throttle") + flags.IntVar(&batch, "batch", 0, "Split import in batches of N decisions") + flags.StringVar(&format, "format", "", "Input format: 'json', 'csv' or 'values' (each line is a value, no headers)") _ = cmd.MarkFlagRequired("input") diff --git a/cmd/crowdsec-cli/clidecision/decisions_table.go b/cmd/crowdsec-cli/clidecision/table.go similarity index 93% rename from cmd/crowdsec-cli/clidecision/decisions_table.go rename to cmd/crowdsec-cli/clidecision/table.go index 90a0ae1176b..189eb80b8e5 100644 --- a/cmd/crowdsec-cli/clidecision/decisions_table.go +++ b/cmd/crowdsec-cli/clidecision/table.go @@ -1,7 +1,6 @@ package clidecision import ( - "fmt" "io" "strconv" @@ -23,7 +22,7 @@ func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsR 
for _, alertItem := range *alerts { for _, decisionItem := range alertItem.Decisions { if *alertItem.Simulated { - *decisionItem.Type = fmt.Sprintf("(simul)%s", *decisionItem.Type) + *decisionItem.Type = "(simul)" + *decisionItem.Type } row := []string{ diff --git a/cmd/crowdsec-cli/cliexplain/explain.go b/cmd/crowdsec-cli/cliexplain/explain.go index 182e34a12a5..d6e821e4e6c 100644 --- a/cmd/crowdsec-cli/cliexplain/explain.go +++ b/cmd/crowdsec-cli/cliexplain/explain.go @@ -80,7 +80,7 @@ cscli explain --log "Sep 19 18:33:22 scw-d95986 sshd[24347]: pam_unix(sshd:auth) cscli explain --dsn "file://myfile.log" --type nginx tail -n 5 myfile.log | cscli explain --type nginx -f - `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.run() @@ -197,7 +197,7 @@ func (cli *cliExplain) run() error { return fmt.Errorf("unable to get absolute path of '%s', exiting", logFile) } - dsn = fmt.Sprintf("file://%s", absolutePath) + dsn = "file://" + absolutePath lineCount, err := getLineCountForFile(absolutePath) if err != nil { diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 22568355546..f189d6a2e13 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -39,7 +39,7 @@ The Hub is managed by cscli, to get the latest hub files from [Crowdsec Hub](htt Example: `cscli hub list cscli hub update cscli hub upgrade`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, } @@ -87,7 +87,7 @@ func (cli *cliHub) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list [-a]", Short: "List all installed configurations", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) @@ -140,7 +140,7 @@ func (cli *cliHub) newUpdateCmd() *cobra.Command { Long: ` Fetches the .index.json file from the hub, 
containing the list of available configs. `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.update(cmd.Context(), withContent) @@ -190,7 +190,7 @@ func (cli *cliHub) newUpgradeCmd() *cobra.Command { Long: ` Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if you want the latest versions available. `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.upgrade(cmd.Context(), force) @@ -235,7 +235,7 @@ func (cli *cliHub) newTypesCmd() *cobra.Command { Long: ` List the types of supported hub items. `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.types() diff --git a/cmd/crowdsec-cli/clihubtest/hubtest.go b/cmd/crowdsec-cli/clihubtest/hubtest.go index 3420e21e1e2..f4cfed2e1cb 100644 --- a/cmd/crowdsec-cli/clihubtest/hubtest.go +++ b/cmd/crowdsec-cli/clihubtest/hubtest.go @@ -39,7 +39,7 @@ func (cli *cliHubTest) NewCommand() *cobra.Command { Use: "hubtest", Short: "Run functional tests on hub configurations", Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { var err error diff --git a/cmd/crowdsec-cli/clilapi/context.go b/cmd/crowdsec-cli/clilapi/context.go new file mode 100644 index 00000000000..20ceb2b9596 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/context.go @@ -0,0 +1,395 @@ +package clilapi + +import ( + "errors" + "fmt" + "slices" + "sort" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/alertcontext" + 
"github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/parser" +) + +func (cli *cliLapi) addContext(key string, values []string) error { + cfg := cli.cfg() + + if err := alertcontext.ValidateContextExpr(key, values); err != nil { + return fmt.Errorf("invalid context configuration: %w", err) + } + + if _, ok := cfg.Crowdsec.ContextToSend[key]; !ok { + cfg.Crowdsec.ContextToSend[key] = make([]string, 0) + + log.Infof("key '%s' added", key) + } + + data := cfg.Crowdsec.ContextToSend[key] + + for _, val := range values { + if !slices.Contains(data, val) { + log.Infof("value '%s' added to key '%s'", val, key) + data = append(data, val) + } + + cfg.Crowdsec.ContextToSend[key] = data + } + + return cfg.Crowdsec.DumpContextConfigFile() +} + +func (cli *cliLapi) newContextAddCmd() *cobra.Command { + var ( + keyToAdd string + valuesToAdd []string + ) + + cmd := &cobra.Command{ + Use: "add", + Short: "Add context to send with alerts. You must specify the output key with the expr value you want", + Example: `cscli lapi context add --key source_ip --value evt.Meta.source_ip +cscli lapi context add --key file_source --value evt.Line.Src +cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user + `, + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) + if err != nil { + return err + } + + if err = alertcontext.LoadConsoleContext(cli.cfg(), hub); err != nil { + return fmt.Errorf("while loading context: %w", err) + } + + if keyToAdd != "" { + return cli.addContext(keyToAdd, valuesToAdd) + } + + for _, v := range valuesToAdd { + keySlice := strings.Split(v, ".") + key := keySlice[len(keySlice)-1] + value := []string{v} + if err := cli.addContext(key, value); err != nil { + return err + } + } + + return nil + }, + } + + flags := cmd.Flags() + flags.StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send") + 
flags.StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key") + + _ = cmd.MarkFlagRequired("value") + + return cmd +} + +func (cli *cliLapi) newContextStatusCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "status", + Short: "List context to send with alerts", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + + if err = alertcontext.LoadConsoleContext(cfg, hub); err != nil { + return fmt.Errorf("while loading context: %w", err) + } + + if len(cfg.Crowdsec.ContextToSend) == 0 { + fmt.Println("No context found on this agent. You can use 'cscli lapi context add' to add context to your alerts.") + return nil + } + + dump, err := yaml.Marshal(cfg.Crowdsec.ContextToSend) + if err != nil { + return fmt.Errorf("unable to show context status: %w", err) + } + + fmt.Print(string(dump)) + + return nil + }, + } + + return cmd +} + +func (cli *cliLapi) newContextDetectCmd() *cobra.Command { + var detectAll bool + + cmd := &cobra.Command{ + Use: "detect", + Short: "Detect available fields from the installed parsers", + Example: `cscli lapi context detect --all +cscli lapi context detect crowdsecurity/sshd-logs + `, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + cfg := cli.cfg() + if !detectAll && len(args) == 0 { + _ = cmd.Help() + return errors.New("please provide parsers to detect or --all flag") + } + + // to avoid all the log.Info from the loaders functions + log.SetLevel(log.WarnLevel) + + if err := exprhelpers.Init(nil); err != nil { + return fmt.Errorf("failed to init expr helpers: %w", err) + } + + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + + csParsers := parser.NewParsers(hub) + if csParsers, err = parser.LoadParsers(cfg, csParsers); err != nil { + return fmt.Errorf("unable to load parsers: %w", err) + } + + fieldByParsers := 
make(map[string][]string) + for _, node := range csParsers.Nodes { + if !detectAll && !slices.Contains(args, node.Name) { + continue + } + if !detectAll { + args = removeFromSlice(node.Name, args) + } + fieldByParsers[node.Name] = make([]string, 0) + fieldByParsers[node.Name] = detectNode(node, *csParsers.Ctx) + + subNodeFields := detectSubNode(node, *csParsers.Ctx) + for _, field := range subNodeFields { + if !slices.Contains(fieldByParsers[node.Name], field) { + fieldByParsers[node.Name] = append(fieldByParsers[node.Name], field) + } + } + } + + fmt.Printf("Acquisition :\n\n") + fmt.Printf(" - evt.Line.Module\n") + fmt.Printf(" - evt.Line.Raw\n") + fmt.Printf(" - evt.Line.Src\n") + fmt.Println() + + parsersKey := make([]string, 0) + for k := range fieldByParsers { + parsersKey = append(parsersKey, k) + } + sort.Strings(parsersKey) + + for _, k := range parsersKey { + if len(fieldByParsers[k]) == 0 { + continue + } + fmt.Printf("%s :\n\n", k) + values := fieldByParsers[k] + sort.Strings(values) + for _, value := range values { + fmt.Printf(" - %s\n", value) + } + fmt.Println() + } + + if len(args) > 0 { + for _, parserNotFound := range args { + log.Errorf("parser '%s' not found, can't detect fields", parserNotFound) + } + } + + return nil + }, + } + cmd.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser") + + return cmd +} + +func (cli *cliLapi) newContextDeleteCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "delete", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + filePath := cli.cfg().Crowdsec.ConsoleContextPath + if filePath == "" { + filePath = "the context file" + } + + return fmt.Errorf("command 'delete' has been removed, please manually edit %s", filePath) + }, + } + + return cmd +} + +func (cli *cliLapi) newContextCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "context [command]", + Short: "Manage context to send with alerts", + DisableAutoGenTag: true, + 
PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := cfg.LoadCrowdsec(); err != nil { + fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", cfg.Crowdsec.ConsoleContextPath) + if err.Error() != fileNotFoundMessage { + return fmt.Errorf("unable to load CrowdSec agent configuration: %w", err) + } + } + if cfg.DisableAgent { + return errors.New("agent is disabled and lapi context can only be used on the agent") + } + + return nil + }, + } + + cmd.AddCommand(cli.newContextAddCmd()) + cmd.AddCommand(cli.newContextStatusCmd()) + cmd.AddCommand(cli.newContextDetectCmd()) + cmd.AddCommand(cli.newContextDeleteCmd()) + + return cmd +} + +func detectStaticField(grokStatics []parser.ExtraField) []string { + ret := make([]string, 0) + + for _, static := range grokStatics { + if static.Parsed != "" { + fieldName := "evt.Parsed." + static.Parsed + if !slices.Contains(ret, fieldName) { + ret = append(ret, fieldName) + } + } + + if static.Meta != "" { + fieldName := "evt.Meta." + static.Meta + if !slices.Contains(ret, fieldName) { + ret = append(ret, fieldName) + } + } + + if static.TargetByName != "" { + fieldName := static.TargetByName + if !strings.HasPrefix(fieldName, "evt.") { + fieldName = "evt." + fieldName + } + + if !slices.Contains(ret, fieldName) { + ret = append(ret, fieldName) + } + } + } + + return ret +} + +func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { + ret := make([]string, 0) + + if node.Grok.RunTimeRegexp != nil { + for _, capturedField := range node.Grok.RunTimeRegexp.Names() { + fieldName := "evt.Parsed." + capturedField + if !slices.Contains(ret, fieldName) { + ret = append(ret, fieldName) + } + } + } + + if node.Grok.RegexpName != "" { + grokCompiled, err := parserCTX.Grok.Get(node.Grok.RegexpName) + // ignore error (parser does not exist?) + if err == nil { + for _, capturedField := range grokCompiled.Names() { + fieldName := "evt.Parsed." 
+ capturedField + if !slices.Contains(ret, fieldName) { + ret = append(ret, fieldName) + } + } + } + } + + if len(node.Grok.Statics) > 0 { + staticsField := detectStaticField(node.Grok.Statics) + for _, staticField := range staticsField { + if !slices.Contains(ret, staticField) { + ret = append(ret, staticField) + } + } + } + + if len(node.Statics) > 0 { + staticsField := detectStaticField(node.Statics) + for _, staticField := range staticsField { + if !slices.Contains(ret, staticField) { + ret = append(ret, staticField) + } + } + } + + return ret +} + +func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { + ret := make([]string, 0) + + for _, subnode := range node.LeavesNodes { + if subnode.Grok.RunTimeRegexp != nil { + for _, capturedField := range subnode.Grok.RunTimeRegexp.Names() { + fieldName := "evt.Parsed." + capturedField + if !slices.Contains(ret, fieldName) { + ret = append(ret, fieldName) + } + } + } + + if subnode.Grok.RegexpName != "" { + grokCompiled, err := parserCTX.Grok.Get(subnode.Grok.RegexpName) + if err == nil { + // ignore error (parser does not exist?) + for _, capturedField := range grokCompiled.Names() { + fieldName := "evt.Parsed." 
+ capturedField + if !slices.Contains(ret, fieldName) { + ret = append(ret, fieldName) + } + } + } + } + + if len(subnode.Grok.Statics) > 0 { + staticsField := detectStaticField(subnode.Grok.Statics) + for _, staticField := range staticsField { + if !slices.Contains(ret, staticField) { + ret = append(ret, staticField) + } + } + } + + if len(subnode.Statics) > 0 { + staticsField := detectStaticField(subnode.Statics) + for _, staticField := range staticsField { + if !slices.Contains(ret, staticField) { + ret = append(ret, staticField) + } + } + } + } + + return ret +} diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index bb721eefe03..01341330ae8 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -1,36 +1,13 @@ package clilapi import ( - "context" - "errors" "fmt" - "io" - "net/url" - "os" - "slices" - "sort" - "strings" - "github.com/fatih/color" - "github.com/go-openapi/strfmt" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/alertcontext" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/crowdsec/pkg/models" - "github.com/crowdsecurity/crowdsec/pkg/parser" ) -const LAPIURLPrefix = "v1" - type configGetter = func() *csconfig.Config type cliLapi struct { @@ -43,200 +20,6 @@ func New(cfg configGetter) *cliLapi { } } -// queryLAPIStatus checks if the Local API is reachable, and if the credentials are correct. 
-func queryLAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { - apiURL, err := url.Parse(credURL) - if err != nil { - return false, err - } - - client, err := apiclient.NewDefaultClient(apiURL, - LAPIURLPrefix, - "", - nil) - if err != nil { - return false, err - } - - pw := strfmt.Password(password) - - itemsForAPI := hub.GetInstalledListForAPI() - - t := models.WatcherAuthRequest{ - MachineID: &login, - Password: &pw, - Scenarios: itemsForAPI, - } - - _, _, err = client.Auth.AuthenticateWatcher(ctx, t) - if err != nil { - return false, err - } - - return true, nil -} - -func (cli *cliLapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { - cfg := cli.cfg() - - cred := cfg.API.Client.Credentials - - fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Client.CredentialsFilePath) - fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - - _, err := queryLAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) - if err != nil { - return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) - } - - fmt.Fprintf(out, "You can successfully interact with Local API (LAPI)\n") - - return nil -} - -func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile string, machine string, token string) error { - var err error - - lapiUser := machine - cfg := cli.cfg() - - if lapiUser == "" { - lapiUser, err = idgen.GenerateMachineID("") - if err != nil { - return fmt.Errorf("unable to generate machine id: %w", err) - } - } - - password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) - - apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) - if err != nil { - return fmt.Errorf("parsing api url: %w", err) - } - - _, err = apiclient.RegisterClient(ctx, &apiclient.Config{ - MachineID: lapiUser, - Password: password, - RegistrationToken: token, - URL: apiurl, - VersionPrefix: LAPIURLPrefix, - }, nil) - if err != nil { - 
return fmt.Errorf("api client register: %w", err) - } - - log.Printf("Successfully registered to Local API (LAPI)") - - var dumpFile string - - if outputFile != "" { - dumpFile = outputFile - } else if cfg.API.Client.CredentialsFilePath != "" { - dumpFile = cfg.API.Client.CredentialsFilePath - } else { - dumpFile = "" - } - - apiCfg := cfg.API.Client.Credentials - apiCfg.Login = lapiUser - apiCfg.Password = password.String() - - if apiURL != "" { - apiCfg.URL = apiURL - } - - apiConfigDump, err := yaml.Marshal(apiCfg) - if err != nil { - return fmt.Errorf("unable to serialize api credentials: %w", err) - } - - if dumpFile != "" { - err = os.WriteFile(dumpFile, apiConfigDump, 0o600) - if err != nil { - return fmt.Errorf("write api credentials to '%s' failed: %w", dumpFile, err) - } - - log.Printf("Local API credentials written to '%s'", dumpFile) - } else { - fmt.Printf("%s\n", string(apiConfigDump)) - } - - log.Warning(reload.Message) - - return nil -} - -// prepareAPIURL checks/fixes a LAPI connection url (http, https or socket) and returns an URL struct -func prepareAPIURL(clientCfg *csconfig.LocalApiClientCfg, apiURL string) (*url.URL, error) { - if apiURL == "" { - if clientCfg == nil || clientCfg.Credentials == nil || clientCfg.Credentials.URL == "" { - return nil, errors.New("no Local API URL. 
Please provide it in your configuration or with the -u parameter") - } - - apiURL = clientCfg.Credentials.URL - } - - // URL needs to end with /, but user doesn't care - if !strings.HasSuffix(apiURL, "/") { - apiURL += "/" - } - - // URL needs to start with http://, but user doesn't care - if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") && !strings.HasPrefix(apiURL, "/") { - apiURL = "http://" + apiURL - } - - return url.Parse(apiURL) -} - -func (cli *cliLapi) newStatusCmd() *cobra.Command { - cmdLapiStatus := &cobra.Command{ - Use: "status", - Short: "Check authentication to Local API (LAPI)", - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) - if err != nil { - return err - } - - return cli.Status(cmd.Context(), color.Output, hub) - }, - } - - return cmdLapiStatus -} - -func (cli *cliLapi) newRegisterCmd() *cobra.Command { - var ( - apiURL string - outputFile string - machine string - token string - ) - - cmd := &cobra.Command{ - Use: "register", - Short: "Register a machine to Local API (LAPI)", - Long: `Register your machine to the Local API (LAPI). -Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, _ []string) error { - return cli.register(cmd.Context(), apiURL, outputFile, machine, token) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. 
http://127.0.0.1)") - flags.StringVarP(&outputFile, "file", "f", "", "output file destination") - flags.StringVar(&machine, "machine", "", "Name of the machine to register with") - flags.StringVar(&token, "token", "", "Auto registration token to use") - - return cmd -} - func (cli *cliLapi) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "lapi [action]", @@ -257,380 +40,3 @@ func (cli *cliLapi) NewCommand() *cobra.Command { return cmd } - -func (cli *cliLapi) addContext(key string, values []string) error { - cfg := cli.cfg() - - if err := alertcontext.ValidateContextExpr(key, values); err != nil { - return fmt.Errorf("invalid context configuration: %w", err) - } - - if _, ok := cfg.Crowdsec.ContextToSend[key]; !ok { - cfg.Crowdsec.ContextToSend[key] = make([]string, 0) - - log.Infof("key '%s' added", key) - } - - data := cfg.Crowdsec.ContextToSend[key] - - for _, val := range values { - if !slices.Contains(data, val) { - log.Infof("value '%s' added to key '%s'", val, key) - data = append(data, val) - } - - cfg.Crowdsec.ContextToSend[key] = data - } - - return cfg.Crowdsec.DumpContextConfigFile() -} - -func (cli *cliLapi) newContextAddCmd() *cobra.Command { - var ( - keyToAdd string - valuesToAdd []string - ) - - cmd := &cobra.Command{ - Use: "add", - Short: "Add context to send with alerts. 
You must specify the output key with the expr value you want", - Example: `cscli lapi context add --key source_ip --value evt.Meta.source_ip -cscli lapi context add --key file_source --value evt.Line.Src -cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user - `, - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) - if err != nil { - return err - } - - if err = alertcontext.LoadConsoleContext(cli.cfg(), hub); err != nil { - return fmt.Errorf("while loading context: %w", err) - } - - if keyToAdd != "" { - return cli.addContext(keyToAdd, valuesToAdd) - } - - for _, v := range valuesToAdd { - keySlice := strings.Split(v, ".") - key := keySlice[len(keySlice)-1] - value := []string{v} - if err := cli.addContext(key, value); err != nil { - return err - } - } - - return nil - }, - } - - flags := cmd.Flags() - flags.StringVarP(&keyToAdd, "key", "k", "", "The key of the different values to send") - flags.StringSliceVar(&valuesToAdd, "value", []string{}, "The expr fields to associate with the key") - - _ = cmd.MarkFlagRequired("value") - - return cmd -} - -func (cli *cliLapi) newContextStatusCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "status", - Short: "List context to send with alerts", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - if err = alertcontext.LoadConsoleContext(cfg, hub); err != nil { - return fmt.Errorf("while loading context: %w", err) - } - - if len(cfg.Crowdsec.ContextToSend) == 0 { - fmt.Println("No context found on this agent. 
You can use 'cscli lapi context add' to add context to your alerts.") - return nil - } - - dump, err := yaml.Marshal(cfg.Crowdsec.ContextToSend) - if err != nil { - return fmt.Errorf("unable to show context status: %w", err) - } - - fmt.Print(string(dump)) - - return nil - }, - } - - return cmd -} - -func (cli *cliLapi) newContextDetectCmd() *cobra.Command { - var detectAll bool - - cmd := &cobra.Command{ - Use: "detect", - Short: "Detect available fields from the installed parsers", - Example: `cscli lapi context detect --all -cscli lapi context detect crowdsecurity/sshd-logs - `, - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - cfg := cli.cfg() - if !detectAll && len(args) == 0 { - _ = cmd.Help() - return errors.New("please provide parsers to detect or --all flag") - } - - // to avoid all the log.Info from the loaders functions - log.SetLevel(log.WarnLevel) - - if err := exprhelpers.Init(nil); err != nil { - return fmt.Errorf("failed to init expr helpers: %w", err) - } - - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - csParsers := parser.NewParsers(hub) - if csParsers, err = parser.LoadParsers(cfg, csParsers); err != nil { - return fmt.Errorf("unable to load parsers: %w", err) - } - - fieldByParsers := make(map[string][]string) - for _, node := range csParsers.Nodes { - if !detectAll && !slices.Contains(args, node.Name) { - continue - } - if !detectAll { - args = removeFromSlice(node.Name, args) - } - fieldByParsers[node.Name] = make([]string, 0) - fieldByParsers[node.Name] = detectNode(node, *csParsers.Ctx) - - subNodeFields := detectSubNode(node, *csParsers.Ctx) - for _, field := range subNodeFields { - if !slices.Contains(fieldByParsers[node.Name], field) { - fieldByParsers[node.Name] = append(fieldByParsers[node.Name], field) - } - } - } - - fmt.Printf("Acquisition :\n\n") - fmt.Printf(" - evt.Line.Module\n") - fmt.Printf(" - evt.Line.Raw\n") - fmt.Printf(" - evt.Line.Src\n") - fmt.Println() 
- - parsersKey := make([]string, 0) - for k := range fieldByParsers { - parsersKey = append(parsersKey, k) - } - sort.Strings(parsersKey) - - for _, k := range parsersKey { - if len(fieldByParsers[k]) == 0 { - continue - } - fmt.Printf("%s :\n\n", k) - values := fieldByParsers[k] - sort.Strings(values) - for _, value := range values { - fmt.Printf(" - %s\n", value) - } - fmt.Println() - } - - if len(args) > 0 { - for _, parserNotFound := range args { - log.Errorf("parser '%s' not found, can't detect fields", parserNotFound) - } - } - - return nil - }, - } - cmd.Flags().BoolVarP(&detectAll, "all", "a", false, "Detect evt field for all installed parser") - - return cmd -} - -func (cli *cliLapi) newContextDeleteCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "delete", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - filePath := cli.cfg().Crowdsec.ConsoleContextPath - if filePath == "" { - filePath = "the context file" - } - - return fmt.Errorf("command 'delete' has been removed, please manually edit %s", filePath) - }, - } - - return cmd -} - -func (cli *cliLapi) newContextCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "context [command]", - Short: "Manage context to send with alerts", - DisableAutoGenTag: true, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - if err := cfg.LoadCrowdsec(); err != nil { - fileNotFoundMessage := fmt.Sprintf("failed to open context file: open %s: no such file or directory", cfg.Crowdsec.ConsoleContextPath) - if err.Error() != fileNotFoundMessage { - return fmt.Errorf("unable to load CrowdSec agent configuration: %w", err) - } - } - if cfg.DisableAgent { - return errors.New("agent is disabled and lapi context can only be used on the agent") - } - - return nil - }, - } - - cmd.AddCommand(cli.newContextAddCmd()) - cmd.AddCommand(cli.newContextStatusCmd()) - cmd.AddCommand(cli.newContextDetectCmd()) - cmd.AddCommand(cli.newContextDeleteCmd()) - - return cmd -} 
- -func detectStaticField(grokStatics []parser.ExtraField) []string { - ret := make([]string, 0) - - for _, static := range grokStatics { - if static.Parsed != "" { - fieldName := "evt.Parsed." + static.Parsed - if !slices.Contains(ret, fieldName) { - ret = append(ret, fieldName) - } - } - - if static.Meta != "" { - fieldName := "evt.Meta." + static.Meta - if !slices.Contains(ret, fieldName) { - ret = append(ret, fieldName) - } - } - - if static.TargetByName != "" { - fieldName := static.TargetByName - if !strings.HasPrefix(fieldName, "evt.") { - fieldName = "evt." + fieldName - } - - if !slices.Contains(ret, fieldName) { - ret = append(ret, fieldName) - } - } - } - - return ret -} - -func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { - ret := make([]string, 0) - - if node.Grok.RunTimeRegexp != nil { - for _, capturedField := range node.Grok.RunTimeRegexp.Names() { - fieldName := "evt.Parsed." + capturedField - if !slices.Contains(ret, fieldName) { - ret = append(ret, fieldName) - } - } - } - - if node.Grok.RegexpName != "" { - grokCompiled, err := parserCTX.Grok.Get(node.Grok.RegexpName) - // ignore error (parser does not exist?) - if err == nil { - for _, capturedField := range grokCompiled.Names() { - fieldName := "evt.Parsed." 
+ capturedField - if !slices.Contains(ret, fieldName) { - ret = append(ret, fieldName) - } - } - } - } - - if len(node.Grok.Statics) > 0 { - staticsField := detectStaticField(node.Grok.Statics) - for _, staticField := range staticsField { - if !slices.Contains(ret, staticField) { - ret = append(ret, staticField) - } - } - } - - if len(node.Statics) > 0 { - staticsField := detectStaticField(node.Statics) - for _, staticField := range staticsField { - if !slices.Contains(ret, staticField) { - ret = append(ret, staticField) - } - } - } - - return ret -} - -func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { - ret := make([]string, 0) - - for _, subnode := range node.LeavesNodes { - if subnode.Grok.RunTimeRegexp != nil { - for _, capturedField := range subnode.Grok.RunTimeRegexp.Names() { - fieldName := "evt.Parsed." + capturedField - if !slices.Contains(ret, fieldName) { - ret = append(ret, fieldName) - } - } - } - - if subnode.Grok.RegexpName != "" { - grokCompiled, err := parserCTX.Grok.Get(subnode.Grok.RegexpName) - if err == nil { - // ignore error (parser does not exist?) - for _, capturedField := range grokCompiled.Names() { - fieldName := "evt.Parsed." 
+ capturedField - if !slices.Contains(ret, fieldName) { - ret = append(ret, fieldName) - } - } - } - } - - if len(subnode.Grok.Statics) > 0 { - staticsField := detectStaticField(subnode.Grok.Statics) - for _, staticField := range staticsField { - if !slices.Contains(ret, staticField) { - ret = append(ret, staticField) - } - } - } - - if len(subnode.Statics) > 0 { - staticsField := detectStaticField(subnode.Statics) - for _, staticField := range staticsField { - if !slices.Contains(ret, staticField) { - ret = append(ret, staticField) - } - } - } - } - - return ret -} diff --git a/cmd/crowdsec-cli/clilapi/register.go b/cmd/crowdsec-cli/clilapi/register.go new file mode 100644 index 00000000000..4c9b0f39903 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/register.go @@ -0,0 +1,117 @@ +package clilapi + +import ( + "context" + "fmt" + "os" + + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" +) + +func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile string, machine string, token string) error { + var err error + + lapiUser := machine + cfg := cli.cfg() + + if lapiUser == "" { + lapiUser, err = idgen.GenerateMachineID("") + if err != nil { + return fmt.Errorf("unable to generate machine id: %w", err) + } + } + + password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) + + apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) + if err != nil { + return fmt.Errorf("parsing api url: %w", err) + } + + _, err = apiclient.RegisterClient(ctx, &apiclient.Config{ + MachineID: lapiUser, + Password: password, + RegistrationToken: token, + URL: apiurl, + VersionPrefix: LAPIURLPrefix, + }, nil) + if err != nil { + return fmt.Errorf("api client register: %w", err) + } + + log.Printf("Successfully registered to 
Local API (LAPI)") + + var dumpFile string + + if outputFile != "" { + dumpFile = outputFile + } else if cfg.API.Client.CredentialsFilePath != "" { + dumpFile = cfg.API.Client.CredentialsFilePath + } else { + dumpFile = "" + } + + apiCfg := cfg.API.Client.Credentials + apiCfg.Login = lapiUser + apiCfg.Password = password.String() + + if apiURL != "" { + apiCfg.URL = apiURL + } + + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + return fmt.Errorf("unable to serialize api credentials: %w", err) + } + + if dumpFile != "" { + err = os.WriteFile(dumpFile, apiConfigDump, 0o600) + if err != nil { + return fmt.Errorf("write api credentials to '%s' failed: %w", dumpFile, err) + } + + log.Printf("Local API credentials written to '%s'", dumpFile) + } else { + fmt.Printf("%s\n", string(apiConfigDump)) + } + + log.Warning(reload.Message) + + return nil +} + +func (cli *cliLapi) newRegisterCmd() *cobra.Command { + var ( + apiURL string + outputFile string + machine string + token string + ) + + cmd := &cobra.Command{ + Use: "register", + Short: "Register a machine to Local API (LAPI)", + Long: `Register your machine to the Local API (LAPI). +Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.register(cmd.Context(), apiURL, outputFile, machine, token) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. 
http://127.0.0.1)") + flags.StringVarP(&outputFile, "file", "f", "", "output file destination") + flags.StringVar(&machine, "machine", "", "Name of the machine to register with") + flags.StringVar(&token, "token", "", "Auto registration token to use") + + return cmd +} diff --git a/cmd/crowdsec-cli/clilapi/status.go b/cmd/crowdsec-cli/clilapi/status.go new file mode 100644 index 00000000000..6ff88834602 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/status.go @@ -0,0 +1,115 @@ +package clilapi + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "strings" + + "github.com/fatih/color" + "github.com/go-openapi/strfmt" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +const LAPIURLPrefix = "v1" + +// queryLAPIStatus checks if the Local API is reachable, and if the credentials are correct. 
+func queryLAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { + apiURL, err := url.Parse(credURL) + if err != nil { + return false, err + } + + client, err := apiclient.NewDefaultClient(apiURL, + LAPIURLPrefix, + "", + nil) + if err != nil { + return false, err + } + + pw := strfmt.Password(password) + + itemsForAPI := hub.GetInstalledListForAPI() + + t := models.WatcherAuthRequest{ + MachineID: &login, + Password: &pw, + Scenarios: itemsForAPI, + } + + _, _, err = client.Auth.AuthenticateWatcher(ctx, t) + if err != nil { + return false, err + } + + return true, nil +} + +func (cli *cliLapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { + cfg := cli.cfg() + + cred := cfg.API.Client.Credentials + + fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Client.CredentialsFilePath) + fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) + + _, err := queryLAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) + if err != nil { + return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) + } + + fmt.Fprintf(out, "You can successfully interact with Local API (LAPI)\n") + + return nil +} + +// prepareAPIURL checks/fixes a LAPI connection url (http, https or socket) and returns an URL struct +func prepareAPIURL(clientCfg *csconfig.LocalApiClientCfg, apiURL string) (*url.URL, error) { + if apiURL == "" { + if clientCfg == nil || clientCfg.Credentials == nil || clientCfg.Credentials.URL == "" { + return nil, errors.New("no Local API URL. 
Please provide it in your configuration or with the -u parameter") + } + + apiURL = clientCfg.Credentials.URL + } + + // URL needs to end with /, but user doesn't care + if !strings.HasSuffix(apiURL, "/") { + apiURL += "/" + } + + // URL needs to start with http://, but user doesn't care + if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") && !strings.HasPrefix(apiURL, "/") { + apiURL = "http://" + apiURL + } + + return url.Parse(apiURL) +} + +func (cli *cliLapi) newStatusCmd() *cobra.Command { + cmdLapiStatus := &cobra.Command{ + Use: "status", + Short: "Check authentication to Local API (LAPI)", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) + if err != nil { + return err + } + + return cli.Status(cmd.Context(), color.Output, hub) + }, + } + + return cmdLapiStatus +} diff --git a/cmd/crowdsec-cli/clilapi/lapi_test.go b/cmd/crowdsec-cli/clilapi/status_test.go similarity index 100% rename from cmd/crowdsec-cli/clilapi/lapi_test.go rename to cmd/crowdsec-cli/clilapi/status_test.go diff --git a/cmd/crowdsec-cli/climachine/add.go b/cmd/crowdsec-cli/climachine/add.go new file mode 100644 index 00000000000..afddb4e4b65 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/add.go @@ -0,0 +1,152 @@ +package climachine + +import ( + "context" + "errors" + "fmt" + "os" + + "github.com/AlecAivazis/survey/v2" + "github.com/go-openapi/strfmt" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { + var ( + err error + machineID string + ) + + // create machineID if not specified by user + if len(args) == 0 { + if 
!autoAdd { + return errors.New("please specify a machine name to add, or use --auto") + } + + machineID, err = idgen.GenerateMachineID("") + if err != nil { + return fmt.Errorf("unable to generate machine id: %w", err) + } + } else { + machineID = args[0] + } + + clientCfg := cli.cfg().API.Client + serverCfg := cli.cfg().API.Server + + /*check if file already exists*/ + if dumpFile == "" && clientCfg != nil && clientCfg.CredentialsFilePath != "" { + credFile := clientCfg.CredentialsFilePath + // use the default only if the file does not exist + _, err = os.Stat(credFile) + + switch { + case os.IsNotExist(err) || force: + dumpFile = credFile + case err != nil: + return fmt.Errorf("unable to stat '%s': %w", credFile, err) + default: + return fmt.Errorf(`credentials file '%s' already exists: please remove it, use "--force" or specify a different file with "-f" ("-f -" for standard output)`, credFile) + } + } + + if dumpFile == "" { + return errors.New(`please specify a file to dump credentials to, with -f ("-f -" for standard output)`) + } + + // create a password if it's not specified by user + if machinePassword == "" && !interactive { + if !autoAdd { + return errors.New("please specify a password with --password or use --auto") + } + + machinePassword = idgen.GeneratePassword(idgen.PasswordLength) + } else if machinePassword == "" && interactive { + qs := &survey.Password{ + Message: "Please provide a password for the machine:", + } + survey.AskOne(qs, &machinePassword) + } + + password := strfmt.Password(machinePassword) + + _, err = cli.db.CreateMachine(ctx, &machineID, &password, "", true, force, types.PasswordAuthType) + if err != nil { + return fmt.Errorf("unable to create machine: %w", err) + } + + fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID) + + if apiURL == "" { + if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" { + apiURL = clientCfg.Credentials.URL + } else if 
serverCfg.ClientURL() != "" { + apiURL = serverCfg.ClientURL() + } else { + return errors.New("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") + } + } + + apiCfg := csconfig.ApiCredentialsCfg{ + Login: machineID, + Password: password.String(), + URL: apiURL, + } + + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + return fmt.Errorf("unable to serialize api credentials: %w", err) + } + + if dumpFile != "" && dumpFile != "-" { + if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil { + return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) + } + + fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) + } else { + fmt.Print(string(apiConfigDump)) + } + + return nil +} + +func (cli *cliMachines) newAddCmd() *cobra.Command { + var ( + password MachinePassword + dumpFile string + apiURL string + interactive bool + autoAdd bool + force bool + ) + + cmd := &cobra.Command{ + Use: "add", + Short: "add a single machine to the database", + DisableAutoGenTag: true, + Long: `Register a new machine in the database. 
cscli should be on the same machine as LAPI.`,
+ Example: `cscli machines add --auto
+cscli machines add MyTestMachine --auto
+cscli machines add MyTestMachine --password MyPassword
+cscli machines add -f- --auto > /tmp/mycreds.yaml`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return cli.add(cmd.Context(), args, string(password), dumpFile, apiURL, interactive, autoAdd, force)
+ },
+ }
+
+ flags := cmd.Flags()
+ flags.VarP(&password, "password", "p", "machine password to login to the API")
+ flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")")
+ flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API")
+ flags.BoolVarP(&interactive, "interactive", "i", false, "interactive mode to enter the password")
+ flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)")
+ flags.BoolVar(&force, "force", false, "will force add the machine if it already exists")
+
+ return cmd
+}
diff --git a/cmd/crowdsec-cli/climachine/delete.go b/cmd/crowdsec-cli/climachine/delete.go
new file mode 100644
index 00000000000..644ce93c642
--- /dev/null
+++ b/cmd/crowdsec-cli/climachine/delete.go
@@ -0,0 +1,52 @@
+package climachine
+
+import (
+ "context"
+ "errors"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+
+ "github.com/crowdsecurity/crowdsec/pkg/database"
+)
+
+func (cli *cliMachines) delete(ctx context.Context, machines []string, ignoreMissing bool) error {
+ for _, machineID := range machines {
+ if err := cli.db.DeleteWatcher(ctx, machineID); err != nil {
+ var notFoundErr *database.MachineNotFoundError
+ if ignoreMissing && errors.As(err, &notFoundErr) {
+ return nil
+ }
+
+ log.Errorf("unable to delete machine: %s", err)
+
+ return nil
+ }
+
+ log.Infof("machine '%s' deleted successfully", machineID)
+ }
+
+ return nil
+}
+
+func (cli *cliMachines) newDeleteCmd() *cobra.Command {
+ var ignoreMissing bool
+ + cmd := &cobra.Command{ + Use: "delete [machine_name]...", + Short: "delete machine(s) by name", + Example: `cscli machines delete "machine1" "machine2"`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"remove"}, + DisableAutoGenTag: true, + ValidArgsFunction: cli.validMachineID, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.delete(cmd.Context(), args, ignoreMissing) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more machines don't exist") + + return cmd +} diff --git a/cmd/crowdsec-cli/climachine/inspect.go b/cmd/crowdsec-cli/climachine/inspect.go new file mode 100644 index 00000000000..b08f2f62794 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/inspect.go @@ -0,0 +1,184 @@ +package climachine + +import ( + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { + state := machine.Hubstate + + if len(state) == 0 { + fmt.Println("No hub items found for this machine") + return + } + + // group state rows by type for multiple tables + rowsByType := make(map[string][]table.Row) + + for itemType, items := range state { + for _, item := range items { + if _, ok := rowsByType[itemType]; !ok { + rowsByType[itemType] = make([]table.Row, 0) + } + + row := table.Row{item.Name, item.Status, item.Version} + rowsByType[itemType] = append(rowsByType[itemType], row) + } + } + + for itemType, rows := range rowsByType { + t := cstable.New(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Name", "Status", "Version"}) + t.SetTitle(itemType) + 
t.AppendRows(rows) + io.WriteString(out, t.Render()+"\n") + } +} + +func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { + t := cstable.New(out, cli.cfg().Cscli.Color).Writer + + t.SetTitle("Machine: " + machine.MachineId) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + t.AppendRows([]table.Row{ + {"IP Address", machine.IpAddress}, + {"Created At", machine.CreatedAt}, + {"Last Update", machine.UpdatedAt}, + {"Last Heartbeat", machine.LastHeartbeat}, + {"Validated?", machine.IsValidated}, + {"CrowdSec version", machine.Version}, + {"OS", clientinfo.GetOSNameAndVersion(machine)}, + {"Auth type", machine.AuthType}, + }) + + for dsName, dsCount := range machine.Datasources { + t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) + } + + for _, ff := range clientinfo.GetFeatureFlagList(machine) { + t.AppendRow(table.Row{"Feature Flags", ff}) + } + + for _, coll := range machine.Hubstate[cwhub.COLLECTIONS] { + t.AppendRow(table.Row{"Collections", coll.Name}) + } + + io.WriteString(out, t.Render()+"\n") +} + +func (cli *cliMachines) inspect(machine *ent.Machine) error { + out := color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newMachineInfo(machine)); err != nil { + return errors.New("failed to serialize") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + + return nil +} + +func (cli *cliMachines) inspectHub(machine *ent.Machine) error { + out := color.Output + + switch cli.cfg().Cscli.Output { + case "human": + cli.inspectHubHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(machine.Hubstate); err != nil { + return errors.New("failed to serialize") + } + + return nil + case 
"raw": + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"type", "name", "status", "version"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + rows := make([][]string, 0) + + for itemType, items := range machine.Hubstate { + for _, item := range items { + rows = append(rows, []string{itemType, item.Name, item.Status, item.Version}) + } + } + + for _, row := range rows { + if err := csvwriter.Write(row); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } + } + + csvwriter.Flush() + } + + return nil +} + +func (cli *cliMachines) newInspectCmd() *cobra.Command { + var showHub bool + + cmd := &cobra.Command{ + Use: "inspect [machine_name]", + Short: "inspect a machine by name", + Example: `cscli machines inspect "machine1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validMachineID, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + machineID := args[0] + + machine, err := cli.db.QueryMachineByID(ctx, machineID) + if err != nil { + return fmt.Errorf("unable to read machine data '%s': %w", machineID, err) + } + + if showHub { + return cli.inspectHub(machine) + } + + return cli.inspect(machine) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&showHub, "hub", "H", false, "show hub state") + + return cmd +} diff --git a/cmd/crowdsec-cli/climachine/list.go b/cmd/crowdsec-cli/climachine/list.go new file mode 100644 index 00000000000..6bedb2ad807 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/list.go @@ -0,0 +1,137 @@ +package climachine + +import ( + "context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "time" + + "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database" + 
"github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +// getLastHeartbeat returns the last heartbeat timestamp of a machine +// and a boolean indicating if the machine is considered active or not. +func getLastHeartbeat(m *ent.Machine) (string, bool) { + if m.LastHeartbeat == nil { + return "-", false + } + + elapsed := time.Now().UTC().Sub(*m.LastHeartbeat) + + hb := elapsed.Truncate(time.Second).String() + if elapsed > 2*time.Minute { + return hb, false + } + + return hb, true +} + +func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) + + for _, m := range machines { + validated := emoji.Prohibited + if m.IsValidated { + validated = emoji.CheckMark + } + + hb, active := getLastHeartbeat(m) + if !active { + hb = emoji.Warning + " " + hb + } + + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) + } + + io.WriteString(out, t.Render()+"\n") +} + +func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + for _, m := range machines { + validated := "false" + if m.IsValidated { + validated = "true" + } + + hb := "-" + if m.LastHeartbeat != nil { + hb = m.LastHeartbeat.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion)}); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + 
} + } + + csvwriter.Flush() + + return nil +} + +func (cli *cliMachines) List(ctx context.Context, out io.Writer, db *database.Client) error { + // XXX: must use the provided db object, the one in the struct might be nil + // (calling List directly skips the PersistentPreRunE) + + machines, err := db.ListMachines(ctx) + if err != nil { + return fmt.Errorf("unable to list machines: %w", err) + } + + switch cli.cfg().Cscli.Output { + case "human": + cli.listHuman(out, machines) + case "json": + info := make([]machineInfo, 0, len(machines)) + for _, m := range machines { + info = append(info, newMachineInfo(m)) + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(info); err != nil { + return errors.New("failed to serialize") + } + + return nil + case "raw": + return cli.listCSV(out, machines) + } + + return nil +} + +func (cli *cliMachines) newListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "list all machines in the database", + Long: `list all machines in the database with their status and last heartbeat`, + Example: `cscli machines list`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.List(cmd.Context(), color.Output, cli.db) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/climachine/machines.go b/cmd/crowdsec-cli/climachine/machines.go index 1fbedcf57fd..ad503c6e936 100644 --- a/cmd/crowdsec-cli/climachine/machines.go +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -1,55 +1,19 @@ package climachine import ( - "context" - "encoding/csv" - "encoding/json" - "errors" - "fmt" - "io" - "os" "slices" "strings" "time" - "github.com/AlecAivazis/survey/v2" - "github.com/fatih/color" - "github.com/go-openapi/strfmt" - "github.com/jedib0t/go-pretty/v6/table" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/emoji" - "github.com/crowdsecurity/crowdsec/pkg/types" ) -// getLastHeartbeat returns the last heartbeat timestamp of a machine -// and a boolean indicating if the machine is considered active or not. -func getLastHeartbeat(m *ent.Machine) (string, bool) { - if m.LastHeartbeat == nil { - return "-", false - } - - elapsed := time.Now().UTC().Sub(*m.LastHeartbeat) - - hb := elapsed.Truncate(time.Second).String() - if elapsed > 2*time.Minute { - return hb, false - } - - return hb, true -} - type configGetter = func() *csconfig.Config type cliMachines struct { @@ -97,58 +61,6 @@ Note: This command requires database direct access, so is intended to be run on return cmd } -func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { - state := machine.Hubstate - - if len(state) == 0 { - fmt.Println("No hub items found for this machine") - return - } - - // group state rows by type for multiple tables - rowsByType := make(map[string][]table.Row) - - for itemType, items := range state { - for _, item := range items { - if _, ok := rowsByType[itemType]; !ok { - rowsByType[itemType] = make([]table.Row, 0) - } - - row := table.Row{item.Name, item.Status, item.Version} - rowsByType[itemType] = append(rowsByType[itemType], row) - } - } - - for itemType, rows := range rowsByType { - t := cstable.New(out, cli.cfg().Cscli.Color).Writer - t.AppendHeader(table.Row{"Name", "Status", "Version"}) - t.SetTitle(itemType) - t.AppendRows(rows) - io.WriteString(out, t.Render()+"\n") - } -} - 
-func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { - t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer - t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) - - for _, m := range machines { - validated := emoji.Prohibited - if m.IsValidated { - validated = emoji.CheckMark - } - - hb, active := getLastHeartbeat(m) - if !active { - hb = emoji.Warning + " " + hb - } - - t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) - } - - io.WriteString(out, t.Render()+"\n") -} - // machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. type machineInfo struct { CreatedAt time.Time `json:"created_at,omitempty"` @@ -182,219 +94,6 @@ func newMachineInfo(m *ent.Machine) machineInfo { } } -func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { - csvwriter := csv.NewWriter(out) - - err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os"}) - if err != nil { - return fmt.Errorf("failed to write header: %w", err) - } - - for _, m := range machines { - validated := "false" - if m.IsValidated { - validated = "true" - } - - hb := "-" - if m.LastHeartbeat != nil { - hb = m.LastHeartbeat.Format(time.RFC3339) - } - - if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion)}); err != nil { - return fmt.Errorf("failed to write raw output: %w", err) - } - } - - csvwriter.Flush() - - return nil -} - -func (cli *cliMachines) List(ctx context.Context, out io.Writer, db *database.Client) error { - // XXX: must use the provided db object, the one in the struct might be nil - // (calling List directly skips the PersistentPreRunE) - - 
machines, err := db.ListMachines(ctx) - if err != nil { - return fmt.Errorf("unable to list machines: %w", err) - } - - switch cli.cfg().Cscli.Output { - case "human": - cli.listHuman(out, machines) - case "json": - info := make([]machineInfo, 0, len(machines)) - for _, m := range machines { - info = append(info, newMachineInfo(m)) - } - - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(info); err != nil { - return errors.New("failed to serialize") - } - - return nil - case "raw": - return cli.listCSV(out, machines) - } - - return nil -} - -func (cli *cliMachines) newListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "list all machines in the database", - Long: `list all machines in the database with their status and last heartbeat`, - Example: `cscli machines list`, - Args: cobra.NoArgs, - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, _ []string) error { - return cli.List(cmd.Context(), color.Output, cli.db) - }, - } - - return cmd -} - -func (cli *cliMachines) newAddCmd() *cobra.Command { - var ( - password MachinePassword - dumpFile string - apiURL string - interactive bool - autoAdd bool - force bool - ) - - cmd := &cobra.Command{ - Use: "add", - Short: "add a single machine to the database", - DisableAutoGenTag: true, - Long: `Register a new machine in the database. 
cscli should be on the same machine as LAPI.`, - Example: `cscli machines add --auto -cscli machines add MyTestMachine --auto -cscli machines add MyTestMachine --password MyPassword -cscli machines add -f- --auto > /tmp/mycreds.yaml`, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.add(cmd.Context(), args, string(password), dumpFile, apiURL, interactive, autoAdd, force) - }, - } - - flags := cmd.Flags() - flags.VarP(&password, "password", "p", "machine password to login to the API") - flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") - flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API") - flags.BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password") - flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)") - flags.BoolVar(&force, "force", false, "will force add the machine if it already exist") - - return cmd -} - -func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { - var ( - err error - machineID string - ) - - // create machineID if not specified by user - if len(args) == 0 { - if !autoAdd { - return errors.New("please specify a machine name to add, or use --auto") - } - - machineID, err = idgen.GenerateMachineID("") - if err != nil { - return fmt.Errorf("unable to generate machine id: %w", err) - } - } else { - machineID = args[0] - } - - clientCfg := cli.cfg().API.Client - serverCfg := cli.cfg().API.Server - - /*check if file already exists*/ - if dumpFile == "" && clientCfg != nil && clientCfg.CredentialsFilePath != "" { - credFile := clientCfg.CredentialsFilePath - // use the default only if the file does not exist - _, err = os.Stat(credFile) - - switch { - case os.IsNotExist(err) || force: - dumpFile = credFile - case 
err != nil: - return fmt.Errorf("unable to stat '%s': %w", credFile, err) - default: - return fmt.Errorf(`credentials file '%s' already exists: please remove it, use "--force" or specify a different file with "-f" ("-f -" for standard output)`, credFile) - } - } - - if dumpFile == "" { - return errors.New(`please specify a file to dump credentials to, with -f ("-f -" for standard output)`) - } - - // create a password if it's not specified by user - if machinePassword == "" && !interactive { - if !autoAdd { - return errors.New("please specify a password with --password or use --auto") - } - - machinePassword = idgen.GeneratePassword(idgen.PasswordLength) - } else if machinePassword == "" && interactive { - qs := &survey.Password{ - Message: "Please provide a password for the machine:", - } - survey.AskOne(qs, &machinePassword) - } - - password := strfmt.Password(machinePassword) - - _, err = cli.db.CreateMachine(ctx, &machineID, &password, "", true, force, types.PasswordAuthType) - if err != nil { - return fmt.Errorf("unable to create machine: %w", err) - } - - fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID) - - if apiURL == "" { - if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" { - apiURL = clientCfg.Credentials.URL - } else if serverCfg.ClientURL() != "" { - apiURL = serverCfg.ClientURL() - } else { - return errors.New("unable to dump an api URL. 
Please provide it in your configuration or with the -u parameter") - } - } - - apiCfg := csconfig.ApiCredentialsCfg{ - Login: machineID, - Password: password.String(), - URL: apiURL, - } - - apiConfigDump, err := yaml.Marshal(apiCfg) - if err != nil { - return fmt.Errorf("unable to serialize api credentials: %w", err) - } - - if dumpFile != "" && dumpFile != "-" { - if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil { - return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) - } - - fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) - } else { - fmt.Print(string(apiConfigDump)) - } - - return nil -} - // validMachineID returns a list of machine IDs for command completion func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var err error @@ -431,287 +130,3 @@ func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComp return ret, cobra.ShellCompDirectiveNoFileComp } - -func (cli *cliMachines) delete(ctx context.Context, machines []string, ignoreMissing bool) error { - for _, machineID := range machines { - if err := cli.db.DeleteWatcher(ctx, machineID); err != nil { - var notFoundErr *database.MachineNotFoundError - if ignoreMissing && errors.As(err, ¬FoundErr) { - return nil - } - - log.Errorf("unable to delete machine: %s", err) - - return nil - } - - log.Infof("machine '%s' deleted successfully", machineID) - } - - return nil -} - -func (cli *cliMachines) newDeleteCmd() *cobra.Command { - var ignoreMissing bool - - cmd := &cobra.Command{ - Use: "delete [machine_name]...", - Short: "delete machine(s) by name", - Example: `cscli machines delete "machine1" "machine2"`, - Args: cobra.MinimumNArgs(1), - Aliases: []string{"remove"}, - DisableAutoGenTag: true, - ValidArgsFunction: cli.validMachineID, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.delete(cmd.Context(), args, ignoreMissing) - }, - 
} - - flags := cmd.Flags() - flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more machines don't exist") - - return cmd -} - -func (cli *cliMachines) prune(ctx context.Context, duration time.Duration, notValidOnly bool, force bool) error { - if duration < 2*time.Minute && !notValidOnly { - if yes, err := ask.YesNo( - "The duration you provided is less than 2 minutes. "+ - "This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil { - return err - } else if !yes { - fmt.Println("User aborted prune. No changes were made.") - return nil - } - } - - machines := []*ent.Machine{} - if pending, err := cli.db.QueryPendingMachine(ctx); err == nil { - machines = append(machines, pending...) - } - - if !notValidOnly { - if pending, err := cli.db.QueryMachinesInactiveSince(ctx, time.Now().UTC().Add(-duration)); err == nil { - machines = append(machines, pending...) - } - } - - if len(machines) == 0 { - fmt.Println("No machines to prune.") - return nil - } - - cli.listHuman(color.Output, machines) - - if !force { - if yes, err := ask.YesNo( - "You are about to PERMANENTLY remove the above machines from the database. "+ - "These will NOT be recoverable. Continue?", false); err != nil { - return err - } else if !yes { - fmt.Println("User aborted prune. 
No changes were made.") - return nil - } - } - - deleted, err := cli.db.BulkDeleteWatchers(ctx, machines) - if err != nil { - return fmt.Errorf("unable to prune machines: %w", err) - } - - fmt.Fprintf(os.Stderr, "successfully deleted %d machines\n", deleted) - - return nil -} - -func (cli *cliMachines) newPruneCmd() *cobra.Command { - var ( - duration time.Duration - notValidOnly bool - force bool - ) - - const defaultDuration = 10 * time.Minute - - cmd := &cobra.Command{ - Use: "prune", - Short: "prune multiple machines from the database", - Long: `prune multiple machines that are not validated or have not connected to the local API in a given duration.`, - Example: `cscli machines prune -cscli machines prune --duration 1h -cscli machines prune --not-validated-only --force`, - Args: cobra.NoArgs, - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, _ []string) error { - return cli.prune(cmd.Context(), duration, notValidOnly, force) - }, - } - - flags := cmd.Flags() - flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since validated machine last heartbeat") - flags.BoolVar(¬ValidOnly, "not-validated-only", false, "only prune machines that are not validated") - flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") - - return cmd -} - -func (cli *cliMachines) validate(ctx context.Context, machineID string) error { - if err := cli.db.ValidateMachine(ctx, machineID); err != nil { - return fmt.Errorf("unable to validate machine '%s': %w", machineID, err) - } - - log.Infof("machine '%s' validated successfully", machineID) - - return nil -} - -func (cli *cliMachines) newValidateCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "validate", - Short: "validate a machine to access the local API", - Long: `validate a machine to access the local API.`, - Example: `cscli machines validate "machine_name"`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args 
[]string) error { - return cli.validate(cmd.Context(), args[0]) - }, - } - - return cmd -} - -func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { - t := cstable.New(out, cli.cfg().Cscli.Color).Writer - - t.SetTitle("Machine: " + machine.MachineId) - - t.SetColumnConfigs([]table.ColumnConfig{ - {Number: 1, AutoMerge: true}, - }) - - t.AppendRows([]table.Row{ - {"IP Address", machine.IpAddress}, - {"Created At", machine.CreatedAt}, - {"Last Update", machine.UpdatedAt}, - {"Last Heartbeat", machine.LastHeartbeat}, - {"Validated?", machine.IsValidated}, - {"CrowdSec version", machine.Version}, - {"OS", clientinfo.GetOSNameAndVersion(machine)}, - {"Auth type", machine.AuthType}, - }) - - for dsName, dsCount := range machine.Datasources { - t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) - } - - for _, ff := range clientinfo.GetFeatureFlagList(machine) { - t.AppendRow(table.Row{"Feature Flags", ff}) - } - - for _, coll := range machine.Hubstate[cwhub.COLLECTIONS] { - t.AppendRow(table.Row{"Collections", coll.Name}) - } - - io.WriteString(out, t.Render()+"\n") -} - -func (cli *cliMachines) inspect(machine *ent.Machine) error { - out := color.Output - outputFormat := cli.cfg().Cscli.Output - - switch outputFormat { - case "human": - cli.inspectHuman(out, machine) - case "json": - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(newMachineInfo(machine)); err != nil { - return errors.New("failed to serialize") - } - - return nil - default: - return fmt.Errorf("output format '%s' not supported for this command", outputFormat) - } - - return nil -} - -func (cli *cliMachines) inspectHub(machine *ent.Machine) error { - out := color.Output - - switch cli.cfg().Cscli.Output { - case "human": - cli.inspectHubHuman(out, machine) - case "json": - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(machine.Hubstate); err != nil { - return errors.New("failed to serialize") - } - 
- return nil - case "raw": - csvwriter := csv.NewWriter(out) - - err := csvwriter.Write([]string{"type", "name", "status", "version"}) - if err != nil { - return fmt.Errorf("failed to write header: %w", err) - } - - rows := make([][]string, 0) - - for itemType, items := range machine.Hubstate { - for _, item := range items { - rows = append(rows, []string{itemType, item.Name, item.Status, item.Version}) - } - } - - for _, row := range rows { - if err := csvwriter.Write(row); err != nil { - return fmt.Errorf("failed to write raw output: %w", err) - } - } - - csvwriter.Flush() - } - - return nil -} - -func (cli *cliMachines) newInspectCmd() *cobra.Command { - var showHub bool - - cmd := &cobra.Command{ - Use: "inspect [machine_name]", - Short: "inspect a machine by name", - Example: `cscli machines inspect "machine1"`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: cli.validMachineID, - RunE: func(cmd *cobra.Command, args []string) error { - ctx := cmd.Context() - machineID := args[0] - - machine, err := cli.db.QueryMachineByID(ctx, machineID) - if err != nil { - return fmt.Errorf("unable to read machine data '%s': %w", machineID, err) - } - - if showHub { - return cli.inspectHub(machine) - } - - return cli.inspect(machine) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&showHub, "hub", "H", false, "show hub state") - - return cmd -} diff --git a/cmd/crowdsec-cli/climachine/prune.go b/cmd/crowdsec-cli/climachine/prune.go new file mode 100644 index 00000000000..ed41ef0a736 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/prune.go @@ -0,0 +1,96 @@ +package climachine + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +func (cli *cliMachines) prune(ctx context.Context, duration time.Duration, notValidOnly bool, force bool) error { + if duration < 2*time.Minute && 
!notValidOnly { + if yes, err := ask.YesNo( + "The duration you provided is less than 2 minutes. "+ + "This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. No changes were made.") + return nil + } + } + + machines := []*ent.Machine{} + if pending, err := cli.db.QueryPendingMachine(ctx); err == nil { + machines = append(machines, pending...) + } + + if !notValidOnly { + if pending, err := cli.db.QueryMachinesInactiveSince(ctx, time.Now().UTC().Add(-duration)); err == nil { + machines = append(machines, pending...) + } + } + + if len(machines) == 0 { + fmt.Println("No machines to prune.") + return nil + } + + cli.listHuman(color.Output, machines) + + if !force { + if yes, err := ask.YesNo( + "You are about to PERMANENTLY remove the above machines from the database. "+ + "These will NOT be recoverable. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. 
No changes were made.") + return nil + } + } + + deleted, err := cli.db.BulkDeleteWatchers(ctx, machines) + if err != nil { + return fmt.Errorf("unable to prune machines: %w", err) + } + + fmt.Fprintf(os.Stderr, "successfully deleted %d machines\n", deleted) + + return nil +} + +func (cli *cliMachines) newPruneCmd() *cobra.Command { + var ( + duration time.Duration + notValidOnly bool + force bool + ) + + const defaultDuration = 10 * time.Minute + + cmd := &cobra.Command{ + Use: "prune", + Short: "prune multiple machines from the database", + Long: `prune multiple machines that are not validated or have not connected to the local API in a given duration.`, + Example: `cscli machines prune +cscli machines prune --duration 1h +cscli machines prune --not-validated-only --force`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.prune(cmd.Context(), duration, notValidOnly, force) + }, + } + + flags := cmd.Flags() + flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since validated machine last heartbeat") + flags.BoolVar(¬ValidOnly, "not-validated-only", false, "only prune machines that are not validated") + flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") + + return cmd +} diff --git a/cmd/crowdsec-cli/climachine/validate.go b/cmd/crowdsec-cli/climachine/validate.go new file mode 100644 index 00000000000..cba872aa05d --- /dev/null +++ b/cmd/crowdsec-cli/climachine/validate.go @@ -0,0 +1,35 @@ +package climachine + +import ( + "context" + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +func (cli *cliMachines) validate(ctx context.Context, machineID string) error { + if err := cli.db.ValidateMachine(ctx, machineID); err != nil { + return fmt.Errorf("unable to validate machine '%s': %w", machineID, err) + } + + log.Infof("machine '%s' validated successfully", machineID) + + return nil +} + +func (cli *cliMachines) 
newValidateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "validate", + Short: "validate a machine to access the local API", + Long: `validate a machine to access the local API.`, + Example: `cscli machines validate "machine_name"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(cmd.Context(), args[0]) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index ddb2baac14d..27fa99710c8 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -84,7 +84,7 @@ func (cli *cliMetrics) newListCmd() *cobra.Command { Use: "list", Short: "List available types of metrics.", Long: `List available types of metrics.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.list() diff --git a/cmd/crowdsec-cli/climetrics/metrics.go b/cmd/crowdsec-cli/climetrics/metrics.go index f3bc4874460..67bd7b6ad93 100644 --- a/cmd/crowdsec-cli/climetrics/metrics.go +++ b/cmd/crowdsec-cli/climetrics/metrics.go @@ -36,7 +36,7 @@ cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers # List available metric types cscli metrics list`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.show(cmd.Context(), nil, url, noUnit) diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 5489faa37c8..baf899c10cf 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -158,7 +158,7 @@ func (cli *cliNotifications) newListCmd() *cobra.Command { Short: "list notifications plugins", Long: `list notifications plugins and their status (active or not)`, Example: `cscli notifications list`, - Args: 
cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/config.go index e88845798e2..4cf8916ad4b 100644 --- a/cmd/crowdsec-cli/config.go +++ b/cmd/crowdsec-cli/config.go @@ -18,7 +18,7 @@ func (cli *cliConfig) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "config [command]", Short: "Allows to view current config", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, } diff --git a/cmd/crowdsec-cli/config_feature_flags.go b/cmd/crowdsec-cli/config_feature_flags.go index d1dbe2b93b7..760e2194bb3 100644 --- a/cmd/crowdsec-cli/config_feature_flags.go +++ b/cmd/crowdsec-cli/config_feature_flags.go @@ -121,7 +121,7 @@ func (cli *cliConfig) newFeatureFlagsCmd() *cobra.Command { Use: "feature-flags", Short: "Displays feature flag status", Long: `Displays the supported feature flags and their current status.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.featureFlags(showRetired) diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index 2d3ac488ba2..3d17d264574 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -235,7 +235,7 @@ func (cli *cliConfig) newShowCmd() *cobra.Command { Use: "show", Short: "Displays current config", Long: `Displays the current cli configuration.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { if err := cli.cfg().LoadAPIClient(); err != nil { diff --git a/cmd/crowdsec-cli/config_showyaml.go b/cmd/crowdsec-cli/config_showyaml.go index 52daee6a65e..10549648d09 100644 --- a/cmd/crowdsec-cli/config_showyaml.go +++ b/cmd/crowdsec-cli/config_showyaml.go @@ -15,7 +15,7 @@ func (cli *cliConfig) newShowYAMLCmd() *cobra.Command { cmd := &cobra.Command{ Use: 
"show-yaml", Short: "Displays merged config.yaml + config.yaml.local", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.showYAML() diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 41db9e6cbf2..53a7dff85a0 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -129,7 +129,7 @@ func (cli *cliDashboard) newSetupCmd() *cobra.Command { Use: "setup", Short: "Setup a metabase container.", Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, Example: ` cscli dashboard setup @@ -198,7 +198,7 @@ func (cli *cliDashboard) newStartCmd() *cobra.Command { Use: "start", Short: "Start the metabase container.", Long: `Stats the metabase container using docker.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID) @@ -229,7 +229,7 @@ func (cli *cliDashboard) newStopCmd() *cobra.Command { Use: "stop", Short: "Stops the metabase container.", Long: `Stops the metabase container using docker.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { if err := metabase.StopContainer(metabaseContainerID); err != nil { @@ -245,7 +245,7 @@ func (cli *cliDashboard) newStopCmd() *cobra.Command { func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { cmd := &cobra.Command{Use: "show-password", Short: "displays password of metabase.", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { m := metabase.Metabase{} @@ -268,7 +268,7 @@ func (cli *cliDashboard) newRemoveCmd() *cobra.Command { Use: "remove", Short: "removes the metabase 
container.", Long: `removes the metabase container using docker.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, Example: ` cscli dashboard remove diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index b892dc84015..8601414db48 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -78,13 +78,13 @@ teardown() { # invalid defaults rune -1 cscli decisions import --duration "" -i - <<<'value\n5.6.7.8' --format csv - assert_stderr --partial "--duration cannot be empty" + assert_stderr --partial "default duration cannot be empty" rune -1 cscli decisions import --scope "" -i - <<<'value\n5.6.7.8' --format csv - assert_stderr --partial "--scope cannot be empty" + assert_stderr --partial "default scope cannot be empty" rune -1 cscli decisions import --reason "" -i - <<<'value\n5.6.7.8' --format csv - assert_stderr --partial "--reason cannot be empty" + assert_stderr --partial "default reason cannot be empty" rune -1 cscli decisions import --type "" -i - <<<'value\n5.6.7.8' --format csv - assert_stderr --partial "--type cannot be empty" + assert_stderr --partial "default type cannot be empty" #---------- # JSON From 094072441c870aa6a7e3493dfe4f555d129d04e8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 31 Oct 2024 17:12:53 +0100 Subject: [PATCH 335/581] make: allow build on ubuntu 24.10 (#3311) --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 29a84d5b066..87bb0313b25 100644 --- a/Makefile +++ b/Makefile @@ -80,6 +80,9 @@ endif #expr_debug tag is required to enable the debug mode in expr GO_TAGS := netgo,osusergo,sqlite_omit_load_extension,expr_debug +# Allow building on ubuntu 24.10, see https://github.com/golang/go/issues/70023 +export CGO_LDFLAGS_ALLOW=-Wl,--(push|pop)-state.* + # this will be used by Go in the make target, some distributions require it export 
PKG_CONFIG_PATH:=/usr/local/lib/pkgconfig:$(PKG_CONFIG_PATH) From 16169913985074f8b9944dca2a5c338713ef3e93 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Mon, 4 Nov 2024 10:02:55 +0100 Subject: [PATCH 336/581] Alert context appsec (#3288) --- .../modules/appsec/appsec_runner.go | 4 +- pkg/acquisition/modules/appsec/utils.go | 163 +++++------------- pkg/alertcontext/alertcontext.go | 154 ++++++++++++----- pkg/alertcontext/alertcontext_test.go | 162 +++++++++++++++++ pkg/types/appsec_event.go | 8 +- pkg/types/event.go | 9 + 6 files changed, 337 insertions(+), 163 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index de34b62d704..90d23f63543 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -249,7 +249,7 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { // Should the in band match trigger an overflow ? if r.AppsecRuntime.Response.SendAlert { - appsecOvlfw, err := AppsecEventGeneration(evt) + appsecOvlfw, err := AppsecEventGeneration(evt, request.HTTPRequest) if err != nil { r.logger.Errorf("unable to generate appsec event : %s", err) return @@ -293,7 +293,7 @@ func (r *AppsecRunner) handleOutBandInterrupt(request *appsec.ParsedRequest) { // Should the match trigger an overflow ? 
if r.AppsecRuntime.Response.SendAlert { - appsecOvlfw, err := AppsecEventGeneration(evt) + appsecOvlfw, err := AppsecEventGeneration(evt, request.HTTPRequest) if err != nil { r.logger.Errorf("unable to generate appsec event : %s", err) return diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 4fb1a979d14..b4b66897516 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -1,10 +1,10 @@ package appsecacquisition import ( + "errors" "fmt" "net" - "slices" - "strconv" + "net/http" "time" "github.com/oschwald/geoip2-golang" @@ -22,29 +22,44 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var appsecMetaKeys = []string{ - "id", - "name", - "method", - "uri", - "matched_zones", - "msg", -} +func AppsecEventGenerationGeoIPEnrich(src *models.Source) error { -func appendMeta(meta models.Meta, key string, value string) models.Meta { - if value == "" { - return meta + if src == nil || src.Scope == nil || *src.Scope != types.Ip { + return errors.New("source is nil or not an IP") } - meta = append(meta, &models.MetaItems0{ - Key: key, - Value: value, - }) + //GeoIP enrich + asndata, err := exprhelpers.GeoIPASNEnrich(src.IP) + + if err != nil { + return err + } else if asndata != nil { + record := asndata.(*geoip2.ASN) + src.AsName = record.AutonomousSystemOrganization + src.AsNumber = fmt.Sprintf("%d", record.AutonomousSystemNumber) + } - return meta + cityData, err := exprhelpers.GeoIPEnrich(src.IP) + if err != nil { + return err + } else if cityData != nil { + record := cityData.(*geoip2.City) + src.Cn = record.Country.IsoCode + src.Latitude = float32(record.Location.Latitude) + src.Longitude = float32(record.Location.Longitude) + } + + rangeData, err := exprhelpers.GeoIPRangeEnrich(src.IP) + if err != nil { + return err + } else if rangeData != nil { + record := rangeData.(*net.IPNet) + src.Range = record.String() + } + return nil } -func AppsecEventGeneration(inEvt 
types.Event) (*types.Event, error) { +func AppsecEventGeneration(inEvt types.Event, request *http.Request) (*types.Event, error) { // if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI if !inEvt.Appsec.HasInBandMatches { return nil, nil @@ -60,34 +75,12 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { Scope: ptr.Of(types.Ip), } - asndata, err := exprhelpers.GeoIPASNEnrich(sourceIP) - - if err != nil { - log.Errorf("Unable to enrich ip '%s' for ASN: %s", sourceIP, err) - } else if asndata != nil { - record := asndata.(*geoip2.ASN) - source.AsName = record.AutonomousSystemOrganization - source.AsNumber = fmt.Sprintf("%d", record.AutonomousSystemNumber) - } - - cityData, err := exprhelpers.GeoIPEnrich(sourceIP) - if err != nil { - log.Errorf("Unable to enrich ip '%s' for geo data: %s", sourceIP, err) - } else if cityData != nil { - record := cityData.(*geoip2.City) - source.Cn = record.Country.IsoCode - source.Latitude = float32(record.Location.Latitude) - source.Longitude = float32(record.Location.Longitude) - } - - rangeData, err := exprhelpers.GeoIPRangeEnrich(sourceIP) - if err != nil { - log.Errorf("Unable to enrich ip '%s' for range: %s", sourceIP, err) - } else if rangeData != nil { - record := rangeData.(*net.IPNet) - source.Range = record.String() + // Enrich source with GeoIP data + if err := AppsecEventGenerationGeoIPEnrich(&source); err != nil { + log.Errorf("unable to enrich source with GeoIP data : %s", err) } + // Build overflow evt.Overflow.Sources = make(map[string]models.Source) evt.Overflow.Sources[sourceIP] = source @@ -95,83 +88,11 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { alert.Capacity = ptr.Of(int32(1)) alert.Events = make([]*models.Event, len(evt.Appsec.GetRuleIDs())) - now := ptr.Of(time.Now().UTC().Format(time.RFC3339)) - - tmpAppsecContext := make(map[string][]string) - - for _, matched_rule := range inEvt.Appsec.MatchedRules { - evtRule := 
models.Event{} - - evtRule.Timestamp = now - - evtRule.Meta = make(models.Meta, 0) - - for _, key := range appsecMetaKeys { - if tmpAppsecContext[key] == nil { - tmpAppsecContext[key] = make([]string, 0) - } - - switch value := matched_rule[key].(type) { - case string: - evtRule.Meta = appendMeta(evtRule.Meta, key, value) - - if value != "" && !slices.Contains(tmpAppsecContext[key], value) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], value) - } - case int: - val := strconv.Itoa(value) - evtRule.Meta = appendMeta(evtRule.Meta, key, val) - - if val != "" && !slices.Contains(tmpAppsecContext[key], val) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], val) - } - case []string: - for _, v := range value { - evtRule.Meta = appendMeta(evtRule.Meta, key, v) - - if v != "" && !slices.Contains(tmpAppsecContext[key], v) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], v) - } - } - case []int: - for _, v := range value { - val := strconv.Itoa(v) - evtRule.Meta = appendMeta(evtRule.Meta, key, val) - - if val != "" && !slices.Contains(tmpAppsecContext[key], val) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], val) - } - } - default: - val := fmt.Sprintf("%v", value) - evtRule.Meta = appendMeta(evtRule.Meta, key, val) - - if val != "" && !slices.Contains(tmpAppsecContext[key], val) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], val) - } - } - } - - alert.Events = append(alert.Events, &evtRule) - } - - metas := make([]*models.MetaItems0, 0) - - for key, values := range tmpAppsecContext { - if len(values) == 0 { - continue - } - - valueStr, err := alertcontext.TruncateContext(values, alertcontext.MaxContextValueLen) - if err != nil { - log.Warning(err.Error()) - } - - meta := models.MetaItems0{ - Key: key, - Value: valueStr, + metas, errors := alertcontext.AppsecEventToContext(inEvt.Appsec, request) + if len(errors) > 0 { + for _, err := range errors { + log.Errorf("failed to generate appsec context: %s", err) } - metas = 
append(metas, &meta) } alert.Meta = metas diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 16ebc6d0ac2..0c60dea4292 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -3,6 +3,7 @@ package alertcontext import ( "encoding/json" "fmt" + "net/http" "slices" "strconv" @@ -30,7 +31,10 @@ type Context struct { func ValidateContextExpr(key string, expressions []string) error { for _, expression := range expressions { - _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{ + "evt": &types.Event{}, + "match": &types.MatchedRule{}, + "req": &http.Request{}})...) if err != nil { return fmt.Errorf("compilation of '%s' failed: %w", expression, err) } @@ -72,7 +76,10 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { } for _, value := range values { - valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{ + "evt": &types.Event{}, + "match": &types.MatchedRule{}, + "req": &http.Request{}})...) 
if err != nil { return fmt.Errorf("compilation of '%s' context value failed: %w", value, err) } @@ -85,6 +92,32 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { return nil } +// Truncate the context map to fit in the context value length +func TruncateContextMap(contextMap map[string][]string, contextValueLen int) ([]*models.MetaItems0, []error) { + metas := make([]*models.MetaItems0, 0) + errors := make([]error, 0) + + for key, values := range contextMap { + if len(values) == 0 { + continue + } + + valueStr, err := TruncateContext(values, alertContext.ContextValueLen) + if err != nil { + errors = append(errors, fmt.Errorf("error truncating content for %s: %w", key, err)) + continue + } + + meta := models.MetaItems0{ + Key: key, + Value: valueStr, + } + metas = append(metas, &meta) + } + return metas, errors +} + +// Truncate an individual []string to fit in the context value length func TruncateContext(values []string, contextValueLen int) (string, error) { valueByte, err := json.Marshal(values) if err != nil { @@ -116,61 +149,104 @@ func TruncateContext(values []string, contextValueLen int) (string, error) { return ret, nil } -func EventToContext(events []types.Event) (models.Meta, []error) { +func EvalAlertContextRules(evt *types.Event, match *types.MatchedRule, request *http.Request, tmpContext map[string][]string) []error { + var errors []error - metas := make([]*models.MetaItems0, 0) - tmpContext := make(map[string][]string) + //if we're evaluating context for appsec event, match and request will be present. + //otherwise, only evt will be. 
+ if evt == nil { + evt = types.NewEvent() + } + if match == nil { + match = types.NewMatchedRule() + } + if request == nil { + request = &http.Request{} + } - for _, evt := range events { - for key, values := range alertContext.ContextToSendCompiled { - if _, ok := tmpContext[key]; !ok { - tmpContext[key] = make([]string, 0) - } + for key, values := range alertContext.ContextToSendCompiled { - for _, value := range values { - var val string + if _, ok := tmpContext[key]; !ok { + tmpContext[key] = make([]string, 0) + } - output, err := expr.Run(value, map[string]interface{}{"evt": evt}) - if err != nil { - errors = append(errors, fmt.Errorf("failed to get value for %s: %w", key, err)) - continue - } + for _, value := range values { + var val string - switch out := output.(type) { - case string: - val = out - case int: - val = strconv.Itoa(out) - default: - errors = append(errors, fmt.Errorf("unexpected return type for %s: %T", key, output)) - continue + output, err := expr.Run(value, map[string]interface{}{"match": match, "evt": evt, "req": request}) + if err != nil { + errors = append(errors, fmt.Errorf("failed to get value for %s: %w", key, err)) + continue + } + switch out := output.(type) { + case string: + val = out + if val != "" && !slices.Contains(tmpContext[key], val) { + tmpContext[key] = append(tmpContext[key], val) } - + case []string: + for _, v := range out { + if v != "" && !slices.Contains(tmpContext[key], v) { + tmpContext[key] = append(tmpContext[key], v) + } + } + case int: + val = strconv.Itoa(out) + if val != "" && !slices.Contains(tmpContext[key], val) { + tmpContext[key] = append(tmpContext[key], val) + } + case []int: + for _, v := range out { + val = strconv.Itoa(v) + if val != "" && !slices.Contains(tmpContext[key], val) { + tmpContext[key] = append(tmpContext[key], val) + } + } + default: + val := fmt.Sprintf("%v", output) if val != "" && !slices.Contains(tmpContext[key], val) { tmpContext[key] = append(tmpContext[key], val) } } } } + 
return errors +} - for key, values := range tmpContext { - if len(values) == 0 { - continue - } +// Iterate over the individual appsec matched rules to create the needed alert context. +func AppsecEventToContext(event types.AppsecEvent, request *http.Request) (models.Meta, []error) { + var errors []error - valueStr, err := TruncateContext(values, alertContext.ContextValueLen) - if err != nil { - log.Warning(err.Error()) - } + tmpContext := make(map[string][]string) - meta := models.MetaItems0{ - Key: key, - Value: valueStr, - } - metas = append(metas, &meta) + for _, matched_rule := range event.MatchedRules { + tmpErrors := EvalAlertContextRules(nil, &matched_rule, request, tmpContext) + errors = append(errors, tmpErrors...) } + metas, truncErrors := TruncateContextMap(tmpContext, alertContext.ContextValueLen) + errors = append(errors, truncErrors...) + + ret := models.Meta(metas) + + return ret, errors +} + +// Iterate over the individual events to create the needed alert context. +func EventToContext(events []types.Event) (models.Meta, []error) { + var errors []error + + tmpContext := make(map[string][]string) + + for _, evt := range events { + tmpErrors := EvalAlertContextRules(&evt, nil, nil, tmpContext) + errors = append(errors, tmpErrors...) + } + + metas, truncErrors := TruncateContextMap(tmpContext, alertContext.ContextValueLen) + errors = append(errors, truncErrors...) 
+ ret := models.Meta(metas) return ret, errors diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index c111d1bbcfb..dc752ba8b09 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -2,6 +2,7 @@ package alertcontext import ( "fmt" + "net/http" "testing" "github.com/stretchr/testify/assert" @@ -9,6 +10,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestNewAlertContext(t *testing.T) { @@ -200,3 +202,163 @@ func TestEventToContext(t *testing.T) { assert.ElementsMatch(t, test.expectedResult, metas) } } + +func TestValidateContextExpr(t *testing.T) { + tests := []struct { + name string + key string + exprs []string + expectedErr *string + }{ + { + name: "basic config", + key: "source_ip", + exprs: []string{ + "evt.Parsed.source_ip", + }, + expectedErr: nil, + }, + { + name: "basic config with non existent field", + key: "source_ip", + exprs: []string{ + "evt.invalid.source_ip", + }, + expectedErr: ptr.Of("compilation of 'evt.invalid.source_ip' failed: type types.Event has no field invalid"), + }, + } + for _, test := range tests { + fmt.Printf("Running test '%s'\n", test.name) + err := ValidateContextExpr(test.key, test.exprs) + if test.expectedErr == nil { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, *test.expectedErr) + } + } +} + +func TestAppsecEventToContext(t *testing.T) { + + tests := []struct { + name string + contextToSend map[string][]string + match types.AppsecEvent + req *http.Request + expectedResult models.Meta + expectedErrLen int + }{ + { + name: "basic test on match", + contextToSend: map[string][]string{ + "id": {"match.id"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{}, + expectedResult: []*models.MetaItems0{ + { + Key: "id", + Value: "[\"test\"]", + }, + 
}, + expectedErrLen: 0, + }, + { + name: "basic test on req", + contextToSend: map[string][]string{ + "ua": {"req.UserAgent()"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{ + Header: map[string][]string{ + "User-Agent": {"test"}, + }, + }, + expectedResult: []*models.MetaItems0{ + { + Key: "ua", + Value: "[\"test\"]", + }, + }, + expectedErrLen: 0, + }, + { + name: "test on req -> []string", + contextToSend: map[string][]string{ + "foobarxx": {"req.Header.Values('Foobar')"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{ + Header: map[string][]string{ + "User-Agent": {"test"}, + "Foobar": {"test1", "test2"}, + }, + }, + expectedResult: []*models.MetaItems0{ + { + Key: "foobarxx", + Value: "[\"test1\",\"test2\"]", + }, + }, + expectedErrLen: 0, + }, + { + name: "test on type int", + contextToSend: map[string][]string{ + "foobarxx": {"len(req.Header.Values('Foobar'))"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{ + Header: map[string][]string{ + "User-Agent": {"test"}, + "Foobar": {"test1", "test2"}, + }, + }, + expectedResult: []*models.MetaItems0{ + { + Key: "foobarxx", + Value: "[\"2\"]", + }, + }, + expectedErrLen: 0, + }, + } + + for _, test := range tests { + //reset cache + alertContext = Context{} + //compile + if err := NewAlertContext(test.contextToSend, 100); err != nil { + t.Fatalf("failed to compile %s: %s", test.name, err) + } + //run + + metas, errors := AppsecEventToContext(test.match, test.req) + assert.Len(t, errors, test.expectedErrLen) + assert.ElementsMatch(t, test.expectedResult, metas) + } +} diff --git a/pkg/types/appsec_event.go b/pkg/types/appsec_event.go index dc81c63b344..11d70ad368d 100644 --- a/pkg/types/appsec_event.go +++ b/pkg/types/appsec_event.go @@ -18,7 +18,9 @@ 
len(evt.Waf.ByTagRx("*CVE*").ByConfidence("high").ByAction("block")) > 1 */ -type MatchedRules []map[string]interface{} +type MatchedRules []MatchedRule + +type MatchedRule map[string]interface{} type AppsecEvent struct { HasInBandMatches, HasOutBandMatches bool @@ -45,6 +47,10 @@ const ( Kind Field = "kind" ) +func NewMatchedRule() *MatchedRule { + return &MatchedRule{} +} + func (w AppsecEvent) GetVar(varName string) string { if w.Vars == nil { return "" diff --git a/pkg/types/event.go b/pkg/types/event.go index e016d0294c4..6d275aedf95 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -47,6 +47,15 @@ type Event struct { Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` } +func NewEvent() *Event { + return &Event{Type: LOG, + Parsed: make(map[string]string), + Enriched: make(map[string]string), + Meta: make(map[string]string), + Unmarshaled: make(map[string]interface{}), + } +} + func (e *Event) SetMeta(key string, value string) bool { if e.Meta == nil { e.Meta = make(map[string]string) From 57521114bdab6007644d114cf0c504a85a4f58da Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 4 Nov 2024 11:21:48 +0100 Subject: [PATCH 337/581] update checks for wrapped errors (#3117) * errors.Is() * extract function isBrokenConnection() --- .golangci.yml | 4 --- pkg/acquisition/modules/appsec/appsec.go | 15 +++++++++- .../wineventlog/wineventlog_windows.go | 6 ++-- pkg/apiserver/apiserver.go | 28 ++++++++++--------- 4 files changed, 32 insertions(+), 21 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 271e3a57d34..acde901dbe6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -348,10 +348,6 @@ issues: - errorlint text: "type switch on error will fail on wrapped errors. Use errors.As to check for specific errors" - - linters: - - errorlint - text: "comparing with .* will fail on wrapped errors. 
Use errors.Is to check for a specific error" - - linters: - nosprintfhostport text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf" diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index a6dcffe89a2..86b5fbbac2d 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -85,6 +85,7 @@ func (ac *AuthCache) Get(apiKey string) (time.Time, bool) { ac.mu.RLock() expiration, exists := ac.APIKeys[apiKey] ac.mu.RUnlock() + return expiration, exists } @@ -128,6 +129,7 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { if w.config.ListenSocket != "" && w.config.ListenAddr == "" { w.config.Name = w.config.ListenSocket } + if w.config.ListenSocket == "" { w.config.Name = fmt.Sprintf("%s%s", w.config.ListenAddr, w.config.Path) } @@ -153,6 +155,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe if err != nil { return fmt.Errorf("unable to parse appsec configuration: %w", err) } + w.logger = logger w.metricsLevel = MetricsLevel w.logger.Tracef("Appsec configuration: %+v", w.config) @@ -211,10 +214,12 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe AppsecRuntime: &wrt, Labels: w.config.Labels, } + err := runner.Init(appsecCfg.GetDataDir()) if err != nil { return fmt.Errorf("unable to initialize runner: %w", err) } + w.AppsecRunners[nbRoutine] = runner } @@ -222,6 +227,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe // We don´t use the wrapper provided by coraza because we want to fully control what happens when a rule match to send the information in crowdsec w.mux.HandleFunc(w.config.Path, w.appsecHandler) + return nil } @@ -243,10 +249,12 @@ func (w *AppsecSource) OneShotAcquisition(_ context.Context, _ chan types.Event, func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t 
*tomb.Tomb) error { w.outChan = out + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live") w.logger.Infof("%d appsec runner to start", len(w.AppsecRunners)) + for _, runner := range w.AppsecRunners { runner.outChan = out t.Go(func() error { @@ -254,6 +262,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. return runner.Run(t) }) } + t.Go(func() error { if w.config.ListenSocket != "" { w.logger.Infof("creating unix socket %s", w.config.ListenSocket) @@ -268,10 +277,11 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. } else { err = w.server.Serve(listener) } - if err != nil && err != http.ErrServerClosed { + if err != nil && !errors.Is(err, http.ErrServerClosed) { return fmt.Errorf("appsec server failed: %w", err) } } + return nil }) t.Go(func() error { @@ -288,6 +298,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. return fmt.Errorf("appsec server failed: %w", err) } } + return nil }) <-t.Dying() @@ -297,6 +308,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. w.server.Shutdown(ctx) return nil }) + return nil } @@ -391,6 +403,7 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { logger.Debugf("Response: %+v", appsecResponse) rw.WriteHeader(statusCode) + body, err := json.Marshal(appsecResponse) if err != nil { logger.Errorf("unable to serialize response: %s", err) diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 887be8b7dd3..1f5332f43b8 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -94,7 +94,7 @@ func (w *WinEventLogSource) getXMLEvents(config *winlog.SubscribeConfig, publish 2000, // Timeout in milliseconds to wait. 0, // Reserved. Must be zero. 
&returned) // The number of handles in the array that are set by the API. - if err == windows.ERROR_NO_MORE_ITEMS { + if errors.Is(err, windows.ERROR_NO_MORE_ITEMS) { return nil, err } else if err != nil { return nil, fmt.Errorf("wevtapi.EvtNext failed: %v", err) @@ -188,7 +188,7 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error } if status == syscall.WAIT_OBJECT_0 { renderedEvents, err := w.getXMLEvents(w.evtConfig, publisherCache, subscription, 500) - if err == windows.ERROR_NO_MORE_ITEMS { + if errors.Is(err, windows.ERROR_NO_MORE_ITEMS) { windows.ResetEvent(w.evtConfig.SignalEvent) } else if err != nil { w.logger.Errorf("getXMLEvents failed: %v", err) @@ -411,7 +411,7 @@ OUTER_LOOP: return nil default: evts, err := w.getXMLEvents(w.evtConfig, publisherCache, handle, 500) - if err == windows.ERROR_NO_MORE_ITEMS { + if errors.Is(err, windows.ERROR_NO_MORE_ITEMS) { log.Info("No more items") break OUTER_LOOP } else if err != nil { diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 35f9beaf635..05f9150b037 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -46,20 +46,11 @@ type APIServer struct { consoleConfig *csconfig.ConsoleConfig } -func recoverFromPanic(c *gin.Context) { - err := recover() - if err == nil { - return - } - - // Check for a broken connection, as it is not really a - // condition that warrants a panic stack trace. 
- brokenPipe := false - +func isBrokenConnection(err any) bool { if ne, ok := err.(*net.OpError); ok { if se, ok := ne.Err.(*os.SyscallError); ok { if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { - brokenPipe = true + return true } } } @@ -79,11 +70,22 @@ func recoverFromPanic(c *gin.Context) { errors.Is(strErr, errClosedBody) || errors.Is(strErr, errHandlerComplete) || errors.Is(strErr, errStreamClosed) { - brokenPipe = true + return true } } - if brokenPipe { + return false +} + +func recoverFromPanic(c *gin.Context) { + err := recover() + if err == nil { + return + } + + // Check for a broken connection, as it is not really a + // condition that warrants a panic stack trace. + if isBrokenConnection(err) { log.Warningf("client %s disconnected: %s", c.ClientIP(), err) c.Abort() } else { From 19b70f10bea0f7bbcecbba18a25cfc84bea140a5 Mon Sep 17 00:00:00 2001 From: he2ss Date: Tue, 5 Nov 2024 14:15:04 +0100 Subject: [PATCH 338/581] add HTTP datasource (#3294) --- Makefile | 1 + pkg/acquisition/acquisition.go | 24 +- pkg/acquisition/http.go | 12 + pkg/acquisition/modules/appsec/utils.go | 5 +- .../modules/cloudwatch/cloudwatch.go | 5 +- pkg/acquisition/modules/docker/docker.go | 13 +- pkg/acquisition/modules/file/file.go | 10 +- pkg/acquisition/modules/http/http.go | 416 ++++++++++ pkg/acquisition/modules/http/http_test.go | 785 ++++++++++++++++++ pkg/acquisition/modules/http/testdata/ca.crt | 23 + .../modules/http/testdata/client.crt | 24 + .../modules/http/testdata/client.key | 27 + .../modules/http/testdata/server.crt | 23 + .../modules/http/testdata/server.key | 27 + .../modules/journalctl/journalctl.go | 9 +- pkg/acquisition/modules/kafka/kafka.go | 9 +- pkg/acquisition/modules/kinesis/kinesis.go | 8 +- .../modules/kubernetesaudit/k8s_audit.go | 9 +- pkg/acquisition/modules/loki/loki.go | 13 +- pkg/acquisition/modules/s3/s3.go | 8 +- 
pkg/acquisition/modules/syslog/syslog.go | 8 +- .../wineventlog/wineventlog_windows.go | 8 +- pkg/alertcontext/alertcontext.go | 10 +- pkg/cwversion/component/component.go | 29 +- pkg/types/event.go | 13 +- 25 files changed, 1419 insertions(+), 100 deletions(-) create mode 100644 pkg/acquisition/http.go create mode 100644 pkg/acquisition/modules/http/http.go create mode 100644 pkg/acquisition/modules/http/http_test.go create mode 100644 pkg/acquisition/modules/http/testdata/ca.crt create mode 100644 pkg/acquisition/modules/http/testdata/client.crt create mode 100644 pkg/acquisition/modules/http/testdata/client.key create mode 100644 pkg/acquisition/modules/http/testdata/server.crt create mode 100644 pkg/acquisition/modules/http/testdata/server.key diff --git a/Makefile b/Makefile index 87bb0313b25..4b7f0b746fe 100644 --- a/Makefile +++ b/Makefile @@ -134,6 +134,7 @@ COMPONENTS := \ datasource_cloudwatch \ datasource_docker \ datasource_file \ + datasource_http \ datasource_k8saudit \ datasource_kafka \ datasource_journalctl \ diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 1ad385105d3..ef5a413b91f 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -337,6 +337,20 @@ func GetMetrics(sources []DataSource, aggregated bool) error { return nil } +// There's no need for an actual deep copy +// The event is almost empty, we are mostly interested in allocating new maps for Parsed/Meta/... 
+func copyEvent(evt types.Event, line string) types.Event { + evtCopy := types.MakeEvent(evt.ExpectMode == types.TIMEMACHINE, evt.Type, evt.Process) + evtCopy.Line = evt.Line + evtCopy.Line.Raw = line + evtCopy.Line.Labels = make(map[string]string) + for k, v := range evt.Line.Labels { + evtCopy.Line.Labels[k] = v + } + + return evtCopy +} + func transform(transformChan chan types.Event, output chan types.Event, AcquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { defer trace.CatchPanic("crowdsec/acquis") logger.Infof("transformer started") @@ -363,8 +377,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo switch v := out.(type) { case string: logger.Tracef("transform expression returned %s", v) - evt.Line.Raw = v - output <- evt + output <- copyEvent(evt, v) case []interface{}: logger.Tracef("transform expression returned %v", v) //nolint:asasalint // We actually want to log the slice content @@ -373,19 +386,16 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo if !ok { logger.Errorf("transform expression returned []interface{}, but cannot assert an element to string") output <- evt - continue } - evt.Line.Raw = l - output <- evt + output <- copyEvent(evt, l) } case []string: logger.Tracef("transform expression returned %v", v) for _, line := range v { - evt.Line.Raw = line - output <- evt + output <- copyEvent(evt, line) } default: logger.Errorf("transform expression returned an invalid type %T, sending event as-is", out) diff --git a/pkg/acquisition/http.go b/pkg/acquisition/http.go new file mode 100644 index 00000000000..59745772b62 --- /dev/null +++ b/pkg/acquisition/http.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_http + +package acquisition + +import ( + httpacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/http" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("http", func() DataSource { return &httpacquisition.HTTPSource{} }) +} 
diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index b4b66897516..8995b305680 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -116,10 +116,7 @@ func AppsecEventGeneration(inEvt types.Event, request *http.Request) (*types.Eve } func EventFromRequest(r *appsec.ParsedRequest, labels map[string]string) (types.Event, error) { - evt := types.Event{} - // we might want to change this based on in-band vs out-of-band ? - evt.Type = types.LOG - evt.ExpectMode = types.LIVE + evt := types.MakeEvent(false, types.LOG, true) // def needs fixing evt.Stage = "s00-raw" evt.Parsed = map[string]string{ diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 2df70b3312b..ba267c9050b 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -710,7 +710,7 @@ func (cw *CloudwatchSource) CatLogStream(ctx context.Context, cfg *LogStreamTail func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig) (types.Event, error) { l := types.Line{} - evt := types.Event{} + evt := types.MakeEvent(cfg.ExpectMode == types.TIMEMACHINE, types.LOG, true) if log.Message == nil { return evt, errors.New("nil message") } @@ -726,9 +726,6 @@ func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig) l.Process = true l.Module = "cloudwatch" evt.Line = l - evt.Process = true - evt.Type = types.LOG - evt.ExpectMode = cfg.ExpectMode cfg.logger.Debugf("returned event labels : %+v", evt.Line.Labels) return evt, nil } diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 2f79d4dcee6..b27255ec13f 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -334,7 +334,10 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev if 
d.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() } - evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} + evt := types.MakeEvent(true, types.LOG, true) + evt.Line = l + evt.Process = true + evt.Type = types.LOG out <- evt d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) } @@ -579,12 +582,8 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi l.Src = container.Name l.Process = true l.Module = d.GetName() - var evt types.Event - if !d.Config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(d.Config.UseTimeMachine, types.LOG, true) + evt.Line = l linesRead.With(prometheus.Labels{"source": container.Name}).Inc() outChan <- evt d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index f752d04aada..9f439b0c82e 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -621,11 +621,9 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai // we're tailing, it must be real time logs logger.Debugf("pushing %+v", l) - expectMode := types.LIVE - if f.config.UseTimeMachine { - expectMode = types.TIMEMACHINE - } - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: expectMode} + evt := types.MakeEvent(f.config.UseTimeMachine, types.LOG, true) + evt.Line = l + out <- evt } } } @@ -684,7 +682,7 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom linesRead.With(prometheus.Labels{"source": filename}).Inc() // we're reading logs at once, it must be time-machine buckets - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: 
types.TIMEMACHINE} + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE, Unmarshaled: make(map[string]interface{})} } } diff --git a/pkg/acquisition/modules/http/http.go b/pkg/acquisition/modules/http/http.go new file mode 100644 index 00000000000..98af134c84e --- /dev/null +++ b/pkg/acquisition/modules/http/http.go @@ -0,0 +1,416 @@ +package httpacquisition + +import ( + "compress/gzip" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "os" + "time" + + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +var ( + dataSourceName = "http" +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_httpsource_hits_total", + Help: "Total lines that were read from http source", + }, + []string{"path", "src"}) + +type HttpConfiguration struct { + //IPFilter []string `yaml:"ip_filter"` + //ChunkSize *int64 `yaml:"chunk_size"` + ListenAddr string `yaml:"listen_addr"` + Path string `yaml:"path"` + AuthType string `yaml:"auth_type"` + BasicAuth *BasicAuthConfig `yaml:"basic_auth"` + Headers *map[string]string `yaml:"headers"` + TLS *TLSConfig `yaml:"tls"` + CustomStatusCode *int `yaml:"custom_status_code"` + CustomHeaders *map[string]string `yaml:"custom_headers"` + MaxBodySize *int64 `yaml:"max_body_size"` + Timeout *time.Duration `yaml:"timeout"` + configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type BasicAuthConfig struct { + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +type TLSConfig struct { + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + ServerCert string `yaml:"server_cert"` + ServerKey string `yaml:"server_key"` + CaCert string `yaml:"ca_cert"` +} + 
+type HTTPSource struct { + metricsLevel int + Config HttpConfiguration + logger *log.Entry + Server *http.Server +} + +func (h *HTTPSource) GetUuid() string { + return h.Config.UniqueId +} + +func (h *HTTPSource) UnmarshalConfig(yamlConfig []byte) error { + h.Config = HttpConfiguration{} + err := yaml.Unmarshal(yamlConfig, &h.Config) + if err != nil { + return fmt.Errorf("cannot parse %s datasource configuration: %w", dataSourceName, err) + } + + if h.Config.Mode == "" { + h.Config.Mode = configuration.TAIL_MODE + } + + return nil +} + +func (hc *HttpConfiguration) Validate() error { + if hc.ListenAddr == "" { + return errors.New("listen_addr is required") + } + + if hc.Path == "" { + hc.Path = "/" + } + if hc.Path[0] != '/' { + return errors.New("path must start with /") + } + + switch hc.AuthType { + case "basic_auth": + baseErr := "basic_auth is selected, but" + if hc.BasicAuth == nil { + return errors.New(baseErr + " basic_auth is not provided") + } + if hc.BasicAuth.Username == "" { + return errors.New(baseErr + " username is not provided") + } + if hc.BasicAuth.Password == "" { + return errors.New(baseErr + " password is not provided") + } + case "headers": + if hc.Headers == nil { + return errors.New("headers is selected, but headers is not provided") + } + case "mtls": + if hc.TLS == nil || hc.TLS.CaCert == "" { + return errors.New("mtls is selected, but ca_cert is not provided") + } + default: + return errors.New("invalid auth_type: must be one of basic_auth, headers, mtls") + } + + if hc.TLS != nil { + if hc.TLS.ServerCert == "" { + return errors.New("server_cert is required") + } + if hc.TLS.ServerKey == "" { + return errors.New("server_key is required") + } + } + + if hc.MaxBodySize != nil && *hc.MaxBodySize <= 0 { + return errors.New("max_body_size must be positive") + } + + /* + if hc.ChunkSize != nil && *hc.ChunkSize <= 0 { + return errors.New("chunk_size must be positive") + } + */ + + if hc.CustomStatusCode != nil { + statusText := 
http.StatusText(*hc.CustomStatusCode) + if statusText == "" { + return errors.New("invalid HTTP status code") + } + } + + return nil +} + +func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { + h.logger = logger + h.metricsLevel = MetricsLevel + err := h.UnmarshalConfig(yamlConfig) + if err != nil { + return err + } + + if err := h.Config.Validate(); err != nil { + return fmt.Errorf("invalid configuration: %w", err) + } + + return nil +} + +func (h *HTTPSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { + return fmt.Errorf("%s datasource does not support command-line acquisition", dataSourceName) +} + +func (h *HTTPSource) GetMode() string { + return h.Config.Mode +} + +func (h *HTTPSource) GetName() string { + return dataSourceName +} + +func (h *HTTPSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + return fmt.Errorf("%s datasource does not support one-shot acquisition", dataSourceName) +} + +func (h *HTTPSource) CanRun() error { + return nil +} + +func (h *HTTPSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (h *HTTPSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (h *HTTPSource) Dump() interface{} { + return h +} + +func (hc *HttpConfiguration) NewTLSConfig() (*tls.Config, error) { + tlsConfig := tls.Config{ + InsecureSkipVerify: hc.TLS.InsecureSkipVerify, + } + + if hc.TLS.ServerCert != "" && hc.TLS.ServerKey != "" { + cert, err := tls.LoadX509KeyPair(hc.TLS.ServerCert, hc.TLS.ServerKey) + if err != nil { + return nil, fmt.Errorf("failed to load server cert/key: %w", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + if hc.AuthType == "mtls" && hc.TLS.CaCert != "" { + caCert, err := os.ReadFile(hc.TLS.CaCert) + if err != nil { + return nil, fmt.Errorf("failed to read ca cert: %w", err) + } + + caCertPool, err := 
x509.SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to load system cert pool: %w", err) + } + + if caCertPool == nil { + caCertPool = x509.NewCertPool() + } + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.ClientCAs = caCertPool + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + } + + return &tlsConfig, nil +} + +func authorizeRequest(r *http.Request, hc *HttpConfiguration) error { + if hc.AuthType == "basic_auth" { + username, password, ok := r.BasicAuth() + if !ok { + return errors.New("missing basic auth") + } + if username != hc.BasicAuth.Username || password != hc.BasicAuth.Password { + return errors.New("invalid basic auth") + } + } + if hc.AuthType == "headers" { + for key, value := range *hc.Headers { + if r.Header.Get(key) != value { + return errors.New("invalid headers") + } + } + } + return nil +} + +func (h *HTTPSource) processRequest(w http.ResponseWriter, r *http.Request, hc *HttpConfiguration, out chan types.Event) error { + if hc.MaxBodySize != nil && r.ContentLength > *hc.MaxBodySize { + w.WriteHeader(http.StatusRequestEntityTooLarge) + return fmt.Errorf("body size exceeds max body size: %d > %d", r.ContentLength, *hc.MaxBodySize) + } + + srcHost, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return err + } + + defer r.Body.Close() + + reader := r.Body + + if r.Header.Get("Content-Encoding") == "gzip" { + reader, err = gzip.NewReader(r.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return fmt.Errorf("failed to create gzip reader: %w", err) + } + defer reader.Close() + } + + decoder := json.NewDecoder(reader) + for { + var message json.RawMessage + + if err := decoder.Decode(&message); err != nil { + if err == io.EOF { + break + } + w.WriteHeader(http.StatusBadRequest) + return fmt.Errorf("failed to decode: %w", err) + } + + line := types.Line{ + Raw: string(message), + Src: srcHost, + Time: time.Now().UTC(), + Labels: hc.Labels, + Process: true, + Module: h.GetName(), + } + + if 
h.metricsLevel == configuration.METRICS_AGGREGATE { + line.Src = hc.Path + } + + evt := types.MakeEvent(h.Config.UseTimeMachine, types.LOG, true) + evt.Line = line + + if h.metricsLevel == configuration.METRICS_AGGREGATE { + linesRead.With(prometheus.Labels{"path": hc.Path, "src": ""}).Inc() + } else if h.metricsLevel == configuration.METRICS_FULL { + linesRead.With(prometheus.Labels{"path": hc.Path, "src": srcHost}).Inc() + } + + h.logger.Tracef("line to send: %+v", line) + out <- evt + } + + return nil +} + +func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { + mux := http.NewServeMux() + mux.HandleFunc(h.Config.Path, func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + h.logger.Errorf("method not allowed: %s", r.Method) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + if err := authorizeRequest(r, &h.Config); err != nil { + h.logger.Errorf("failed to authorize request from '%s': %s", r.RemoteAddr, err) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + err := h.processRequest(w, r, &h.Config, out) + if err != nil { + h.logger.Errorf("failed to process request from '%s': %s", r.RemoteAddr, err) + return + } + + if h.Config.CustomHeaders != nil { + for key, value := range *h.Config.CustomHeaders { + w.Header().Set(key, value) + } + } + if h.Config.CustomStatusCode != nil { + w.WriteHeader(*h.Config.CustomStatusCode) + } else { + w.WriteHeader(http.StatusOK) + } + + w.Write([]byte("OK")) + }) + + h.Server = &http.Server{ + Addr: h.Config.ListenAddr, + Handler: mux, + } + + if h.Config.Timeout != nil { + h.Server.ReadTimeout = *h.Config.Timeout + } + + if h.Config.TLS != nil { + tlsConfig, err := h.Config.NewTLSConfig() + if err != nil { + return fmt.Errorf("failed to create tls config: %w", err) + } + h.logger.Tracef("tls config: %+v", tlsConfig) + h.Server.TLSConfig = tlsConfig + } + + t.Go(func() error { + defer 
trace.CatchPanic("crowdsec/acquis/http/server") + if h.Config.TLS != nil { + h.logger.Infof("start https server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServeTLS(h.Config.TLS.ServerCert, h.Config.TLS.ServerKey) + if err != nil && err != http.ErrServerClosed { + return fmt.Errorf("https server failed: %w", err) + } + } else { + h.logger.Infof("start http server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServe() + if err != nil && err != http.ErrServerClosed { + return fmt.Errorf("http server failed: %w", err) + } + } + return nil + }) + + //nolint //fp + for { + select { + case <-t.Dying(): + h.logger.Infof("%s datasource stopping", dataSourceName) + if err := h.Server.Close(); err != nil { + return fmt.Errorf("while closing %s server: %w", dataSourceName, err) + } + return nil + } + } +} + +func (h *HTTPSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + h.logger.Debugf("start http server on %s", h.Config.ListenAddr) + + t.Go(func() error { + defer trace.CatchPanic("crowdsec/acquis/http/live") + return h.RunServer(out, t) + }) + + return nil +} diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go new file mode 100644 index 00000000000..f89ba7aa8ba --- /dev/null +++ b/pkg/acquisition/modules/http/http_test.go @@ -0,0 +1,785 @@ +package httpacquisition + +import ( + "compress/gzip" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testHTTPServerAddr = "http://127.0.0.1:8080" + testHTTPServerAddrTLS = "https://127.0.0.1:8080" +) + +func TestConfigure(t *testing.T) { + tests := []struct { + 
config string + expectedErr string + }{ + { + config: ` +foobar: bla`, + expectedErr: "invalid configuration: listen_addr is required", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: wrongpath`, + expectedErr: "invalid configuration: path must start with /", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth`, + expectedErr: "invalid configuration: basic_auth is selected, but basic_auth is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers`, + expectedErr: "invalid configuration: headers is selected, but headers is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + username: 132`, + expectedErr: "invalid configuration: basic_auth is selected, but password is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + password: 132`, + expectedErr: "invalid configuration: basic_auth is selected, but username is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers:`, + expectedErr: "invalid configuration: headers is selected, but headers is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: toto`, + expectedErr: "invalid configuration: invalid auth_type: must be one of basic_auth, headers, mtls", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +tls: + server_key: key`, + expectedErr: "invalid configuration: server_cert is required", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +tls: + server_cert: cert`, + expectedErr: "invalid configuration: server_key is required", + }, + { + config: ` +source: http +listen_addr: 
127.0.0.1:8080 +path: /test +auth_type: mtls +tls: + server_cert: cert + server_key: key`, + expectedErr: "invalid configuration: mtls is selected, but ca_cert is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +max_body_size: 0`, + expectedErr: "invalid configuration: max_body_size must be positive", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +timeout: toto`, + expectedErr: "cannot parse http datasource configuration: yaml: unmarshal errors:\n line 8: cannot unmarshal !!str `toto` into time.Duration", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +custom_status_code: 999`, + expectedErr: "invalid configuration: invalid HTTP status code", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "http", + }) + + for _, test := range tests { + h := HTTPSource{} + err := h.Configure([]byte(test.config), subLogger, 0) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +func TestGetUuid(t *testing.T) { + h := HTTPSource{} + h.Config.UniqueId = "test" + assert.Equal(t, "test", h.GetUuid()) +} + +func TestUnmarshalConfig(t *testing.T) { + h := HTTPSource{} + err := h.UnmarshalConfig([]byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: 15 + auth_type: headers`)) + cstest.AssertErrorMessage(t, err, "cannot parse http datasource configuration: yaml: line 4: found a tab character that violates indentation") +} + +func TestConfigureByDSN(t *testing.T) { + h := HTTPSource{} + err := h.ConfigureByDSN("http://localhost:8080/test", map[string]string{}, log.WithFields(log.Fields{ + "type": "http", + }), "test") + cstest.AssertErrorMessage( + t, + err, + "http datasource does not support command-line acquisition", + ) +} + +func TestGetMode(t *testing.T) { + h := HTTPSource{} + h.Config.Mode = "test" + assert.Equal(t, 
"test", h.GetMode()) +} + +func TestGetName(t *testing.T) { + h := HTTPSource{} + assert.Equal(t, "http", h.GetName()) +} + +func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *tomb.Tomb) { + ctx := context.Background() + subLogger := log.WithFields(log.Fields{ + "type": "http", + }) + err := h.Configure(config, subLogger, metricLevel) + require.NoError(t, err) + tomb := tomb.Tomb{} + out := make(chan types.Event) + err = h.StreamingAcquisition(ctx, out, &tomb) + require.NoError(t, err) + + for _, metric := range h.GetMetrics() { + prometheus.Register(metric) + } + + return out, &tomb +} + +func TestStreamingAcquisitionWrongHTTPMethod(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + username: test + password: test`), 0) + + time.Sleep(1 * time.Second) + + res, err := http.Get(fmt.Sprintf("%s/test", testHTTPServerAddr)) + require.NoError(t, err) + assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() + +} + +func TestStreamingAcquisitionUnknownPath(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + username: test + password: test`), 0) + + time.Sleep(1 * time.Second) + + res, err := http.Get(fmt.Sprintf("%s/unknown", testHTTPServerAddr)) + require.NoError(t, err) + assert.Equal(t, http.StatusNotFound, res.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionBasicAuth(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + username: test + password: test`), 0) + + time.Sleep(1 * time.Second) + + client := &http.Client{} + + resp, 
err := http.Post(fmt.Sprintf("%s/test", testHTTPServerAddr), "application/json", strings.NewReader("test")) + require.NoError(t, err) + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("test")) + require.NoError(t, err) + req.SetBasicAuth("test", "WrongPassword") + + resp, err = client.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionBadHeaders(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test`), 0) + + time.Sleep(1 * time.Second) + + client := &http.Client{} + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("test")) + require.NoError(t, err) + + req.Header.Add("Key", "wrong") + resp, err := client.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionMaxBodySize(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test +max_body_size: 5`), 0) + + time.Sleep(1 * time.Second) + + client := &http.Client{} + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("testtest")) + require.NoError(t, err) + + req.Header.Add("Key", "test") + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusRequestEntityTooLarge, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionSuccess(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, 
h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test`), 2) + + time.Sleep(1 * time.Second) + rawEvt := `{"test": "test"}` + + errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) + + client := &http.Client{} + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(rawEvt)) + require.NoError(t, err) + + req.Header.Add("Key", "test") + resp, err := client.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 1) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionCustomStatusCodeAndCustomHeaders(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test +custom_status_code: 201 +custom_headers: + success: true`), 2) + + time.Sleep(1 * time.Second) + + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(rawEvt)) + require.NoError(t, err) + + req.Header.Add("Key", "test") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusCreated, resp.StatusCode) + assert.Equal(t, "true", resp.Header.Get("Success")) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 1) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +type slowReader struct { + delay time.Duration + body []byte + index int +} + +func (sr *slowReader) Read(p []byte) (int, error) { + if sr.index >= len(sr.body) { + return 0, io.EOF + } + time.Sleep(sr.delay) // Simulate a delay in reading + n := copy(p, sr.body[sr.index:]) + sr.index += n + return n, nil +} + +func 
assertEvents(out chan types.Event, expected []string, errChan chan error) { + readLines := []types.Event{} + + for i := 0; i < len(expected); i++ { + select { + case event := <-out: + readLines = append(readLines, event) + case <-time.After(2 * time.Second): + errChan <- errors.New("timeout waiting for event") + return + } + } + + if len(readLines) != len(expected) { + errChan <- fmt.Errorf("expected %d lines, got %d", len(expected), len(readLines)) + return + } + + for i, evt := range readLines { + if evt.Line.Raw != expected[i] { + errChan <- fmt.Errorf(`expected %s, got '%+v'`, expected, evt.Line.Raw) + return + } + if evt.Line.Src != "127.0.0.1" { + errChan <- fmt.Errorf("expected '127.0.0.1', got '%s'", evt.Line.Src) + return + } + if evt.Line.Module != "http" { + errChan <- fmt.Errorf("expected 'http', got '%s'", evt.Line.Module) + return + } + } + errChan <- nil +} + +func TestStreamingAcquisitionTimeout(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test +timeout: 1s`), 0) + + time.Sleep(1 * time.Second) + + slow := &slowReader{ + delay: 2 * time.Second, + body: []byte(`{"test": "delayed_payload"}`), + } + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), slow) + require.NoError(t, err) + + req.Header.Add("Key", "test") + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionTLSHTTPRequest(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +auth_type: mtls +path: /test +tls: + server_cert: testdata/server.crt + server_key: testdata/server.key + ca_cert: testdata/ca.crt`), 0) + + 
time.Sleep(1 * time.Second) + + resp, err := http.Post(fmt.Sprintf("%s/test", testHTTPServerAddr), "application/json", strings.NewReader("test")) + require.NoError(t, err) + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionTLSWithHeadersAuthSuccess(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test +tls: + server_cert: testdata/server.crt + server_key: testdata/server.key +`), 0) + + time.Sleep(1 * time.Second) + + caCert, err := os.ReadFile("testdata/server.crt") + require.NoError(t, err) + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tlsConfig := &tls.Config{ + RootCAs: caCertPool, + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } + + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddrTLS), strings.NewReader(rawEvt)) + require.NoError(t, err) + + req.Header.Add("Key", "test") + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 0) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionMTLS(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: mtls +tls: + server_cert: testdata/server.crt + server_key: testdata/server.key + ca_cert: testdata/ca.crt`), 0) + + time.Sleep(1 * time.Second) + + // init client cert + cert, err := tls.LoadX509KeyPair("testdata/client.crt", "testdata/client.key") + require.NoError(t, err) + + caCert, err := 
os.ReadFile("testdata/ca.crt") + require.NoError(t, err) + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } + + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddrTLS), strings.NewReader(rawEvt)) + require.NoError(t, err) + + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 0) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionGzipData(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test`), 2) + + time.Sleep(1 * time.Second) + + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, []string{rawEvt, rawEvt}, errChan) + + var b strings.Builder + gz := gzip.NewWriter(&b) + + _, err := gz.Write([]byte(rawEvt)) + require.NoError(t, err) + + _, err = gz.Write([]byte(rawEvt)) + require.NoError(t, err) + + err = gz.Close() + require.NoError(t, err) + + // send gzipped compressed data + client := &http.Client{} + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(b.String())) + require.NoError(t, err) + + req.Header.Add("Key", "test") + req.Header.Add("Content-Encoding", "gzip") + req.Header.Add("Content-Type", "application/json") + + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 2) + 
+ h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionNDJson(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test`), 2) + + time.Sleep(1 * time.Second) + rawEvt := `{"test": "test"}` + + errChan := make(chan error) + go assertEvents(out, []string{rawEvt, rawEvt}, errChan) + + client := &http.Client{} + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(fmt.Sprintf("%s\n%s\n", rawEvt, rawEvt))) + + require.NoError(t, err) + + req.Header.Add("Key", "test") + req.Header.Add("Content-Type", "application/x-ndjson") + + resp, err := client.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 2) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func assertMetrics(t *testing.T, metrics []prometheus.Collector, expected int) { + promMetrics, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + isExist := false + for _, metricFamily := range promMetrics { + if metricFamily.GetName() == "cs_httpsource_hits_total" { + isExist = true + assert.Len(t, metricFamily.GetMetric(), 1) + for _, metric := range metricFamily.GetMetric() { + assert.InDelta(t, float64(expected), metric.GetCounter().GetValue(), 0.000001) + labels := metric.GetLabel() + assert.Len(t, labels, 2) + assert.Equal(t, "path", labels[0].GetName()) + assert.Equal(t, "/test", labels[0].GetValue()) + assert.Equal(t, "src", labels[1].GetName()) + assert.Equal(t, "127.0.0.1", labels[1].GetValue()) + } + } + } + if !isExist && expected > 0 { + t.Fatalf("expected metric cs_httpsource_hits_total not found") + } + + for _, metric := range metrics { + metric.(*prometheus.CounterVec).Reset() + } +} diff --git a/pkg/acquisition/modules/http/testdata/ca.crt 
b/pkg/acquisition/modules/http/testdata/ca.crt new file mode 100644 index 00000000000..ac81b9db8a6 --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/ca.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDvzCCAqegAwIBAgIUHQfsFpWkCy7gAmDa3A6O+y5CvAswDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCRlIxFjAUBgNVBAgTDUlsZS1kZS1GcmFuY2UxDjAMBgNV +BAcTBVBhcmlzMREwDwYDVQQKEwhDcm93ZHNlYzERMA8GA1UECxMIQ3Jvd2RzZWMx +EjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0yNDEwMjMxMDAxMDBaFw0yOTEwMjIxMDAx +MDBaMG8xCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMQ4wDAYD +VQQHEwVQYXJpczERMA8GA1UEChMIQ3Jvd2RzZWMxETAPBgNVBAsTCENyb3dkc2Vj +MRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCZSR2/A24bpVHSiEeSlelfdA32uhk9wHkauwy2qxos/G/UmKG/dgWrHzRh +LawlFVHtVn4u7Hjqz2y2EsH3bX42jC5NMVARgXIOBr1dE6F5/bPqA6SoVgkDm9wh +ZBigyAMxYsR4+3ahuf0pQflBShKrLZ1UYoe6tQXob7l3x5vThEhNkBawBkLfWpj7 +7Imm1tGyEZdxCMkT400KRtSmJRrnpiOCUosnacwgp7MCbKWOIOng07Eh16cVUiuI +BthWU/LycIuac2xaD9PFpeK/MpwASRRPXZgPUhiZuaa7vttD0phCdDaS46Oln5/7 +tFRZH0alBZmkpVZJCWAP4ujIA3vLAgMBAAGjUzBRMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTwpg+WN1nZJs4gj5hfoa+fMSZjGTAP +BgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAZuOWT8zHcwbWvC6Jm +/ccgB/U7SbeIYFJrCZd9mTyqsgnkFNH8yJ5F4dXXtPXr+SO/uWWa3G5hams3qVFf +zWzzPDQdyhUhfh5fjUHR2RsSGBmCxcapYHpVvAP5aY1/ujYrXMvAJV0hfDO2tGHb +rveuJxhe8ymQ1Yb2u9NcmI1HG9IVt3Airz4gAIUJWbFvRigky0bukfddOkfiUiaF +DMPJQO6HAj8d8ctSHHVZWzhAInZ1pDg6HIHYF44m1tT27pSQoi0ZFocskDi/fC2f +EIF0nu5fRLUS6BZEfpnDi9U0lbJ/kUrgT5IFHMFqXdRpDqcnXpJZhYtp5l6GoqjM +gT33 +-----END CERTIFICATE----- diff --git a/pkg/acquisition/modules/http/testdata/client.crt b/pkg/acquisition/modules/http/testdata/client.crt new file mode 100644 index 00000000000..55efdddad09 --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/client.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID7jCCAtagAwIBAgIUJMTPh3oPJLPgsnb9T85ieb4EuOQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCRlIxFjAUBgNVBAgTDUlsZS1kZS1GcmFuY2UxDjAMBgNV 
+BAcTBVBhcmlzMREwDwYDVQQKEwhDcm93ZHNlYzERMA8GA1UECxMIQ3Jvd2RzZWMx +EjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0yNDEwMjMxMDQ2MDBaFw0yNTEwMjMxMDQ2 +MDBaMHIxCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMQ4wDAYD +VQQHEwVQYXJpczERMA8GA1UEChMIQ3Jvd2RzZWMxFzAVBgNVBAsTDlRlc3Rpbmcg +Y2xpZW50MQ8wDQYDVQQDEwZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDAUOdpRieRrrH6krUjgcjLgJg6TzoWAb/iv6rfcioX1L9bj9fZSkwu +GqKzXX/PceIXElzQgiGJZErbJtnTzhGS80QgtAB8BwWQIT2zgoGcYJf7pPFvmcMM +qMGFwK0dMC+LHPk+ePtFz8dskI2XJ8jgBdtuZcnDblMuVGtjYT6n0rszvRdo118+ +mlGCLPzOfsO1JdOqLWAR88yZfqCFt1TrwmzpRT1crJQeM6i7muw4aO0L7uSek9QM +6APHz0QexSq7/zHOtRjA4jnJbDzZJHRlwOdlsNU9cmTz6uWIQXlg+2ovD55YurNy ++jYfmfDYpimhoeGf54zaETp1fTuTJYpxAgMBAAGjfzB9MA4GA1UdDwEB/wQEAwIF +oDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAd +BgNVHQ4EFgQUmH0/7RuKnoW7sEK4Cr8eVNGbb8swHwYDVR0jBBgwFoAU8KYPljdZ +2SbOII+YX6GvnzEmYxkwDQYJKoZIhvcNAQELBQADggEBAHVn9Zuoyxu9iTFoyJ50 +e/XKcmt2uK2M1x+ap2Av7Wb/Omikx/R2YPq7994BfiUCAezY2YtreZzkE6Io1wNM +qApijEJnlqEmOXiYJqlF89QrCcsAsz6lfaqitYBZSL3o4KT+7/uUDVxgNEjEksRz +9qy6DFBLvyhxbOM2zDEV+MVfemBWSvNiojHqXzDBkZnBHHclJLuIKsXDZDGhKbNd +hsoGU00RLevvcUpUJ3a68ekgwiYFJifm0uyfmao9lmiB3i+8ZW3Q4rbwHtD+U7U2 +3n+U5PkhiUAveuMfrvUMzsTolZiop9ZLtcALDUFaqyr4tjfVOf5+CGjiluio7oE1 +UYg= +-----END CERTIFICATE----- diff --git a/pkg/acquisition/modules/http/testdata/client.key b/pkg/acquisition/modules/http/testdata/client.key new file mode 100644 index 00000000000..f8ef2efbd58 --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwFDnaUYnka6x+pK1I4HIy4CYOk86FgG/4r+q33IqF9S/W4/X +2UpMLhqis11/z3HiFxJc0IIhiWRK2ybZ084RkvNEILQAfAcFkCE9s4KBnGCX+6Tx +b5nDDKjBhcCtHTAvixz5Pnj7Rc/HbJCNlyfI4AXbbmXJw25TLlRrY2E+p9K7M70X +aNdfPppRgiz8zn7DtSXTqi1gEfPMmX6ghbdU68Js6UU9XKyUHjOou5rsOGjtC+7k +npPUDOgDx89EHsUqu/8xzrUYwOI5yWw82SR0ZcDnZbDVPXJk8+rliEF5YPtqLw+e +WLqzcvo2H5nw2KYpoaHhn+eM2hE6dX07kyWKcQIDAQABAoIBAQChriKuza0MfBri 
+9x3UCRN/is/wDZVe1P+2KL8F9ZvPxytNVeP4qM7c38WzF8MQ6sRR8z0WiqCZOjj4 +f3QX7iG2MlAvUkUqAFk778ZIuUov5sE/bU8RLOrfJKz1vqOLa2w8/xHH5LwS1/jn +m6t9zZPCSwpMiMSUSZci1xQlS6b6POZMjeqLPqv9cP8PJNv9UNrHFcQnQi1iwKJH +MJ7CQI3R8FSeGad3P7tB9YDaBm7hHmd/TevuFkymcKNT44XBSgddPDfgKui6sHTY +QQWgWI9VGVO350ZBLRLkrk8wboY4vc15qbBzYFG66WiR/tNdLt3rDYxpcXaDvcQy +e47mYNVxAoGBAMFsUmPDssqzmOkmZxHDM+VmgHYPXjDqQdE299FtuClobUW4iU4g +By7o84aCIBQz2sp9f1KM+10lr+Bqw3s7QBbR5M67PA8Zm45DL9t70NR/NZHGzFRD +BR/NMbwzCqNtY2UGDhYQLGhW8heAwsYwir8ZqmOfKTd9aY1pu/S8m9AlAoGBAP6I +483EIN8R5y+beGcGynYeIrH5Gc+W2FxWIW9jh/G7vRbhMlW4z0GxV3uEAYmOlBH2 +AqUkV6+uzU0P4B/m3vCYqLycBVDwifJazDj9nskVL5kGMxia62iwDMXs5nqNS4WJ +ZM5Gl2xIiwmgWnYnujM3eKF2wbm439wj4na80SldAoGANdIqatA9o+GtntKsw2iJ +vD91Z2SHVR0aC1k8Q+4/3GXOYiQjMLYAybDQcpEq0/RJ4SZik1nfZ9/gvJV4p4Wp +I7Br9opq/9ikTEWtv2kIhtiO02151ciAWIUEXdXmE+uQSMASk1kUwkPPQXL2v6cq +NFqz6tyS33nqMQtG3abNxHECgYA4AEA2nmcpDRRTSh50dG8JC9pQU+EU5jhWIHEc +w8Y+LjMNHKDpcU7QQkdgGowICsGTLhAo61ULhycORGboPfBg+QVu8djNlQ6Urttt +0ocj8LBXN6D4UeVnVAyLY3LWFc4+5Bq0s51PKqrEhG5Cvrzd1d+JjspSpVVDZvXF +cAeI1QKBgC/cMN3+2Sc+2biu46DnkdYpdF/N0VGMOgzz+unSVD4RA2mEJ9UdwGga +feshtrtcroHtEmc+WDYgTTnAq1MbsVFQYIwZ5fL/GJ1R8ccaWiPuX2HrKALKG4Y3 +CMFpDUWhRgtaBsmuOpUq3FeS5cyPNMHk6axL1KyFoJk9AgfhqhTp +-----END RSA PRIVATE KEY----- diff --git a/pkg/acquisition/modules/http/testdata/server.crt b/pkg/acquisition/modules/http/testdata/server.crt new file mode 100644 index 00000000000..7a02c606c9d --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/server.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIUU3F6URi0oTe9ontkf7JqXOo89QYwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCRlIxFjAUBgNVBAgTDUlsZS1kZS1GcmFuY2UxDjAMBgNV +BAcTBVBhcmlzMREwDwYDVQQKEwhDcm93ZHNlYzERMA8GA1UECxMIQ3Jvd2RzZWMx +EjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0yNDEwMjMxMDAzMDBaFw0yNTEwMjMxMDAz +MDBaMG8xCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMQ4wDAYD +VQQHEwVQYXJpczERMA8GA1UEChMIQ3Jvd2RzZWMxETAPBgNVBAsTCENyb3dkc2Vj 
+MRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC/lnUubjBGe5x0LgIE5GeG52LRzj99iLWuvey4qbSwFZ07ECgv+JttVwDm +AjEeakj2ZR46WHvHAR9eBNkRCORyWX0iKVIzm09PXYi80KtwGLaA8YMEio9/08Cc ++LS0TuP0yiOcw+btrhmvvauDzcQhA6u55q8anCZiF2BlHfX9Sh6QKewA3NhOkzbU +VTxqrOqfcRsGNub7dheqfP5bfrPkF6Y6l/0Fhyx0NMsu1zaQ0hCls2hkTf0Y3XGt +IQNWoN22seexR3qRmPf0j3jBa0qOmGgd6kAd+YpsjDblgCNUIJZiVj51fVb0sGRx +ShkfKGU6t0eznTWPCqswujO/sn+pAgMBAAGjejB4MA4GA1UdDwEB/wQEAwIFoDAd +BgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNV +HQ4EFgQUOiIF+7Wzx1J8Ki3DiBfx+E6zlSUwGgYDVR0RBBMwEYIJbG9jYWxob3N0 +hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQA0dzlhBr/0wXPyj/iWxMOXxZ1FNJ9f +lxBMhLAgX0WrT2ys+284J7Hcn0lJeqelluYpmeKn9vmCAEj3MmUmHzZyf//lhuUJ +0DlYWIHUsGaJHJ7A+1hQqrcXHhkcRy5WGIM9VoddKbBbg2b6qzTSvxn8EnuD7H4h +28wLyGLCzsSXoVcAB8u+svYt29TPuy6xmMAokyIShV8FsE77fjVTgtCuxmx1PKv3 +zd6+uEae7bbZ+GJH1zKF0vokejQvmByt+YuIXlNbMseaMUeDdpy+6qlRvbbN1dyp +rkQXfWvidMfSue5nH/akAn83v/CdKxG6tfW83d9Rud3naabUkywALDng +-----END CERTIFICATE----- diff --git a/pkg/acquisition/modules/http/testdata/server.key b/pkg/acquisition/modules/http/testdata/server.key new file mode 100644 index 00000000000..4d0ee53b4c2 --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAv5Z1Lm4wRnucdC4CBORnhudi0c4/fYi1rr3suKm0sBWdOxAo +L/ibbVcA5gIxHmpI9mUeOlh7xwEfXgTZEQjkcll9IilSM5tPT12IvNCrcBi2gPGD +BIqPf9PAnPi0tE7j9MojnMPm7a4Zr72rg83EIQOrueavGpwmYhdgZR31/UoekCns +ANzYTpM21FU8aqzqn3EbBjbm+3YXqnz+W36z5BemOpf9BYcsdDTLLtc2kNIQpbNo +ZE39GN1xrSEDVqDdtrHnsUd6kZj39I94wWtKjphoHepAHfmKbIw25YAjVCCWYlY+ +dX1W9LBkcUoZHyhlOrdHs501jwqrMLozv7J/qQIDAQABAoIBAF1Vd/rJlV0Q5RQ4 +QaWOe9zdpmedeZK3YgMh5UvE6RCLRxC5+0n7bASlSPvEf5dYofjfJA26g3pcUqKj +6/d/hIMsk2hsBu67L7TzVSTe51XxxB8nCPPSaLwWNZSDGM1qTWU4gIbjbQHHOh5C +YWcRfAW1WxhyiEWHYq+QwdYg9XCRrSg1UzvVvW1Yt2wDGcSZP5whbXipfw3BITDs +XU7ODYNkU1sjIzQZwzVGxOf9qKdhZFZ26Vhoz8OJNMLyJxY7EspuwR7HbDGt11Pb 
+CxOt/BV44LwdVYeqh57oIKtckQW33W/6EeaWr7GfMzyH5WSrsOJoK5IJVrZaPTcS +QiMYLA0CgYEA9vMVsGshBl3TeRGaU3XLHqooXD4kszbdnjfPrwGlfCO/iybhDqo5 +WFypM/bYcIWzbTez/ihufHEHPSCUbFEcN4B+oczGcuxTcZjFyvJYvq2ycxPUiDIi +JnVUcVxgh1Yn39+CsQ/b6meP7MumTD2P3I87CeQGlWTO5Ys9mdw0BjcCgYEAxpv1 +64l5UoFJGr4yElNKDIKnhEFbJZsLGKiiuVXcS1QVHW5Az5ar9fPxuepyHpz416l3 +ppncuhJiUIP+jbu5e0s0LsN46mLS3wkHLgYJj06CNT3uOSLSg1iFl7DusdbyiaA7 +wEJ/aotS1NZ4XaeryAWHwYJ6Kag3nz6NV3ZYuR8CgYEAxAFCuMj+6F+2RsTa+d1n +v8oMyNImLPyiQD9KHzyuTW7OTDMqtIoVg/Xf8re9KOpl9I0e1t7eevT3auQeCi8C +t2bMm7290V+UB3jbnO5n08hn+ADIUuV/x4ie4m8QyrpuYbm0sLbGtTFHwgoNzzuZ +oNUqZfpP42mk8fpnhWSLAlcCgYEAgpY7XRI4HkJ5ocbav2faMV2a7X/XgWNvKViA +HeJRhYoUlBRRMuz7xi0OjFKVlIFbsNlxna5fDk1WLWCMd/6tl168Qd8u2tX9lr6l +5OH9WSeiv4Un5JN73PbQaAvi9jXBpTIg92oBwzk2TlFyNQoxDcRtHZQ/5LIBWIhV +gOOEtLsCgYEA1wbGc4XlH+/nXVsvx7gmfK8pZG8XA4/ToeIEURwPYrxtQZLB4iZs +aqWGgIwiB4F4UkuKZIjMrgInU9y0fG6EL96Qty7Yjh7dGy1vJTZl6C+QU6o4sEwl +r5Id5BNLEaqISWQ0LvzfwdfABYlvFfBdaGbzUzLEitD79eyhxuNEOBw= +-----END RSA PRIVATE KEY----- diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index e7a35d5a3ba..27f20b9f446 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -136,12 +136,9 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve if j.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": j.src}).Inc() } - var evt types.Event - if !j.config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + + evt := types.MakeEvent(j.config.UseTimeMachine, types.LOG, true) + evt.Line = l out <- evt case stderrLine := <-stderrChan: logger.Warnf("Got stderr message : %s", stderrLine) diff --git a/pkg/acquisition/modules/kafka/kafka.go 
b/pkg/acquisition/modules/kafka/kafka.go index a9a5e13e958..77fc44e310d 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -173,13 +173,8 @@ func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) err if k.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() } - var evt types.Event - - if !k.Config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) + evt.Line = l out <- evt } } diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 3cfc224aa25..3744e43f38d 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -322,12 +322,8 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan } else { l.Src = k.Config.StreamName } - var evt types.Event - if !k.Config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) + evt.Line = l out <- evt } } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 30fc5c467ea..1fa6c894a32 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -207,11 +207,8 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R Process: true, Module: ka.GetName(), } - ka.outChan <- types.Event{ - Line: l, - Process: true, - Type: types.LOG, - ExpectMode: types.LIVE, - } + evt := 
types.MakeEvent(ka.config.UseTimeMachine, types.LOG, true) + evt.Line = l + ka.outChan <- evt } } diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index e39c76af22c..d50787b652b 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -307,16 +307,9 @@ func (l *LokiSource) readOneEntry(entry lokiclient.Entry, labels map[string]stri if l.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() } - expectMode := types.LIVE - if l.Config.UseTimeMachine { - expectMode = types.TIMEMACHINE - } - out <- types.Event{ - Line: ll, - Process: true, - Type: types.LOG, - ExpectMode: expectMode, - } + evt := types.MakeEvent(l.Config.UseTimeMachine, types.LOG, true) + evt.Line = ll + out <- evt } func (l *LokiSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index acd78ceba8f..cdc84a8a3ca 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -443,12 +443,8 @@ func (s *S3Source) readFile(bucket string, key string) error { } else if s.MetricsLevel == configuration.METRICS_AGGREGATE { l.Src = bucket } - var evt types.Event - if !s.Config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(s.Config.UseTimeMachine, types.LOG, true) + evt.Line = l s.out <- evt } } diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 33a2f1542db..fb6a04600c1 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -235,11 +235,9 @@ func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c cha l.Time = ts l.Src = 
syslogLine.Client l.Process = true - if !s.config.UseTimeMachine { - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(s.config.UseTimeMachine, types.LOG, true) + evt.Line = l + out <- evt } } } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 1f5332f43b8..8283bcc21a2 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -206,9 +206,9 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error l.Src = w.name l.Process = true if !w.config.UseTimeMachine { - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE, Unmarshaled: make(map[string]interface{})} } else { - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE, Unmarshaled: make(map[string]interface{})} } } } @@ -430,7 +430,9 @@ OUTER_LOOP: l.Time = time.Now() l.Src = w.name l.Process = true - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} + csevt := types.MakeEvent(w.config.UseTimeMachine, types.LOG, true) + csevt.Line = l + out <- csevt } } } diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 0c60dea4292..1b7d1e20018 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -149,15 +149,12 @@ func TruncateContext(values []string, contextValueLen int) (string, error) { return ret, nil } -func EvalAlertContextRules(evt *types.Event, match *types.MatchedRule, request *http.Request, tmpContext 
map[string][]string) []error { +func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *http.Request, tmpContext map[string][]string) []error { var errors []error //if we're evaluating context for appsec event, match and request will be present. //otherwise, only evt will be. - if evt == nil { - evt = types.NewEvent() - } if match == nil { match = types.NewMatchedRule() } @@ -220,8 +217,9 @@ func AppsecEventToContext(event types.AppsecEvent, request *http.Request) (model tmpContext := make(map[string][]string) + evt := types.MakeEvent(false, types.LOG, false) for _, matched_rule := range event.MatchedRules { - tmpErrors := EvalAlertContextRules(nil, &matched_rule, request, tmpContext) + tmpErrors := EvalAlertContextRules(evt, &matched_rule, request, tmpContext) errors = append(errors, tmpErrors...) } @@ -240,7 +238,7 @@ func EventToContext(events []types.Event) (models.Meta, []error) { tmpContext := make(map[string][]string) for _, evt := range events { - tmpErrors := EvalAlertContextRules(&evt, nil, nil, tmpContext) + tmpErrors := EvalAlertContextRules(evt, nil, nil, tmpContext) errors = append(errors, tmpErrors...) } diff --git a/pkg/cwversion/component/component.go b/pkg/cwversion/component/component.go index 4036b63cf00..7ed596525e0 100644 --- a/pkg/cwversion/component/component.go +++ b/pkg/cwversion/component/component.go @@ -7,20 +7,21 @@ package component // Built is a map of all the known components, and whether they are built-in or not. 
// This is populated as soon as possible by the respective init() functions -var Built = map[string]bool { - "datasource_appsec": false, - "datasource_cloudwatch": false, - "datasource_docker": false, - "datasource_file": false, - "datasource_journalctl": false, - "datasource_k8s-audit": false, - "datasource_kafka": false, - "datasource_kinesis": false, - "datasource_loki": false, - "datasource_s3": false, - "datasource_syslog": false, - "datasource_wineventlog":false, - "cscli_setup": false, +var Built = map[string]bool{ + "datasource_appsec": false, + "datasource_cloudwatch": false, + "datasource_docker": false, + "datasource_file": false, + "datasource_journalctl": false, + "datasource_k8s-audit": false, + "datasource_kafka": false, + "datasource_kinesis": false, + "datasource_loki": false, + "datasource_s3": false, + "datasource_syslog": false, + "datasource_wineventlog": false, + "datasource_http": false, + "cscli_setup": false, } func Register(name string) { diff --git a/pkg/types/event.go b/pkg/types/event.go index 6d275aedf95..9300626b927 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -47,13 +47,20 @@ type Event struct { Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` } -func NewEvent() *Event { - return &Event{Type: LOG, +func MakeEvent(timeMachine bool, evtType int, process bool) Event { + evt := Event{ Parsed: make(map[string]string), - Enriched: make(map[string]string), Meta: make(map[string]string), Unmarshaled: make(map[string]interface{}), + Enriched: make(map[string]string), + ExpectMode: LIVE, + Process: process, + Type: evtType, + } + if timeMachine { + evt.ExpectMode = TIMEMACHINE } + return evt } func (e *Event) SetMeta(key string, value string) bool { From f2dfc66afad4ccefd63731bb0132ee77eab95472 Mon Sep 17 00:00:00 2001 From: laur89 Date: Tue, 5 Nov 2024 14:18:29 +0100 Subject: [PATCH 339/581] readme: update bouncers link (#3297) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/README.md b/README.md index a900f0ee514..1e57d4e91c4 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ The architecture is as follows : CrowdSec

-Once an unwanted behavior is detected, deal with it through a [bouncer](https://hub.crowdsec.net/browse/#bouncers). The aggressive IP, scenario triggered and timestamp are sent for curation, to avoid poisoning & false positives. (This can be disabled). If verified, this IP is then redistributed to all CrowdSec users running the same scenario. +Once an unwanted behavior is detected, deal with it through a [bouncer](https://app.crowdsec.net/hub/remediation-components). The aggressive IP, scenario triggered and timestamp are sent for curation, to avoid poisoning & false positives. (This can be disabled). If verified, this IP is then redistributed to all CrowdSec users running the same scenario. ## Outnumbering hackers all together From 94a2a586e4a2b150c15054e989736ea2077365a6 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 8 Nov 2024 12:01:30 +0100 Subject: [PATCH 340/581] loki: add no_ready_check option (#3317) --- pkg/acquisition/modules/loki/loki.go | 48 ++++++++++++++--------- pkg/acquisition/modules/loki/loki_test.go | 23 ++++++++++- 2 files changed, 51 insertions(+), 20 deletions(-) diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index d50787b652b..c57e6a67c94 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -53,6 +53,7 @@ type LokiConfiguration struct { WaitForReady time.Duration `yaml:"wait_for_ready"` // Retry interval, default is 10 seconds Auth LokiAuthConfiguration `yaml:"auth"` MaxFailureDuration time.Duration `yaml:"max_failure_duration"` // Max duration of failure before stopping the source + NoReadyCheck bool `yaml:"no_ready_check"` // Bypass /ready check before starting configuration.DataSourceCommonCfg `yaml:",inline"` } @@ -229,6 +230,14 @@ func (l *LokiSource) ConfigureByDSN(dsn string, labels map[string]string, logger l.logger.Logger.SetLevel(level) } + if noReadyCheck := params.Get("no_ready_check"); noReadyCheck != "" { + noReadyCheck, err := 
strconv.ParseBool(noReadyCheck) + if err != nil { + return fmt.Errorf("invalid no_ready_check in dsn: %w", err) + } + l.Config.NoReadyCheck = noReadyCheck + } + l.Config.URL = fmt.Sprintf("%s://%s", scheme, u.Host) if u.User != nil { l.Config.Auth.Username = u.User.Username() @@ -264,26 +273,28 @@ func (l *LokiSource) GetName() string { func (l *LokiSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { l.logger.Debug("Loki one shot acquisition") l.Client.SetTomb(t) - readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) - defer cancel() - err := l.Client.Ready(readyCtx) - if err != nil { - return fmt.Errorf("loki is not ready: %w", err) + + if !l.Config.NoReadyCheck { + readyCtx, readyCancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer readyCancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("loki is not ready: %w", err) + } } - ctx, cancel = context.WithCancel(ctx) - c := l.Client.QueryRange(ctx, false) + lokiCtx, cancel := context.WithCancel(ctx) + defer cancel() + c := l.Client.QueryRange(lokiCtx, false) for { select { case <-t.Dying(): l.logger.Debug("Loki one shot acquisition stopped") - cancel() return nil case resp, ok := <-c: if !ok { l.logger.Info("Loki acquisition done, chan closed") - cancel() return nil } for _, stream := range resp.Data.Result { @@ -314,27 +325,26 @@ func (l *LokiSource) readOneEntry(entry lokiclient.Entry, labels map[string]stri func (l *LokiSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { l.Client.SetTomb(t) - readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) - defer cancel() - err := l.Client.Ready(readyCtx) - if err != nil { - return fmt.Errorf("loki is not ready: %w", err) + + if !l.Config.NoReadyCheck { + readyCtx, readyCancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer readyCancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("loki 
is not ready: %w", err) + } } ll := l.logger.WithField("websocket_url", l.lokiWebsocket) t.Go(func() error { ctx, cancel := context.WithCancel(ctx) defer cancel() respChan := l.Client.QueryRange(ctx, true) - if err != nil { - ll.Errorf("could not start loki tail: %s", err) - return fmt.Errorf("while starting loki tail: %w", err) - } for { select { case resp, ok := <-respChan: if !ok { ll.Warnf("loki channel closed") - return err + return errors.New("loki channel closed") } for _, stream := range resp.Data.Result { for _, entry := range stream.Entries { diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index cacdda32d80..643aefad715 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -34,6 +34,7 @@ func TestConfiguration(t *testing.T) { password string waitForReady time.Duration delayFor time.Duration + noReadyCheck bool testName string }{ { @@ -99,6 +100,19 @@ query: > mode: tail source: loki url: http://localhost:3100/ +no_ready_check: true +query: > + {server="demo"} +`, + expectedErr: "", + testName: "Correct config with no_ready_check", + noReadyCheck: true, + }, + { + config: ` +mode: tail +source: loki +url: http://localhost:3100/ auth: username: foo password: bar @@ -148,6 +162,8 @@ query: > t.Fatalf("Wrong DelayFor %v != %v", lokiSource.Config.DelayFor, test.delayFor) } } + + assert.Equal(t, test.noReadyCheck, lokiSource.Config.NoReadyCheck) }) } } @@ -164,6 +180,7 @@ func TestConfigureDSN(t *testing.T) { scheme string waitForReady time.Duration delayFor time.Duration + noReadyCheck bool }{ { name: "Wrong scheme", @@ -202,10 +219,11 @@ func TestConfigureDSN(t *testing.T) { }, { name: "Correct DSN", - dsn: `loki://localhost:3100/?query={server="demo"}&wait_for_ready=5s&delay_for=1s`, + dsn: `loki://localhost:3100/?query={server="demo"}&wait_for_ready=5s&delay_for=1s&no_ready_check=true`, expectedErr: "", waitForReady: 5 * time.Second, delayFor: 1 * 
time.Second, + noReadyCheck: true, }, { name: "SSL DSN", @@ -256,6 +274,9 @@ func TestConfigureDSN(t *testing.T) { t.Fatalf("Wrong DelayFor %v != %v", lokiSource.Config.DelayFor, test.delayFor) } } + + assert.Equal(t, test.noReadyCheck, lokiSource.Config.NoReadyCheck) + } } From 5d414f58e5eda2659f44d17be9ea39da87f9e791 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 8 Nov 2024 15:11:09 +0100 Subject: [PATCH 341/581] Add explicit configuration for signals sharing and blocklists pull (#3277) --- cmd/crowdsec-cli/clicapi/capi.go | 21 ++++++++++ pkg/apiclient/decisions_service.go | 13 ++++++ pkg/apiclient/decisions_service_test.go | 27 +++++++++++-- pkg/apiserver/apic.go | 54 ++++++++++++++++--------- pkg/apiserver/apic_test.go | 32 ++++++++++++++- pkg/csconfig/api.go | 24 ++++++++++- pkg/csconfig/api_test.go | 5 +++ pkg/modelscapi/centralapi_swagger.yaml | 13 ++++++ 8 files changed, 165 insertions(+), 24 deletions(-) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index cba66f11104..61d59836fdd 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -225,6 +225,27 @@ func (cli *cliCapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) e fmt.Fprint(out, "Your instance is enrolled in the console\n") } + switch *cfg.API.Server.OnlineClient.Sharing { + case true: + fmt.Fprint(out, "Sharing signals is enabled\n") + case false: + fmt.Fprint(out, "Sharing signals is disabled\n") + } + + switch *cfg.API.Server.OnlineClient.PullConfig.Community { + case true: + fmt.Fprint(out, "Pulling community blocklist is enabled\n") + case false: + fmt.Fprint(out, "Pulling community blocklist is disabled\n") + } + + switch *cfg.API.Server.OnlineClient.PullConfig.Blocklists { + case true: + fmt.Fprint(out, "Pulling blocklists from the console is enabled\n") + case false: + fmt.Fprint(out, "Pulling blocklists from the console is disabled\n") + } + return nil } diff --git a/pkg/apiclient/decisions_service.go 
b/pkg/apiclient/decisions_service.go index 98f26cad9ae..fea2f39072d 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -31,6 +31,8 @@ type DecisionsListOpts struct { type DecisionsStreamOpts struct { Startup bool `url:"startup,omitempty"` + CommunityPull bool `url:"community_pull"` + AdditionalPull bool `url:"additional_pull"` Scopes string `url:"scopes,omitempty"` ScenariosContaining string `url:"scenarios_containing,omitempty"` ScenariosNotContaining string `url:"scenarios_not_containing,omitempty"` @@ -43,6 +45,17 @@ func (o *DecisionsStreamOpts) addQueryParamsToURL(url string) (string, error) { return "", err } + //Those 2 are a bit different + //They default to true, and we only want to include them if they are false + + if params.Get("community_pull") == "true" { + params.Del("community_pull") + } + + if params.Get("additional_pull") == "true" { + params.Del("additional_pull") + } + return fmt.Sprintf("%s?%s", url, params.Encode()), nil } diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index 54c44f43eda..942d14689ff 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -4,6 +4,7 @@ import ( "context" "net/http" "net/url" + "strings" "testing" log "github.com/sirupsen/logrus" @@ -87,7 +88,7 @@ func TestDecisionsStream(t *testing.T) { testMethod(t, r, http.MethodGet) if r.Method == http.MethodGet { - if r.URL.RawQuery == "startup=true" { + if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) } else { @@ -160,7 +161,7 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { testMethod(t, r, http.MethodGet) if r.Method == http.MethodGet { - if r.URL.RawQuery == 
"startup=true" { + if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}],"new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}]}`)) } else { @@ -429,6 +430,8 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { Scopes string ScenariosContaining string ScenariosNotContaining string + CommunityPull bool + AdditionalPull bool } tests := []struct { @@ -440,11 +443,17 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { { name: "no filter", expected: baseURLString + "?", + fields: fields{ + CommunityPull: true, + AdditionalPull: true, + }, }, { name: "startup=true", fields: fields{ - Startup: true, + Startup: true, + CommunityPull: true, + AdditionalPull: true, }, expected: baseURLString + "?startup=true", }, @@ -455,9 +464,19 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { Scopes: "ip,range", ScenariosContaining: "ssh", ScenariosNotContaining: "bf", + CommunityPull: true, + AdditionalPull: true, }, expected: baseURLString + "?scenarios_containing=ssh&scenarios_not_containing=bf&scopes=ip%2Crange&startup=true", }, + { + name: "pull options", + fields: fields{ + CommunityPull: false, + AdditionalPull: false, + }, + expected: baseURLString + "?additional_pull=false&community_pull=false", + }, } for _, tt := range tests { @@ -467,6 +486,8 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { Scopes: tt.fields.Scopes, ScenariosContaining: tt.fields.ScenariosContaining, ScenariosNotContaining: tt.fields.ScenariosNotContaining, + CommunityPull: tt.fields.CommunityPull, + AdditionalPull: tt.fields.AdditionalPull, } got, err := o.addQueryParamsToURL(baseURLString) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index fff0ebcacbf..51a85b1ea23 100644 --- a/pkg/apiserver/apic.go +++ 
b/pkg/apiserver/apic.go @@ -69,6 +69,10 @@ type apic struct { consoleConfig *csconfig.ConsoleConfig isPulling chan bool whitelists *csconfig.CapiWhitelist + + pullBlocklists bool + pullCommunity bool + shareSignals bool } // randomDuration returns a duration value between d-delta and d+delta @@ -198,6 +202,9 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalDelta), isPulling: make(chan bool, 1), whitelists: apicWhitelist, + pullBlocklists: *config.PullConfig.Blocklists, + pullCommunity: *config.PullConfig.Community, + shareSignals: *config.Sharing, } password := strfmt.Password(config.Credentials.Password) @@ -295,7 +302,7 @@ func (a *apic) Push(ctx context.Context) error { var signals []*models.AddSignalsRequestItem for _, alert := range alerts { - if ok := shouldShareAlert(alert, a.consoleConfig); ok { + if ok := shouldShareAlert(alert, a.consoleConfig, a.shareSignals); ok { signals = append(signals, alertToSignal(alert, getScenarioTrustOfAlert(alert), *a.consoleConfig.ShareContext)) } } @@ -324,7 +331,13 @@ func getScenarioTrustOfAlert(alert *models.Alert) string { return scenarioTrust } -func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig) bool { +func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig, shareSignals bool) bool { + + if !shareSignals { + log.Debugf("sharing signals is disabled") + return false + } + if *alert.Simulated { log.Debugf("simulation enabled for alert (id:%d), will not be sent to CAPI", alert.ID) return false @@ -625,7 +638,9 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { log.Infof("Starting community-blocklist update") - data, _, err := a.apiClient.Decisions.GetStreamV3(ctx, apiclient.DecisionsStreamOpts{Startup: a.startup}) + log.Debugf("Community pull: %t | Blocklist pull: %t", a.pullCommunity, a.pullBlocklists) + + data, _, err := 
a.apiClient.Decisions.GetStreamV3(ctx, apiclient.DecisionsStreamOpts{Startup: a.startup, CommunityPull: a.pullCommunity, AdditionalPull: a.pullBlocklists}) if err != nil { return fmt.Errorf("get stream: %w", err) } @@ -650,23 +665,26 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { log.Printf("capi/community-blocklist : %d explicit deletions", nbDeleted) - if len(data.New) == 0 { - log.Infof("capi/community-blocklist : received 0 new entries (expected if you just installed crowdsec)") - return nil - } + if len(data.New) > 0 { + // create one alert for community blocklist using the first decision + decisions := a.apiClient.Decisions.GetDecisionsFromGroups(data.New) + // apply APIC specific whitelists + decisions = a.ApplyApicWhitelists(decisions) - // create one alert for community blocklist using the first decision - decisions := a.apiClient.Decisions.GetDecisionsFromGroups(data.New) - // apply APIC specific whitelists - decisions = a.ApplyApicWhitelists(decisions) + alert := createAlertForDecision(decisions[0]) + alertsFromCapi := []*models.Alert{alert} + alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) - alert := createAlertForDecision(decisions[0]) - alertsFromCapi := []*models.Alert{alert} - alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) - - err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, deleteCounters) - if err != nil { - return fmt.Errorf("while saving alerts: %w", err) + err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, deleteCounters) + if err != nil { + return fmt.Errorf("while saving alerts: %w", err) + } + } else { + if a.pullCommunity { + log.Info("capi/community-blocklist : received 0 new entries (expected if you just installed crowdsec)") + } else { + log.Debug("capi/community-blocklist : community blocklist pull is disabled") + } } // update blocklists diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 99fee6e32bf..a8fbb40c4fa 
100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -69,7 +69,10 @@ func getAPIC(t *testing.T, ctx context.Context) *apic { ShareCustomScenarios: ptr.Of(false), ShareContext: ptr.Of(false), }, - isPulling: make(chan bool, 1), + isPulling: make(chan bool, 1), + shareSignals: true, + pullBlocklists: true, + pullCommunity: true, } } @@ -200,6 +203,11 @@ func TestNewAPIC(t *testing.T) { Login: "foo", Password: "bar", }, + Sharing: ptr.Of(true), + PullConfig: csconfig.CapiPullConfig{ + Community: ptr.Of(true), + Blocklists: ptr.Of(true), + }, } } @@ -1193,6 +1201,7 @@ func TestShouldShareAlert(t *testing.T) { tests := []struct { name string consoleConfig *csconfig.ConsoleConfig + shareSignals bool alert *models.Alert expectedRet bool expectedTrust string @@ -1203,6 +1212,7 @@ func TestShouldShareAlert(t *testing.T) { ShareCustomScenarios: ptr.Of(true), }, alert: &models.Alert{Simulated: ptr.Of(false)}, + shareSignals: true, expectedRet: true, expectedTrust: "custom", }, @@ -1212,6 +1222,7 @@ func TestShouldShareAlert(t *testing.T) { ShareCustomScenarios: ptr.Of(false), }, alert: &models.Alert{Simulated: ptr.Of(false)}, + shareSignals: true, expectedRet: false, expectedTrust: "custom", }, @@ -1220,6 +1231,7 @@ func TestShouldShareAlert(t *testing.T) { consoleConfig: &csconfig.ConsoleConfig{ ShareManualDecisions: ptr.Of(true), }, + shareSignals: true, alert: &models.Alert{ Simulated: ptr.Of(false), Decisions: []*models.Decision{{Origin: ptr.Of(types.CscliOrigin)}}, @@ -1232,6 +1244,7 @@ func TestShouldShareAlert(t *testing.T) { consoleConfig: &csconfig.ConsoleConfig{ ShareManualDecisions: ptr.Of(false), }, + shareSignals: true, alert: &models.Alert{ Simulated: ptr.Of(false), Decisions: []*models.Decision{{Origin: ptr.Of(types.CscliOrigin)}}, @@ -1244,6 +1257,7 @@ func TestShouldShareAlert(t *testing.T) { consoleConfig: &csconfig.ConsoleConfig{ ShareTaintedScenarios: ptr.Of(true), }, + shareSignals: true, alert: &models.Alert{ Simulated: 
ptr.Of(false), ScenarioHash: ptr.Of("whateverHash"), @@ -1256,6 +1270,7 @@ func TestShouldShareAlert(t *testing.T) { consoleConfig: &csconfig.ConsoleConfig{ ShareTaintedScenarios: ptr.Of(false), }, + shareSignals: true, alert: &models.Alert{ Simulated: ptr.Of(false), ScenarioHash: ptr.Of("whateverHash"), @@ -1263,11 +1278,24 @@ func TestShouldShareAlert(t *testing.T) { expectedRet: false, expectedTrust: "tainted", }, + { + name: "manual alert should not be shared if global sharing is disabled", + consoleConfig: &csconfig.ConsoleConfig{ + ShareManualDecisions: ptr.Of(true), + }, + shareSignals: false, + alert: &models.Alert{ + Simulated: ptr.Of(false), + ScenarioHash: ptr.Of("whateverHash"), + }, + expectedRet: false, + expectedTrust: "manual", + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - ret := shouldShareAlert(tc.alert, tc.consoleConfig) + ret := shouldShareAlert(tc.alert, tc.consoleConfig, tc.shareSignals) assert.Equal(t, tc.expectedRet, ret) }) } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 3014b729a9e..5f2f8f9248b 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -38,10 +38,17 @@ type ApiCredentialsCfg struct { CertPath string `yaml:"cert_path,omitempty"` } -/*global api config (for lapi->oapi)*/ +type CapiPullConfig struct { + Community *bool `yaml:"community,omitempty"` + Blocklists *bool `yaml:"blocklists,omitempty"` +} + +/*global api config (for lapi->capi)*/ type OnlineApiClientCfg struct { CredentialsFilePath string `yaml:"credentials_path,omitempty"` // credz will be edited by software, store in diff file Credentials *ApiCredentialsCfg `yaml:"-"` + PullConfig CapiPullConfig `yaml:"pull,omitempty"` + Sharing *bool `yaml:"sharing,omitempty"` } /*local api config (for crowdsec/cscli->lapi)*/ @@ -344,6 +351,21 @@ func (c *Config) LoadAPIServer(inCli bool) error { log.Printf("push and pull to Central API disabled") } + //Set default values for CAPI push/pull + if c.API.Server.OnlineClient != nil { + if 
c.API.Server.OnlineClient.PullConfig.Community == nil { + c.API.Server.OnlineClient.PullConfig.Community = ptr.Of(true) + } + + if c.API.Server.OnlineClient.PullConfig.Blocklists == nil { + c.API.Server.OnlineClient.PullConfig.Blocklists = ptr.Of(true) + } + + if c.API.Server.OnlineClient.Sharing == nil { + c.API.Server.OnlineClient.Sharing = ptr.Of(true) + } + } + if err := c.LoadDBConfig(inCli); err != nil { return err } diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index dff3c3afc8c..17802ba31dd 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -212,6 +212,11 @@ func TestLoadAPIServer(t *testing.T) { Login: "test", Password: "testpassword", }, + Sharing: ptr.Of(true), + PullConfig: CapiPullConfig{ + Community: ptr.Of(true), + Blocklists: ptr.Of(true), + }, }, Profiles: tmpLAPI.Profiles, ProfilesPath: "./testdata/profiles.yaml", diff --git a/pkg/modelscapi/centralapi_swagger.yaml b/pkg/modelscapi/centralapi_swagger.yaml index bd695894f2b..c75233809c8 100644 --- a/pkg/modelscapi/centralapi_swagger.yaml +++ b/pkg/modelscapi/centralapi_swagger.yaml @@ -55,6 +55,19 @@ paths: description: "returns list of top decisions to add or delete" produces: - "application/json" + parameters: + - in: query + name: "community_pull" + type: "boolean" + default: true + required: false + description: "Fetch the community blocklist content" + - in: query + name: "additional_pull" + type: "boolean" + default: true + required: false + description: "Fetch additional blocklists content" responses: "200": description: "200 response" From 1c140142dadeb5b63cee60c5da33cc5e47f7e1b0 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Tue, 12 Nov 2024 10:04:10 +0100 Subject: [PATCH 342/581] add go minor in go.mod (#3318) --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index c889b62cb8c..f4bd9379a2d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.23 +go 1.23.3 // 
Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). From fd314a31ce0f293474d817a083a80208c0c105e7 Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 12 Nov 2024 14:49:26 +0100 Subject: [PATCH 343/581] fix go version for azure pipeline (#3324) --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index acbcabc20c5..bcf327bdf38 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.23' + version: '1.23.3' - pwsh: | choco install -y make From 3ba71f412ead3d70bd18d00318a7d0bdd8fc0e55 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 12 Nov 2024 16:42:19 +0000 Subject: [PATCH 344/581] fix: Ansible fedora 40 to use 40 (#3327) --- test/ansible/vagrant/fedora-40/Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/ansible/vagrant/fedora-40/Vagrantfile b/test/ansible/vagrant/fedora-40/Vagrantfile index ec03661fe39..5541d453acf 100644 --- a/test/ansible/vagrant/fedora-40/Vagrantfile +++ b/test/ansible/vagrant/fedora-40/Vagrantfile @@ -1,7 +1,7 @@ # frozen_string_literal: true Vagrant.configure('2') do |config| - config.vm.box = "fedora/39-cloud-base" + config.vm.box = "fedora/40-cloud-base" config.vm.provision "shell", inline: <<-SHELL SHELL end From dd52e137ee135522345a2b7e03e4d51642f6b5d2 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 12 Nov 2024 16:56:16 +0000 Subject: [PATCH 345/581] fix: Add a check to prevent attempting to load a directory within patterns (#3326) --- pkg/parser/unix_parser.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go index 351de8ade56..f0f26a06645 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -43,7 +43,7 @@ func Init(c map[string]interface{}) 
(*UnixParserCtx, error) { } r.DataFolder = c["data"].(string) for _, f := range files { - if strings.Contains(f.Name(), ".") { + if strings.Contains(f.Name(), ".") || f.IsDir() { continue } if err := r.Grok.AddFromFile(filepath.Join(c["patterns"].(string), f.Name())); err != nil { From 4be84b8a0a17a2fb71d33818c132b1d46dfffea1 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Wed, 13 Nov 2024 14:15:01 +0000 Subject: [PATCH 346/581] enhance: add opensuse leap 15 vagrant (#3329) --- test/ansible/vagrant/opensuse-leap-15/Vagrantfile | 10 ++++++++++ test/ansible/vagrant/opensuse-leap-15/skip | 9 +++++++++ 2 files changed, 19 insertions(+) create mode 100644 test/ansible/vagrant/opensuse-leap-15/Vagrantfile create mode 100644 test/ansible/vagrant/opensuse-leap-15/skip diff --git a/test/ansible/vagrant/opensuse-leap-15/Vagrantfile b/test/ansible/vagrant/opensuse-leap-15/Vagrantfile new file mode 100644 index 00000000000..d10e68a50a7 --- /dev/null +++ b/test/ansible/vagrant/opensuse-leap-15/Vagrantfile @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = "opensuse/Leap-15.6.x86_64" + config.vm.provision "shell", inline: <<-SHELL + SHELL +end + +common = '../common' +load common if File.exist?(common) diff --git a/test/ansible/vagrant/opensuse-leap-15/skip b/test/ansible/vagrant/opensuse-leap-15/skip new file mode 100644 index 00000000000..4f1a9063d2b --- /dev/null +++ b/test/ansible/vagrant/opensuse-leap-15/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 From b3810c7e754d618bfa8eed64d3d7c853f760484d Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Wed, 13 Nov 2024 14:17:15 +0000 Subject: [PATCH 347/581] enhance: add fedora 41 (40, with provision to upgrade to 41 since there no image so far on vagrant cloud) (#3328) --- test/ansible/vagrant/fedora-41/Vagrantfile | 13 +++++++++++++ 
test/ansible/vagrant/fedora-41/skip | 9 +++++++++ 2 files changed, 22 insertions(+) create mode 100644 test/ansible/vagrant/fedora-41/Vagrantfile create mode 100644 test/ansible/vagrant/fedora-41/skip diff --git a/test/ansible/vagrant/fedora-41/Vagrantfile b/test/ansible/vagrant/fedora-41/Vagrantfile new file mode 100644 index 00000000000..3f905f51671 --- /dev/null +++ b/test/ansible/vagrant/fedora-41/Vagrantfile @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = "fedora/40-cloud-base" + config.vm.provision "shell", inline: <<-SHELL + SHELL + config.vm.provision "shell" do |s| + s.inline = "sudo dnf upgrade --refresh -y && sudo dnf install dnf-plugin-system-upgrade -y && sudo dnf system-upgrade download --releasever=41 -y && sudo dnf system-upgrade reboot -y" + end +end + +common = '../common' +load common if File.exist?(common) diff --git a/test/ansible/vagrant/fedora-41/skip b/test/ansible/vagrant/fedora-41/skip new file mode 100644 index 00000000000..4f1a9063d2b --- /dev/null +++ b/test/ansible/vagrant/fedora-41/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 From 4bd4e8dc297b77ee1c776b523a54d642ec1eecac Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Wed, 13 Nov 2024 15:42:10 +0100 Subject: [PATCH 348/581] test for cron dependency for packaging (#3331) --- rpm/SPECS/crowdsec.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index ab71b650d11..ac438ad0c14 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -12,7 +12,7 @@ Patch0: user.patch BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildRequires: systemd -Requires: crontabs +Requires: (crontabs or cron) %{?fc33:BuildRequires: systemd-rpm-macros} %{?fc34:BuildRequires: systemd-rpm-macros} %{?fc35:BuildRequires: 
systemd-rpm-macros} From b96a7a5f06e1177adb03a60e9f130dd1dac708d0 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 15 Nov 2024 09:48:55 +0000 Subject: [PATCH 349/581] fix: Use clientIP when passing coraza (#3322) * fix: Use clientIP when passing coraza * chore: update tests to reflect what remoteAddr and ClientIP should represent * chore: Add a basic seclang rule test to appsec runner test suite * chore: make linter happy :) --- .../modules/appsec/appsec_rules_test.go | 119 +++++++++++++----- .../modules/appsec/appsec_runner.go | 2 +- pkg/acquisition/modules/appsec/appsec_test.go | 3 + 3 files changed, 92 insertions(+), 32 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go index 909f16357ed..1a52df31714 100644 --- a/pkg/acquisition/modules/appsec/appsec_rules_test.go +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -28,7 +28,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, @@ -59,7 +60,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"tutu"}}, @@ -84,7 +86,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, @@ -110,7 +113,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, @@ -136,7 +140,8 @@ func TestAppsecRuleMatches(t *testing.T) 
{ }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, @@ -165,7 +170,8 @@ func TestAppsecRuleMatches(t *testing.T) { {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"bla"}}, @@ -192,7 +198,8 @@ func TestAppsecRuleMatches(t *testing.T) { {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}}, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"bla"}}, @@ -219,7 +226,8 @@ func TestAppsecRuleMatches(t *testing.T) { {Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}}, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"bla"}}, @@ -243,7 +251,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Headers: http.Header{"Cookie": []string{"foo=toto"}}, @@ -273,7 +282,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Headers: http.Header{"Cookie": []string{"foo=toto; bar=tutu"}}, @@ -303,7 +313,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Headers: http.Header{"Cookie": []string{"bar=tutu; 
tututata=toto"}}, @@ -333,7 +344,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Headers: http.Header{"Content-Type": []string{"multipart/form-data; boundary=boundary"}}, @@ -354,6 +366,32 @@ toto require.Len(t, events[1].Appsec.MatchedRules, 1) require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic matching IP address", + expected_load_ok: true, + seclang_rules: []string{ + "SecRule REMOTE_ADDR \"@ipMatch 1.2.3.4\" \"id:1,phase:1,log,deny,msg: 'block ip'\"", + }, + input_request: appsec.ParsedRequest{ + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Content-Type": []string{"multipart/form-data; boundary=boundary"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "block ip", events[1].Appsec.MatchedRules[0]["msg"]) + require.Len(t, responses, 1) require.True(t, responses[0].InBandInterrupt) }, @@ -381,7 +419,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/toto", }, @@ -404,7 +443,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/TOTO", }, @@ -427,7 +467,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: 
appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/toto", }, @@ -451,7 +492,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=dG90bw", }, @@ -475,7 +517,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=dG90bw===", }, @@ -499,7 +542,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=toto", }, @@ -523,7 +567,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=%42%42%2F%41", }, @@ -547,7 +592,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=%20%20%42%42%2F%41%20%20", }, @@ -585,7 +631,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/foobar?something=toto&foobar=smth", }, @@ -612,7 +659,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/foobar?something=toto&foobar=smth", }, @@ -639,7 +687,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Body: []byte("smth=toto&foobar=other"), @@ 
-668,7 +717,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Body: []byte("smth=toto&foobar=other"), @@ -697,7 +747,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Headers: http.Header{"foobar": []string{"toto"}}, @@ -725,7 +776,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Headers: http.Header{"foobar": []string{"toto"}}, @@ -748,7 +800,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", }, @@ -770,7 +823,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Proto: "HTTP/3.1", @@ -793,7 +847,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/foobar", }, @@ -815,7 +870,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/foobar?a=b", }, @@ -837,7 +893,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Body: []byte("foobar=42421"), diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index 90d23f63543..9a2c58e8336 100644 --- 
a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -135,7 +135,7 @@ func (r *AppsecRunner) processRequest(tx appsec.ExtendedTransaction, request *ap //FIXME: should we abort here ? } - request.Tx.ProcessConnection(request.RemoteAddr, 0, "", 0) + request.Tx.ProcessConnection(request.ClientIP, 0, "", 0) for k, v := range request.Args { for _, vv := range v { diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index d2079b43726..85ac10746f2 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -18,6 +18,7 @@ type appsecRuleTest struct { expected_load_ok bool inband_rules []appsec_rule.CustomRule outofband_rules []appsec_rule.CustomRule + seclang_rules []string on_load []appsec.Hook pre_eval []appsec.Hook post_eval []appsec.Hook @@ -61,6 +62,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { outofbandRules = append(outofbandRules, strRule) } + inbandRules = append(inbandRules, test.seclang_rules...) 
+ appsecCfg := appsec.AppsecConfig{Logger: logger, OnLoad: test.on_load, PreEval: test.pre_eval, From a4497da6b9e8df76420dedb345715b24387bbcbc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 15 Nov 2024 15:31:10 +0100 Subject: [PATCH 350/581] context propagation: papi, loki (#3308) * context propagation: AuthenticatedLAPIClient() * context propagation: papi * context propagation: loki --- cmd/crowdsec-cli/clipapi/papi.go | 2 +- cmd/crowdsec/crowdsec.go | 2 +- cmd/crowdsec/lapiclient.go | 4 ++-- .../loki/internal/lokiclient/loki_client.go | 8 +++---- pkg/apiserver/papi.go | 6 +++--- pkg/longpollclient/client.go | 21 ++++++++++--------- 6 files changed, 22 insertions(+), 21 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index 461215c3a39..7ac2455d28f 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -136,7 +136,7 @@ func (cli *cliPapi) sync(ctx context.Context, out io.Writer, db *database.Client t.Go(papi.SyncDecisions) - err = papi.PullOnce(time.Time{}, true) + err = papi.PullOnce(ctx, time.Time{}, true) if err != nil { return fmt.Errorf("unable to sync decisions: %w", err) } diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index c44d71d2093..db93992605d 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -116,7 +116,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H }) bucketWg.Wait() - apiClient, err := AuthenticatedLAPIClient(*cConfig.API.Client.Credentials, hub) + apiClient, err := AuthenticatedLAPIClient(context.TODO(), *cConfig.API.Client.Credentials, hub) if err != nil { return err } diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index eed517f9df9..6656ba6b4c2 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -14,7 +14,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -func 
AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { +func AuthenticatedLAPIClient(ctx context.Context, credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { apiURL, err := url.Parse(credentials.URL) if err != nil { return nil, fmt.Errorf("parsing api url ('%s'): %w", credentials.URL, err) @@ -44,7 +44,7 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. return nil, fmt.Errorf("new client api: %w", err) } - authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + authResp, _, err := client.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &credentials.Login, Password: &password, Scenarios: itemsForAPI, diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 846e833abea..fce199c5708 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -119,7 +119,7 @@ func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu case <-lc.t.Dying(): return lc.t.Err() case <-ticker.C: - resp, err := lc.Get(uri) + resp, err := lc.Get(ctx, uri) if err != nil { if ok := lc.shouldRetry(); !ok { return fmt.Errorf("error querying range: %w", err) @@ -215,7 +215,7 @@ func (lc *LokiClient) Ready(ctx context.Context) error { return lc.t.Err() case <-tick.C: lc.Logger.Debug("Checking if Loki is ready") - resp, err := lc.Get(url) + resp, err := lc.Get(ctx, url) if err != nil { lc.Logger.Warnf("Error checking if Loki is ready: %s", err) continue @@ -300,8 +300,8 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ } // Create a wrapper for http.Get to be able to set headers and auth -func (lc *LokiClient) Get(url string) (*http.Response, error) { - request, err := 
http.NewRequest(http.MethodGet, url, nil) +func (lc *LokiClient) Get(ctx context.Context, url string) (*http.Response, error) { + request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err } diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 7dd6b346aa9..83ba13843b9 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -205,8 +205,8 @@ func reverse(s []longpollclient.Event) []longpollclient.Event { return a } -func (p *Papi) PullOnce(since time.Time, sync bool) error { - events, err := p.Client.PullOnce(since) +func (p *Papi) PullOnce(ctx context.Context, since time.Time, sync bool) error { + events, err := p.Client.PullOnce(ctx, since) if err != nil { return err } @@ -261,7 +261,7 @@ func (p *Papi) Pull(ctx context.Context) error { p.Logger.Infof("Starting PAPI pull (since:%s)", lastTimestamp) - for event := range p.Client.Start(lastTimestamp) { + for event := range p.Client.Start(ctx, lastTimestamp) { logger := p.Logger.WithField("request-id", event.RequestId) // update last timestamp in database newTime := time.Now().UTC() diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 5a7af0bfa63..5c395185b20 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -1,6 +1,7 @@ package longpollclient import ( + "context" "encoding/json" "errors" "fmt" @@ -50,7 +51,7 @@ var errUnauthorized = errors.New("user is not authorized to use PAPI") const timeoutMessage = "no events before timeout" -func (c *LongPollClient) doQuery() (*http.Response, error) { +func (c *LongPollClient) doQuery(ctx context.Context) (*http.Response, error) { logger := c.logger.WithField("method", "doQuery") query := c.url.Query() query.Set("since_time", fmt.Sprintf("%d", c.since)) @@ -59,7 +60,7 @@ func (c *LongPollClient) doQuery() (*http.Response, error) { logger.Debugf("Query parameters: %s", c.url.RawQuery) - req, err := http.NewRequest(http.MethodGet, c.url.String(), nil) + 
req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.url.String(), nil) if err != nil { logger.Errorf("failed to create request: %s", err) return nil, err @@ -73,10 +74,10 @@ func (c *LongPollClient) doQuery() (*http.Response, error) { return resp, nil } -func (c *LongPollClient) poll() error { +func (c *LongPollClient) poll(ctx context.Context) error { logger := c.logger.WithField("method", "poll") - resp, err := c.doQuery() + resp, err := c.doQuery(ctx) if err != nil { return err } @@ -146,7 +147,7 @@ func (c *LongPollClient) poll() error { } } -func (c *LongPollClient) pollEvents() error { +func (c *LongPollClient) pollEvents(ctx context.Context) error { for { select { case <-c.t.Dying(): @@ -154,7 +155,7 @@ func (c *LongPollClient) pollEvents() error { return nil default: c.logger.Debug("Polling PAPI") - err := c.poll() + err := c.poll(ctx) if err != nil { c.logger.Errorf("failed to poll: %s", err) if errors.Is(err, errUnauthorized) { @@ -168,12 +169,12 @@ func (c *LongPollClient) pollEvents() error { } } -func (c *LongPollClient) Start(since time.Time) chan Event { +func (c *LongPollClient) Start(ctx context.Context, since time.Time) chan Event { c.logger.Infof("starting polling client") c.c = make(chan Event) c.since = since.Unix() * 1000 c.timeout = "45" - c.t.Go(c.pollEvents) + c.t.Go(func() error {return c.pollEvents(ctx)}) return c.c } @@ -182,11 +183,11 @@ func (c *LongPollClient) Stop() error { return nil } -func (c *LongPollClient) PullOnce(since time.Time) ([]Event, error) { +func (c *LongPollClient) PullOnce(ctx context.Context, since time.Time) ([]Event, error) { c.logger.Debug("Pulling PAPI once") c.since = since.Unix() * 1000 c.timeout = "1" - resp, err := c.doQuery() + resp, err := c.doQuery(ctx) if err != nil { return nil, err } From 9067106ac08cd8f5888550b56371b1b315da3b6c Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Fri, 15 Nov 2024 16:40:48 +0100 Subject: [PATCH 351/581] Support multiple appsec configs (#3314) * 
support multiple appsec configs --- pkg/acquisition/modules/appsec/appsec.go | 20 ++- .../modules/appsec/appsec_rules_test.go | 2 +- .../modules/appsec/appsec_runner.go | 36 +++-- .../modules/appsec/appsec_runner_test.go | 139 ++++++++++++++++++ pkg/acquisition/modules/appsec/appsec_test.go | 15 +- pkg/appsec/appsec.go | 74 ++++++++-- 6 files changed, 262 insertions(+), 24 deletions(-) create mode 100644 pkg/acquisition/modules/appsec/appsec_runner_test.go diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 86b5fbbac2d..2f7861b32ff 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -41,6 +41,7 @@ type AppsecSourceConfig struct { Path string `yaml:"path"` Routines int `yaml:"routines"` AppsecConfig string `yaml:"appsec_config"` + AppsecConfigs []string `yaml:"appsec_configs"` AppsecConfigPath string `yaml:"appsec_config_path"` AuthCacheDuration *time.Duration `yaml:"auth_cache_duration"` configuration.DataSourceCommonCfg `yaml:",inline"` @@ -121,10 +122,14 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { w.config.Routines = 1 } - if w.config.AppsecConfig == "" && w.config.AppsecConfigPath == "" { + if w.config.AppsecConfig == "" && w.config.AppsecConfigPath == "" && len(w.config.AppsecConfigs) == 0 { return errors.New("appsec_config or appsec_config_path must be set") } + if (w.config.AppsecConfig != "" || w.config.AppsecConfigPath != "") && len(w.config.AppsecConfigs) != 0 { + return errors.New("appsec_config and appsec_config_path are mutually exclusive with appsec_configs") + } + if w.config.Name == "" { if w.config.ListenSocket != "" && w.config.ListenAddr == "" { w.config.Name = w.config.ListenSocket @@ -175,6 +180,9 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.InChan = make(chan appsec.ParsedRequest) appsecCfg := appsec.AppsecConfig{Logger: w.logger.WithField("component", "appsec_config")} + //we 
keep the datasource name + appsecCfg.Name = w.config.Name + // let's load the associated appsec_config: if w.config.AppsecConfigPath != "" { err := appsecCfg.LoadByPath(w.config.AppsecConfigPath) @@ -186,10 +194,20 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe if err != nil { return fmt.Errorf("unable to load appsec_config: %w", err) } + } else if len(w.config.AppsecConfigs) > 0 { + for _, appsecConfig := range w.config.AppsecConfigs { + err := appsecCfg.Load(appsecConfig) + if err != nil { + return fmt.Errorf("unable to load appsec_config: %w", err) + } + } } else { return errors.New("no appsec_config provided") } + // Now we can set up the logger + appsecCfg.SetUpLogger() + w.AppsecRuntime, err = appsecCfg.Build() if err != nil { return fmt.Errorf("unable to build appsec_config: %w", err) diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go index 1a52df31714..00093c5a5ad 100644 --- a/pkg/acquisition/modules/appsec/appsec_rules_test.go +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -373,7 +373,7 @@ toto { name: "Basic matching IP address", expected_load_ok: true, - seclang_rules: []string{ + inband_native_rules: []string{ "SecRule REMOTE_ADDR \"@ipMatch 1.2.3.4\" \"id:1,phase:1,log,deny,msg: 'block ip'\"", }, input_request: appsec.ParsedRequest{ diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index 9a2c58e8336..7ce43779591 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "slices" + "strings" "time" "github.com/prometheus/client_golang/prometheus" @@ -31,23 +32,38 @@ type AppsecRunner struct { logger *log.Entry } +func (r *AppsecRunner) MergeDedupRules(collections []appsec.AppsecCollection, logger *log.Entry) string { + var rulesArr []string + dedupRules := make(map[string]struct{}) + + 
for _, collection := range collections { + for _, rule := range collection.Rules { + if _, ok := dedupRules[rule]; !ok { + rulesArr = append(rulesArr, rule) + dedupRules[rule] = struct{}{} + } else { + logger.Debugf("Discarding duplicate rule : %s", rule) + } + } + } + if len(rulesArr) != len(dedupRules) { + logger.Warningf("%d rules were discarded as they were duplicates", len(rulesArr)-len(dedupRules)) + } + + return strings.Join(rulesArr, "\n") +} + func (r *AppsecRunner) Init(datadir string) error { var err error fs := os.DirFS(datadir) - inBandRules := "" - outOfBandRules := "" - - for _, collection := range r.AppsecRuntime.InBandRules { - inBandRules += collection.String() - } - - for _, collection := range r.AppsecRuntime.OutOfBandRules { - outOfBandRules += collection.String() - } inBandLogger := r.logger.Dup().WithField("band", "inband") outBandLogger := r.logger.Dup().WithField("band", "outband") + //While loading rules, we dedup rules based on their content, while keeping the order + inBandRules := r.MergeDedupRules(r.AppsecRuntime.InBandRules, inBandLogger) + outOfBandRules := r.MergeDedupRules(r.AppsecRuntime.OutOfBandRules, outBandLogger) + //setting up inband engine inbandCfg := coraza.NewWAFConfig().WithDirectives(inBandRules).WithRootFS(fs).WithDebugLogger(appsec.NewCrzLogger(inBandLogger)) if !r.AppsecRuntime.Config.InbandOptions.DisableBodyInspection { diff --git a/pkg/acquisition/modules/appsec/appsec_runner_test.go b/pkg/acquisition/modules/appsec/appsec_runner_test.go new file mode 100644 index 00000000000..2027cf1d2c0 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_runner_test.go @@ -0,0 +1,139 @@ +package appsecacquisition + +import ( + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestAppsecRuleLoad(t *testing.T) { + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + { + name: "simple rule load", + 
expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) + }, + }, + { + name: "simple native rule load", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) + }, + }, + { + name: "simple native rule load (2)", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "simple native rule load + dedup", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "multi simple rule load", + 
expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "multi simple rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "imbricated rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + + Or: []appsec_rule.CustomRule{ + { + //Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + //Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "tutu"}, + }, + { + //Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "tata"}, + }, { + //Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "titi"}, + }, + }, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 4) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 85ac10746f2..1534f5cb7fa 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ 
b/pkg/acquisition/modules/appsec/appsec_test.go @@ -18,7 +18,8 @@ type appsecRuleTest struct { expected_load_ok bool inband_rules []appsec_rule.CustomRule outofband_rules []appsec_rule.CustomRule - seclang_rules []string + inband_native_rules []string + outofband_native_rules []string on_load []appsec.Hook pre_eval []appsec.Hook post_eval []appsec.Hook @@ -29,6 +30,7 @@ type appsecRuleTest struct { DefaultRemediation string DefaultPassAction string input_request appsec.ParsedRequest + afterload_asserts func(runner AppsecRunner) output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) } @@ -54,6 +56,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { inbandRules = append(inbandRules, strRule) } + inbandRules = append(inbandRules, test.inband_native_rules...) + outofbandRules = append(outofbandRules, test.outofband_native_rules...) for ridx, rule := range test.outofband_rules { strRule, _, err := rule.Convert(appsec_rule.ModsecurityRuleType, rule.Name) if err != nil { @@ -62,8 +66,6 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { outofbandRules = append(outofbandRules, strRule) } - inbandRules = append(inbandRules, test.seclang_rules...) 
- appsecCfg := appsec.AppsecConfig{Logger: logger, OnLoad: test.on_load, PreEval: test.pre_eval, @@ -97,6 +99,13 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { t.Fatalf("unable to initialize runner : %s", err) } + if test.afterload_asserts != nil { + //afterload asserts are just to evaluate the state of the runner after the rules have been loaded + //if it's present, don't try to process requests + test.afterload_asserts(runner) + return + } + input := test.input_request input.ResponseChannel = make(chan appsec.AppsecTempResponse) OutputEvents := make([]types.Event, 0) diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 30784b23db0..553db205b5d 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -1,7 +1,6 @@ package appsec import ( - "errors" "fmt" "net/http" "os" @@ -150,6 +149,18 @@ func (w *AppsecRuntimeConfig) ClearResponse() { w.Response.SendAlert = true } +func (wc *AppsecConfig) SetUpLogger() { + if wc.LogLevel == nil { + lvl := wc.Logger.Logger.GetLevel() + wc.LogLevel = &lvl + } + + /* wc.Name is actually the datasource name.*/ + wc.Logger = wc.Logger.Dup().WithField("name", wc.Name) + wc.Logger.Logger.SetLevel(*wc.LogLevel) + +} + func (wc *AppsecConfig) LoadByPath(file string) error { wc.Logger.Debugf("loading config %s", file) @@ -157,20 +168,65 @@ func (wc *AppsecConfig) LoadByPath(file string) error { if err != nil { return fmt.Errorf("unable to read file %s : %s", file, err) } - err = yaml.UnmarshalStrict(yamlFile, wc) + + //as LoadByPath can be called several time, we append rules/hooks, but override other options + var tmp AppsecConfig + + err = yaml.UnmarshalStrict(yamlFile, &tmp) if err != nil { return fmt.Errorf("unable to parse yaml file %s : %s", file, err) } - if wc.Name == "" { - return errors.New("name cannot be empty") + if wc.Name == "" && tmp.Name != "" { + wc.Name = tmp.Name } - if wc.LogLevel == nil { - lvl := wc.Logger.Logger.GetLevel() - wc.LogLevel = &lvl + + //We can append rules/hooks + if 
tmp.OutOfBandRules != nil { + wc.OutOfBandRules = append(wc.OutOfBandRules, tmp.OutOfBandRules...) } - wc.Logger = wc.Logger.Dup().WithField("name", wc.Name) - wc.Logger.Logger.SetLevel(*wc.LogLevel) + if tmp.InBandRules != nil { + wc.InBandRules = append(wc.InBandRules, tmp.InBandRules...) + } + if tmp.OnLoad != nil { + wc.OnLoad = append(wc.OnLoad, tmp.OnLoad...) + } + if tmp.PreEval != nil { + wc.PreEval = append(wc.PreEval, tmp.PreEval...) + } + if tmp.PostEval != nil { + wc.PostEval = append(wc.PostEval, tmp.PostEval...) + } + if tmp.OnMatch != nil { + wc.OnMatch = append(wc.OnMatch, tmp.OnMatch...) + } + if tmp.VariablesTracking != nil { + wc.VariablesTracking = append(wc.VariablesTracking, tmp.VariablesTracking...) + } + + //override other options + wc.LogLevel = tmp.LogLevel + + wc.DefaultRemediation = tmp.DefaultRemediation + wc.DefaultPassAction = tmp.DefaultPassAction + wc.BouncerBlockedHTTPCode = tmp.BouncerBlockedHTTPCode + wc.BouncerPassedHTTPCode = tmp.BouncerPassedHTTPCode + wc.UserBlockedHTTPCode = tmp.UserBlockedHTTPCode + wc.UserPassedHTTPCode = tmp.UserPassedHTTPCode + + if tmp.InbandOptions.DisableBodyInspection { + wc.InbandOptions.DisableBodyInspection = true + } + if tmp.InbandOptions.RequestBodyInMemoryLimit != nil { + wc.InbandOptions.RequestBodyInMemoryLimit = tmp.InbandOptions.RequestBodyInMemoryLimit + } + if tmp.OutOfBandOptions.DisableBodyInspection { + wc.OutOfBandOptions.DisableBodyInspection = true + } + if tmp.OutOfBandOptions.RequestBodyInMemoryLimit != nil { + wc.OutOfBandOptions.RequestBodyInMemoryLimit = tmp.OutOfBandOptions.RequestBodyInMemoryLimit + } + return nil } From c57dadce27f23e047961b14fe0f301e3253087b4 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 18 Nov 2024 11:21:30 +0100 Subject: [PATCH 352/581] better handle error when sending usage metrics (#3333) --- pkg/apiserver/apic_metrics.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pkg/apiserver/apic_metrics.go 
b/pkg/apiserver/apic_metrics.go index aa8db3f1c85..fe0dfd55821 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -368,10 +368,14 @@ func (a *apic) SendUsageMetrics(ctx context.Context) { if err != nil { log.Errorf("unable to send usage metrics: %s", err) - if resp == nil || resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { + if resp == nil || resp.Response == nil { + // Most likely a transient network error, it will be retried later + continue + } + + if resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { // In case of 422, mark the metrics as sent anyway, the API did not like what we sent, // and it's unlikely we'll be able to fix it - // also if resp is nil, we should'nt mark the metrics as sent could be network issue continue } } From 36e2c6c8bef2102e2cc00b724557f09cbea79d27 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:02:37 +0100 Subject: [PATCH 353/581] make: improve re2/wasm check (#3335) --- Makefile | 57 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/Makefile b/Makefile index 4b7f0b746fe..f8ae66e1cb6 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ BUILD_RE2_WASM ?= 0 # for your distribution (look for libre2.a). See the Dockerfile for an example of how to build it. BUILD_STATIC ?= 0 -# List of plugins to build +# List of notification plugins to build PLUGINS ?= $(patsubst ./cmd/notification-%,%,$(wildcard ./cmd/notification-*)) #-------------------------------------- @@ -86,6 +86,11 @@ export CGO_LDFLAGS_ALLOW=-Wl,--(push|pop)-state.* # this will be used by Go in the make target, some distributions require it export PKG_CONFIG_PATH:=/usr/local/lib/pkgconfig:$(PKG_CONFIG_PATH) +#-------------------------------------- +# +# Choose the re2 backend. 
+# + ifeq ($(call bool,$(BUILD_RE2_WASM)),0) ifeq ($(PKG_CONFIG),) $(error "pkg-config is not available. Please install pkg-config.") @@ -93,35 +98,28 @@ endif ifeq ($(RE2_CHECK),) RE2_FAIL := "libre2-dev is not installed, please install it or set BUILD_RE2_WASM=1 to use the WebAssembly version" +# if you prefer to build WASM instead of a critical error, comment out RE2_FAIL and uncomment RE2_MSG. +# RE2_MSG := Fallback to WebAssembly regexp library. To use the C++ version, make sure you have installed libre2-dev and pkg-config. else # += adds a space that we don't want GO_TAGS := $(GO_TAGS),re2_cgo LD_OPTS_VARS += -X '$(GO_MODULE_NAME)/pkg/cwversion.Libre2=C++' +RE2_MSG := Using C++ regexp library endif -endif - -# Build static to avoid the runtime dependency on libre2.so -ifeq ($(call bool,$(BUILD_STATIC)),1) -BUILD_TYPE = static -EXTLDFLAGS := -extldflags '-static' else -BUILD_TYPE = dynamic -EXTLDFLAGS := +RE2_MSG := Using WebAssembly regexp library endif -# Build with debug symbols, and disable optimizations + inlining, to use Delve -ifeq ($(call bool,$(DEBUG)),1) -STRIP_SYMBOLS := -DISABLE_OPTIMIZATION := -gcflags "-N -l" +ifeq ($(call bool,$(BUILD_RE2_WASM)),1) else -STRIP_SYMBOLS := -s -DISABLE_OPTIMIZATION := +ifneq (,$(RE2_CHECK)) +endif endif #-------------------------------------- - +# # Handle optional components and build profiles, to save space on the final binaries. - +# # Keep it safe for now until we decide how to expand on the idea. Either choose a profile or exclude components manually. # For example if we want to disable some component by default, or have opt-in components (INCLUDE?). 
@@ -182,6 +180,23 @@ endif #-------------------------------------- +ifeq ($(call bool,$(BUILD_STATIC)),1) +BUILD_TYPE = static +EXTLDFLAGS := -extldflags '-static' +else +BUILD_TYPE = dynamic +EXTLDFLAGS := +endif + +# Build with debug symbols, and disable optimizations + inlining, to use Delve +ifeq ($(call bool,$(DEBUG)),1) +STRIP_SYMBOLS := +DISABLE_OPTIMIZATION := -gcflags "-N -l" +else +STRIP_SYMBOLS := -s +DISABLE_OPTIMIZATION := +endif + export LD_OPTS=-ldflags "$(STRIP_SYMBOLS) $(EXTLDFLAGS) $(LD_OPTS_VARS)" \ -trimpath -tags $(GO_TAGS) $(DISABLE_OPTIMIZATION) @@ -197,17 +212,13 @@ build: build-info crowdsec cscli plugins ## Build crowdsec, cscli and plugins .PHONY: build-info build-info: ## Print build information $(info Building $(BUILD_VERSION) ($(BUILD_TAG)) $(BUILD_TYPE) for $(GOOS)/$(GOARCH)) - $(info Excluded components: $(EXCLUDE_LIST)) + $(info Excluded components: $(if $(EXCLUDE_LIST),$(EXCLUDE_LIST),none)) ifneq (,$(RE2_FAIL)) $(error $(RE2_FAIL)) endif -ifneq (,$(RE2_CHECK)) - $(info Using C++ regexp library) -else - $(info Fallback to WebAssembly regexp library. To use the C++ version, make sure you have installed libre2-dev and pkg-config.) 
-endif + $(info $(RE2_MSG)) ifeq ($(call bool,$(DEBUG)),1) $(info Building with debug symbols and disabled optimizations) From fb733ee43a4f0210dd65d32618dec00e5904ab6f Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 19 Nov 2024 14:47:59 +0100 Subject: [PATCH 354/581] Allow bouncers to share API keys (#3323) --- cmd/crowdsec-cli/clibouncer/add.go | 2 +- cmd/crowdsec-cli/clibouncer/bouncers.go | 2 + cmd/crowdsec-cli/clibouncer/delete.go | 62 ++++++++++++++++--- cmd/crowdsec-cli/clibouncer/inspect.go | 1 + pkg/apiserver/alerts_test.go | 3 + pkg/apiserver/api_key_test.go | 56 +++++++++++++++-- pkg/apiserver/apiserver_test.go | 16 ++++- pkg/apiserver/middlewares/v1/api_key.go | 80 +++++++++++++++++++------ pkg/database/bouncers.go | 19 +++++- pkg/database/ent/bouncer.go | 13 +++- pkg/database/ent/bouncer/bouncer.go | 10 ++++ pkg/database/ent/bouncer/where.go | 15 +++++ pkg/database/ent/bouncer_create.go | 25 ++++++++ pkg/database/ent/migrate/schema.go | 1 + pkg/database/ent/mutation.go | 56 ++++++++++++++++- pkg/database/ent/runtime.go | 4 ++ pkg/database/ent/schema/bouncer.go | 2 + test/bats/10_bouncers.bats | 55 ++++++++++++++++- test/lib/init/crowdsec-daemon | 6 +- 19 files changed, 389 insertions(+), 39 deletions(-) diff --git a/cmd/crowdsec-cli/clibouncer/add.go b/cmd/crowdsec-cli/clibouncer/add.go index 8c40507a996..7cc74e45fba 100644 --- a/cmd/crowdsec-cli/clibouncer/add.go +++ b/cmd/crowdsec-cli/clibouncer/add.go @@ -24,7 +24,7 @@ func (cli *cliBouncers) add(ctx context.Context, bouncerName string, key string) } } - _, err = cli.db.CreateBouncer(ctx, bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) + _, err = cli.db.CreateBouncer(ctx, bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType, false) if err != nil { return fmt.Errorf("unable to create bouncer: %w", err) } diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 876b613be53..2b0a3556873 100644 --- 
a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -77,6 +77,7 @@ type bouncerInfo struct { AuthType string `json:"auth_type"` OS string `json:"os,omitempty"` Featureflags []string `json:"featureflags,omitempty"` + AutoCreated bool `json:"auto_created"` } func newBouncerInfo(b *ent.Bouncer) bouncerInfo { @@ -92,6 +93,7 @@ func newBouncerInfo(b *ent.Bouncer) bouncerInfo { AuthType: b.AuthType, OS: clientinfo.GetOSNameAndVersion(b), Featureflags: clientinfo.GetFeatureFlagList(b), + AutoCreated: b.AutoCreated, } } diff --git a/cmd/crowdsec-cli/clibouncer/delete.go b/cmd/crowdsec-cli/clibouncer/delete.go index 6e2f312d4af..33419f483b6 100644 --- a/cmd/crowdsec-cli/clibouncer/delete.go +++ b/cmd/crowdsec-cli/clibouncer/delete.go @@ -4,25 +4,73 @@ import ( "context" "errors" "fmt" + "strings" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/types" ) +func (cli *cliBouncers) findParentBouncer(bouncerName string, bouncers []*ent.Bouncer) (string, error) { + bouncerPrefix := strings.Split(bouncerName, "@")[0] + for _, bouncer := range bouncers { + if strings.HasPrefix(bouncer.Name, bouncerPrefix) && !bouncer.AutoCreated { + return bouncer.Name, nil + } + } + + return "", errors.New("no parent bouncer found") +} + func (cli *cliBouncers) delete(ctx context.Context, bouncers []string, ignoreMissing bool) error { - for _, bouncerID := range bouncers { - if err := cli.db.DeleteBouncer(ctx, bouncerID); err != nil { - var notFoundErr *database.BouncerNotFoundError + allBouncers, err := cli.db.ListBouncers(ctx) + if err != nil { + return fmt.Errorf("unable to list bouncers: %w", err) + } + for _, bouncerName := range bouncers { + bouncer, err := cli.db.SelectBouncerByName(ctx, bouncerName) + if err != nil { + var notFoundErr *ent.NotFoundError if ignoreMissing && errors.As(err, ¬FoundErr) 
{ - return nil + continue } + return fmt.Errorf("unable to delete bouncer %s: %w", bouncerName, err) + } + + // For TLS bouncers, always delete them, they have no parents + if bouncer.AuthType == types.TlsAuthType { + if err := cli.db.DeleteBouncer(ctx, bouncerName); err != nil { + return fmt.Errorf("unable to delete bouncer %s: %w", bouncerName, err) + } + continue + } + + if bouncer.AutoCreated { + parentBouncer, err := cli.findParentBouncer(bouncerName, allBouncers) + if err != nil { + log.Errorf("bouncer '%s' is auto-created, but couldn't find a parent bouncer", err) + continue + } + log.Warnf("bouncer '%s' is auto-created and cannot be deleted, delete parent bouncer %s instead", bouncerName, parentBouncer) + continue + } + //Try to find all child bouncers and delete them + for _, childBouncer := range allBouncers { + if strings.HasPrefix(childBouncer.Name, bouncerName+"@") && childBouncer.AutoCreated { + if err := cli.db.DeleteBouncer(ctx, childBouncer.Name); err != nil { + return fmt.Errorf("unable to delete bouncer %s: %w", childBouncer.Name, err) + } + log.Infof("bouncer '%s' deleted successfully", childBouncer.Name) + } + } - return fmt.Errorf("unable to delete bouncer: %w", err) + if err := cli.db.DeleteBouncer(ctx, bouncerName); err != nil { + return fmt.Errorf("unable to delete bouncer %s: %w", bouncerName, err) } - log.Infof("bouncer '%s' deleted successfully", bouncerID) + log.Infof("bouncer '%s' deleted successfully", bouncerName) } return nil diff --git a/cmd/crowdsec-cli/clibouncer/inspect.go b/cmd/crowdsec-cli/clibouncer/inspect.go index 6dac386b888..b62344baa9b 100644 --- a/cmd/crowdsec-cli/clibouncer/inspect.go +++ b/cmd/crowdsec-cli/clibouncer/inspect.go @@ -40,6 +40,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { {"Last Pull", lastPull}, {"Auth type", bouncer.AuthType}, {"OS", clientinfo.GetOSNameAndVersion(bouncer)}, + {"Auto Created", bouncer.AutoCreated}, }) for _, ff := range 
clientinfo.GetFeatureFlagList(bouncer) { diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 4cc215c344f..d86234e4813 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -59,6 +59,9 @@ func (l *LAPI) RecordResponse(t *testing.T, ctx context.Context, verb string, ur t.Fatal("auth type not supported") } + // Port is required for gin to properly parse the client IP + req.RemoteAddr = "127.0.0.1:1234" + l.router.ServeHTTP(w, req) return w diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index e6ed68a6e0d..45c02c806e7 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -20,28 +20,74 @@ func TestAPIKey(t *testing.T) { w := httptest.NewRecorder() req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:1234" router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden"}`, w.Body.String()) + assert.Equal(t, http.StatusForbidden, w.Code) + assert.JSONEq(t, `{"message":"access forbidden"}`, w.Body.String()) // Login with invalid token w = httptest.NewRecorder() req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Api-Key", "a1b2c3d4e5f6") + req.RemoteAddr = "127.0.0.1:1234" router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden"}`, w.Body.String()) + assert.Equal(t, http.StatusForbidden, w.Code) + assert.JSONEq(t, `{"message":"access forbidden"}`, w.Body.String()) // Login with valid token w = httptest.NewRecorder() req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Api-Key", APIKey) + req.RemoteAddr = "127.0.0.1:1234" router.ServeHTTP(w, req) - assert.Equal(t, 
200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) + + // Login with valid token from another IP + w = httptest.NewRecorder() + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + req.RemoteAddr = "4.3.2.1:1234" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "null", w.Body.String()) + + // Make the requests multiple times to make sure we only create one + w = httptest.NewRecorder() + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + req.RemoteAddr = "4.3.2.1:1234" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "null", w.Body.String()) + + // Use the original bouncer again + w = httptest.NewRecorder() + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + req.RemoteAddr = "127.0.0.1:1234" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "null", w.Body.String()) + + // Check if our second bouncer was properly created + bouncers := GetBouncers(t, config.API.Server.DbConfig) + + assert.Len(t, bouncers, 2) + assert.Equal(t, "test@4.3.2.1", bouncers[1].Name) + assert.Equal(t, bouncers[0].APIKey, bouncers[1].APIKey) + assert.Equal(t, bouncers[0].AuthType, bouncers[1].AuthType) + assert.False(t, bouncers[0].AutoCreated) + assert.True(t, bouncers[1].AutoCreated) } diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index cdf99462c35..cf4c91dedda 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -24,6 +24,7 @@ import ( middlewares 
"github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -62,6 +63,7 @@ func LoadTestConfig(t *testing.T) csconfig.Config { } apiServerConfig := csconfig.LocalApiServerCfg{ ListenURI: "http://127.0.0.1:8080", + LogLevel: ptr.Of(log.DebugLevel), DbConfig: &dbconfig, ProfilesPath: "./tests/profiles.yaml", ConsoleConfig: &csconfig.ConsoleConfig{ @@ -206,6 +208,18 @@ func GetMachineIP(t *testing.T, machineID string, config *csconfig.DatabaseCfg) return "" } +func GetBouncers(t *testing.T, config *csconfig.DatabaseCfg) []*ent.Bouncer { + ctx := context.Background() + + dbClient, err := database.NewClient(ctx, config) + require.NoError(t, err) + + bouncers, err := dbClient.ListBouncers(ctx) + require.NoError(t, err) + + return bouncers +} + func GetAlertReaderFromFile(t *testing.T, path string) *strings.Reader { alertContentBytes, err := os.ReadFile(path) require.NoError(t, err) @@ -290,7 +304,7 @@ func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.Datab apiKey, err := middlewares.GenerateAPIKey(keyLength) require.NoError(t, err) - _, err = dbClient.CreateBouncer(ctx, "test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType) + _, err = dbClient.CreateBouncer(ctx, "test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType, false) require.NoError(t, err) return apiKey diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index d438c9b15a4..3c154be4fab 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -89,7 +89,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger.Infof("Creating bouncer %s", bouncerName) - bouncer, err = 
a.DbClient.CreateBouncer(ctx, bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType) + bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType, true) if err != nil { logger.Errorf("while creating bouncer db entry: %s", err) return nil @@ -114,18 +114,69 @@ func (a *APIKey) authPlain(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } + clientIP := c.ClientIP() + ctx := c.Request.Context() hashStr := HashSHA512(val[0]) - bouncer, err := a.DbClient.SelectBouncer(ctx, hashStr) + // Appsec case, we only care if the key is valid + // No content is returned, no last_pull update or anything + if c.Request.Method == http.MethodHead { + bouncer, err := a.DbClient.SelectBouncers(ctx, hashStr, types.ApiKeyAuthType) + if err != nil { + logger.Errorf("while fetching bouncer info: %s", err) + return nil + } + return bouncer[0] + } + + // most common case, check if this specific bouncer exists + bouncer, err := a.DbClient.SelectBouncerWithIP(ctx, hashStr, clientIP) + if err != nil && !ent.IsNotFound(err) { + logger.Errorf("while fetching bouncer info: %s", err) + return nil + } + + // We found the bouncer with key and IP, we can use it + if bouncer != nil { + if bouncer.AuthType != types.ApiKeyAuthType { + logger.Errorf("bouncer isn't allowed to auth by API key") + return nil + } + return bouncer + } + + // We didn't find the bouncer with key and IP, let's try to find it with the key only + bouncers, err := a.DbClient.SelectBouncers(ctx, hashStr, types.ApiKeyAuthType) if err != nil { logger.Errorf("while fetching bouncer info: %s", err) return nil } - if bouncer.AuthType != types.ApiKeyAuthType { - logger.Errorf("bouncer %s attempted to login using an API key but it is configured to auth with %s", bouncer.Name, bouncer.AuthType) + if len(bouncers) == 0 { + logger.Debugf("no bouncer found with this key") + return nil + } + + logger.Debugf("found %d bouncers with this key", len(bouncers)) + + // We only 
have one bouncer with this key and no IP + // This is the first request made by this bouncer, keep this one + if len(bouncers) == 1 && bouncers[0].IPAddress == "" { + return bouncers[0] + } + + // Bouncers are ordered by ID, first one *should* be the manually created one + // Can probably get a bit weird if the user deletes the manually created one + bouncerName := fmt.Sprintf("%s@%s", bouncers[0].Name, clientIP) + + logger.Infof("Creating bouncer %s", bouncerName) + + bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, clientIP, hashStr, types.ApiKeyAuthType, true) + + if err != nil { + logger.Errorf("while creating bouncer db entry: %s", err) return nil } @@ -156,27 +207,20 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return } - logger = logger.WithField("name", bouncer.Name) - - if bouncer.IPAddress == "" { - if err := a.DbClient.UpdateBouncerIP(ctx, clientIP, bouncer.ID); err != nil { - logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) - c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) - c.Abort() - - return - } + // Appsec request, return immediately if we found something + if c.Request.Method == http.MethodHead { + c.Set(BouncerContextKey, bouncer) + return } - // Don't update IP on HEAD request, as it's used by the appsec to check the validity of the API key provided - if bouncer.IPAddress != clientIP && bouncer.IPAddress != "" && c.Request.Method != http.MethodHead { - log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, clientIP, bouncer.IPAddress) + logger = logger.WithField("name", bouncer.Name) + // 1st time we see this bouncer, we update its IP + if bouncer.IPAddress == "" { if err := a.DbClient.UpdateBouncerIP(ctx, clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() - return } } diff --git a/pkg/database/bouncers.go 
b/pkg/database/bouncers.go index 04ef830ae72..f9e62bc6522 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -41,8 +41,19 @@ func (c *Client) BouncerUpdateBaseMetrics(ctx context.Context, bouncerName strin return nil } -func (c *Client) SelectBouncer(ctx context.Context, apiKeyHash string) (*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(ctx) +func (c *Client) SelectBouncers(ctx context.Context, apiKeyHash string, authType string) ([]*ent.Bouncer, error) { + //Order by ID so manually created bouncer will be first in the list to use as the base name + //when automatically creating a new entry if API keys are shared + result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash), bouncer.AuthTypeEQ(authType)).Order(ent.Asc(bouncer.FieldID)).All(ctx) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *Client) SelectBouncerWithIP(ctx context.Context, apiKeyHash string, clientIP string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash), bouncer.IPAddressEQ(clientIP)).First(ctx) if err != nil { return nil, err } @@ -68,13 +79,15 @@ func (c *Client) ListBouncers(ctx context.Context) ([]*ent.Bouncer, error) { return result, nil } -func (c *Client) CreateBouncer(ctx context.Context, name string, ipAddr string, apiKey string, authType string) (*ent.Bouncer, error) { +func (c *Client) CreateBouncer(ctx context.Context, name string, ipAddr string, apiKey string, authType string, autoCreated bool) (*ent.Bouncer, error) { bouncer, err := c.Ent.Bouncer. Create(). SetName(name). SetAPIKey(apiKey). SetRevoked(false). SetAuthType(authType). + SetIPAddress(ipAddr). + SetAutoCreated(autoCreated). 
Save(ctx) if err != nil { if ent.IsConstraintError(err) { diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 3b4d619e384..197f61cde19 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -43,6 +43,8 @@ type Bouncer struct { Osversion string `json:"osversion,omitempty"` // Featureflags holds the value of the "featureflags" field. Featureflags string `json:"featureflags,omitempty"` + // AutoCreated holds the value of the "auto_created" field. + AutoCreated bool `json:"auto_created"` selectValues sql.SelectValues } @@ -51,7 +53,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case bouncer.FieldRevoked: + case bouncer.FieldRevoked, bouncer.FieldAutoCreated: values[i] = new(sql.NullBool) case bouncer.FieldID: values[i] = new(sql.NullInt64) @@ -159,6 +161,12 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.Featureflags = value.String } + case bouncer.FieldAutoCreated: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field auto_created", values[i]) + } else if value.Valid { + b.AutoCreated = value.Bool + } default: b.selectValues.Set(columns[i], values[i]) } @@ -234,6 +242,9 @@ func (b *Bouncer) String() string { builder.WriteString(", ") builder.WriteString("featureflags=") builder.WriteString(b.Featureflags) + builder.WriteString(", ") + builder.WriteString("auto_created=") + builder.WriteString(fmt.Sprintf("%v", b.AutoCreated)) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index a6f62aeadd5..f25b5a5815a 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -39,6 +39,8 @@ const ( FieldOsversion = "osversion" // FieldFeatureflags holds the string denoting the featureflags field in the database. 
FieldFeatureflags = "featureflags" + // FieldAutoCreated holds the string denoting the auto_created field in the database. + FieldAutoCreated = "auto_created" // Table holds the table name of the bouncer in the database. Table = "bouncers" ) @@ -59,6 +61,7 @@ var Columns = []string{ FieldOsname, FieldOsversion, FieldFeatureflags, + FieldAutoCreated, } // ValidColumn reports if the column name is valid (part of the table columns). @@ -82,6 +85,8 @@ var ( DefaultIPAddress string // DefaultAuthType holds the default value on creation for the "auth_type" field. DefaultAuthType string + // DefaultAutoCreated holds the default value on creation for the "auto_created" field. + DefaultAutoCreated bool ) // OrderOption defines the ordering options for the Bouncer queries. @@ -156,3 +161,8 @@ func ByOsversion(opts ...sql.OrderTermOption) OrderOption { func ByFeatureflags(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldFeatureflags, opts...).ToFunc() } + +// ByAutoCreated orders the results by the auto_created field. +func ByAutoCreated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAutoCreated, opts...).ToFunc() +} diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index e02199bc0a9..79b8999354f 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -119,6 +119,11 @@ func Featureflags(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldFeatureflags, v)) } +// AutoCreated applies equality check predicate on the "auto_created" field. It's identical to AutoCreatedEQ. +func AutoCreated(v bool) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldAutoCreated, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. 
func CreatedAtEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) @@ -904,6 +909,16 @@ func FeatureflagsContainsFold(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldContainsFold(FieldFeatureflags, v)) } +// AutoCreatedEQ applies the EQ predicate on the "auto_created" field. +func AutoCreatedEQ(v bool) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldAutoCreated, v)) +} + +// AutoCreatedNEQ applies the NEQ predicate on the "auto_created" field. +func AutoCreatedNEQ(v bool) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldAutoCreated, v)) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Bouncer) predicate.Bouncer { return predicate.Bouncer(sql.AndPredicates(predicates...)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 29b23f87cf1..9ff4c0e0820 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -178,6 +178,20 @@ func (bc *BouncerCreate) SetNillableFeatureflags(s *string) *BouncerCreate { return bc } +// SetAutoCreated sets the "auto_created" field. +func (bc *BouncerCreate) SetAutoCreated(b bool) *BouncerCreate { + bc.mutation.SetAutoCreated(b) + return bc +} + +// SetNillableAutoCreated sets the "auto_created" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableAutoCreated(b *bool) *BouncerCreate { + if b != nil { + bc.SetAutoCreated(*b) + } + return bc +} + // Mutation returns the BouncerMutation object of the builder. func (bc *BouncerCreate) Mutation() *BouncerMutation { return bc.mutation @@ -229,6 +243,10 @@ func (bc *BouncerCreate) defaults() { v := bouncer.DefaultAuthType bc.mutation.SetAuthType(v) } + if _, ok := bc.mutation.AutoCreated(); !ok { + v := bouncer.DefaultAutoCreated + bc.mutation.SetAutoCreated(v) + } } // check runs all checks and user-defined validators on the builder. 
@@ -251,6 +269,9 @@ func (bc *BouncerCreate) check() error { if _, ok := bc.mutation.AuthType(); !ok { return &ValidationError{Name: "auth_type", err: errors.New(`ent: missing required field "Bouncer.auth_type"`)} } + if _, ok := bc.mutation.AutoCreated(); !ok { + return &ValidationError{Name: "auto_created", err: errors.New(`ent: missing required field "Bouncer.auto_created"`)} + } return nil } @@ -329,6 +350,10 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) _node.Featureflags = value } + if value, ok := bc.mutation.AutoCreated(); ok { + _spec.SetField(bouncer.FieldAutoCreated, field.TypeBool, value) + _node.AutoCreated = value + } return _node, _spec } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 986f5bc8c67..dae248c7f38 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -74,6 +74,7 @@ var ( {Name: "osname", Type: field.TypeString, Nullable: true}, {Name: "osversion", Type: field.TypeString, Nullable: true}, {Name: "featureflags", Type: field.TypeString, Nullable: true}, + {Name: "auto_created", Type: field.TypeBool, Default: false}, } // BouncersTable holds the schema information for the "bouncers" table. BouncersTable = &schema.Table{ diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 5c6596f3db4..fa1ccb3da58 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -2471,6 +2471,7 @@ type BouncerMutation struct { osname *string osversion *string featureflags *string + auto_created *bool clearedFields map[string]struct{} done bool oldValue func(context.Context) (*Bouncer, error) @@ -3134,6 +3135,42 @@ func (m *BouncerMutation) ResetFeatureflags() { delete(m.clearedFields, bouncer.FieldFeatureflags) } +// SetAutoCreated sets the "auto_created" field. 
+func (m *BouncerMutation) SetAutoCreated(b bool) { + m.auto_created = &b +} + +// AutoCreated returns the value of the "auto_created" field in the mutation. +func (m *BouncerMutation) AutoCreated() (r bool, exists bool) { + v := m.auto_created + if v == nil { + return + } + return *v, true +} + +// OldAutoCreated returns the old "auto_created" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldAutoCreated(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAutoCreated is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAutoCreated requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAutoCreated: %w", err) + } + return oldValue.AutoCreated, nil +} + +// ResetAutoCreated resets all changes to the "auto_created" field. +func (m *BouncerMutation) ResetAutoCreated() { + m.auto_created = nil +} + // Where appends a list predicates to the BouncerMutation builder. func (m *BouncerMutation) Where(ps ...predicate.Bouncer) { m.predicates = append(m.predicates, ps...) @@ -3168,7 +3205,7 @@ func (m *BouncerMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *BouncerMutation) Fields() []string { - fields := make([]string, 0, 13) + fields := make([]string, 0, 14) if m.created_at != nil { fields = append(fields, bouncer.FieldCreatedAt) } @@ -3208,6 +3245,9 @@ func (m *BouncerMutation) Fields() []string { if m.featureflags != nil { fields = append(fields, bouncer.FieldFeatureflags) } + if m.auto_created != nil { + fields = append(fields, bouncer.FieldAutoCreated) + } return fields } @@ -3242,6 +3282,8 @@ func (m *BouncerMutation) Field(name string) (ent.Value, bool) { return m.Osversion() case bouncer.FieldFeatureflags: return m.Featureflags() + case bouncer.FieldAutoCreated: + return m.AutoCreated() } return nil, false } @@ -3277,6 +3319,8 @@ func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldOsversion(ctx) case bouncer.FieldFeatureflags: return m.OldFeatureflags(ctx) + case bouncer.FieldAutoCreated: + return m.OldAutoCreated(ctx) } return nil, fmt.Errorf("unknown Bouncer field %s", name) } @@ -3377,6 +3421,13 @@ func (m *BouncerMutation) SetField(name string, value ent.Value) error { } m.SetFeatureflags(v) return nil + case bouncer.FieldAutoCreated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAutoCreated(v) + return nil } return fmt.Errorf("unknown Bouncer field %s", name) } @@ -3510,6 +3561,9 @@ func (m *BouncerMutation) ResetField(name string) error { case bouncer.FieldFeatureflags: m.ResetFeatureflags() return nil + case bouncer.FieldAutoCreated: + m.ResetAutoCreated() + return nil } return fmt.Errorf("unknown Bouncer field %s", name) } diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 15413490633..49921a17b03 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -76,6 +76,10 @@ func init() { bouncerDescAuthType := bouncerFields[9].Descriptor() // bouncer.DefaultAuthType holds the default value on creation for the auth_type field. 
bouncer.DefaultAuthType = bouncerDescAuthType.Default.(string) + // bouncerDescAutoCreated is the schema descriptor for auto_created field. + bouncerDescAutoCreated := bouncerFields[13].Descriptor() + // bouncer.DefaultAutoCreated holds the default value on creation for the auto_created field. + bouncer.DefaultAutoCreated = bouncerDescAutoCreated.Default.(bool) configitemFields := schema.ConfigItem{}.Fields() _ = configitemFields // configitemDescCreatedAt is the schema descriptor for created_at field. diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 599c4c404fc..c176bf0f766 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -33,6 +33,8 @@ func (Bouncer) Fields() []ent.Field { field.String("osname").Optional(), field.String("osversion").Optional(), field.String("featureflags").Optional(), + // Old auto-created TLS bouncers will have a wrong value for this field + field.Bool("auto_created").StructTag(`json:"auto_created"`).Default(false).Immutable(), } } diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index f99913dcee5..b1c90116dd2 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -63,7 +63,7 @@ teardown() { @test "delete non-existent bouncer" { # this is a fatal error, which is not consistent with "machines delete" rune -1 cscli bouncers delete something - assert_stderr --partial "unable to delete bouncer: 'something' does not exist" + assert_stderr --partial "unable to delete bouncer something: ent: bouncer not found" rune -0 cscli bouncers delete something --ignore-missing refute_stderr } @@ -144,3 +144,56 @@ teardown() { rune -0 cscli bouncers prune assert_output 'No bouncers to prune.' 
} + +curl_localhost() { + [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } + local path=$1 + shift + curl "localhost:8080$path" -sS --fail-with-body -H "X-Api-Key: $API_KEY" "$@" +} + +# We can't use curl-with-key here, as we want to query localhost, not 127.0.0.1 +@test "multiple bouncers sharing api key" { + export API_KEY=bouncerkey + + # crowdsec needs to listen on all interfaces + rune -0 ./instance-crowdsec stop + rune -0 config_set 'del(.api.server.listen_socket) | del(.api.server.listen_uri)' + echo "{'api':{'server':{'listen_uri':0.0.0.0:8080}}}" >"${CONFIG_YAML}.local" + + rune -0 ./instance-crowdsec start + + # add a decision for our bouncers + rune -0 cscli decisions add -i '1.2.3.5' + + rune -0 cscli bouncers add test-auto -k "$API_KEY" + + # query with 127.0.0.1 as source ip + rune -0 curl_localhost "/v1/decisions/stream" -4 + rune -0 jq -r '.new' <(output) + assert_output --partial '1.2.3.5' + + # now with ::1, we should get the same IP, even though we are using the same key + rune -0 curl_localhost "/v1/decisions/stream" -6 + rune -0 jq -r '.new' <(output) + assert_output --partial '1.2.3.5' + + rune -0 cscli bouncers list -o json + rune -0 jq -c '[.[] | [.name,.revoked,.ip_address,.auto_created]]' <(output) + assert_json '[["test-auto",false,"127.0.0.1",false],["test-auto@::1",false,"::1",true]]' + + # check the 2nd bouncer was created automatically + rune -0 cscli bouncers inspect "test-auto@::1" -o json + rune -0 jq -r '.ip_address' <(output) + assert_output --partial '::1' + + # attempt to delete the auto-created bouncer, it should fail + rune -0 cscli bouncers delete 'test-auto@::1' + assert_stderr --partial 'cannot be deleted' + + # delete the "real" bouncer, it should delete both + rune -0 cscli bouncers delete 'test-auto' + + rune -0 cscli bouncers list -o json + assert_json [] +} diff --git a/test/lib/init/crowdsec-daemon b/test/lib/init/crowdsec-daemon index a232f344b6a..ba8e98992db 100755 --- 
a/test/lib/init/crowdsec-daemon +++ b/test/lib/init/crowdsec-daemon @@ -51,7 +51,11 @@ stop() { PGID="$(ps -o pgid= -p "$(cat "${DAEMON_PID}")" | tr -d ' ')" # ps above should work on linux, freebsd, busybox.. if [[ -n "${PGID}" ]]; then - kill -- "-${PGID}" + kill -- "-${PGID}" + + while pgrep -g "${PGID}" >/dev/null; do + sleep .05 + done fi rm -f -- "${DAEMON_PID}" From 523164f60527819335f8d231e3c06a6cc26f83fa Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 20 Nov 2024 12:12:20 +0100 Subject: [PATCH 355/581] merge from master for 1.6.4 (#3337) --- .github/codecov-ignore-generated.sh | 11 - .github/codecov.yml | 27 +- .github/generate-codecov-yml.sh | 31 + .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 6 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/docker-tests.yml | 15 +- .github/workflows/go-tests-windows.yml | 8 +- .github/workflows/go-tests.yml | 36 +- .github/workflows/publish-tarball-release.yml | 2 +- .golangci.yml | 152 +-- Dockerfile | 4 +- Dockerfile.debian | 4 +- Makefile | 100 +- README.md | 2 +- azure-pipelines.yml | 2 +- cmd/crowdsec-cli/ask/ask.go | 20 + cmd/crowdsec-cli/bouncers.go | 537 ----------- cmd/crowdsec-cli/{ => clialert}/alerts.go | 133 +-- cmd/crowdsec-cli/clialert/sanitize.go | 26 + .../{alerts_table.go => clialert/table.go} | 4 +- cmd/crowdsec-cli/clibouncer/add.go | 72 ++ cmd/crowdsec-cli/clibouncer/bouncers.go | 135 +++ cmd/crowdsec-cli/clibouncer/delete.go | 99 ++ cmd/crowdsec-cli/clibouncer/inspect.go | 99 ++ cmd/crowdsec-cli/clibouncer/list.go | 117 +++ cmd/crowdsec-cli/clibouncer/prune.go | 85 ++ cmd/crowdsec-cli/{ => clicapi}/capi.go | 132 +-- cmd/crowdsec-cli/{ => cliconsole}/console.go | 122 +-- .../{ => cliconsole}/console_table.go | 2 +- .../{ => clidecision}/decisions.go | 50 +- .../import.go} | 
118 +-- .../table.go} | 5 +- cmd/crowdsec-cli/clientinfo/clientinfo.go | 39 + cmd/crowdsec-cli/{ => cliexplain}/explain.go | 23 +- cmd/crowdsec-cli/{ => clihub}/hub.go | 45 +- cmd/crowdsec-cli/{ => clihub}/item_metrics.go | 28 +- cmd/crowdsec-cli/{ => clihub}/items.go | 31 +- cmd/crowdsec-cli/{ => clihub}/utils_table.go | 10 +- cmd/crowdsec-cli/clihubtest/clean.go | 31 + cmd/crowdsec-cli/clihubtest/coverage.go | 166 ++++ cmd/crowdsec-cli/clihubtest/create.go | 158 ++++ cmd/crowdsec-cli/clihubtest/eval.go | 44 + cmd/crowdsec-cli/clihubtest/explain.go | 76 ++ cmd/crowdsec-cli/clihubtest/hubtest.go | 81 ++ cmd/crowdsec-cli/clihubtest/info.go | 44 + cmd/crowdsec-cli/clihubtest/list.go | 42 + cmd/crowdsec-cli/clihubtest/run.go | 213 +++++ .../{hubtest_table.go => clihubtest/table.go} | 48 +- .../{hubappsec.go => cliitem/appsec.go} | 8 +- .../collection.go} | 4 +- .../{hubcontext.go => cliitem/context.go} | 4 +- cmd/crowdsec-cli/{ => cliitem}/hubscenario.go | 4 +- .../{itemcli.go => cliitem/item.go} | 43 +- .../{hubparser.go => cliitem/parser.go} | 4 +- .../postoverflow.go} | 4 +- .../{item_suggest.go => cliitem/suggest.go} | 22 +- .../{lapi.go => clilapi/context.go} | 253 +---- cmd/crowdsec-cli/clilapi/lapi.go | 42 + cmd/crowdsec-cli/clilapi/register.go | 117 +++ cmd/crowdsec-cli/clilapi/status.go | 115 +++ .../{lapi_test.go => clilapi/status_test.go} | 2 +- cmd/crowdsec-cli/clilapi/utils.go | 24 + cmd/crowdsec-cli/climachine/add.go | 152 +++ cmd/crowdsec-cli/climachine/delete.go | 52 + cmd/crowdsec-cli/{ => climachine}/flag.go | 2 +- cmd/crowdsec-cli/climachine/inspect.go | 184 ++++ cmd/crowdsec-cli/climachine/list.go | 137 +++ cmd/crowdsec-cli/climachine/machines.go | 132 +++ cmd/crowdsec-cli/climachine/prune.go | 96 ++ cmd/crowdsec-cli/climachine/validate.go | 35 + cmd/crowdsec-cli/climetrics/list.go | 6 +- cmd/crowdsec-cli/climetrics/metrics.go | 2 +- cmd/crowdsec-cli/climetrics/show.go | 3 +- cmd/crowdsec-cli/climetrics/statacquis.go | 4 +- 
cmd/crowdsec-cli/climetrics/statalert.go | 4 +- .../climetrics/statappsecengine.go | 4 +- cmd/crowdsec-cli/climetrics/statappsecrule.go | 2 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 2 +- cmd/crowdsec-cli/climetrics/statbucket.go | 4 +- cmd/crowdsec-cli/climetrics/statdecision.go | 4 +- cmd/crowdsec-cli/climetrics/statlapi.go | 4 +- .../climetrics/statlapibouncer.go | 4 +- .../climetrics/statlapidecision.go | 4 +- .../climetrics/statlapimachine.go | 4 +- cmd/crowdsec-cli/climetrics/statparser.go | 4 +- cmd/crowdsec-cli/climetrics/statstash.go | 4 +- cmd/crowdsec-cli/climetrics/statwhitelist.go | 4 +- cmd/crowdsec-cli/climetrics/store.go | 2 +- .../{ => clinotifications}/notifications.go | 52 +- .../notifications_table.go | 2 +- cmd/crowdsec-cli/clipapi/papi.go | 174 ++++ cmd/crowdsec-cli/clisetup/setup.go | 307 ++++++ .../{ => clisimulation}/simulation.go | 26 +- cmd/crowdsec-cli/{ => clisupport}/support.go | 134 +-- cmd/crowdsec-cli/config.go | 2 +- cmd/crowdsec-cli/config_backup.go | 2 +- cmd/crowdsec-cli/config_feature_flags.go | 2 +- cmd/crowdsec-cli/config_restore.go | 2 +- cmd/crowdsec-cli/config_show.go | 8 +- cmd/crowdsec-cli/config_showyaml.go | 2 +- cmd/crowdsec-cli/dashboard.go | 16 +- cmd/crowdsec-cli/hubtest.go | 746 --------------- cmd/crowdsec-cli/idgen/machineid.go | 48 + cmd/crowdsec-cli/idgen/password.go | 32 + cmd/crowdsec-cli/machines.go | 771 --------------- cmd/crowdsec-cli/main.go | 94 +- cmd/crowdsec-cli/messages.go | 23 - cmd/crowdsec-cli/papi.go | 148 --- cmd/crowdsec-cli/reload/reload.go | 6 + cmd/crowdsec-cli/reload/reload_freebsd.go | 4 + cmd/crowdsec-cli/reload/reload_linux.go | 4 + cmd/crowdsec-cli/reload/reload_windows.go | 3 + cmd/crowdsec-cli/require/require.go | 8 + cmd/crowdsec-cli/setup.go | 302 +----- cmd/crowdsec-cli/setup_stub.go | 9 + cmd/crowdsec-cli/utils.go | 63 -- cmd/crowdsec/api.go | 12 +- cmd/crowdsec/appsec.go | 18 + cmd/crowdsec/appsec_stub.go | 11 + cmd/crowdsec/crowdsec.go | 15 +- cmd/crowdsec/lapiclient.go | 
42 +- cmd/crowdsec/lpmetrics.go | 8 +- cmd/crowdsec/main.go | 6 +- cmd/crowdsec/metrics.go | 6 +- cmd/crowdsec/pour.go | 6 +- cmd/crowdsec/serve.go | 10 +- cmd/notification-dummy/main.go | 4 +- cmd/notification-email/main.go | 6 +- cmd/notification-file/main.go | 6 +- cmd/notification-http/main.go | 4 +- cmd/notification-sentinel/main.go | 4 +- cmd/notification-slack/main.go | 4 +- cmd/notification-splunk/main.go | 4 +- debian/rules | 2 +- docker/test/Pipfile.lock | 331 +++---- go.mod | 28 +- go.sum | 50 +- pkg/acquisition/acquisition.go | 274 ++++-- pkg/acquisition/acquisition_test.go | 89 +- pkg/acquisition/appsec.go | 12 + pkg/acquisition/cloudwatch.go | 12 + pkg/acquisition/docker.go | 12 + pkg/acquisition/file.go | 12 + pkg/acquisition/http.go | 12 + pkg/acquisition/journalctl.go | 12 + pkg/acquisition/k8s.go | 12 + pkg/acquisition/kafka.go | 12 + pkg/acquisition/kinesis.go | 12 + pkg/acquisition/loki.go | 12 + pkg/acquisition/modules/appsec/appsec.go | 53 +- .../modules/appsec/appsec_lnx_test.go | 2 +- .../modules/appsec/appsec_rules_test.go | 119 ++- .../modules/appsec/appsec_runner.go | 44 +- .../modules/appsec/appsec_runner_test.go | 139 +++ pkg/acquisition/modules/appsec/appsec_test.go | 12 + .../modules/appsec/appsec_win_test.go | 1 - pkg/acquisition/modules/appsec/utils.go | 201 ++-- .../modules/cloudwatch/cloudwatch.go | 93 +- .../modules/cloudwatch/cloudwatch_test.go | 19 +- pkg/acquisition/modules/docker/docker.go | 73 +- pkg/acquisition/modules/docker/docker_test.go | 9 +- pkg/acquisition/modules/docker/utils.go | 2 +- pkg/acquisition/modules/file/file.go | 201 ++-- pkg/acquisition/modules/file/file_test.go | 8 +- pkg/acquisition/modules/http/http.go | 416 ++++++++ pkg/acquisition/modules/http/http_test.go | 785 ++++++++++++++++ pkg/acquisition/modules/http/testdata/ca.crt | 23 + .../modules/http/testdata/client.crt | 24 + .../modules/http/testdata/client.key | 27 + .../modules/http/testdata/server.crt | 23 + .../modules/http/testdata/server.key | 
27 + .../modules/journalctl/journalctl.go | 34 +- .../modules/journalctl/journalctl_test.go | 8 +- pkg/acquisition/modules/kafka/kafka.go | 31 +- pkg/acquisition/modules/kafka/kafka_test.go | 14 +- pkg/acquisition/modules/kinesis/kinesis.go | 44 +- .../modules/kinesis/kinesis_test.go | 26 +- .../modules/kubernetesaudit/k8s_audit.go | 18 +- .../modules/kubernetesaudit/k8s_audit_test.go | 9 +- .../loki/internal/lokiclient/loki_client.go | 12 +- pkg/acquisition/modules/loki/loki.go | 67 +- pkg/acquisition/modules/loki/loki_test.go | 49 +- pkg/acquisition/modules/s3/s3.go | 48 +- pkg/acquisition/modules/s3/s3_test.go | 9 +- .../internal/parser/rfc3164/parse_test.go | 162 ++-- pkg/acquisition/modules/syslog/syslog.go | 19 +- pkg/acquisition/modules/syslog/syslog_test.go | 16 +- .../modules/wineventlog/test_files/Setup.evtx | Bin 0 -> 69632 bytes .../modules/wineventlog/wineventlog.go | 5 +- .../wineventlog/wineventlog_windows.go | 178 +++- ...og_test.go => wineventlog_windows_test.go} | 100 +- pkg/acquisition/s3.go | 12 + pkg/acquisition/syslog.go | 12 + pkg/acquisition/wineventlog.go | 12 + pkg/alertcontext/alertcontext.go | 156 ++- pkg/alertcontext/alertcontext_test.go | 162 ++++ pkg/alertcontext/config.go | 12 +- pkg/apiclient/alerts_service_test.go | 17 +- pkg/apiclient/auth_jwt.go | 7 +- pkg/apiclient/auth_service_test.go | 27 +- pkg/apiclient/client.go | 27 +- pkg/apiclient/client_http.go | 4 +- pkg/apiclient/client_http_test.go | 6 +- pkg/apiclient/client_test.go | 25 +- pkg/apiclient/config.go | 3 +- pkg/apiclient/decisions_service.go | 15 +- pkg/apiclient/decisions_service_test.go | 56 +- pkg/apiclient/resperr.go | 4 +- pkg/apiclient/useragent/useragent.go | 9 + pkg/apiserver/alerts_test.go | 238 ++--- pkg/apiserver/api_key_test.go | 68 +- pkg/apiserver/apic.go | 168 ++-- pkg/apiserver/apic_metrics.go | 49 +- pkg/apiserver/apic_metrics_test.go | 17 +- pkg/apiserver/apic_test.go | 202 ++-- pkg/apiserver/apiserver.go | 165 ++-- pkg/apiserver/apiserver_test.go | 
78 +- pkg/apiserver/controllers/controller.go | 3 - pkg/apiserver/controllers/v1/alerts.go | 42 +- pkg/apiserver/controllers/v1/controller.go | 4 - pkg/apiserver/controllers/v1/decisions.go | 115 ++- pkg/apiserver/controllers/v1/heartbeat.go | 4 +- pkg/apiserver/controllers/v1/machines.go | 4 +- pkg/apiserver/controllers/v1/metrics.go | 15 +- pkg/apiserver/controllers/v1/usagemetrics.go | 17 +- pkg/apiserver/decisions_test.go | 111 ++- pkg/apiserver/heartbeat_test.go | 8 +- pkg/apiserver/jwt_test.go | 24 +- pkg/apiserver/machines_test.go | 66 +- pkg/apiserver/middlewares/v1/api_key.go | 92 +- pkg/apiserver/middlewares/v1/cache.go | 2 +- pkg/apiserver/middlewares/v1/crl.go | 10 +- pkg/apiserver/middlewares/v1/jwt.go | 19 +- pkg/apiserver/middlewares/v1/ocsp.go | 2 +- pkg/apiserver/papi.go | 24 +- pkg/apiserver/papi_cmd.go | 19 +- pkg/apiserver/usage_metrics_test.go | 54 +- pkg/appsec/appsec.go | 145 ++- pkg/appsec/appsec_rules_collection.go | 10 +- pkg/appsec/loader.go | 12 +- pkg/csconfig/api.go | 28 +- pkg/csconfig/api_test.go | 7 +- pkg/csconfig/config_paths.go | 10 +- pkg/csconfig/config_test.go | 2 +- pkg/csconfig/console.go | 2 +- pkg/csconfig/crowdsec_service.go | 6 +- pkg/csconfig/simulation.go | 4 +- pkg/csconfig/simulation_test.go | 4 +- pkg/csplugin/broker.go | 16 +- pkg/csplugin/broker_suite_test.go | 12 +- pkg/csplugin/broker_test.go | 28 +- pkg/csplugin/broker_win_test.go | 8 +- pkg/csplugin/listfiles_test.go | 2 +- pkg/csplugin/notifier.go | 18 +- pkg/csplugin/utils.go | 4 +- pkg/csplugin/utils_windows.go | 2 +- pkg/csplugin/utils_windows_test.go | 1 - pkg/csplugin/watcher_test.go | 18 +- pkg/cticlient/client.go | 5 +- pkg/cwhub/cwhub.go | 12 +- pkg/cwhub/cwhub_test.go | 2 +- pkg/cwhub/doc.go | 2 +- pkg/cwhub/errors.go | 6 +- pkg/cwhub/hub.go | 95 +- pkg/cwhub/relativepath.go | 28 + pkg/cwhub/relativepath_test.go | 72 ++ pkg/cwhub/sync.go | 200 ++-- pkg/cwversion/component/component.go | 35 + pkg/cwversion/constraint/constraint.go | 32 + 
pkg/cwversion/version.go | 70 +- pkg/database/alerts.go | 411 ++++---- pkg/database/bouncers.go | 58 +- pkg/database/config.go | 17 +- pkg/database/database.go | 2 - pkg/database/decisions.go | 90 +- pkg/database/ent/bouncer.go | 13 +- pkg/database/ent/bouncer/bouncer.go | 10 + pkg/database/ent/bouncer/where.go | 15 + pkg/database/ent/bouncer_create.go | 25 + pkg/database/ent/migrate/schema.go | 1 + pkg/database/ent/mutation.go | 56 +- pkg/database/ent/runtime.go | 4 + pkg/database/ent/schema/bouncer.go | 2 + pkg/database/errors.go | 4 +- pkg/database/flush.go | 49 +- pkg/database/lock.go | 36 +- pkg/database/machines.go | 65 +- pkg/database/metrics.go | 19 +- pkg/database/utils.go | 5 +- pkg/dumps/parser_dump.go | 6 +- pkg/exprhelpers/debugger.go | 5 +- pkg/exprhelpers/debugger_test.go | 36 +- pkg/exprhelpers/helpers.go | 24 +- pkg/exprhelpers/xml.go | 100 +- pkg/fflag/features.go | 2 +- pkg/hubtest/coverage.go | 4 +- pkg/hubtest/hubtest.go | 4 +- pkg/hubtest/hubtest_item.go | 43 +- pkg/hubtest/nucleirunner.go | 6 +- pkg/hubtest/regexp.go | 4 +- pkg/hubtest/utils.go | 2 +- pkg/leakybucket/buckets_test.go | 4 +- pkg/leakybucket/manager_load.go | 273 +++--- pkg/leakybucket/manager_run.go | 6 +- pkg/leakybucket/overflows.go | 109 ++- pkg/leakybucket/timemachine.go | 2 +- pkg/leakybucket/trigger.go | 18 +- pkg/longpollclient/client.go | 29 +- pkg/metabase/api.go | 4 +- pkg/metabase/metabase.go | 4 +- pkg/models/generate.go | 2 +- pkg/modelscapi/add_signals_request.go | 5 + pkg/modelscapi/add_signals_request_item.go | 9 + .../add_signals_request_item_decisions.go | 5 + ...add_signals_request_item_decisions_item.go | 3 + pkg/modelscapi/centralapi_swagger.yaml | 888 ++++++++++++++++++ pkg/modelscapi/decisions_delete_request.go | 5 + pkg/modelscapi/decisions_sync_request.go | 5 + pkg/modelscapi/decisions_sync_request_item.go | 1 + .../decisions_sync_request_item_decisions.go | 5 + pkg/modelscapi/generate.go | 4 + .../get_decisions_stream_response.go | 5 + 
.../get_decisions_stream_response_deleted.go | 5 + .../get_decisions_stream_response_links.go | 5 + .../get_decisions_stream_response_new.go | 5 + .../get_decisions_stream_response_new_item.go | 5 + pkg/modelscapi/metrics_request.go | 10 + pkg/parser/enrich_date.go | 4 +- pkg/parser/enrich_unmarshal.go | 2 +- pkg/parser/parsing_test.go | 2 +- pkg/parser/stage.go | 6 +- pkg/parser/unix_parser.go | 37 +- pkg/protobufs/generate.go | 14 + pkg/protobufs/notifier.pb.go | 132 +-- pkg/protobufs/notifier_grpc.pb.go | 159 ++++ pkg/protobufs/plugin_interface.go | 47 - pkg/setup/detect.go | 8 +- pkg/setup/detect_test.go | 3 +- pkg/setup/install.go | 2 +- pkg/setup/units.go | 2 +- pkg/types/appsec_event.go | 8 +- pkg/types/event.go | 54 +- rpm/SPECS/crowdsec.spec | 2 +- test/ansible/vagrant/fedora-40/Vagrantfile | 2 +- test/ansible/vagrant/fedora-41/Vagrantfile | 13 + test/ansible/vagrant/fedora-41/skip | 9 + .../vagrant/opensuse-leap-15/Vagrantfile | 10 + test/ansible/vagrant/opensuse-leap-15/skip | 9 + test/bats.mk | 10 +- test/bats/01_crowdsec.bats | 37 +- test/bats/01_cscli_lapi.bats | 12 +- test/bats/03_noagent.bats | 2 +- test/bats/04_capi.bats | 27 +- test/bats/04_nocapi.bats | 2 +- test/bats/07_setup.bats | 2 +- test/bats/09_socket.bats | 12 +- test/bats/10_bouncers.bats | 55 +- test/bats/20_hub_items.bats | 92 +- test/bats/90_decisions.bats | 12 +- test/instance-data | 14 +- test/lib/init/crowdsec-daemon | 6 +- test/run-tests | 18 +- 366 files changed, 11596 insertions(+), 6794 deletions(-) delete mode 100755 .github/codecov-ignore-generated.sh create mode 100755 .github/generate-codecov-yml.sh create mode 100644 cmd/crowdsec-cli/ask/ask.go delete mode 100644 cmd/crowdsec-cli/bouncers.go rename cmd/crowdsec-cli/{ => clialert}/alerts.go (77%) create mode 100644 cmd/crowdsec-cli/clialert/sanitize.go rename cmd/crowdsec-cli/{alerts_table.go => clialert/table.go} (97%) create mode 100644 cmd/crowdsec-cli/clibouncer/add.go create mode 100644 
cmd/crowdsec-cli/clibouncer/bouncers.go create mode 100644 cmd/crowdsec-cli/clibouncer/delete.go create mode 100644 cmd/crowdsec-cli/clibouncer/inspect.go create mode 100644 cmd/crowdsec-cli/clibouncer/list.go create mode 100644 cmd/crowdsec-cli/clibouncer/prune.go rename cmd/crowdsec-cli/{ => clicapi}/capi.go (57%) rename cmd/crowdsec-cli/{ => cliconsole}/console.go (79%) rename cmd/crowdsec-cli/{ => cliconsole}/console_table.go (98%) rename cmd/crowdsec-cli/{ => clidecision}/decisions.go (90%) rename cmd/crowdsec-cli/{decisions_import.go => clidecision/import.go} (70%) rename cmd/crowdsec-cli/{decisions_table.go => clidecision/table.go} (92%) create mode 100644 cmd/crowdsec-cli/clientinfo/clientinfo.go rename cmd/crowdsec-cli/{ => cliexplain}/explain.go (92%) rename cmd/crowdsec-cli/{ => clihub}/hub.go (85%) rename cmd/crowdsec-cli/{ => clihub}/item_metrics.go (89%) rename cmd/crowdsec-cli/{ => clihub}/items.go (84%) rename cmd/crowdsec-cli/{ => clihub}/utils_table.go (92%) create mode 100644 cmd/crowdsec-cli/clihubtest/clean.go create mode 100644 cmd/crowdsec-cli/clihubtest/coverage.go create mode 100644 cmd/crowdsec-cli/clihubtest/create.go create mode 100644 cmd/crowdsec-cli/clihubtest/eval.go create mode 100644 cmd/crowdsec-cli/clihubtest/explain.go create mode 100644 cmd/crowdsec-cli/clihubtest/hubtest.go create mode 100644 cmd/crowdsec-cli/clihubtest/info.go create mode 100644 cmd/crowdsec-cli/clihubtest/list.go create mode 100644 cmd/crowdsec-cli/clihubtest/run.go rename cmd/crowdsec-cli/{hubtest_table.go => clihubtest/table.go} (50%) rename cmd/crowdsec-cli/{hubappsec.go => cliitem/appsec.go} (93%) rename cmd/crowdsec-cli/{hubcollection.go => cliitem/collection.go} (95%) rename cmd/crowdsec-cli/{hubcontext.go => cliitem/context.go} (94%) rename cmd/crowdsec-cli/{ => cliitem}/hubscenario.go (95%) rename cmd/crowdsec-cli/{itemcli.go => cliitem/item.go} (93%) rename cmd/crowdsec-cli/{hubparser.go => cliitem/parser.go} (95%) rename 
cmd/crowdsec-cli/{hubpostoverflow.go => cliitem/postoverflow.go} (95%) rename cmd/crowdsec-cli/{item_suggest.go => cliitem/suggest.go} (77%) rename cmd/crowdsec-cli/{lapi.go => clilapi/context.go} (59%) create mode 100644 cmd/crowdsec-cli/clilapi/lapi.go create mode 100644 cmd/crowdsec-cli/clilapi/register.go create mode 100644 cmd/crowdsec-cli/clilapi/status.go rename cmd/crowdsec-cli/{lapi_test.go => clilapi/status_test.go} (98%) create mode 100644 cmd/crowdsec-cli/clilapi/utils.go create mode 100644 cmd/crowdsec-cli/climachine/add.go create mode 100644 cmd/crowdsec-cli/climachine/delete.go rename cmd/crowdsec-cli/{ => climachine}/flag.go (96%) create mode 100644 cmd/crowdsec-cli/climachine/inspect.go create mode 100644 cmd/crowdsec-cli/climachine/list.go create mode 100644 cmd/crowdsec-cli/climachine/machines.go create mode 100644 cmd/crowdsec-cli/climachine/prune.go create mode 100644 cmd/crowdsec-cli/climachine/validate.go rename cmd/crowdsec-cli/{ => clinotifications}/notifications.go (89%) rename cmd/crowdsec-cli/{ => clinotifications}/notifications_table.go (97%) create mode 100644 cmd/crowdsec-cli/clipapi/papi.go create mode 100644 cmd/crowdsec-cli/clisetup/setup.go rename cmd/crowdsec-cli/{ => clisimulation}/simulation.go (91%) rename cmd/crowdsec-cli/{ => clisupport}/support.go (82%) delete mode 100644 cmd/crowdsec-cli/hubtest.go create mode 100644 cmd/crowdsec-cli/idgen/machineid.go create mode 100644 cmd/crowdsec-cli/idgen/password.go delete mode 100644 cmd/crowdsec-cli/machines.go delete mode 100644 cmd/crowdsec-cli/messages.go delete mode 100644 cmd/crowdsec-cli/papi.go create mode 100644 cmd/crowdsec-cli/reload/reload.go create mode 100644 cmd/crowdsec-cli/reload/reload_freebsd.go create mode 100644 cmd/crowdsec-cli/reload/reload_linux.go create mode 100644 cmd/crowdsec-cli/reload/reload_windows.go create mode 100644 cmd/crowdsec-cli/setup_stub.go delete mode 100644 cmd/crowdsec-cli/utils.go create mode 100644 cmd/crowdsec/appsec.go create mode 
100644 cmd/crowdsec/appsec_stub.go create mode 100644 pkg/acquisition/appsec.go create mode 100644 pkg/acquisition/cloudwatch.go create mode 100644 pkg/acquisition/docker.go create mode 100644 pkg/acquisition/file.go create mode 100644 pkg/acquisition/http.go create mode 100644 pkg/acquisition/journalctl.go create mode 100644 pkg/acquisition/k8s.go create mode 100644 pkg/acquisition/kafka.go create mode 100644 pkg/acquisition/kinesis.go create mode 100644 pkg/acquisition/loki.go create mode 100644 pkg/acquisition/modules/appsec/appsec_runner_test.go create mode 100644 pkg/acquisition/modules/http/http.go create mode 100644 pkg/acquisition/modules/http/http_test.go create mode 100644 pkg/acquisition/modules/http/testdata/ca.crt create mode 100644 pkg/acquisition/modules/http/testdata/client.crt create mode 100644 pkg/acquisition/modules/http/testdata/client.key create mode 100644 pkg/acquisition/modules/http/testdata/server.crt create mode 100644 pkg/acquisition/modules/http/testdata/server.key create mode 100644 pkg/acquisition/modules/wineventlog/test_files/Setup.evtx rename pkg/acquisition/modules/wineventlog/{wineventlog_test.go => wineventlog_windows_test.go} (71%) create mode 100644 pkg/acquisition/s3.go create mode 100644 pkg/acquisition/syslog.go create mode 100644 pkg/acquisition/wineventlog.go create mode 100644 pkg/apiclient/useragent/useragent.go create mode 100644 pkg/cwhub/relativepath.go create mode 100644 pkg/cwhub/relativepath_test.go create mode 100644 pkg/cwversion/component/component.go create mode 100644 pkg/cwversion/constraint/constraint.go create mode 100644 pkg/modelscapi/centralapi_swagger.yaml create mode 100644 pkg/modelscapi/generate.go create mode 100644 pkg/protobufs/generate.go create mode 100644 pkg/protobufs/notifier_grpc.pb.go delete mode 100644 pkg/protobufs/plugin_interface.go create mode 100644 test/ansible/vagrant/fedora-41/Vagrantfile create mode 100644 test/ansible/vagrant/fedora-41/skip create mode 100644 
test/ansible/vagrant/opensuse-leap-15/Vagrantfile create mode 100644 test/ansible/vagrant/opensuse-leap-15/skip diff --git a/.github/codecov-ignore-generated.sh b/.github/codecov-ignore-generated.sh deleted file mode 100755 index 3c896d47be7..00000000000 --- a/.github/codecov-ignore-generated.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -# Run this from the repository root: -# -# .github/codecov-ignore-generated.sh >> .github/codecov.yml - -find . -name "*.go" | while read -r file; do - if head -n 1 "$file" | grep -q "Code generated by"; then - echo " - \"$file\"" - fi -done diff --git a/.github/codecov.yml b/.github/codecov.yml index 82598c15511..e3a81070324 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -1,5 +1,8 @@ # we measure coverage but don't enforce it # https://docs.codecov.com/docs/codecov-yaml +codecov: + require_ci_to_pass: false + coverage: status: patch: @@ -10,7 +13,7 @@ coverage: target: 0% # if a directory is ignored, there is no way to un-ignore files like pkg/models/helpers.go -# so we make a full list, manually updated - but it could be generated right before running codecov +# so we make a full list ignore: - "./pkg/modelscapi/success_response.go" - "./pkg/modelscapi/get_decisions_stream_response_deleted.go" @@ -41,17 +44,26 @@ ignore: - "./pkg/modelscapi/enroll_request.go" - "./pkg/modelscapi/register_request.go" - "./pkg/modelscapi/add_signals_request_item_source.go" + - "./pkg/models/success_response.go" + - "./pkg/models/hub_items.go" - "./pkg/models/alert.go" - "./pkg/models/metrics_bouncer_info.go" - "./pkg/models/add_signals_request_item.go" + - "./pkg/models/metrics_meta.go" + - "./pkg/models/metrics_detail_item.go" - "./pkg/models/add_signals_request_item_decisions_item.go" + - "./pkg/models/hub_item.go" - "./pkg/models/get_alerts_response.go" + - "./pkg/models/metrics_labels.go" - "./pkg/models/watcher_auth_request.go" - "./pkg/models/add_alerts_request.go" - "./pkg/models/event.go" - 
"./pkg/models/decisions_delete_request_item.go" - "./pkg/models/meta.go" + - "./pkg/models/detailed_metrics.go" - "./pkg/models/delete_alerts_response.go" + - "./pkg/models/remediation_components_metrics.go" + - "./pkg/models/console_options.go" - "./pkg/models/topx_response.go" - "./pkg/models/add_signals_request.go" - "./pkg/models/delete_decision_response.go" @@ -60,24 +72,34 @@ ignore: - "./pkg/models/source.go" - "./pkg/models/decisions_stream_response.go" - "./pkg/models/error_response.go" + - "./pkg/models/all_metrics.go" + - "./pkg/models/o_sversion.go" - "./pkg/models/decision.go" - "./pkg/models/decisions_delete_request.go" - "./pkg/models/flush_decision_response.go" - "./pkg/models/watcher_auth_response.go" + - "./pkg/models/lapi_metrics.go" - "./pkg/models/watcher_registration_request.go" - "./pkg/models/metrics_agent_info.go" + - "./pkg/models/log_processors_metrics.go" - "./pkg/models/add_signals_request_item_source.go" + - "./pkg/models/base_metrics.go" - "./pkg/models/add_alerts_response.go" - "./pkg/models/metrics.go" - "./pkg/protobufs/notifier.pb.go" + - "./pkg/protobufs/notifier_grpc.pb.go" + - "./pkg/database/ent/metric_update.go" - "./pkg/database/ent/machine_delete.go" - "./pkg/database/ent/decision_query.go" - "./pkg/database/ent/meta_query.go" + - "./pkg/database/ent/metric/where.go" + - "./pkg/database/ent/metric/metric.go" - "./pkg/database/ent/machine_create.go" - "./pkg/database/ent/alert.go" - "./pkg/database/ent/event_update.go" - "./pkg/database/ent/alert_create.go" - "./pkg/database/ent/alert_query.go" + - "./pkg/database/ent/metric_delete.go" - "./pkg/database/ent/lock_create.go" - "./pkg/database/ent/bouncer_update.go" - "./pkg/database/ent/meta_update.go" @@ -92,6 +114,7 @@ ignore: - "./pkg/database/ent/migrate/migrate.go" - "./pkg/database/ent/migrate/schema.go" - "./pkg/database/ent/configitem.go" + - "./pkg/database/ent/metric_query.go" - "./pkg/database/ent/event.go" - "./pkg/database/ent/event_query.go" - 
"./pkg/database/ent/lock_update.go" @@ -111,6 +134,7 @@ ignore: - "./pkg/database/ent/bouncer/bouncer.go" - "./pkg/database/ent/bouncer/where.go" - "./pkg/database/ent/hook/hook.go" + - "./pkg/database/ent/metric.go" - "./pkg/database/ent/configitem_create.go" - "./pkg/database/ent/configitem_delete.go" - "./pkg/database/ent/tx.go" @@ -120,6 +144,7 @@ ignore: - "./pkg/database/ent/machine/where.go" - "./pkg/database/ent/machine/machine.go" - "./pkg/database/ent/event_create.go" + - "./pkg/database/ent/metric_create.go" - "./pkg/database/ent/decision/where.go" - "./pkg/database/ent/decision/decision.go" - "./pkg/database/ent/enttest/enttest.go" diff --git a/.github/generate-codecov-yml.sh b/.github/generate-codecov-yml.sh new file mode 100755 index 00000000000..ddb60d0ce80 --- /dev/null +++ b/.github/generate-codecov-yml.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Run this from the repository root: +# +# .github/generate-codecov-yml.sh >> .github/codecov.yml + +cat <> .github/codecov.yml + - name: "Run tests" run: ./test/run-tests ./test/bats --formatter $(pwd)/test/lib/color-formatter diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 03cdb4bd871..07e29071e05 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.23" - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 42b52490ea8..4128cb435f9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.23" cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. 
diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 228a0829984..918f3bcaf1d 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -53,23 +53,12 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.x" - - - name: "Install pipenv" - run: | - cd docker/test - python -m pip install --upgrade pipenv wheel - - - name: "Cache virtualenvs" - id: cache-pipenv - uses: actions/cache@v4 - with: - path: ~/.local/share/virtualenvs - key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} + cache: 'pipenv' - name: "Install dependencies" - if: steps.cache-pipenv.outputs.cache-hit != 'true' run: | cd docker/test + python -m pip install --upgrade pipenv wheel pipenv install --deploy - name: "Create Docker network" diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 5a463bab99c..2966b999a4a 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,12 +34,16 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.23" - name: Build run: | make build BUILD_RE2_WASM=1 + - name: Generate codecov configuration + run: | + .github/generate-codecov-yml.sh >> .github/codecov.yml + - name: Run tests run: | go install github.com/kyoh86/richgo@v0.3.10 @@ -57,6 +61,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.59 + version: v1.61 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 58b8dc61a0d..3f4aa67e139 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -126,13 +126,40 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.23" + + - name: Run "go generate" and check for changes + run: | + set -e + # ensure the version of 'protoc' matches the one 
that generated the files + PROTOBUF_VERSION="21.12" + # don't pollute the repo + pushd $HOME + curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + unzip protoc-${PROTOBUF_VERSION}-linux-x86_64.zip -d $HOME/.protoc + popd + export PATH="$HOME/.protoc/bin:$PATH" + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 + go generate ./... + protoc --version + if [[ $(git status --porcelain) ]]; then + echo "Error: Uncommitted changes found after running 'make generate'. Please commit all generated code." + git diff + exit 1 + else + echo "No changes detected after running 'make generate'." + fi - name: Create localstack streams run: | aws --endpoint-url=http://127.0.0.1:4566 --region us-east-1 kinesis create-stream --stream-name stream-1-shard --shard-count 1 aws --endpoint-url=http://127.0.0.1:4566 --region us-east-1 kinesis create-stream --stream-name stream-2-shards --shard-count 2 + - name: Generate codecov configuration + run: | + .github/generate-codecov-yml.sh >> .github/codecov.yml + - name: Build and run tests, static run: | sudo apt -qq -y -o=Dpkg::Use-Pty=0 install build-essential libre2-dev @@ -142,6 +169,11 @@ jobs: make build BUILD_STATIC=1 make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + # check if some component stubs are missing + - name: "Build profile: minimal" + run: | + make build BUILD_PROFILE=minimal + - name: Run tests again, dynamic run: | make clean build @@ -158,6 +190,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.59 + version: v1.61 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 2f809a29a9b..6a41c3fba53 100644 --- a/.github/workflows/publish-tarball-release.yml +++ 
b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.22.6" + go-version: "1.23" - name: Build the binaries run: | diff --git a/.golangci.yml b/.golangci.yml index fb1dab623c1..acde901dbe6 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -20,14 +20,14 @@ linters-settings: maintidx: # raise this after refactoring - under: 16 + under: 15 misspell: locale: US nestif: # lower this after refactoring - min-complexity: 24 + min-complexity: 16 nlreturn: block-size: 5 @@ -103,7 +103,7 @@ linters-settings: disabled: true - name: cyclomatic # lower this after refactoring - arguments: [42] + arguments: [39] - name: defer disabled: true - name: empty-block @@ -118,7 +118,7 @@ linters-settings: arguments: [6] - name: function-length # lower this after refactoring - arguments: [110, 235] + arguments: [110, 237] - name: get-return disabled: true - name: increment-decrement @@ -135,14 +135,10 @@ linters-settings: arguments: [7] - name: max-public-structs disabled: true - - name: optimize-operands-order - disabled: true - name: nested-structs disabled: true - name: package-comments disabled: true - - name: struct-tag - disabled: true - name: redundant-import-alias disabled: true - name: time-equal @@ -178,6 +174,37 @@ linters-settings: # Allow blocks to end with comments allow-trailing-comment: true + gocritic: + enable-all: true + disabled-checks: + - typeDefFirst + - paramTypeCombine + - httpNoBody + - ifElseChain + - importShadow + - hugeParam + - rangeValCopy + - commentedOutCode + - commentedOutImport + - unnamedResult + - sloppyReassign + - appendCombine + - captLocal + - typeUnparen + - commentFormatting + - deferInLoop # + - sprintfQuotedString # + - whyNoLint + - equalFold # + - unnecessaryBlock # + - ptrToRefParam # + - stringXbytes # + - appendAssign # + - tooManyResultsChecker + - unnecessaryDefer + - docStub + - preferFprint + linters: enable-all: true disable: @@ -185,6 +212,8 @@ 
linters: # DEPRECATED by golangi-lint # - execinquery + - exportloopref + - gomnd # # Redundant @@ -196,75 +225,9 @@ linters: - funlen # revive - gocognit # revive - # - # Disabled until fixed for go 1.22 - # - - - copyloopvar # copyloopvar is a linter detects places where loop variables are copied - - intrange # intrange is a linter to find places where for loops could make use of an integer range. + # Disabled atm - # - # Enabled - # - - # - asasalint # check for pass []any as any in variadic func(...any) - # - asciicheck # checks that all code identifiers does not have non-ASCII symbols in the name - # - bidichk # Checks for dangerous unicode character sequences - # - bodyclose # checks whether HTTP response body is closed successfully - # - decorder # check declaration order and count of types, constants, variables and functions - # - depguard # Go linter that checks if package imports are in a list of acceptable packages - # - dupword # checks for duplicate words in the source code - # - durationcheck # check for two durations multiplied together - # - errcheck # errcheck is a program for checking for unchecked errors in Go code. These unchecked errors can be critical bugs in some cases - # - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - # - exportloopref # checks for pointers to enclosing loop variables - # - ginkgolinter # enforces standards of using ginkgo and gomega - # - gocheckcompilerdirectives # Checks that go compiler directive comments (//go:) are valid. - # - gochecknoinits # Checks that no init functions are present in Go code - # - gochecksumtype # Run exhaustiveness checks on Go "sum types" - # - gocritic # Provides diagnostics that check for bugs, performance and style issues. - # - goheader # Checks is file header matches to pattern - # - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. 
- # - gomodguard # Allow and block list linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations. - # - goprintffuncname # Checks that printf-like functions are named with `f` at the end - # - gosimple # (megacheck): Linter for Go source code that specializes in simplifying code - # - gosmopolitan # Report certain i18n/l10n anti-patterns in your Go codebase - # - govet # (vet, vetshadow): Vet examines Go source code and reports suspicious constructs. It is roughly the same as 'go vet' and uses its passes. - # - grouper # Analyze expression groups. - # - importas # Enforces consistent import aliases - # - ineffassign # Detects when assignments to existing variables are not used - # - interfacebloat # A linter that checks the number of methods inside an interface. - # - loggercheck # (logrlint): Checks key value pairs for common logger libraries (kitlog,klog,logr,zap). - # - logrlint # Check logr arguments. - # - maintidx # maintidx measures the maintainability index of each function. - # - makezero # Finds slice declarations with non-zero initial length - # - mirror # reports wrong mirror patterns of bytes/strings usage - # - misspell # Finds commonly misspelled English words - # - nakedret # Checks that functions with naked returns are not longer than a maximum size (can be zero). - # - nestif # Reports deeply nested if statements - # - nilerr # Finds the code that returns nil even if it checks that the error is not nil. - # - nolintlint # Reports ill-formed or insufficient nolint directives - # - nonamedreturns # Reports all named returns - # - nosprintfhostport # Checks for misuse of Sprintf to construct a host with port in a URL. - # - perfsprint # Checks that fmt.Sprintf can be replaced with a faster alternative. 
- # - predeclared # find code that shadows one of Go's predeclared identifiers - # - reassign # Checks that package variables are not reassigned - # - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. - # - rowserrcheck # checks whether Rows.Err of rows is checked successfully - # - sloglint # ensure consistent code style when using log/slog - # - spancheck # Checks for mistakes with OpenTelemetry/Census spans. - # - sqlclosecheck # Checks that sql.Rows, sql.Stmt, sqlx.NamedStmt, pgx.Query are closed. - # - staticcheck # (megacheck): It's a set of rules from staticcheck. It's not the same thing as the staticcheck binary. The author of staticcheck doesn't support or approve the use of staticcheck as a library inside golangci-lint. - # - stylecheck # Stylecheck is a replacement for golint - # - tenv # tenv is analyzer that detects using os.Setenv instead of t.Setenv since Go1.17 - # - testableexamples # linter checks if examples are testable (have an expected output) - # - testifylint # Checks usage of github.com/stretchr/testify. - # - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes - # - unconvert # Remove unnecessary type conversions - # - unused # (megacheck): Checks Go code for unused constants, variables, functions and types - # - usestdlibvars # A linter that detect the possibility to use variables/constants from the Go standard library. - # - wastedassign # Finds wasted assignment statements - # - zerologlint # Detects the wrong usage of `zerolog` that a user forgets to dispatch with `Send` or `Msg` + - intrange # intrange is a linter to find places where for loops could make use of an integer range. # # Recommended? 
(easy) @@ -291,9 +254,7 @@ linters: # - containedctx # containedctx is a linter that detects struct contained context.Context field - - contextcheck # check whether the function uses a non-inherited context - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - - gomnd # An analyzer to detect magic numbers. - ireturn # Accept Interfaces, Return Concrete Types - mnd # An analyzer to detect magic numbers. - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. @@ -359,28 +320,12 @@ issues: # `err` is often shadowed, we may continue to do it - linters: - govet - text: "shadow: declaration of \"err\" shadows declaration" + text: "shadow: declaration of \"(err|ctx)\" shadows declaration" - linters: - errcheck text: "Error return value of `.*` is not checked" - - linters: - - gocritic - text: "ifElseChain: rewrite if-else to switch statement" - - - linters: - - gocritic - text: "captLocal: `.*' should not be capitalized" - - - linters: - - gocritic - text: "appendAssign: append result not assigned to the same slice" - - - linters: - - gocritic - text: "commentFormatting: put a space between `//` and comment text" - # Will fix, trivial - just beware of merge conflicts - linters: @@ -403,10 +348,6 @@ issues: - errorlint text: "type switch on error will fail on wrapped errors. Use errors.As to check for specific errors" - - linters: - - errorlint - text: "comparing with .* will fail on wrapped errors. 
Use errors.Is to check for a specific error" - - linters: - nosprintfhostport text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf" @@ -474,25 +415,26 @@ issues: path: "pkg/(.+)_test.go" text: "deep-exit: .*" - # tolerate deep exit in cobra's OnInitialize, for now + # we use t,ctx instead of ctx,t in tests - linters: - revive - path: "cmd/crowdsec-cli/main.go" - text: "deep-exit: .*" + path: "pkg/(.+)_test.go" + text: "context-as-argument: context.Context should be the first parameter of a function" + # tolerate deep exit in cobra's OnInitialize, for now - linters: - revive - path: "cmd/crowdsec-cli/item_metrics.go" + path: "cmd/crowdsec-cli/main.go" text: "deep-exit: .*" - linters: - revive - path: "cmd/crowdsec-cli/machines.go" + path: "cmd/crowdsec-cli/clihub/item_metrics.go" text: "deep-exit: .*" - linters: - revive - path: "cmd/crowdsec-cli/utils.go" + path: "cmd/crowdsec-cli/idgen/password.go" text: "deep-exit: .*" - linters: diff --git a/Dockerfile b/Dockerfile index 731e08fb1a6..880df88dc02 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.6-alpine3.20 AS build +FROM golang:1.23-alpine3.20 AS build ARG BUILD_VERSION @@ -16,7 +16,7 @@ RUN apk add --no-cache git g++ gcc libc-dev make bash gettext binutils-gold core cd re2-${RE2_VERSION} && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.43.1 + go install github.com/mikefarah/yq/v4@v4.44.3 COPY . . 
diff --git a/Dockerfile.debian b/Dockerfile.debian index ec961a4a1ec..5d47f167e99 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.22.6-bookworm AS build +FROM golang:1.23-bookworm AS build ARG BUILD_VERSION @@ -21,7 +21,7 @@ RUN apt-get update && \ make && \ make install && \ echo "githubciXXXXXXXXXXXXXXXXXXXXXXXX" > /etc/machine-id && \ - go install github.com/mikefarah/yq/v4@v4.43.1 + go install github.com/mikefarah/yq/v4@v4.44.3 COPY . . diff --git a/Makefile b/Makefile index 207b5d610f0..f8ae66e1cb6 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ BUILD_RE2_WASM ?= 0 # for your distribution (look for libre2.a). See the Dockerfile for an example of how to build it. BUILD_STATIC ?= 0 -# List of plugins to build +# List of notification plugins to build PLUGINS ?= $(patsubst ./cmd/notification-%,%,$(wildcard ./cmd/notification-*)) #-------------------------------------- @@ -80,9 +80,17 @@ endif #expr_debug tag is required to enable the debug mode in expr GO_TAGS := netgo,osusergo,sqlite_omit_load_extension,expr_debug +# Allow building on ubuntu 24.10, see https://github.com/golang/go/issues/70023 +export CGO_LDFLAGS_ALLOW=-Wl,--(push|pop)-state.* + # this will be used by Go in the make target, some distributions require it export PKG_CONFIG_PATH:=/usr/local/lib/pkgconfig:$(PKG_CONFIG_PATH) +#-------------------------------------- +# +# Choose the re2 backend. +# + ifeq ($(call bool,$(BUILD_RE2_WASM)),0) ifeq ($(PKG_CONFIG),) $(error "pkg-config is not available. Please install pkg-config.") @@ -90,14 +98,88 @@ endif ifeq ($(RE2_CHECK),) RE2_FAIL := "libre2-dev is not installed, please install it or set BUILD_RE2_WASM=1 to use the WebAssembly version" +# if you prefer to build WASM instead of a critical error, comment out RE2_FAIL and uncomment RE2_MSG. +# RE2_MSG := Fallback to WebAssembly regexp library. To use the C++ version, make sure you have installed libre2-dev and pkg-config. 
else # += adds a space that we don't want GO_TAGS := $(GO_TAGS),re2_cgo LD_OPTS_VARS += -X '$(GO_MODULE_NAME)/pkg/cwversion.Libre2=C++' +RE2_MSG := Using C++ regexp library +endif +else +RE2_MSG := Using WebAssembly regexp library +endif + +ifeq ($(call bool,$(BUILD_RE2_WASM)),1) +else +ifneq (,$(RE2_CHECK)) endif endif -# Build static to avoid the runtime dependency on libre2.so +#-------------------------------------- +# +# Handle optional components and build profiles, to save space on the final binaries. +# +# Keep it safe for now until we decide how to expand on the idea. Either choose a profile or exclude components manually. +# For example if we want to disable some component by default, or have opt-in components (INCLUDE?). + +ifeq ($(and $(BUILD_PROFILE),$(EXCLUDE)),1) +$(error "Cannot specify both BUILD_PROFILE and EXCLUDE") +endif + +COMPONENTS := \ + datasource_appsec \ + datasource_cloudwatch \ + datasource_docker \ + datasource_file \ + datasource_http \ + datasource_k8saudit \ + datasource_kafka \ + datasource_journalctl \ + datasource_kinesis \ + datasource_loki \ + datasource_s3 \ + datasource_syslog \ + datasource_wineventlog \ + cscli_setup + +comma := , +space := $(empty) $(empty) + +# Predefined profiles + +# keep only datasource-file +EXCLUDE_MINIMAL := $(subst $(space),$(comma),$(filter-out datasource_file,,$(COMPONENTS))) + +# example +# EXCLUDE_MEDIUM := datasource_kafka,datasource_kinesis,datasource_s3 + +BUILD_PROFILE ?= default + +# Set the EXCLUDE_LIST based on the chosen profile, unless EXCLUDE is already set +ifeq ($(BUILD_PROFILE),minimal) +EXCLUDE ?= $(EXCLUDE_MINIMAL) +else ifneq ($(BUILD_PROFILE),default) +$(error Invalid build profile specified: $(BUILD_PROFILE). 
Valid profiles are: minimal, default) +endif + +# Create list of excluded components from the EXCLUDE variable +EXCLUDE_LIST := $(subst $(comma),$(space),$(EXCLUDE)) + +INVALID_COMPONENTS := $(filter-out $(COMPONENTS),$(EXCLUDE_LIST)) +ifneq ($(INVALID_COMPONENTS),) +$(error Invalid optional components specified in EXCLUDE: $(INVALID_COMPONENTS). Valid components are: $(COMPONENTS)) +endif + +# Convert the excluded components to "no_" form +COMPONENT_TAGS := $(foreach component,$(EXCLUDE_LIST),no_$(component)) + +ifneq ($(COMPONENT_TAGS),) +GO_TAGS := $(GO_TAGS),$(subst $(space),$(comma),$(COMPONENT_TAGS)) +endif + +#-------------------------------------- + ifeq ($(call bool,$(BUILD_STATIC)),1) BUILD_TYPE = static EXTLDFLAGS := -extldflags '-static' @@ -111,7 +193,7 @@ ifeq ($(call bool,$(DEBUG)),1) STRIP_SYMBOLS := DISABLE_OPTIMIZATION := -gcflags "-N -l" else -STRIP_SYMBOLS := -s -w +STRIP_SYMBOLS := -s DISABLE_OPTIMIZATION := endif @@ -130,16 +212,13 @@ build: build-info crowdsec cscli plugins ## Build crowdsec, cscli and plugins .PHONY: build-info build-info: ## Print build information $(info Building $(BUILD_VERSION) ($(BUILD_TAG)) $(BUILD_TYPE) for $(GOOS)/$(GOARCH)) + $(info Excluded components: $(if $(EXCLUDE_LIST),$(EXCLUDE_LIST),none)) ifneq (,$(RE2_FAIL)) $(error $(RE2_FAIL)) endif -ifneq (,$(RE2_CHECK)) - $(info Using C++ regexp library) -else - $(info Fallback to WebAssembly regexp library. To use the C++ version, make sure you have installed libre2-dev and pkg-config.) 
-endif + $(info $(RE2_MSG)) ifeq ($(call bool,$(DEBUG)),1) $(info Building with debug symbols and disabled optimizations) @@ -199,11 +278,6 @@ cscli: ## Build cscli crowdsec: ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) -.PHONY: generate -generate: ## Generate code for the database and APIs - $(GO) generate ./pkg/database/ent - $(GO) generate ./pkg/models - .PHONY: testclean testclean: bats-clean ## Remove test artifacts @$(RM) pkg/apiserver/ent $(WIN_IGNORE_ERR) diff --git a/README.md b/README.md index a900f0ee514..1e57d4e91c4 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ The architecture is as follows : CrowdSec

-Once an unwanted behavior is detected, deal with it through a [bouncer](https://hub.crowdsec.net/browse/#bouncers). The aggressive IP, scenario triggered and timestamp are sent for curation, to avoid poisoning & false positives. (This can be disabled). If verified, this IP is then redistributed to all CrowdSec users running the same scenario. +Once an unwanted behavior is detected, deal with it through a [bouncer](https://app.crowdsec.net/hub/remediation-components). The aggressive IP, scenario triggered and timestamp are sent for curation, to avoid poisoning & false positives. (This can be disabled). If verified, this IP is then redistributed to all CrowdSec users running the same scenario. ## Outnumbering hackers all together diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 0ceb9e5cffc..bcf327bdf38 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.22.6' + version: '1.23.3' - pwsh: | choco install -y make diff --git a/cmd/crowdsec-cli/ask/ask.go b/cmd/crowdsec-cli/ask/ask.go new file mode 100644 index 00000000000..484ccb30c8a --- /dev/null +++ b/cmd/crowdsec-cli/ask/ask.go @@ -0,0 +1,20 @@ +package ask + +import ( + "github.com/AlecAivazis/survey/v2" +) + +func YesNo(message string, defaultAnswer bool) (bool, error) { + var answer bool + + prompt := &survey.Confirm{ + Message: message, + Default: defaultAnswer, + } + + if err := survey.AskOne(prompt, &answer); err != nil { + return defaultAnswer, err + } + + return answer, nil +} diff --git a/cmd/crowdsec-cli/bouncers.go b/cmd/crowdsec-cli/bouncers.go deleted file mode 100644 index d3edcea0db9..00000000000 --- a/cmd/crowdsec-cli/bouncers.go +++ /dev/null @@ -1,537 +0,0 @@ -package main - -import ( - "encoding/csv" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "slices" - "strings" - "time" - - "github.com/AlecAivazis/survey/v2" - "github.com/fatih/color" - "github.com/jedib0t/go-pretty/v6/table" - 
log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" - "github.com/crowdsecurity/crowdsec/pkg/database" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" - "github.com/crowdsecurity/crowdsec/pkg/emoji" - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -type featureflagProvider interface { - GetFeatureflags() string -} - -type osProvider interface { - GetOsname() string - GetOsversion() string -} - -func getOSNameAndVersion(o osProvider) string { - ret := o.GetOsname() - if o.GetOsversion() != "" { - if ret != "" { - ret += "/" - } - - ret += o.GetOsversion() - } - - if ret == "" { - return "?" - } - - return ret -} - -func getFeatureFlagList(o featureflagProvider) []string { - if o.GetFeatureflags() == "" { - return nil - } - - return strings.Split(o.GetFeatureflags(), ",") -} - -func askYesNo(message string, defaultAnswer bool) (bool, error) { - var answer bool - - prompt := &survey.Confirm{ - Message: message, - Default: defaultAnswer, - } - - if err := survey.AskOne(prompt, &answer); err != nil { - return defaultAnswer, err - } - - return answer, nil -} - -type cliBouncers struct { - db *database.Client - cfg configGetter -} - -func NewCLIBouncers(cfg configGetter) *cliBouncers { - return &cliBouncers{ - cfg: cfg, - } -} - -func (cli *cliBouncers) NewCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "bouncers [action]", - Short: "Manage bouncers [requires local API]", - Long: `To list/add/delete/prune bouncers. -Note: This command requires database direct access, so is intended to be run on Local API/master. 
-`, - Args: cobra.MinimumNArgs(1), - Aliases: []string{"bouncer"}, - DisableAutoGenTag: true, - PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - var err error - - cfg := cli.cfg() - - if err = require.LAPI(cfg); err != nil { - return err - } - - cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) - if err != nil { - return err - } - - return nil - }, - } - - cmd.AddCommand(cli.newListCmd()) - cmd.AddCommand(cli.newAddCmd()) - cmd.AddCommand(cli.newDeleteCmd()) - cmd.AddCommand(cli.newPruneCmd()) - cmd.AddCommand(cli.newInspectCmd()) - - return cmd -} - -func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { - t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer - t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) - - for _, b := range bouncers { - revoked := emoji.CheckMark - if b.Revoked { - revoked = emoji.Prohibited - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) - } - - io.WriteString(out, t.Render() + "\n") -} - -// bouncerInfo contains only the data we want for inspect/list -type bouncerInfo struct { - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - Name string `json:"name"` - Revoked bool `json:"revoked"` - IPAddress string `json:"ip_address"` - Type string `json:"type"` - Version string `json:"version"` - LastPull *time.Time `json:"last_pull"` - AuthType string `json:"auth_type"` - OS string `json:"os,omitempty"` - Featureflags []string `json:"featureflags,omitempty"` -} - -func newBouncerInfo(b *ent.Bouncer) bouncerInfo { - return bouncerInfo{ - CreatedAt: b.CreatedAt, - UpdatedAt: b.UpdatedAt, - Name: b.Name, - Revoked: b.Revoked, - IPAddress: b.IPAddress, - Type: b.Type, - Version: b.Version, - LastPull: b.LastPull, - AuthType: b.AuthType, - OS: getOSNameAndVersion(b), - 
Featureflags: getFeatureFlagList(b), - } -} - -func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { - csvwriter := csv.NewWriter(out) - - if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { - return fmt.Errorf("failed to write raw header: %w", err) - } - - for _, b := range bouncers { - valid := "validated" - if b.Revoked { - valid = "pending" - } - - lastPull := "" - if b.LastPull != nil { - lastPull = b.LastPull.Format(time.RFC3339) - } - - if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { - return fmt.Errorf("failed to write raw: %w", err) - } - } - - csvwriter.Flush() - - return nil -} - -func (cli *cliBouncers) list(out io.Writer) error { - bouncers, err := cli.db.ListBouncers() - if err != nil { - return fmt.Errorf("unable to list bouncers: %w", err) - } - - switch cli.cfg().Cscli.Output { - case "human": - cli.listHuman(out, bouncers) - case "json": - info := make([]bouncerInfo, 0, len(bouncers)) - for _, b := range bouncers { - info = append(info, newBouncerInfo(b)) - } - - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(info); err != nil { - return errors.New("failed to marshal") - } - - return nil - case "raw": - return cli.listCSV(out, bouncers) - } - - return nil -} - -func (cli *cliBouncers) newListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "list all bouncers within the database", - Example: `cscli bouncers list`, - Args: cobra.ExactArgs(0), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(color.Output) - }, - } - - return cmd -} - -func (cli *cliBouncers) add(bouncerName string, key string) error { - var err error - - keyLength := 32 - - if key == "" { - key, err = middlewares.GenerateAPIKey(keyLength) - if err != nil { - return fmt.Errorf("unable to generate api key: %w", err) - } - } - 
- _, err = cli.db.CreateBouncer(bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType) - if err != nil { - return fmt.Errorf("unable to create bouncer: %w", err) - } - - switch cli.cfg().Cscli.Output { - case "human": - fmt.Printf("API key for '%s':\n\n", bouncerName) - fmt.Printf(" %s\n\n", key) - fmt.Print("Please keep this key since you will not be able to retrieve it!\n") - case "raw": - fmt.Print(key) - case "json": - j, err := json.Marshal(key) - if err != nil { - return errors.New("unable to marshal api key") - } - - fmt.Print(string(j)) - } - - return nil -} - -func (cli *cliBouncers) newAddCmd() *cobra.Command { - var key string - - cmd := &cobra.Command{ - Use: "add MyBouncerName", - Short: "add a single bouncer to the database", - Example: `cscli bouncers add MyBouncerName -cscli bouncers add MyBouncerName --key `, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.add(args[0], key) - }, - } - - flags := cmd.Flags() - flags.StringP("length", "l", "", "length of the api key") - _ = flags.MarkDeprecated("length", "use --key instead") - flags.StringVarP(&key, "key", "k", "", "api key for the bouncer") - - return cmd -} - -// validBouncerID returns a list of bouncer IDs for command completion -func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - var err error - - cfg := cli.cfg() - - // need to load config and db because PersistentPreRunE is not called for completions - - if err = require.LAPI(cfg); err != nil { - cobra.CompError("unable to list bouncers " + err.Error()) - return nil, cobra.ShellCompDirectiveNoFileComp - } - - cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) - if err != nil { - cobra.CompError("unable to list bouncers " + err.Error()) - return nil, cobra.ShellCompDirectiveNoFileComp - } - - bouncers, err := cli.db.ListBouncers() - if err != nil { - 
cobra.CompError("unable to list bouncers " + err.Error()) - return nil, cobra.ShellCompDirectiveNoFileComp - } - - ret := []string{} - - for _, bouncer := range bouncers { - if strings.Contains(bouncer.Name, toComplete) && !slices.Contains(args, bouncer.Name) { - ret = append(ret, bouncer.Name) - } - } - - return ret, cobra.ShellCompDirectiveNoFileComp -} - -func (cli *cliBouncers) delete(bouncers []string, ignoreMissing bool) error { - for _, bouncerID := range bouncers { - if err := cli.db.DeleteBouncer(bouncerID); err != nil { - var notFoundErr *database.BouncerNotFoundError - if ignoreMissing && errors.As(err, ¬FoundErr) { - return nil - } - - return fmt.Errorf("unable to delete bouncer: %w", err) - } - - log.Infof("bouncer '%s' deleted successfully", bouncerID) - } - - return nil -} - -func (cli *cliBouncers) newDeleteCmd() *cobra.Command { - var ignoreMissing bool - - cmd := &cobra.Command{ - Use: "delete MyBouncerName", - Short: "delete bouncer(s) from the database", - Example: `cscli bouncers delete "bouncer1" "bouncer2"`, - Args: cobra.MinimumNArgs(1), - Aliases: []string{"remove"}, - DisableAutoGenTag: true, - ValidArgsFunction: cli.validBouncerID, - RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args, ignoreMissing) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more bouncers don't exist") - - return cmd -} - -func (cli *cliBouncers) prune(duration time.Duration, force bool) error { - if duration < 2*time.Minute { - if yes, err := askYesNo( - "The duration you provided is less than 2 minutes. "+ - "This may remove active bouncers. Continue?", false); err != nil { - return err - } else if !yes { - fmt.Println("User aborted prune. 
No changes were made.") - return nil - } - } - - bouncers, err := cli.db.QueryBouncersInactiveSince(time.Now().UTC().Add(-duration)) - if err != nil { - return fmt.Errorf("unable to query bouncers: %w", err) - } - - if len(bouncers) == 0 { - fmt.Println("No bouncers to prune.") - return nil - } - - cli.listHuman(color.Output, bouncers) - - if !force { - if yes, err := askYesNo( - "You are about to PERMANENTLY remove the above bouncers from the database. "+ - "These will NOT be recoverable. Continue?", false); err != nil { - return err - } else if !yes { - fmt.Println("User aborted prune. No changes were made.") - return nil - } - } - - deleted, err := cli.db.BulkDeleteBouncers(bouncers) - if err != nil { - return fmt.Errorf("unable to prune bouncers: %w", err) - } - - fmt.Fprintf(os.Stderr, "Successfully deleted %d bouncers\n", deleted) - - return nil -} - -func (cli *cliBouncers) newPruneCmd() *cobra.Command { - var ( - duration time.Duration - force bool - ) - - const defaultDuration = 60 * time.Minute - - cmd := &cobra.Command{ - Use: "prune", - Short: "prune multiple bouncers from the database", - Args: cobra.NoArgs, - DisableAutoGenTag: true, - Example: `cscli bouncers prune -d 45m -cscli bouncers prune -d 45m --force`, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.prune(duration, force) - }, - } - - flags := cmd.Flags() - flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since last pull") - flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") - - return cmd -} - -func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { - t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer - - t.SetTitle("Bouncer: " + bouncer.Name) - - t.SetColumnConfigs([]table.ColumnConfig{ - {Number: 1, AutoMerge: true}, - }) - - lastPull := "" - if bouncer.LastPull != nil { - lastPull = bouncer.LastPull.String() - } - - t.AppendRows([]table.Row{ - {"Created At", bouncer.CreatedAt}, - 
{"Last Update", bouncer.UpdatedAt}, - {"Revoked?", bouncer.Revoked}, - {"IP Address", bouncer.IPAddress}, - {"Type", bouncer.Type}, - {"Version", bouncer.Version}, - {"Last Pull", lastPull}, - {"Auth type", bouncer.AuthType}, - {"OS", getOSNameAndVersion(bouncer)}, - }) - - for _, ff := range getFeatureFlagList(bouncer) { - t.AppendRow(table.Row{"Feature Flags", ff}) - } - - io.WriteString(out, t.Render() + "\n") -} - -func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { - out := color.Output - outputFormat := cli.cfg().Cscli.Output - - switch outputFormat { - case "human": - cli.inspectHuman(out, bouncer) - case "json": - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(newBouncerInfo(bouncer)); err != nil { - return errors.New("failed to marshal") - } - - return nil - default: - return fmt.Errorf("output format '%s' not supported for this command", outputFormat) - } - - return nil -} - -func (cli *cliBouncers) newInspectCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "inspect [bouncer_name]", - Short: "inspect a bouncer by name", - Example: `cscli bouncers inspect "bouncer1"`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: cli.validBouncerID, - RunE: func(cmd *cobra.Command, args []string) error { - bouncerName := args[0] - - b, err := cli.db.Ent.Bouncer.Query(). - Where(bouncer.Name(bouncerName)). 
- Only(cmd.Context()) - if err != nil { - return fmt.Errorf("unable to read bouncer data '%s': %w", bouncerName, err) - } - - return cli.inspect(b) - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go similarity index 77% rename from cmd/crowdsec-cli/alerts.go rename to cmd/crowdsec-cli/clialert/alerts.go index 37f9ab435c7..5907d4a0fa8 100644 --- a/cmd/crowdsec-cli/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -1,4 +1,4 @@ -package main +package clialert import ( "context" @@ -24,19 +24,19 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) -func DecisionsFromAlert(alert *models.Alert) string { +func decisionsFromAlert(alert *models.Alert) string { ret := "" decMap := make(map[string]int) for _, decision := range alert.Decisions { k := *decision.Type if *decision.Simulated { - k = fmt.Sprintf("(simul)%s", k) + k = "(simul)" + k } v := decMap[k] @@ -44,7 +44,7 @@ func DecisionsFromAlert(alert *models.Alert) string { } for _, key := range maptools.SortedKeys(decMap) { - if len(ret) > 0 { + if ret != "" { ret += " " } @@ -77,7 +77,7 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi *alertItem.Scenario, alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), - DecisionsFromAlert(alertItem), + decisionsFromAlert(alertItem), *alertItem.StartAt, } if printMachine { @@ -183,12 +183,14 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return nil } +type configGetter func() *csconfig.Config + type cliAlerts struct { client *apiclient.ApiClient cfg configGetter } -func NewCLIAlerts(getconfig configGetter) *cliAlerts { +func 
New(getconfig configGetter) *cliAlerts { return &cliAlerts{ cfg: getconfig, } @@ -214,7 +216,6 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -226,17 +227,19 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewInspectCmd()) - cmd.AddCommand(cli.NewFlushCmd()) - cmd.AddCommand(cli.NewDeleteCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newInspectCmd()) + cmd.AddCommand(cli.newFlushCmd()) + cmd.AddCommand(cli.newDeleteCmd()) return cmd } -func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { - if err := manageCliDecisionAlerts(alertListFilter.IPEquals, alertListFilter.RangeEquals, - alertListFilter.ScopeEquals, alertListFilter.ValueEquals); err != nil { +func (cli *cliAlerts) list(ctx context.Context, alertListFilter apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { + var err error + + *alertListFilter.ScopeEquals, err = SanitizeScope(*alertListFilter.ScopeEquals, *alertListFilter.IPEquals, *alertListFilter.RangeEquals) + if err != nil { return err } @@ -308,7 +311,7 @@ func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, alertListFilter.Contains = new(bool) } - alerts, _, err := cli.client.Alerts.List(context.Background(), alertListFilter) + alerts, _, err := cli.client.Alerts.List(ctx, alertListFilter) if err != nil { return fmt.Errorf("unable to list alerts: %w", err) } @@ -320,7 +323,7 @@ func (cli *cliAlerts) list(alertListFilter apiclient.AlertsListOpts, limit *int, return nil } -func (cli *cliAlerts) NewListCmd() *cobra.Command { +func (cli *cliAlerts) newListCmd() *cobra.Command { alertListFilter := 
apiclient.AlertsListOpts{ ScopeEquals: new(string), ValueEquals: new(string), @@ -351,7 +354,7 @@ cscli alerts list --type ban`, Long: `List alerts with optional filters`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.list(alertListFilter, limit, contained, printMachine) + return cli.list(cmd.Context(), alertListFilter, limit, contained, printMachine) }, } @@ -374,58 +377,58 @@ cscli alerts list --type ban`, return cmd } -func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, ActiveDecision *bool, AlertDeleteAll bool, delAlertByID string, contained *bool) error { +func (cli *cliAlerts) delete(ctx context.Context, delFilter apiclient.AlertsDeleteOpts, activeDecision *bool, deleteAll bool, delAlertByID string, contained *bool) error { var err error - if !AlertDeleteAll { - if err = manageCliDecisionAlerts(alertDeleteFilter.IPEquals, alertDeleteFilter.RangeEquals, - alertDeleteFilter.ScopeEquals, alertDeleteFilter.ValueEquals); err != nil { + if !deleteAll { + *delFilter.ScopeEquals, err = SanitizeScope(*delFilter.ScopeEquals, *delFilter.IPEquals, *delFilter.RangeEquals) + if err != nil { return err } - if ActiveDecision != nil { - alertDeleteFilter.ActiveDecisionEquals = ActiveDecision + if activeDecision != nil { + delFilter.ActiveDecisionEquals = activeDecision } - if *alertDeleteFilter.ScopeEquals == "" { - alertDeleteFilter.ScopeEquals = nil + if *delFilter.ScopeEquals == "" { + delFilter.ScopeEquals = nil } - if *alertDeleteFilter.ValueEquals == "" { - alertDeleteFilter.ValueEquals = nil + if *delFilter.ValueEquals == "" { + delFilter.ValueEquals = nil } - if *alertDeleteFilter.ScenarioEquals == "" { - alertDeleteFilter.ScenarioEquals = nil + if *delFilter.ScenarioEquals == "" { + delFilter.ScenarioEquals = nil } - if *alertDeleteFilter.IPEquals == "" { - alertDeleteFilter.IPEquals = nil + if *delFilter.IPEquals == "" { + delFilter.IPEquals = nil } - if *alertDeleteFilter.RangeEquals == "" { - 
alertDeleteFilter.RangeEquals = nil + if *delFilter.RangeEquals == "" { + delFilter.RangeEquals = nil } if contained != nil && *contained { - alertDeleteFilter.Contains = new(bool) + delFilter.Contains = new(bool) } limit := 0 - alertDeleteFilter.Limit = &limit + delFilter.Limit = &limit } else { limit := 0 - alertDeleteFilter = apiclient.AlertsDeleteOpts{Limit: &limit} + delFilter = apiclient.AlertsDeleteOpts{Limit: &limit} } var alerts *models.DeleteAlertsResponse if delAlertByID == "" { - alerts, _, err = cli.client.Alerts.Delete(context.Background(), alertDeleteFilter) + alerts, _, err = cli.client.Alerts.Delete(ctx, delFilter) if err != nil { return fmt.Errorf("unable to delete alerts: %w", err) } } else { - alerts, _, err = cli.client.Alerts.DeleteOne(context.Background(), delAlertByID) + alerts, _, err = cli.client.Alerts.DeleteOne(ctx, delAlertByID) if err != nil { return fmt.Errorf("unable to delete alert: %w", err) } @@ -436,14 +439,14 @@ func (cli *cliAlerts) delete(alertDeleteFilter apiclient.AlertsDeleteOpts, Activ return nil } -func (cli *cliAlerts) NewDeleteCmd() *cobra.Command { +func (cli *cliAlerts) newDeleteCmd() *cobra.Command { var ( - ActiveDecision *bool - AlertDeleteAll bool + activeDecision *bool + deleteAll bool delAlertByID string ) - alertDeleteFilter := apiclient.AlertsDeleteOpts{ + delFilter := apiclient.AlertsDeleteOpts{ ScopeEquals: new(string), ValueEquals: new(string), ScenarioEquals: new(string), @@ -462,14 +465,14 @@ cscli alerts delete --range 1.2.3.0/24 cscli alerts delete -s crowdsecurity/ssh-bf"`, DisableAutoGenTag: true, Aliases: []string{"remove"}, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, PreRunE: func(cmd *cobra.Command, _ []string) error { - if AlertDeleteAll { + if deleteAll { return nil } - if *alertDeleteFilter.ScopeEquals == "" && *alertDeleteFilter.ValueEquals == "" && - *alertDeleteFilter.ScenarioEquals == "" && *alertDeleteFilter.IPEquals == "" && - *alertDeleteFilter.RangeEquals == "" && delAlertByID == 
"" { + if *delFilter.ScopeEquals == "" && *delFilter.ValueEquals == "" && + *delFilter.ScenarioEquals == "" && *delFilter.IPEquals == "" && + *delFilter.RangeEquals == "" && delAlertByID == "" { _ = cmd.Usage() return errors.New("at least one filter or --all must be specified") } @@ -477,25 +480,25 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, return nil }, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.delete(alertDeleteFilter, ActiveDecision, AlertDeleteAll, delAlertByID, contained) + return cli.delete(cmd.Context(), delFilter, activeDecision, deleteAll, delAlertByID, contained) }, } flags := cmd.Flags() flags.SortFlags = false - flags.StringVar(alertDeleteFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") - flags.StringVarP(alertDeleteFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - flags.StringVarP(alertDeleteFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") - flags.StringVarP(alertDeleteFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - flags.StringVarP(alertDeleteFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVar(delFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") + flags.StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. 
crowdsecurity/ssh-bf)") + flags.StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") flags.StringVar(&delAlertByID, "id", "", "alert ID") - flags.BoolVarP(&AlertDeleteAll, "all", "a", false, "delete all alerts") + flags.BoolVarP(&deleteAll, "all", "a", false, "delete all alerts") flags.BoolVar(contained, "contained", false, "query decisions contained by range") return cmd } -func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { +func (cli *cliAlerts) inspect(ctx context.Context, details bool, alertIDs ...string) error { cfg := cli.cfg() for _, alertID := range alertIDs { @@ -504,7 +507,7 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { return fmt.Errorf("bad alert id %s", alertID) } - alert, _, err := cli.client.Alerts.GetByID(context.Background(), id) + alert, _, err := cli.client.Alerts.GetByID(ctx, id) if err != nil { return fmt.Errorf("can't find alert with id %s: %w", alertID, err) } @@ -518,14 +521,14 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { case "json": data, err := json.MarshalIndent(alert, "", " ") if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + return fmt.Errorf("unable to serialize alert with id %s: %w", alertID, err) } fmt.Printf("%s\n", string(data)) case "raw": data, err := yaml.Marshal(alert) if err != nil { - return fmt.Errorf("unable to marshal alert with id %s: %w", alertID, err) + return fmt.Errorf("unable to serialize alert with id %s: %w", alertID, err) } fmt.Println(string(data)) @@ -535,7 +538,7 @@ func (cli *cliAlerts) inspect(details bool, alertIDs ...string) error { return nil } -func (cli *cliAlerts) NewInspectCmd() *cobra.Command { +func (cli *cliAlerts) newInspectCmd() *cobra.Command { var details bool cmd := &cobra.Command{ @@ -548,7 +551,7 @@ func (cli 
*cliAlerts) NewInspectCmd() *cobra.Command { _ = cmd.Help() return errors.New("missing alert_id") } - return cli.inspect(details, args...) + return cli.inspect(cmd.Context(), details, args...) }, } @@ -558,7 +561,7 @@ func (cli *cliAlerts) NewInspectCmd() *cobra.Command { return cmd } -func (cli *cliAlerts) NewFlushCmd() *cobra.Command { +func (cli *cliAlerts) newFlushCmd() *cobra.Command { var ( maxItems int maxAge string @@ -572,15 +575,17 @@ func (cli *cliAlerts) NewFlushCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() + ctx := cmd.Context() + if err := require.LAPI(cfg); err != nil { return err } - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { return err } log.Info("Flushing alerts. !! This may take a long time !!") - err = db.FlushAlerts(maxAge, maxItems) + err = db.FlushAlerts(ctx, maxAge, maxItems) if err != nil { return fmt.Errorf("unable to flush alerts: %w", err) } diff --git a/cmd/crowdsec-cli/clialert/sanitize.go b/cmd/crowdsec-cli/clialert/sanitize.go new file mode 100644 index 00000000000..87b110649da --- /dev/null +++ b/cmd/crowdsec-cli/clialert/sanitize.go @@ -0,0 +1,26 @@ +package clialert + +import ( + "fmt" + "net" + + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// SanitizeScope validates ip and range and sets the scope accordingly to our case convention. 
+func SanitizeScope(scope, ip, ipRange string) (string, error) { + if ipRange != "" { + _, _, err := net.ParseCIDR(ipRange) + if err != nil { + return "", fmt.Errorf("%s is not a valid range", ipRange) + } + } + + if ip != "" { + if net.ParseIP(ip) == nil { + return "", fmt.Errorf("%s is not a valid ip", ip) + } + } + + return types.NormalizeScope(scope), nil +} diff --git a/cmd/crowdsec-cli/alerts_table.go b/cmd/crowdsec-cli/clialert/table.go similarity index 97% rename from cmd/crowdsec-cli/alerts_table.go rename to cmd/crowdsec-cli/clialert/table.go index 29383457ced..1416e1e435c 100644 --- a/cmd/crowdsec-cli/alerts_table.go +++ b/cmd/crowdsec-cli/clialert/table.go @@ -1,4 +1,4 @@ -package main +package clialert import ( "fmt" @@ -38,7 +38,7 @@ func alertsTable(out io.Writer, wantColor string, alerts *models.GetAlertsRespon *alertItem.Scenario, alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), - DecisionsFromAlert(alertItem), + decisionsFromAlert(alertItem), *alertItem.StartAt, } diff --git a/cmd/crowdsec-cli/clibouncer/add.go b/cmd/crowdsec-cli/clibouncer/add.go new file mode 100644 index 00000000000..7cc74e45fba --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/add.go @@ -0,0 +1,72 @@ +package clibouncer + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/spf13/cobra" + + middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func (cli *cliBouncers) add(ctx context.Context, bouncerName string, key string) error { + var err error + + keyLength := 32 + + if key == "" { + key, err = middlewares.GenerateAPIKey(keyLength) + if err != nil { + return fmt.Errorf("unable to generate api key: %w", err) + } + } + + _, err = cli.db.CreateBouncer(ctx, bouncerName, "", middlewares.HashSHA512(key), types.ApiKeyAuthType, false) + if err != nil { + return fmt.Errorf("unable to create bouncer: %w", err) + } + + switch cli.cfg().Cscli.Output { + case "human": + 
fmt.Printf("API key for '%s':\n\n", bouncerName) + fmt.Printf(" %s\n\n", key) + fmt.Print("Please keep this key since you will not be able to retrieve it!\n") + case "raw": + fmt.Print(key) + case "json": + j, err := json.Marshal(key) + if err != nil { + return errors.New("unable to serialize api key") + } + + fmt.Print(string(j)) + } + + return nil +} + +func (cli *cliBouncers) newAddCmd() *cobra.Command { + var key string + + cmd := &cobra.Command{ + Use: "add MyBouncerName", + Short: "add a single bouncer to the database", + Example: `cscli bouncers add MyBouncerName +cscli bouncers add MyBouncerName --key `, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.add(cmd.Context(), args[0], key) + }, + } + + flags := cmd.Flags() + flags.StringP("length", "l", "", "length of the api key") + _ = flags.MarkDeprecated("length", "use --key instead") + flags.StringVarP(&key, "key", "k", "", "api key for the bouncer") + + return cmd +} diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go new file mode 100644 index 00000000000..2b0a3556873 --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -0,0 +1,135 @@ +package clibouncer + +import ( + "slices" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +type configGetter = func() *csconfig.Config + +type cliBouncers struct { + db *database.Client + cfg configGetter +} + +func New(cfg configGetter) *cliBouncers { + return &cliBouncers{ + cfg: cfg, + } +} + +func (cli *cliBouncers) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "bouncers [action]", + Short: "Manage bouncers [requires local API]", + Long: 
`To list/add/delete/prune bouncers. +Note: This command requires database direct access, so is intended to be run on Local API/master. +`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"bouncer"}, + DisableAutoGenTag: true, + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { + var err error + + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + return err + } + + cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) + if err != nil { + return err + } + + return nil + }, + } + + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newAddCmd()) + cmd.AddCommand(cli.newDeleteCmd()) + cmd.AddCommand(cli.newPruneCmd()) + cmd.AddCommand(cli.newInspectCmd()) + + return cmd +} + +// bouncerInfo contains only the data we want for inspect/list +type bouncerInfo struct { + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Name string `json:"name"` + Revoked bool `json:"revoked"` + IPAddress string `json:"ip_address"` + Type string `json:"type"` + Version string `json:"version"` + LastPull *time.Time `json:"last_pull"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` + AutoCreated bool `json:"auto_created"` +} + +func newBouncerInfo(b *ent.Bouncer) bouncerInfo { + return bouncerInfo{ + CreatedAt: b.CreatedAt, + UpdatedAt: b.UpdatedAt, + Name: b.Name, + Revoked: b.Revoked, + IPAddress: b.IPAddress, + Type: b.Type, + Version: b.Version, + LastPull: b.LastPull, + AuthType: b.AuthType, + OS: clientinfo.GetOSNameAndVersion(b), + Featureflags: clientinfo.GetFeatureFlagList(b), + AutoCreated: b.AutoCreated, + } +} + +// validBouncerID returns a list of bouncer IDs for command completion +func (cli *cliBouncers) validBouncerID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var err error + + cfg := cli.cfg() + ctx := cmd.Context() + + // need to load config and db because PersistentPreRunE 
is not called for completions + + if err = require.LAPI(cfg); err != nil { + cobra.CompError("unable to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + cli.db, err = require.DBClient(ctx, cfg.DbConfig) + if err != nil { + cobra.CompError("unable to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + bouncers, err := cli.db.ListBouncers(ctx) + if err != nil { + cobra.CompError("unable to list bouncers " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + ret := []string{} + + for _, bouncer := range bouncers { + if strings.Contains(bouncer.Name, toComplete) && !slices.Contains(args, bouncer.Name) { + ret = append(ret, bouncer.Name) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp +} diff --git a/cmd/crowdsec-cli/clibouncer/delete.go b/cmd/crowdsec-cli/clibouncer/delete.go new file mode 100644 index 00000000000..33419f483b6 --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/delete.go @@ -0,0 +1,99 @@ +package clibouncer + +import ( + "context" + "errors" + "fmt" + "strings" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func (cli *cliBouncers) findParentBouncer(bouncerName string, bouncers []*ent.Bouncer) (string, error) { + bouncerPrefix := strings.Split(bouncerName, "@")[0] + for _, bouncer := range bouncers { + if strings.HasPrefix(bouncer.Name, bouncerPrefix) && !bouncer.AutoCreated { + return bouncer.Name, nil + } + } + + return "", errors.New("no parent bouncer found") +} + +func (cli *cliBouncers) delete(ctx context.Context, bouncers []string, ignoreMissing bool) error { + allBouncers, err := cli.db.ListBouncers(ctx) + if err != nil { + return fmt.Errorf("unable to list bouncers: %w", err) + } + for _, bouncerName := range bouncers { + bouncer, err := cli.db.SelectBouncerByName(ctx, bouncerName) + if err != nil { + var notFoundErr 
*ent.NotFoundError + if ignoreMissing && errors.As(err, ¬FoundErr) { + continue + } + return fmt.Errorf("unable to delete bouncer %s: %w", bouncerName, err) + } + + // For TLS bouncers, always delete them, they have no parents + if bouncer.AuthType == types.TlsAuthType { + if err := cli.db.DeleteBouncer(ctx, bouncerName); err != nil { + return fmt.Errorf("unable to delete bouncer %s: %w", bouncerName, err) + } + continue + } + + if bouncer.AutoCreated { + parentBouncer, err := cli.findParentBouncer(bouncerName, allBouncers) + if err != nil { + log.Errorf("bouncer '%s' is auto-created, but couldn't find a parent bouncer", err) + continue + } + log.Warnf("bouncer '%s' is auto-created and cannot be deleted, delete parent bouncer %s instead", bouncerName, parentBouncer) + continue + } + //Try to find all child bouncers and delete them + for _, childBouncer := range allBouncers { + if strings.HasPrefix(childBouncer.Name, bouncerName+"@") && childBouncer.AutoCreated { + if err := cli.db.DeleteBouncer(ctx, childBouncer.Name); err != nil { + return fmt.Errorf("unable to delete bouncer %s: %w", childBouncer.Name, err) + } + log.Infof("bouncer '%s' deleted successfully", childBouncer.Name) + } + } + + if err := cli.db.DeleteBouncer(ctx, bouncerName); err != nil { + return fmt.Errorf("unable to delete bouncer %s: %w", bouncerName, err) + } + + log.Infof("bouncer '%s' deleted successfully", bouncerName) + } + + return nil +} + +func (cli *cliBouncers) newDeleteCmd() *cobra.Command { + var ignoreMissing bool + + cmd := &cobra.Command{ + Use: "delete MyBouncerName", + Short: "delete bouncer(s) from the database", + Example: `cscli bouncers delete "bouncer1" "bouncer2"`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"remove"}, + DisableAutoGenTag: true, + ValidArgsFunction: cli.validBouncerID, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.delete(cmd.Context(), args, ignoreMissing) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&ignoreMissing, 
"ignore-missing", false, "don't print errors if one or more bouncers don't exist") + + return cmd +} diff --git a/cmd/crowdsec-cli/clibouncer/inspect.go b/cmd/crowdsec-cli/clibouncer/inspect.go new file mode 100644 index 00000000000..b62344baa9b --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/inspect.go @@ -0,0 +1,99 @@ +package clibouncer + +import ( + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" +) + +func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + + t.SetTitle("Bouncer: " + bouncer.Name) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + lastPull := "" + if bouncer.LastPull != nil { + lastPull = bouncer.LastPull.String() + } + + t.AppendRows([]table.Row{ + {"Created At", bouncer.CreatedAt}, + {"Last Update", bouncer.UpdatedAt}, + {"Revoked?", bouncer.Revoked}, + {"IP Address", bouncer.IPAddress}, + {"Type", bouncer.Type}, + {"Version", bouncer.Version}, + {"Last Pull", lastPull}, + {"Auth type", bouncer.AuthType}, + {"OS", clientinfo.GetOSNameAndVersion(bouncer)}, + {"Auto Created", bouncer.AutoCreated}, + }) + + for _, ff := range clientinfo.GetFeatureFlagList(bouncer) { + t.AppendRow(table.Row{"Feature Flags", ff}) + } + + io.WriteString(out, t.Render()+"\n") +} + +func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { + out := color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, bouncer) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newBouncerInfo(bouncer)); err != nil { + return 
errors.New("failed to serialize") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + + return nil +} + +func (cli *cliBouncers) newInspectCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "inspect [bouncer_name]", + Short: "inspect a bouncer by name", + Example: `cscli bouncers inspect "bouncer1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validBouncerID, + RunE: func(cmd *cobra.Command, args []string) error { + bouncerName := args[0] + + b, err := cli.db.Ent.Bouncer.Query(). + Where(bouncer.Name(bouncerName)). + Only(cmd.Context()) + if err != nil { + return fmt.Errorf("unable to read bouncer data '%s': %w", bouncerName, err) + } + + return cli.inspect(b) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clibouncer/list.go b/cmd/crowdsec-cli/clibouncer/list.go new file mode 100644 index 00000000000..a13ca994e1e --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/list.go @@ -0,0 +1,117 @@ +package clibouncer + +import ( + "context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "time" + + "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Valid", "Last API pull", "Type", "Version", "Auth Type"}) + + for _, b := range bouncers { + revoked := emoji.CheckMark + if b.Revoked { + revoked = emoji.Prohibited + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, 
b.AuthType}) + } + + io.WriteString(out, t.Render()+"\n") +} + +func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { + csvwriter := csv.NewWriter(out) + + if err := csvwriter.Write([]string{"name", "ip", "revoked", "last_pull", "type", "version", "auth_type"}); err != nil { + return fmt.Errorf("failed to write raw header: %w", err) + } + + for _, b := range bouncers { + valid := "validated" + if b.Revoked { + valid = "pending" + } + + lastPull := "" + if b.LastPull != nil { + lastPull = b.LastPull.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{b.Name, b.IPAddress, valid, lastPull, b.Type, b.Version, b.AuthType}); err != nil { + return fmt.Errorf("failed to write raw: %w", err) + } + } + + csvwriter.Flush() + + return nil +} + +func (cli *cliBouncers) List(ctx context.Context, out io.Writer, db *database.Client) error { + // XXX: must use the provided db object, the one in the struct might be nil + // (calling List directly skips the PersistentPreRunE) + + bouncers, err := db.ListBouncers(ctx) + if err != nil { + return fmt.Errorf("unable to list bouncers: %w", err) + } + + switch cli.cfg().Cscli.Output { + case "human": + cli.listHuman(out, bouncers) + case "json": + info := make([]bouncerInfo, 0, len(bouncers)) + for _, b := range bouncers { + info = append(info, newBouncerInfo(b)) + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(info); err != nil { + return errors.New("failed to serialize") + } + + return nil + case "raw": + return cli.listCSV(out, bouncers) + } + + return nil +} + +func (cli *cliBouncers) newListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "list all bouncers within the database", + Example: `cscli bouncers list`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.List(cmd.Context(), color.Output, cli.db) + }, + } + + return cmd +} diff --git 
a/cmd/crowdsec-cli/clibouncer/prune.go b/cmd/crowdsec-cli/clibouncer/prune.go new file mode 100644 index 00000000000..754e0898a3b --- /dev/null +++ b/cmd/crowdsec-cli/clibouncer/prune.go @@ -0,0 +1,85 @@ +package clibouncer + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" +) + +func (cli *cliBouncers) prune(ctx context.Context, duration time.Duration, force bool) error { + if duration < 2*time.Minute { + if yes, err := ask.YesNo( + "The duration you provided is less than 2 minutes. "+ + "This may remove active bouncers. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. No changes were made.") + return nil + } + } + + bouncers, err := cli.db.QueryBouncersInactiveSince(ctx, time.Now().UTC().Add(-duration)) + if err != nil { + return fmt.Errorf("unable to query bouncers: %w", err) + } + + if len(bouncers) == 0 { + fmt.Println("No bouncers to prune.") + return nil + } + + cli.listHuman(color.Output, bouncers) + + if !force { + if yes, err := ask.YesNo( + "You are about to PERMANENTLY remove the above bouncers from the database. "+ + "These will NOT be recoverable. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. 
No changes were made.") + return nil + } + } + + deleted, err := cli.db.BulkDeleteBouncers(ctx, bouncers) + if err != nil { + return fmt.Errorf("unable to prune bouncers: %w", err) + } + + fmt.Fprintf(os.Stderr, "Successfully deleted %d bouncers\n", deleted) + + return nil +} + +func (cli *cliBouncers) newPruneCmd() *cobra.Command { + var ( + duration time.Duration + force bool + ) + + const defaultDuration = 60 * time.Minute + + cmd := &cobra.Command{ + Use: "prune", + Short: "prune multiple bouncers from the database", + Args: cobra.NoArgs, + DisableAutoGenTag: true, + Example: `cscli bouncers prune -d 45m +cscli bouncers prune -d 45m --force`, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.prune(cmd.Context(), duration, force) + }, + } + + flags := cmd.Flags() + flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since last pull") + flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") + + return cmd +} diff --git a/cmd/crowdsec-cli/capi.go b/cmd/crowdsec-cli/clicapi/capi.go similarity index 57% rename from cmd/crowdsec-cli/capi.go rename to cmd/crowdsec-cli/clicapi/capi.go index 1888aa3545a..61d59836fdd 100644 --- a/cmd/crowdsec-cli/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -1,36 +1,36 @@ -package main +package clicapi import ( "context" "errors" "fmt" + "io" "net/url" "os" + "github.com/fatih/color" "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) 
-const ( - CAPIBaseURL = "https://api.crowdsec.net/" - CAPIURLPrefix = "v3" -) +type configGetter = func() *csconfig.Config type cliCapi struct { cfg configGetter } -func NewCLICapi(cfg configGetter) *cliCapi { +func New(cfg configGetter) *cliCapi { return &cliCapi{ cfg: cfg, } @@ -58,27 +58,26 @@ func (cli *cliCapi) NewCommand() *cobra.Command { return cmd } -func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { +func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputFile string) error { cfg := cli.cfg() - capiUser, err := generateID(capiUserPrefix) + capiUser, err := idgen.GenerateMachineID(capiUserPrefix) if err != nil { return fmt.Errorf("unable to generate machine id: %w", err) } - password := strfmt.Password(generatePassword(passwordLength)) + password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) apiurl, err := url.Parse(types.CAPIBaseURL) if err != nil { return fmt.Errorf("unable to parse api url %s: %w", types.CAPIBaseURL, err) } - _, err = apiclient.RegisterClient(&apiclient.Config{ + _, err = apiclient.RegisterClient(ctx, &apiclient.Config{ MachineID: capiUser, Password: password, - UserAgent: cwversion.UserAgent(), URL: apiurl, - VersionPrefix: CAPIURLPrefix, + VersionPrefix: "v3", }, nil) if err != nil { return fmt.Errorf("api client register ('%s'): %w", types.CAPIBaseURL, err) @@ -105,7 +104,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { apiConfigDump, err := yaml.Marshal(apiCfg) if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) + return fmt.Errorf("unable to serialize api credentials: %w", err) } if dumpFile != "" { @@ -119,7 +118,7 @@ func (cli *cliCapi) register(capiUserPrefix string, outputFile string) error { fmt.Println(string(apiConfigDump)) } - log.Warning(ReloadMessage()) + log.Warning(reload.Message) return nil } @@ -135,8 +134,8 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { Short: "Register to 
Central API (CAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(capiUserPrefix, outputFile) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.register(cmd.Context(), capiUserPrefix, outputFile) }, } @@ -148,21 +147,17 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { return cmd } -// QueryCAPIStatus checks if the Local API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. -func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { - +// queryCAPIStatus checks if the Central API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. +func queryCAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { apiURL, err := url.Parse(credURL) if err != nil { - return false, false, fmt.Errorf("parsing api url: %w", err) + return false, false, err } - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return false, false, fmt.Errorf("failed to get scenarios: %w", err) - } + itemsForAPI := hub.GetInstalledListForAPI() - if len(scenarios) == 0 { - return false, false, errors.New("no scenarios installed, abort") + if len(itemsForAPI) == 0 { + return false, false, errors.New("no scenarios or appsec-rules installed, abort") } passwd := strfmt.Password(password) @@ -170,31 +165,17 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri client, err := apiclient.NewClient(&apiclient.Config{ MachineID: login, Password: passwd, - Scenarios: scenarios, - UserAgent: cwversion.UserAgent(), + Scenarios: itemsForAPI, URL: apiURL, - //I don't believe papi is neede to check enrollement - //PapiURL: papiURL, + // I don't believe papi is neede to check enrollement + // PapiURL: papiURL, VersionPrefix: "v3", - 
UpdateScenario: func() ([]string, error) { - l_scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, err - } - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, err - } - ret := make([]string, 0, len(l_scenarios)+len(appsecRules)) - ret = append(ret, l_scenarios...) - ret = append(ret, appsecRules...) - - return ret, nil + UpdateScenario: func(_ context.Context) ([]string, error) { + return itemsForAPI, nil }, }) - if err != nil { - return false, false, fmt.Errorf("new client api: %w", err) + return false, false, err } pw := strfmt.Password(password) @@ -202,10 +183,10 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri t := models.WatcherAuthRequest{ MachineID: &login, Password: &pw, - Scenarios: scenarios, + Scenarios: itemsForAPI, } - authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), t) + authResp, _, err := client.Auth.AuthenticateWatcher(ctx, t) if err != nil { return false, false, err } @@ -215,11 +196,11 @@ func QueryCAPIStatus(hub *cwhub.Hub, credURL string, login string, password stri if client.IsEnrolled() { return true, true, nil } - return true, false, nil + return true, false, nil } -func (cli *cliCapi) status() error { +func (cli *cliCapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() if err := require.CAPIRegistered(cfg); err != nil { @@ -228,25 +209,43 @@ func (cli *cliCapi) status() error { cred := cfg.API.Server.OnlineClient.Credentials - hub, err := require.Hub(cfg, nil, nil) + fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) + fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) + + auth, enrolled, err := queryCAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) if err != nil { - return err + return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) } - 
log.Infof("Loaded credentials from %s", cfg.API.Server.OnlineClient.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) + if auth { + fmt.Fprint(out, "You can successfully interact with Central API (CAPI)\n") + } - auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + if enrolled { + fmt.Fprint(out, "Your instance is enrolled in the console\n") + } - if err != nil { - return fmt.Errorf("CAPI: failed to authenticate to Central API (CAPI): %s", err) + switch *cfg.API.Server.OnlineClient.Sharing { + case true: + fmt.Fprint(out, "Sharing signals is enabled\n") + case false: + fmt.Fprint(out, "Sharing signals is disabled\n") } - if auth { - log.Info("You can successfully interact with Central API (CAPI)") + + switch *cfg.API.Server.OnlineClient.PullConfig.Community { + case true: + fmt.Fprint(out, "Pulling community blocklist is enabled\n") + case false: + fmt.Fprint(out, "Pulling community blocklist is disabled\n") } - if enrolled { - log.Info("Your instance is enrolled in the console") + + switch *cfg.API.Server.OnlineClient.PullConfig.Blocklists { + case true: + fmt.Fprint(out, "Pulling blocklists from the console is enabled\n") + case false: + fmt.Fprint(out, "Pulling blocklists from the console is disabled\n") } + return nil } @@ -256,8 +255,13 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Short: "Check status with the Central API (CAPI)", Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.status() + RunE: func(cmd *cobra.Command, _ []string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) + if err != nil { + return err + } + + return cli.Status(cmd.Context(), color.Output, hub) }, } diff --git a/cmd/crowdsec-cli/console.go b/cmd/crowdsec-cli/cliconsole/console.go similarity index 79% rename from cmd/crowdsec-cli/console.go rename to cmd/crowdsec-cli/cliconsole/console.go index 979c9f0ea60..448ddcee7fa 
100644 --- a/cmd/crowdsec-cli/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -1,4 +1,4 @@ -package main +package cliconsole import ( "context" @@ -20,19 +20,20 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter func() *csconfig.Config + type cliConsole struct { cfg configGetter } -func NewCLIConsole(cfg configGetter) *cliConsole { +func New(cfg configGetter) *cliConsole { return &cliConsole{ cfg: cfg, } @@ -65,7 +66,7 @@ func (cli *cliConsole) NewCommand() *cobra.Command { return cmd } -func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []string, opts []string) error { +func (cli *cliConsole) enroll(ctx context.Context, key string, name string, overwrite bool, tags []string, opts []string) error { cfg := cli.cfg() password := strfmt.Password(cfg.API.Server.OnlineClient.Credentials.Password) @@ -74,20 +75,6 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st return fmt.Errorf("could not parse CAPI URL: %w", err) } - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("failed to get installed scenarios: %w", err) - } - - if len(scenarios) == 0 { - scenarios = make([]string, 0) - } - enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} if len(opts) != 0 { @@ -100,23 +87,25 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st } for _, availableOpt := range csconfig.CONSOLE_CONFIGS { - if opt == availableOpt { - valid = true - 
enable := true - - for _, enabledOpt := range enableOpts { - if opt == enabledOpt { - enable = false - continue - } - } + if opt != availableOpt { + continue + } + + valid = true + enable := true - if enable { - enableOpts = append(enableOpts, opt) + for _, enabledOpt := range enableOpts { + if opt == enabledOpt { + enable = false + continue } + } - break + if enable { + enableOpts = append(enableOpts, opt) } + + break } if !valid { @@ -125,16 +114,20 @@ func (cli *cliConsole) enroll(key string, name string, overwrite bool, tags []st } } + hub, err := require.Hub(cfg, nil, nil) + if err != nil { + return err + } + c, _ := apiclient.NewClient(&apiclient.Config{ MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, - Scenarios: scenarios, - UserAgent: cwversion.UserAgent(), + Scenarios: hub.GetInstalledListForAPI(), URL: apiURL, VersionPrefix: "v3", }) - resp, err := c.Auth.EnrollWatcher(context.Background(), key, name, tags, overwrite) + resp, err := c.Auth.EnrollWatcher(ctx, key, name, tags, overwrite) if err != nil { return fmt.Errorf("could not enroll instance: %w", err) } @@ -180,8 +173,8 @@ After running this command your will need to validate the enrollment in the weba valid options are : %s,all (see 'cscli console status' for details)`, strings.Join(csconfig.CONSOLE_CONFIGS, ",")), Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.enroll(args[0], name, overwrite, tags, opts) + RunE: func(cmd *cobra.Command, args []string) error { + return cli.enroll(cmd.Context(), args[0], name, overwrite, tags, opts) }, } @@ -221,7 +214,7 @@ Enable given information push to the central API. 
Allows to empower the console` log.Infof("%v have been enabled", args) } - log.Infof(ReloadMessage()) + log.Info(reload.Message) return nil }, @@ -255,7 +248,7 @@ Disable given information push to the central API.`, log.Infof("%v have been disabled", args) } - log.Infof(ReloadMessage()) + log.Info(reload.Message) return nil }, @@ -287,7 +280,7 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command { } data, err := json.MarshalIndent(out, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) case "raw": @@ -325,7 +318,7 @@ func (cli *cliConsole) dumpConfig() error { out, err := yaml.Marshal(serverCfg.ConsoleConfig) if err != nil { - return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err) + return fmt.Errorf("while serializing ConsoleConfig (for %s): %w", serverCfg.ConsoleConfigPath, err) } if serverCfg.ConsoleConfigPath == "" { @@ -348,13 +341,8 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { switch arg { case csconfig.CONSOLE_MANAGEMENT: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ConsoleManagement != nil { - if *consoleCfg.ConsoleManagement == wanted { - log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - } else { - log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) - *consoleCfg.ConsoleManagement = wanted - } + if consoleCfg.ConsoleManagement != nil && *consoleCfg.ConsoleManagement == wanted { + log.Debugf("%s already set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) } else { log.Infof("%s set to %t", csconfig.CONSOLE_MANAGEMENT, wanted) consoleCfg.ConsoleManagement = ptr.Of(wanted) @@ -373,7 +361,7 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { if changed { fileContent, err := yaml.Marshal(cfg.API.Server.OnlineClient.Credentials) if err != nil { - return 
fmt.Errorf("cannot marshal credentials: %w", err) + return fmt.Errorf("cannot serialize credentials: %w", err) } log.Infof("Updating credentials file: %s", cfg.API.Server.OnlineClient.CredentialsFilePath) @@ -386,52 +374,32 @@ func (cli *cliConsole) setConsoleOpts(args []string, wanted bool) error { } case csconfig.SEND_CUSTOM_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareCustomScenarios != nil { - if *consoleCfg.ShareCustomScenarios == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) - *consoleCfg.ShareCustomScenarios = wanted - } + if consoleCfg.ShareCustomScenarios != nil && *consoleCfg.ShareCustomScenarios == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CUSTOM_SCENARIOS, wanted) consoleCfg.ShareCustomScenarios = ptr.Of(wanted) } case csconfig.SEND_TAINTED_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareTaintedScenarios != nil { - if *consoleCfg.ShareTaintedScenarios == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) - *consoleCfg.ShareTaintedScenarios = wanted - } + if consoleCfg.ShareTaintedScenarios != nil && *consoleCfg.ShareTaintedScenarios == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_TAINTED_SCENARIOS, wanted) consoleCfg.ShareTaintedScenarios = ptr.Of(wanted) } case csconfig.SEND_MANUAL_SCENARIOS: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareManualDecisions != nil { - if *consoleCfg.ShareManualDecisions == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - } else { - 
log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) - *consoleCfg.ShareManualDecisions = wanted - } + if consoleCfg.ShareManualDecisions != nil && *consoleCfg.ShareManualDecisions == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_MANUAL_SCENARIOS, wanted) consoleCfg.ShareManualDecisions = ptr.Of(wanted) } case csconfig.SEND_CONTEXT: /*for each flag check if it's already set before setting it*/ - if consoleCfg.ShareContext != nil { - if *consoleCfg.ShareContext == wanted { - log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted) - } else { - log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) - *consoleCfg.ShareContext = wanted - } + if consoleCfg.ShareContext != nil && *consoleCfg.ShareContext == wanted { + log.Debugf("%s already set to %t", csconfig.SEND_CONTEXT, wanted) } else { log.Infof("%s set to %t", csconfig.SEND_CONTEXT, wanted) consoleCfg.ShareContext = ptr.Of(wanted) diff --git a/cmd/crowdsec-cli/console_table.go b/cmd/crowdsec-cli/cliconsole/console_table.go similarity index 98% rename from cmd/crowdsec-cli/console_table.go rename to cmd/crowdsec-cli/cliconsole/console_table.go index 94976618573..8f17b97860a 100644 --- a/cmd/crowdsec-cli/console_table.go +++ b/cmd/crowdsec-cli/cliconsole/console_table.go @@ -1,4 +1,4 @@ -package main +package cliconsole import ( "io" diff --git a/cmd/crowdsec-cli/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go similarity index 90% rename from cmd/crowdsec-cli/decisions.go rename to cmd/crowdsec-cli/clidecision/decisions.go index d485c90254f..307cabffe51 100644 --- a/cmd/crowdsec-cli/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -1,4 +1,4 @@ -package main +package clidecision import ( "context" @@ -17,8 +17,9 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" 
"github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -114,12 +115,14 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin return nil } +type configGetter func() *csconfig.Config + type cliDecisions struct { client *apiclient.ApiClient cfg configGetter } -func NewCLIDecisions(cfg configGetter) *cliDecisions { +func New(cfg configGetter) *cliDecisions { return &cliDecisions{ cfg: cfg, } @@ -148,7 +151,6 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { cli.client, err = apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -168,10 +170,11 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { return cmd } -func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { +func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { var err error - /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(filter.IPEquals, filter.RangeEquals, filter.ScopeEquals, filter.ValueEquals); err != nil { + + *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, *filter.IPEquals, *filter.RangeEquals) + if err != nil { return err } @@ -246,7 +249,7 @@ func (cli *cliDecisions) list(filter apiclient.AlertsListOpts, NoSimu *bool, con filter.Contains = new(bool) } - alerts, _, err := cli.client.Alerts.List(context.Background(), filter) + alerts, _, err := cli.client.Alerts.List(ctx, filter) if err != nil { return fmt.Errorf("unable to retrieve decisions: %w", err) } @@ -287,10 +290,10 @@ cscli decisions list -r 
1.2.3.0/24 cscli decisions list -s crowdsecurity/ssh-bf cscli decisions list --origin lists --scenario list_name `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.list(filter, NoSimu, contained, printMachine) + return cli.list(cmd.Context(), filter, NoSimu, contained, printMachine) }, } @@ -314,7 +317,7 @@ cscli decisions list --origin lists --scenario list_name return cmd } -func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { +func (cli *cliDecisions) add(ctx context.Context, addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { alerts := models.AddAlertsRequest{} origin := types.CscliOrigin capacity := int32(0) @@ -326,8 +329,10 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a stopAt := time.Now().UTC().Format(time.RFC3339) createdAt := time.Now().UTC().Format(time.RFC3339) - /*take care of shorthand options*/ - if err := manageCliDecisionAlerts(&addIP, &addRange, &addScope, &addValue); err != nil { + var err error + + addScope, err = clialert.SanitizeScope(addScope, addIP, addRange) + if err != nil { return err } @@ -381,7 +386,7 @@ func (cli *cliDecisions) add(addIP, addRange, addDuration, addValue, addScope, a } alerts = append(alerts, &alert) - _, _, err := cli.client.Alerts.Add(context.Background(), alerts) + _, _, err = cli.client.Alerts.Add(ctx, alerts) if err != nil { return err } @@ -411,10 +416,10 @@ cscli decisions add --ip 1.2.3.4 --duration 24h --type captcha cscli decisions add --scope username --value foobar `, /*TBD : fix long and example*/ - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.add(addIP, addRange, addDuration, addValue, addScope, addReason, addType) + return cli.add(cmd.Context(), addIP, addRange, addDuration, addValue, addScope, 
addReason, addType) }, } @@ -431,11 +436,12 @@ cscli decisions add --scope username --value foobar return cmd } -func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDecisionID string, contained *bool) error { +func (cli *cliDecisions) delete(ctx context.Context, delFilter apiclient.DecisionsDeleteOpts, delDecisionID string, contained *bool) error { var err error /*take care of shorthand options*/ - if err = manageCliDecisionAlerts(delFilter.IPEquals, delFilter.RangeEquals, delFilter.ScopeEquals, delFilter.ValueEquals); err != nil { + *delFilter.ScopeEquals, err = clialert.SanitizeScope(*delFilter.ScopeEquals, *delFilter.IPEquals, *delFilter.RangeEquals) + if err != nil { return err } @@ -474,7 +480,7 @@ func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDeci var decisions *models.DeleteDecisionResponse if delDecisionID == "" { - decisions, _, err = cli.client.Decisions.Delete(context.Background(), delFilter) + decisions, _, err = cli.client.Decisions.Delete(ctx, delFilter) if err != nil { return fmt.Errorf("unable to delete decisions: %w", err) } @@ -483,7 +489,7 @@ func (cli *cliDecisions) delete(delFilter apiclient.DecisionsDeleteOpts, delDeci return fmt.Errorf("id '%s' is not an integer: %w", delDecisionID, err) } - decisions, _, err = cli.client.Decisions.DeleteOne(context.Background(), delDecisionID) + decisions, _, err = cli.client.Decisions.DeleteOne(ctx, delDecisionID) if err != nil { return fmt.Errorf("unable to delete decision: %w", err) } @@ -537,8 +543,8 @@ cscli decisions delete --origin lists --scenario list_name return nil }, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.delete(delFilter, delDecisionID, contained) + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.delete(cmd.Context(), delFilter, delDecisionID, contained) }, } diff --git a/cmd/crowdsec-cli/decisions_import.go b/cmd/crowdsec-cli/clidecision/import.go similarity index 70% rename from 
cmd/crowdsec-cli/decisions_import.go rename to cmd/crowdsec-cli/clidecision/import.go index 338c1b7fb3e..5b34b74a250 100644 --- a/cmd/crowdsec-cli/decisions_import.go +++ b/cmd/crowdsec-cli/clidecision/import.go @@ -1,4 +1,4 @@ -package main +package clidecision import ( "bufio" @@ -67,65 +67,29 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { return ret, nil } -func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - input, err := flags.GetString("input") - if err != nil { - return err - } - - defaultDuration, err := flags.GetString("duration") - if err != nil { - return err - } - - if defaultDuration == "" { - return errors.New("--duration cannot be empty") - } - - defaultScope, err := flags.GetString("scope") - if err != nil { - return err - } - - if defaultScope == "" { - return errors.New("--scope cannot be empty") - } - - defaultReason, err := flags.GetString("reason") - if err != nil { - return err - } - - if defaultReason == "" { - return errors.New("--reason cannot be empty") - } +func (cli *cliDecisions) import_(ctx context.Context, input string, duration string, scope string, reason string, type_ string, batch int, format string) error { + var ( + content []byte + fin *os.File + err error + ) - defaultType, err := flags.GetString("type") - if err != nil { - return err + if duration == "" { + return errors.New("default duration cannot be empty") } - if defaultType == "" { - return errors.New("--type cannot be empty") + if scope == "" { + return errors.New("default scope cannot be empty") } - batchSize, err := flags.GetInt("batch") - if err != nil { - return err + if reason == "" { + return errors.New("default reason cannot be empty") } - format, err := flags.GetString("format") - if err != nil { - return err + if type_ == "" { + return errors.New("default type cannot be empty") } - var ( - content []byte - fin *os.File - ) - // set format if the file has a json or csv 
extension if format == "" { if strings.HasSuffix(input, ".json") { @@ -167,23 +131,23 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } if d.Duration == "" { - d.Duration = defaultDuration - log.Debugf("item %d: missing 'duration', using default '%s'", i, defaultDuration) + d.Duration = duration + log.Debugf("item %d: missing 'duration', using default '%s'", i, duration) } if d.Scenario == "" { - d.Scenario = defaultReason - log.Debugf("item %d: missing 'reason', using default '%s'", i, defaultReason) + d.Scenario = reason + log.Debugf("item %d: missing 'reason', using default '%s'", i, reason) } if d.Type == "" { - d.Type = defaultType - log.Debugf("item %d: missing 'type', using default '%s'", i, defaultType) + d.Type = type_ + log.Debugf("item %d: missing 'type', using default '%s'", i, type_) } if d.Scope == "" { - d.Scope = defaultScope - log.Debugf("item %d: missing 'scope', using default '%s'", i, defaultScope) + d.Scope = scope + log.Debugf("item %d: missing 'scope', using default '%s'", i, scope) } decisions[i] = &models.Decision{ @@ -201,7 +165,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { log.Infof("You are about to add %d decisions, this may take a while", len(decisions)) } - for _, chunk := range slicetools.Chunks(decisions, batchSize) { + for _, chunk := range slicetools.Chunks(decisions, batch) { log.Debugf("Processing chunk of %d decisions", len(chunk)) importAlert := models.Alert{ CreatedAt: time.Now().UTC().Format(time.RFC3339), @@ -224,7 +188,7 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { Decisions: chunk, } - _, _, err = cli.client.Alerts.Add(context.Background(), models.AddAlertsRequest{&importAlert}) + _, _, err = cli.client.Alerts.Add(ctx, models.AddAlertsRequest{&importAlert}) if err != nil { return err } @@ -236,12 +200,22 @@ func (cli *cliDecisions) runImport(cmd *cobra.Command, args []string) error { } func (cli *cliDecisions) 
newImportCmd() *cobra.Command { + var ( + input string + duration string + scope string + reason string + decisionType string + batch int + format string + ) + cmd := &cobra.Command{ Use: "import [options]", Short: "Import decisions from a file or pipe", Long: "expected format:\n" + "csv : any of duration,reason,scope,type,value, with a header line\n" + - "json :" + "`{" + `"duration" : "24h", "reason" : "my_scenario", "scope" : "ip", "type" : "ban", "value" : "x.y.z.z"` + "}`", + "json :" + "`{" + `"duration": "24h", "reason": "my_scenario", "scope": "ip", "type": "ban", "value": "x.y.z.z"` + "}`", Args: cobra.NoArgs, DisableAutoGenTag: true, Example: `decisions.csv: @@ -251,7 +225,7 @@ duration,scope,value $ cscli decisions import -i decisions.csv decisions.json: -[{"duration" : "4h", "scope" : "ip", "type" : "ban", "value" : "1.2.3.4"}] +[{"duration": "4h", "scope": "ip", "type": "ban", "value": "1.2.3.4"}] The file format is detected from the extension, but can be forced with the --format option which is required when reading from standard input. 
@@ -260,18 +234,20 @@ Raw values, standard input: $ echo "1.2.3.4" | cscli decisions import -i - --format values `, - RunE: cli.runImport, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.import_(cmd.Context(), input, duration, scope, reason, decisionType, batch, format) + }, } flags := cmd.Flags() flags.SortFlags = false - flags.StringP("input", "i", "", "Input file") - flags.StringP("duration", "d", "4h", "Decision duration: 1h,4h,30m") - flags.String("scope", types.Ip, "Decision scope: ip,range,username") - flags.StringP("reason", "R", "manual", "Decision reason: ") - flags.StringP("type", "t", "ban", "Decision type: ban,captcha,throttle") - flags.Int("batch", 0, "Split import in batches of N decisions") - flags.String("format", "", "Input format: 'json', 'csv' or 'values' (each line is a value, no headers)") + flags.StringVarP(&input, "input", "i", "", "Input file") + flags.StringVarP(&duration, "duration", "d", "4h", "Decision duration: 1h,4h,30m") + flags.StringVar(&scope, "scope", types.Ip, "Decision scope: ip,range,username") + flags.StringVarP(&reason, "reason", "R", "manual", "Decision reason: ") + flags.StringVarP(&decisionType, "type", "t", "ban", "Decision type: ban,captcha,throttle") + flags.IntVar(&batch, "batch", 0, "Split import in batches of N decisions") + flags.StringVar(&format, "format", "", "Input format: 'json', 'csv' or 'values' (each line is a value, no headers)") _ = cmd.MarkFlagRequired("input") diff --git a/cmd/crowdsec-cli/decisions_table.go b/cmd/crowdsec-cli/clidecision/table.go similarity index 92% rename from cmd/crowdsec-cli/decisions_table.go rename to cmd/crowdsec-cli/clidecision/table.go index 02952f93b85..189eb80b8e5 100644 --- a/cmd/crowdsec-cli/decisions_table.go +++ b/cmd/crowdsec-cli/clidecision/table.go @@ -1,7 +1,6 @@ -package main +package clidecision import ( - "fmt" "io" "strconv" @@ -23,7 +22,7 @@ func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsR for _, alertItem := 
range *alerts { for _, decisionItem := range alertItem.Decisions { if *alertItem.Simulated { - *decisionItem.Type = fmt.Sprintf("(simul)%s", *decisionItem.Type) + *decisionItem.Type = "(simul)" + *decisionItem.Type } row := []string{ diff --git a/cmd/crowdsec-cli/clientinfo/clientinfo.go b/cmd/crowdsec-cli/clientinfo/clientinfo.go new file mode 100644 index 00000000000..0bf1d98804f --- /dev/null +++ b/cmd/crowdsec-cli/clientinfo/clientinfo.go @@ -0,0 +1,39 @@ +package clientinfo + +import ( + "strings" +) + +type featureflagProvider interface { + GetFeatureflags() string +} + +type osProvider interface { + GetOsname() string + GetOsversion() string +} + +func GetOSNameAndVersion(o osProvider) string { + ret := o.GetOsname() + if o.GetOsversion() != "" { + if ret != "" { + ret += "/" + } + + ret += o.GetOsversion() + } + + if ret == "" { + return "?" + } + + return ret +} + +func GetFeatureFlagList(o featureflagProvider) []string { + if o.GetFeatureflags() == "" { + return nil + } + + return strings.Split(o.GetFeatureflags(), ",") +} diff --git a/cmd/crowdsec-cli/explain.go b/cmd/crowdsec-cli/cliexplain/explain.go similarity index 92% rename from cmd/crowdsec-cli/explain.go rename to cmd/crowdsec-cli/cliexplain/explain.go index c322cce47fe..d6e821e4e6c 100644 --- a/cmd/crowdsec-cli/explain.go +++ b/cmd/crowdsec-cli/cliexplain/explain.go @@ -1,4 +1,4 @@ -package main +package cliexplain import ( "bufio" @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) @@ -40,9 +41,12 @@ func getLineCountForFile(filepath string) (int, error) { return lc, nil } +type configGetter func() *csconfig.Config + type cliExplain struct { - cfg configGetter - flags struct { + cfg configGetter + configFilePath string + flags struct { logFile string dsn string logLine string @@ -56,9 +60,10 @@ type cliExplain struct 
{ } } -func NewCLIExplain(cfg configGetter) *cliExplain { +func New(cfg configGetter, configFilePath string) *cliExplain { return &cliExplain{ - cfg: cfg, + cfg: cfg, + configFilePath: configFilePath, } } @@ -75,7 +80,7 @@ cscli explain --log "Sep 19 18:33:22 scw-d95986 sshd[24347]: pam_unix(sshd:auth) cscli explain --dsn "file://myfile.log" --type nginx tail -n 5 myfile.log | cscli explain --type nginx -f - `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.run() @@ -103,7 +108,7 @@ tail -n 5 myfile.log | cscli explain --type nginx -f - flags.StringVar(&cli.flags.crowdsec, "crowdsec", "crowdsec", "Path to crowdsec") flags.BoolVar(&cli.flags.noClean, "no-clean", false, "Don't clean runtime environment after tests") - cmd.MarkFlagRequired("type") + _ = cmd.MarkFlagRequired("type") cmd.MarkFlagsOneRequired("log", "file", "dsn") return cmd @@ -192,7 +197,7 @@ func (cli *cliExplain) run() error { return fmt.Errorf("unable to get absolute path of '%s', exiting", logFile) } - dsn = fmt.Sprintf("file://%s", absolutePath) + dsn = "file://" + absolutePath lineCount, err := getLineCountForFile(absolutePath) if err != nil { @@ -214,7 +219,7 @@ func (cli *cliExplain) run() error { return errors.New("no acquisition (--file or --dsn) provided, can't run cscli test") } - cmdArgs := []string{"-c", ConfigFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} + cmdArgs := []string{"-c", cli.configFilePath, "-type", logType, "-dsn", dsn, "-dump-data", dir, "-no-api"} if labels != "" { log.Debugf("adding labels %s", labels) diff --git a/cmd/crowdsec-cli/hub.go b/cmd/crowdsec-cli/clihub/hub.go similarity index 85% rename from cmd/crowdsec-cli/hub.go rename to cmd/crowdsec-cli/clihub/hub.go index 70df30fc410..f189d6a2e13 100644 --- a/cmd/crowdsec-cli/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -1,9 +1,10 @@ -package main +package clihub import ( "context" "encoding/json" "fmt" + 
"io" "github.com/fatih/color" log "github.com/sirupsen/logrus" @@ -11,14 +12,17 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) +type configGetter = func() *csconfig.Config + type cliHub struct { cfg configGetter } -func NewCLIHub(cfg configGetter) *cliHub { +func New(cfg configGetter) *cliHub { return &cliHub{ cfg: cfg, } @@ -35,7 +39,7 @@ The Hub is managed by cscli, to get the latest hub files from [Crowdsec Hub](htt Example: `cscli hub list cscli hub update cscli hub upgrade`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, } @@ -47,14 +51,9 @@ cscli hub upgrade`, return cmd } -func (cli *cliHub) list(all bool) error { +func (cli *cliHub) List(out io.Writer, hub *cwhub.Hub, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, nil, log.StandardLogger()) - if err != nil { - return err - } - for _, v := range hub.Warnings { log.Info(v) } @@ -65,14 +64,16 @@ func (cli *cliHub) list(all bool) error { items := make(map[string][]*cwhub.Item) + var err error + for _, itemType := range cwhub.ItemTypes { - items[itemType], err = selectItems(hub, itemType, nil, !all) + items[itemType], err = SelectItems(hub, itemType, nil, !all) if err != nil { return err } } - err = listItems(color.Output, cfg.Cscli.Color, cwhub.ItemTypes, items, true, cfg.Cscli.Output) + err = ListItems(out, cfg.Cscli.Color, cwhub.ItemTypes, items, true, cfg.Cscli.Output) if err != nil { return err } @@ -86,10 +87,15 @@ func (cli *cliHub) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list [-a]", Short: "List all installed configurations", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(all) + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + if err != nil { + return err + } + + return 
cli.List(color.Output, hub, all) }, } @@ -134,7 +140,7 @@ func (cli *cliHub) newUpdateCmd() *cobra.Command { Long: ` Fetches the .index.json file from the hub, containing the list of available configs. `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.update(cmd.Context(), withContent) @@ -154,16 +160,11 @@ func (cli *cliHub) upgrade(ctx context.Context, force bool) error { } for _, itemType := range cwhub.ItemTypes { - items, err := hub.GetInstalledItemsByType(itemType) - if err != nil { - return err - } - updated := 0 log.Infof("Upgrading %s", itemType) - for _, item := range items { + for _, item := range hub.GetInstalledByType(itemType, true) { didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err @@ -189,7 +190,7 @@ func (cli *cliHub) newUpgradeCmd() *cobra.Command { Long: ` Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if you want the latest versions available. `, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.upgrade(cmd.Context(), force) @@ -234,7 +235,7 @@ func (cli *cliHub) newTypesCmd() *cobra.Command { Long: ` List the types of supported hub items. 
`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.types() diff --git a/cmd/crowdsec-cli/item_metrics.go b/cmd/crowdsec-cli/clihub/item_metrics.go similarity index 89% rename from cmd/crowdsec-cli/item_metrics.go rename to cmd/crowdsec-cli/clihub/item_metrics.go index f00ae08b00b..f4af8f635db 100644 --- a/cmd/crowdsec-cli/item_metrics.go +++ b/cmd/crowdsec-cli/clihub/item_metrics.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "net/http" @@ -16,22 +16,22 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func ShowMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { +func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: - metrics := GetParserMetric(prometheusURL, hubItem.Name) + metrics := getParserMetric(prometheusURL, hubItem.Name) parserMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.SCENARIOS: - metrics := GetScenarioMetric(prometheusURL, hubItem.Name) + metrics := getScenarioMetric(prometheusURL, hubItem.Name) scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: for _, sub := range hubItem.SubItems() { - if err := ShowMetrics(prometheusURL, sub, wantColor); err != nil { + if err := showMetrics(prometheusURL, sub, wantColor); err != nil { return err } } case cwhub.APPSEC_RULES: - metrics := GetAppsecRuleMetric(prometheusURL, hubItem.Name) + metrics := getAppsecRuleMetric(prometheusURL, hubItem.Name) appsecMetricsTable(color.Output, wantColor, hubItem.Name, metrics) default: // no metrics for this item type } @@ -39,11 +39,11 @@ func ShowMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) er return nil } -// GetParserMetric is a complete rip from prom2json -func GetParserMetric(url string, itemName string) map[string]map[string]int { +// getParserMetric is a complete rip from 
prom2json +func getParserMetric(url string, itemName string) map[string]map[string]int { stats := make(map[string]map[string]int) - result := GetPrometheusMetric(url) + result := getPrometheusMetric(url) for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -131,7 +131,7 @@ func GetParserMetric(url string, itemName string) map[string]map[string]int { return stats } -func GetScenarioMetric(url string, itemName string) map[string]int { +func getScenarioMetric(url string, itemName string) map[string]int { stats := make(map[string]int) stats["instantiation"] = 0 @@ -140,7 +140,7 @@ func GetScenarioMetric(url string, itemName string) map[string]int { stats["pour"] = 0 stats["underflow"] = 0 - result := GetPrometheusMetric(url) + result := getPrometheusMetric(url) for idx, fam := range result { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -195,13 +195,13 @@ func GetScenarioMetric(url string, itemName string) map[string]int { return stats } -func GetAppsecRuleMetric(url string, itemName string) map[string]int { +func getAppsecRuleMetric(url string, itemName string) map[string]int { stats := make(map[string]int) stats["inband_hits"] = 0 stats["outband_hits"] = 0 - results := GetPrometheusMetric(url) + results := getPrometheusMetric(url) for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -260,7 +260,7 @@ func GetAppsecRuleMetric(url string, itemName string) map[string]int { return stats } -func GetPrometheusMetric(url string) []*prom2json.Family { +func getPrometheusMetric(url string) []*prom2json.Family { mfChan := make(chan *dto.MetricFamily, 1024) // Start with the DefaultTransport for sane defaults. 
diff --git a/cmd/crowdsec-cli/items.go b/cmd/crowdsec-cli/clihub/items.go similarity index 84% rename from cmd/crowdsec-cli/items.go rename to cmd/crowdsec-cli/clihub/items.go index b0c03922166..f86fe65a2a1 100644 --- a/cmd/crowdsec-cli/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "encoding/csv" @@ -16,8 +16,13 @@ import ( ) // selectItems returns a slice of items of a given type, selected by name and sorted by case-insensitive name -func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { - itemNames := hub.GetNamesByType(itemType) +func SelectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly bool) ([]*cwhub.Item, error) { + allItems := hub.GetItemsByType(itemType, true) + + itemNames := make([]string, len(allItems)) + for idx, item := range allItems { + itemNames[idx] = item.Name + } notExist := []string{} @@ -38,7 +43,7 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b installedOnly = false } - items := make([]*cwhub.Item, 0, len(itemNames)) + wantedItems := make([]*cwhub.Item, 0, len(itemNames)) for _, itemName := range itemNames { item := hub.GetItem(itemType, itemName) @@ -46,15 +51,13 @@ func selectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b continue } - items = append(items, item) + wantedItems = append(wantedItems, item) } - cwhub.SortItemSlice(items) - - return items, nil + return wantedItems, nil } -func listItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { +func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[string][]*cwhub.Item, omitIfEmpty bool, output string) error { switch output { case "human": nothingToDisplay := true @@ -103,7 +106,7 @@ func listItems(out io.Writer, wantColor string, itemTypes []string, items map[st x, err := json.MarshalIndent(hubStatus, 
"", " ") if err != nil { - return fmt.Errorf("failed to unmarshal: %w", err) + return fmt.Errorf("failed to parse: %w", err) } out.Write(x) @@ -143,7 +146,7 @@ func listItems(out io.Writer, wantColor string, itemTypes []string, items map[st return nil } -func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusURL string, wantColor string) error { +func InspectItem(item *cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { switch output { case "human", "raw": enc := yaml.NewEncoder(os.Stdout) @@ -155,7 +158,7 @@ func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusUR case "json": b, err := json.MarshalIndent(*item, "", " ") if err != nil { - return fmt.Errorf("unable to marshal item: %w", err) + return fmt.Errorf("unable to serialize item: %w", err) } fmt.Print(string(b)) @@ -171,10 +174,10 @@ func inspectItem(item *cwhub.Item, showMetrics bool, output string, prometheusUR fmt.Println() } - if showMetrics { + if wantMetrics { fmt.Printf("\nCurrent metrics: \n") - if err := ShowMetrics(prometheusURL, item, wantColor); err != nil { + if err := showMetrics(prometheusURL, item, wantColor); err != nil { return err } } diff --git a/cmd/crowdsec-cli/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go similarity index 92% rename from cmd/crowdsec-cli/utils_table.go rename to cmd/crowdsec-cli/clihub/utils_table.go index 6df16cd85f5..98f14341b10 100644 --- a/cmd/crowdsec-cli/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -1,4 +1,4 @@ -package main +package clihub import ( "fmt" @@ -22,7 +22,7 @@ func listHubItemTable(out io.Writer, wantColor string, title string, items []*cw } io.WriteString(out, title+"\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -35,7 +35,7 @@ func appsecMetricsTable(out io.Writer, wantColor string, itemName 
string, metric }) io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -55,7 +55,7 @@ func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metr }) io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { @@ -80,6 +80,6 @@ func parserMetricsTable(out io.Writer, wantColor string, itemName string, metric if showTable { io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") } } diff --git a/cmd/crowdsec-cli/clihubtest/clean.go b/cmd/crowdsec-cli/clihubtest/clean.go new file mode 100644 index 00000000000..e3b40b6bd57 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/clean.go @@ -0,0 +1,31 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliHubTest) newCleanCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "clean", + Short: "clean [test_name]", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/coverage.go b/cmd/crowdsec-cli/clihubtest/coverage.go new file mode 100644 index 00000000000..5a4f231caf5 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/coverage.go @@ -0,0 +1,166 @@ +package 
clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + "math" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +// getCoverage returns the coverage and the percentage of tests that passed +func getCoverage(show bool, getCoverageFunc func() ([]hubtest.Coverage, error)) ([]hubtest.Coverage, int, error) { + if !show { + return nil, 0, nil + } + + coverage, err := getCoverageFunc() + if err != nil { + return nil, 0, fmt.Errorf("while getting coverage: %w", err) + } + + tested := 0 + + for _, test := range coverage { + if test.TestsCount > 0 { + tested++ + } + } + + // keep coverage 0 if there's no tests? + percent := 0 + if len(coverage) > 0 { + percent = int(math.Round((float64(tested) / float64(len(coverage)) * 100))) + } + + return coverage, percent, nil +} + +func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAppsecCov bool, showOnlyPercent bool) error { + cfg := cli.cfg() + + // for this one we explicitly don't do for appsec + if err := HubTest.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %+v", err) + } + + var err error + + // if all are false (flag by default), show them + if !showParserCov && !showScenarioCov && !showAppsecCov { + showParserCov = true + showScenarioCov = true + showAppsecCov = true + } + + parserCoverage, parserCoveragePercent, err := getCoverage(showParserCov, HubTest.GetParsersCoverage) + if err != nil { + return err + } + + scenarioCoverage, scenarioCoveragePercent, err := getCoverage(showScenarioCov, HubTest.GetScenariosCoverage) + if err != nil { + return err + } + + appsecRuleCoverage, appsecRuleCoveragePercent, err := getCoverage(showAppsecCov, HubTest.GetAppsecCoverage) + if err != nil { + return err + } + + if showOnlyPercent { + switch { + case showParserCov: + fmt.Printf("parsers=%d%%", parserCoveragePercent) + case showScenarioCov: + fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) + case 
showAppsecCov: + fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) + } + + return nil + } + + switch cfg.Cscli.Output { + case "human": + if showParserCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Parser", "Status", "Number of tests"}, parserCoverage) + } + + if showScenarioCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Scenario", "Status", "Number of tests"}, parserCoverage) + } + + if showAppsecCov { + hubTestCoverageTable(color.Output, cfg.Cscli.Color, []string{"Appsec Rule", "Status", "Number of tests"}, parserCoverage) + } + + fmt.Println() + + if showParserCov { + fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) + } + + if showScenarioCov { + fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) + } + + if showAppsecCov { + fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) + } + case "json": + dump, err := json.MarshalIndent(parserCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + + dump, err = json.MarshalIndent(scenarioCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + + dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") + if err != nil { + return err + } + + fmt.Printf("%s", dump) + default: + return errors.New("only human/json output modes are supported") + } + + return nil +} + +func (cli *cliHubTest) newCoverageCmd() *cobra.Command { + var ( + showParserCov bool + showScenarioCov bool + showOnlyPercent bool + showAppsecCov bool + ) + + cmd := &cobra.Command{ + Use: "coverage", + Short: "coverage", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + return cli.coverage(showScenarioCov, showParserCov, showAppsecCov, showOnlyPercent) + }, + } + + cmd.PersistentFlags().BoolVar(&showOnlyPercent, "percent", false, "Show only percentages of coverage") + cmd.PersistentFlags().BoolVar(&showParserCov, "parsers", false, "Show only parsers coverage") + 
cmd.PersistentFlags().BoolVar(&showScenarioCov, "scenarios", false, "Show only scenarios coverage") + cmd.PersistentFlags().BoolVar(&showAppsecCov, "appsec", false, "Show only appsec coverage") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/create.go b/cmd/crowdsec-cli/clihubtest/create.go new file mode 100644 index 00000000000..3822bed8903 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/create.go @@ -0,0 +1,158 @@ +package clihubtest + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "text/template" + + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) newCreateCmd() *cobra.Command { + var ( + ignoreParsers bool + labels map[string]string + logType string + ) + + parsers := []string{} + postoverflows := []string{} + scenarios := []string{} + + cmd := &cobra.Command{ + Use: "create", + Short: "create [test_name]", + Example: `cscli hubtest create my-awesome-test --type syslog +cscli hubtest create my-nginx-custom-test --type nginx +cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios crowdsecurity/http-probing`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + testName := args[0] + testPath := filepath.Join(hubPtr.HubTestPath, testName) + if _, err := os.Stat(testPath); os.IsExist(err) { + return fmt.Errorf("test '%s' already exists in '%s', exiting", testName, testPath) + } + + if isAppsecTest { + logType = "appsec" + } + + if logType == "" { + return errors.New("please provide a type (--type) for the test") + } + + if err := os.MkdirAll(testPath, os.ModePerm); err != nil { + return fmt.Errorf("unable to create folder '%s': %+v", testPath, err) + } + + configFilePath := filepath.Join(testPath, "config.yaml") + + configFileData := &hubtest.HubTestItemConfig{} + if logType == "appsec" { + // create empty nuclei template file + nucleiFileName := testName + ".yaml" + nucleiFilePath := 
filepath.Join(testPath, nucleiFileName) + + nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755) + if err != nil { + return err + } + + ntpl := template.Must(template.New("nuclei").Parse(hubtest.TemplateNucleiFile)) + if ntpl == nil { + return errors.New("unable to parse nuclei template") + } + ntpl.ExecuteTemplate(nucleiFile, "nuclei", struct{ TestName string }{TestName: testName}) + nucleiFile.Close() + configFileData.AppsecRules = []string{"./appsec-rules//your_rule_here.yaml"} + configFileData.NucleiTemplate = nucleiFileName + fmt.Println() + fmt.Printf(" Test name : %s\n", testName) + fmt.Printf(" Test path : %s\n", testPath) + fmt.Printf(" Config File : %s\n", configFilePath) + fmt.Printf(" Nuclei Template : %s\n", nucleiFilePath) + } else { + // create empty log file + logFileName := testName + ".log" + logFilePath := filepath.Join(testPath, logFileName) + logFile, err := os.Create(logFilePath) + if err != nil { + return err + } + logFile.Close() + + // create empty parser assertion file + parserAssertFilePath := filepath.Join(testPath, hubtest.ParserAssertFileName) + parserAssertFile, err := os.Create(parserAssertFilePath) + if err != nil { + return err + } + parserAssertFile.Close() + // create empty scenario assertion file + scenarioAssertFilePath := filepath.Join(testPath, hubtest.ScenarioAssertFileName) + scenarioAssertFile, err := os.Create(scenarioAssertFilePath) + if err != nil { + return err + } + scenarioAssertFile.Close() + + parsers = append(parsers, "crowdsecurity/syslog-logs") + parsers = append(parsers, "crowdsecurity/dateparse-enrich") + + if len(scenarios) == 0 { + scenarios = append(scenarios, "") + } + + if len(postoverflows) == 0 { + postoverflows = append(postoverflows, "") + } + configFileData.Parsers = parsers + configFileData.Scenarios = scenarios + configFileData.PostOverflows = postoverflows + configFileData.LogFile = logFileName + configFileData.LogType = logType + configFileData.IgnoreParsers = 
ignoreParsers + configFileData.Labels = labels + fmt.Println() + fmt.Printf(" Test name : %s\n", testName) + fmt.Printf(" Test path : %s\n", testPath) + fmt.Printf(" Log file : %s (please fill it with logs)\n", logFilePath) + fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) + fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) + fmt.Printf(" Configuration File : %s (please fill it with parsers, scenarios...)\n", configFilePath) + } + + fd, err := os.Create(configFilePath) + if err != nil { + return fmt.Errorf("open: %w", err) + } + data, err := yaml.Marshal(configFileData) + if err != nil { + return fmt.Errorf("serialize: %w", err) + } + _, err = fd.Write(data) + if err != nil { + return fmt.Errorf("write: %w", err) + } + if err := fd.Close(); err != nil { + return fmt.Errorf("close: %w", err) + } + + return nil + }, + } + + cmd.PersistentFlags().StringVarP(&logType, "type", "t", "", "Log type of the test") + cmd.Flags().StringSliceVarP(&parsers, "parsers", "p", parsers, "Parsers to add to test") + cmd.Flags().StringSliceVar(&postoverflows, "postoverflows", postoverflows, "Postoverflows to add to test") + cmd.Flags().StringSliceVarP(&scenarios, "scenarios", "s", scenarios, "Scenarios to add to test") + cmd.PersistentFlags().BoolVar(&ignoreParsers, "ignore-parsers", false, "Don't run test on parsers") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/eval.go b/cmd/crowdsec-cli/clihubtest/eval.go new file mode 100644 index 00000000000..83e9eae9c15 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/eval.go @@ -0,0 +1,44 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliHubTest) newEvalCmd() *cobra.Command { + var evalExpression string + + cmd := &cobra.Command{ + Use: "eval", + Short: "eval [test_name]", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, 
testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("can't load test: %+v", err) + } + + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + return fmt.Errorf("can't load test results from '%s': %+v", test.ParserResultFile, err) + } + + output, err := test.ParserAssert.EvalExpression(evalExpression) + if err != nil { + return err + } + + fmt.Print(output) + } + + return nil + }, + } + + cmd.PersistentFlags().StringVarP(&evalExpression, "expr", "e", "", "Expression to eval") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go new file mode 100644 index 00000000000..dbe10fa7ec0 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -0,0 +1,76 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/dumps" +) + +func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error { + test, err := HubTest.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("can't load test: %+v", err) + } + + err = test.ParserAssert.LoadTest(test.ParserResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { + return fmt.Errorf("unable to load parser result after run: %w", err) + } + } + + err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) + if err != nil { + if err = test.Run(); err != nil { + return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + } + + if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { + return fmt.Errorf("unable to load scenario result after run: %w", err) + } + } + + opts := dumps.DumpOpts{ + Details: details, + SkipOk: skipOk, + } + + dumps.DumpTree(*test.ParserAssert.TestData, 
*test.ScenarioAssert.PourData, opts) + + return nil +} + +func (cli *cliHubTest) newExplainCmd() *cobra.Command { + var ( + details bool + skipOk bool + ) + + cmd := &cobra.Command{ + Use: "explain", + Short: "explain [test_name]", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + if err := cli.explain(testName, details, skipOk); err != nil { + return err + } + } + + return nil + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&details, "verbose", "v", false, "Display individual changes") + flags.BoolVar(&skipOk, "failures", false, "Only show failed lines") + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/hubtest.go b/cmd/crowdsec-cli/clihubtest/hubtest.go new file mode 100644 index 00000000000..f4cfed2e1cb --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/hubtest.go @@ -0,0 +1,81 @@ +package clihubtest + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +type configGetter func() *csconfig.Config + +var ( + HubTest hubtest.HubTest + HubAppsecTests hubtest.HubTest + hubPtr *hubtest.HubTest + isAppsecTest bool +) + +type cliHubTest struct { + cfg configGetter +} + +func New(cfg configGetter) *cliHubTest { + return &cliHubTest{ + cfg: cfg, + } +} + +func (cli *cliHubTest) NewCommand() *cobra.Command { + var ( + hubPath string + crowdsecPath string + cscliPath string + ) + + cmd := &cobra.Command{ + Use: "hubtest", + Short: "Run functional tests on hub configurations", + Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", + Args: cobra.NoArgs, + DisableAutoGenTag: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + var err error + HubTest, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, false) + if err != nil { + return fmt.Errorf("unable to load hubtest: %+v", err) + } + + HubAppsecTests, err = 
hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, true) + if err != nil { + return fmt.Errorf("unable to load appsec specific hubtest: %+v", err) + } + + // commands will use the hubPtr, will point to the default hubTest object, or the one dedicated to appsec tests + hubPtr = &HubTest + if isAppsecTest { + hubPtr = &HubAppsecTests + } + + return nil + }, + } + + cmd.PersistentFlags().StringVar(&hubPath, "hub", ".", "Path to hub folder") + cmd.PersistentFlags().StringVar(&crowdsecPath, "crowdsec", "crowdsec", "Path to crowdsec") + cmd.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") + cmd.PersistentFlags().BoolVar(&isAppsecTest, "appsec", false, "Command relates to appsec tests") + + cmd.AddCommand(cli.newCreateCmd()) + cmd.AddCommand(cli.newRunCmd()) + cmd.AddCommand(cli.newCleanCmd()) + cmd.AddCommand(cli.newInfoCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newCoverageCmd()) + cmd.AddCommand(cli.newEvalCmd()) + cmd.AddCommand(cli.newExplainCmd()) + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/info.go b/cmd/crowdsec-cli/clihubtest/info.go new file mode 100644 index 00000000000..a5d760eea01 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/info.go @@ -0,0 +1,44 @@ +package clihubtest + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) newInfoCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "info", + Short: "info [test_name]", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + fmt.Println() + fmt.Printf(" Test name : %s\n", test.Name) + fmt.Printf(" Test path : %s\n", test.Path) + if isAppsecTest { + fmt.Printf(" Nuclei Template : %s\n", test.Config.NucleiTemplate) + 
fmt.Printf(" Appsec Rules : %s\n", strings.Join(test.Config.AppsecRules, ", ")) + } else { + fmt.Printf(" Log file : %s\n", filepath.Join(test.Path, test.Config.LogFile)) + fmt.Printf(" Parser assertion file : %s\n", filepath.Join(test.Path, hubtest.ParserAssertFileName)) + fmt.Printf(" Scenario assertion file : %s\n", filepath.Join(test.Path, hubtest.ScenarioAssertFileName)) + } + fmt.Printf(" Configuration File : %s\n", filepath.Join(test.Path, "config.yaml")) + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/list.go b/cmd/crowdsec-cli/clihubtest/list.go new file mode 100644 index 00000000000..3e76824a18e --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/list.go @@ -0,0 +1,42 @@ +package clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/fatih/color" + "github.com/spf13/cobra" +) + +func (cli *cliHubTest) newListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "list", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + + if err := hubPtr.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %w", err) + } + + switch cfg.Cscli.Output { + case "human": + hubTestListTable(color.Output, cfg.Cscli.Color, hubPtr.Tests) + case "json": + j, err := json.MarshalIndent(hubPtr.Tests, " ", " ") + if err != nil { + return err + } + fmt.Println(string(j)) + default: + return errors.New("only human/json output modes are supported") + } + + return nil + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go new file mode 100644 index 00000000000..31cceb81884 --- /dev/null +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -0,0 +1,213 @@ +package clihubtest + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/AlecAivazis/survey/v2" + "github.com/fatih/color" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + 
"github.com/crowdsecurity/crowdsec/pkg/emoji" + "github.com/crowdsecurity/crowdsec/pkg/hubtest" +) + +func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost string, args []string) error { + cfg := cli.cfg() + + if !runAll && len(args) == 0 { + return errors.New("please provide test to run or --all flag") + } + + hubPtr.NucleiTargetHost = nucleiTargetHost + hubPtr.AppSecHost = appSecHost + + if runAll { + if err := hubPtr.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %+v", err) + } + } else { + for _, testName := range args { + _, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + } + } + + // set timezone to avoid DST issues + os.Setenv("TZ", "UTC") + + for _, test := range hubPtr.Tests { + if cfg.Cscli.Output == "human" { + log.Infof("Running test '%s'", test.Name) + } + + err := test.Run() + if err != nil { + log.Errorf("running test '%s' failed: %+v", test.Name, err) + } + } + + return nil +} + +func printParserFailures(test *hubtest.HubTestItem) { + if len(test.ParserAssert.Fails) == 0 { + return + } + + fmt.Println() + log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) + + for _, fail := range test.ParserAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + + for key, value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + + fmt.Println() + } +} + +func printScenarioFailures(test *hubtest.HubTestItem) { + if len(test.ScenarioAssert.Fails) == 0 { + return + } + + fmt.Println() + log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) + + for _, fail := range test.ScenarioAssert.Fails { + fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) + fmt.Printf(" Actual expression values:\n") + + for key, 
value := range fail.Debug { + fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) + } + + fmt.Println() + } +} + +func (cli *cliHubTest) newRunCmd() *cobra.Command { + var ( + noClean bool + runAll bool + forceClean bool + nucleiTargetHost string + appSecHost string + ) + + cmd := &cobra.Command{ + Use: "run", + Short: "run [test_name]", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + return cli.run(runAll, nucleiTargetHost, appSecHost, args) + }, + PersistentPostRunE: func(_ *cobra.Command, _ []string) error { + cfg := cli.cfg() + + success := true + testResult := make(map[string]bool) + for _, test := range hubPtr.Tests { + if test.AutoGen && !isAppsecTest { + if test.ParserAssert.AutoGenAssert { + log.Warningf("Assert file '%s' is empty, generating assertion:", test.ParserAssert.File) + fmt.Println() + fmt.Println(test.ParserAssert.AutoGenAssertData) + } + if test.ScenarioAssert.AutoGenAssert { + log.Warningf("Assert file '%s' is empty, generating assertion:", test.ScenarioAssert.File) + fmt.Println() + fmt.Println(test.ScenarioAssert.AutoGenAssertData) + } + if !noClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + + return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) + } + testResult[test.Name] = test.Success + if test.Success { + if cfg.Cscli.Output == "human" { + log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) + } + if !noClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + } else { + success = false + cleanTestEnv := false + if cfg.Cscli.Output == "human" { + printParserFailures(test) + printScenarioFailures(test) + if !forceClean && !noClean { + prompt := &survey.Confirm{ + Message: fmt.Sprintf("\nDo you want to remove runtime folder for test 
'%s'? (default: Yes)", test.Name), + Default: true, + } + if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { + return fmt.Errorf("unable to ask to remove runtime folder: %w", err) + } + } + } + + if cleanTestEnv || forceClean { + if err := test.Clean(); err != nil { + return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + } + } + } + } + + switch cfg.Cscli.Output { + case "human": + hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) + case "json": + jsonResult := make(map[string][]string, 0) + jsonResult["success"] = make([]string, 0) + jsonResult["fail"] = make([]string, 0) + for testName, success := range testResult { + if success { + jsonResult["success"] = append(jsonResult["success"], testName) + } else { + jsonResult["fail"] = append(jsonResult["fail"], testName) + } + } + jsonStr, err := json.Marshal(jsonResult) + if err != nil { + return fmt.Errorf("unable to json test result: %w", err) + } + fmt.Println(string(jsonStr)) + default: + return errors.New("only human/json output modes are supported") + } + + if !success { + return errors.New("some tests failed") + } + + return nil + }, + } + + cmd.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") + cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") + cmd.Flags().StringVar(&nucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") + cmd.Flags().StringVar(&appSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") + cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") + + return cmd +} diff --git a/cmd/crowdsec-cli/hubtest_table.go b/cmd/crowdsec-cli/clihubtest/table.go similarity index 50% rename from cmd/crowdsec-cli/hubtest_table.go rename to cmd/crowdsec-cli/clihubtest/table.go index 1fa0f990be2..2a105a1f5c1 100644 --- a/cmd/crowdsec-cli/hubtest_table.go +++ b/cmd/crowdsec-cli/clihubtest/table.go @@ -1,4 +1,4 @@ -package main 
+package clihubtest import ( "fmt" @@ -42,51 +42,9 @@ func hubTestListTable(out io.Writer, wantColor string, tests []*hubtest.HubTestI t.Render() } -func hubTestParserCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { +func hubTestCoverageTable(out io.Writer, wantColor string, headers []string, coverage []hubtest.Coverage) { t := cstable.NewLight(out, wantColor) - t.SetHeaders("Parser", "Status", "Number of tests") - t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - parserTested := 0 - - for _, test := range coverage { - status := emoji.RedCircle - if test.TestsCount > 0 { - status = emoji.GreenCircle - parserTested++ - } - - t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) - } - - t.Render() -} - -func hubTestAppsecRuleCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { - t := cstable.NewLight(out, wantColor) - t.SetHeaders("Appsec Rule", "Status", "Number of tests") - t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) - - parserTested := 0 - - for _, test := range coverage { - status := emoji.RedCircle - if test.TestsCount > 0 { - status = emoji.GreenCircle - parserTested++ - } - - t.AddRow(test.Name, status, fmt.Sprintf("%d times (across %d tests)", test.TestsCount, len(test.PresentIn))) - } - - t.Render() -} - -func hubTestScenarioCoverageTable(out io.Writer, wantColor string, coverage []hubtest.Coverage) { - t := cstable.NewLight(out, wantColor) - t.SetHeaders("Scenario", "Status", "Number of tests") + t.SetHeaders(headers...) 
t.SetHeaderAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) t.SetAlignment(text.AlignLeft, text.AlignLeft, text.AlignLeft) diff --git a/cmd/crowdsec-cli/hubappsec.go b/cmd/crowdsec-cli/cliitem/appsec.go similarity index 93% rename from cmd/crowdsec-cli/hubappsec.go rename to cmd/crowdsec-cli/cliitem/appsec.go index 1df3212f941..44afa2133bd 100644 --- a/cmd/crowdsec-cli/hubappsec.go +++ b/cmd/crowdsec-cli/cliitem/appsec.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "fmt" @@ -13,7 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIAppsecConfig(cfg configGetter) *cliItem { +func NewAppsecConfig(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.APPSEC_CONFIGS, @@ -47,7 +47,7 @@ cscli appsec-configs list crowdsecurity/vpatch`, } } -func NewCLIAppsecRule(cfg configGetter) *cliItem { +func NewAppsecRule(cfg configGetter) *cliItem { inspectDetail := func(item *cwhub.Item) error { // Only show the converted rules in human mode if cfg().Cscli.Output != "human" { @@ -62,7 +62,7 @@ func NewCLIAppsecRule(cfg configGetter) *cliItem { } if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { - return fmt.Errorf("unable to unmarshal yaml file %s: %w", item.State.LocalPath, err) + return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) } for _, ruleType := range appsec_rule.SupportedTypes() { diff --git a/cmd/crowdsec-cli/hubcollection.go b/cmd/crowdsec-cli/cliitem/collection.go similarity index 95% rename from cmd/crowdsec-cli/hubcollection.go rename to cmd/crowdsec-cli/cliitem/collection.go index 655b36eb1b8..ea91c1e537a 100644 --- a/cmd/crowdsec-cli/hubcollection.go +++ b/cmd/crowdsec-cli/cliitem/collection.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLICollection(cfg configGetter) *cliItem { +func NewCollection(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.COLLECTIONS, diff --git 
a/cmd/crowdsec-cli/hubcontext.go b/cmd/crowdsec-cli/cliitem/context.go similarity index 94% rename from cmd/crowdsec-cli/hubcontext.go rename to cmd/crowdsec-cli/cliitem/context.go index 2a777327379..7d110b8203d 100644 --- a/cmd/crowdsec-cli/hubcontext.go +++ b/cmd/crowdsec-cli/cliitem/context.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIContext(cfg configGetter) *cliItem { +func NewContext(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.CONTEXTS, diff --git a/cmd/crowdsec-cli/hubscenario.go b/cmd/crowdsec-cli/cliitem/hubscenario.go similarity index 95% rename from cmd/crowdsec-cli/hubscenario.go rename to cmd/crowdsec-cli/cliitem/hubscenario.go index 4434b9a2c45..a5e854b3c82 100644 --- a/cmd/crowdsec-cli/hubscenario.go +++ b/cmd/crowdsec-cli/cliitem/hubscenario.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIScenario(cfg configGetter) *cliItem { +func NewScenario(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.SCENARIOS, diff --git a/cmd/crowdsec-cli/itemcli.go b/cmd/crowdsec-cli/cliitem/item.go similarity index 93% rename from cmd/crowdsec-cli/itemcli.go rename to cmd/crowdsec-cli/cliitem/item.go index 64c18ae89b1..28828eb9c95 100644 --- a/cmd/crowdsec-cli/itemcli.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "cmp" @@ -15,7 +15,10 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -28,6 +31,8 @@ type cliHelp struct { example string } +type configGetter func() *csconfig.Config + type cliItem struct { cfg configGetter name 
string // plural, as used in the hub index @@ -78,7 +83,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool return errors.New(msg) } - log.Errorf(msg) + log.Error(msg) continue } @@ -92,7 +97,7 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool } } - log.Infof(ReloadMessage()) + log.Info(reload.Message) return nil } @@ -147,19 +152,14 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error } if all { - getter := hub.GetInstalledItemsByType + itemGetter := hub.GetInstalledByType if purge { - getter = hub.GetItemsByType - } - - items, err := getter(cli.name) - if err != nil { - return err + itemGetter = hub.GetItemsByType } removed := 0 - for _, item := range items { + for _, item := range itemGetter(cli.name, true) { didRemove, err := item.Remove(purge, force) if err != nil { return err @@ -175,7 +175,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Infof(ReloadMessage()) + log.Info(reload.Message) } return nil @@ -217,7 +217,7 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error log.Infof("Removed %d %s", removed, cli.name) if removed > 0 { - log.Infof(ReloadMessage()) + log.Info(reload.Message) } return nil @@ -262,14 +262,9 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b } if all { - items, err := hub.GetInstalledItemsByType(cli.name) - if err != nil { - return err - } - updated := 0 - for _, item := range items { + for _, item := range hub.GetInstalledByType(cli.name, true) { didUpdate, err := item.Upgrade(ctx, force) if err != nil { return err @@ -283,7 +278,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b log.Infof("Updated %d %s", updated, cli.name) if updated > 0 { - log.Infof(ReloadMessage()) + log.Info(reload.Message) } return nil @@ -314,7 +309,7 @@ func 
(cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b } if updated > 0 { - log.Infof(ReloadMessage()) + log.Info(reload.Message) } return nil @@ -381,7 +376,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff continue } - if err = inspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { + if err = clihub.InspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { return err } @@ -437,12 +432,12 @@ func (cli cliItem) list(args []string, all bool) error { items := make(map[string][]*cwhub.Item) - items[cli.name], err = selectItems(hub, cli.name, args, !all) + items[cli.name], err = clihub.SelectItems(hub, cli.name, args, !all) if err != nil { return err } - return listItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, false, cfg.Cscli.Output) + return clihub.ListItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, false, cfg.Cscli.Output) } func (cli cliItem) newListCmd() *cobra.Command { diff --git a/cmd/crowdsec-cli/hubparser.go b/cmd/crowdsec-cli/cliitem/parser.go similarity index 95% rename from cmd/crowdsec-cli/hubparser.go rename to cmd/crowdsec-cli/cliitem/parser.go index cc856cbedb9..bc1d96bdaf0 100644 --- a/cmd/crowdsec-cli/hubparser.go +++ b/cmd/crowdsec-cli/cliitem/parser.go @@ -1,10 +1,10 @@ -package main +package cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIParser(cfg configGetter) *cliItem { +func NewParser(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.PARSERS, diff --git a/cmd/crowdsec-cli/hubpostoverflow.go b/cmd/crowdsec-cli/cliitem/postoverflow.go similarity index 95% rename from cmd/crowdsec-cli/hubpostoverflow.go rename to cmd/crowdsec-cli/cliitem/postoverflow.go index 3fd45fd113d..ea53aef327d 100644 --- a/cmd/crowdsec-cli/hubpostoverflow.go +++ b/cmd/crowdsec-cli/cliitem/postoverflow.go @@ -1,10 +1,10 @@ -package main +package 
cliitem import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func NewCLIPostOverflow(cfg configGetter) *cliItem { +func NewPostOverflow(cfg configGetter) *cliItem { return &cliItem{ cfg: cfg, name: cwhub.POSTOVERFLOWS, diff --git a/cmd/crowdsec-cli/item_suggest.go b/cmd/crowdsec-cli/cliitem/suggest.go similarity index 77% rename from cmd/crowdsec-cli/item_suggest.go rename to cmd/crowdsec-cli/cliitem/suggest.go index 0ea656549ba..5b080722af9 100644 --- a/cmd/crowdsec-cli/item_suggest.go +++ b/cmd/crowdsec-cli/cliitem/suggest.go @@ -1,4 +1,4 @@ -package main +package cliitem import ( "fmt" @@ -19,7 +19,7 @@ func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) str score := 100 nearest := "" - for _, item := range hub.GetItemMap(itemType) { + for _, item := range hub.GetItemsByType(itemType, false) { d := levenshtein.Distance(itemName, item.Name, nil) if d < score { score = d @@ -44,7 +44,7 @@ func compAllItems(itemType string, args []string, toComplete string, cfg configG comp := make([]string, 0) - for _, item := range hub.GetItemMap(itemType) { + for _, item := range hub.GetItemsByType(itemType, false) { if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { comp = append(comp, item.Name) } @@ -61,22 +61,14 @@ func compInstalledItems(itemType string, args []string, toComplete string, cfg c return nil, cobra.ShellCompDirectiveDefault } - items, err := hub.GetInstalledNamesByType(itemType) - if err != nil { - cobra.CompDebugln(fmt.Sprintf("list installed %s err: %s", itemType, err), true) - return nil, cobra.ShellCompDirectiveDefault - } + items := hub.GetInstalledByType(itemType, true) comp := make([]string, 0) - if toComplete != "" { - for _, item := range items { - if strings.Contains(item, toComplete) { - comp = append(comp, item) - } + for _, item := range items { + if strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) } - } else { - comp = items } cobra.CompDebugln(fmt.Sprintf("%s: 
%+v", itemType, comp), true) diff --git a/cmd/crowdsec-cli/lapi.go b/cmd/crowdsec-cli/clilapi/context.go similarity index 59% rename from cmd/crowdsec-cli/lapi.go rename to cmd/crowdsec-cli/clilapi/context.go index c0543f98733..20ceb2b9596 100644 --- a/cmd/crowdsec-cli/lapi.go +++ b/cmd/crowdsec-cli/clilapi/context.go @@ -1,261 +1,22 @@ -package main +package clilapi import ( - "context" "errors" "fmt" - "net/url" - "os" "slices" "sort" "strings" - "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" - "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/parser" ) -const LAPIURLPrefix = "v1" - -type cliLapi struct { - cfg configGetter -} - -func NewCLILapi(cfg configGetter) *cliLapi { - return &cliLapi{ - cfg: cfg, - } -} - -// QueryLAPIStatus checks if the Local API is reachable, and if the credentials are correct -func QueryLAPIStatus(hub *cwhub.Hub, credURL string, login string, password string) error { - apiURL, err := url.Parse(credURL) - if err != nil { - return fmt.Errorf("parsing api url: %w", err) - } - - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return fmt.Errorf("failed to get scenarios: %w", err) - } - - client, err := apiclient.NewDefaultClient(apiURL, - LAPIURLPrefix, - cwversion.UserAgent(), - nil) - if err != nil { - return fmt.Errorf("init default client: %w", err) - } - - pw := strfmt.Password(password) - - t := models.WatcherAuthRequest{ - MachineID: &login, - Password: &pw, - Scenarios: scenarios, - } - - _, _, err = 
client.Auth.AuthenticateWatcher(context.Background(), t) - if err != nil { - return err - } - - return nil -} - -func (cli *cliLapi) status() error { - cfg := cli.cfg() - - cred := cfg.API.Client.Credentials - - hub, err := require.Hub(cfg, nil, nil) - if err != nil { - return err - } - - log.Infof("Loaded credentials from %s", cfg.API.Client.CredentialsFilePath) - log.Infof("Trying to authenticate with username %s on %s", cred.Login, cred.URL) - - if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { - return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) - } - - log.Infof("You can successfully interact with Local API (LAPI)") - - return nil -} - -func (cli *cliLapi) register(apiURL string, outputFile string, machine string, token string) error { - var err error - - lapiUser := machine - cfg := cli.cfg() - - if lapiUser == "" { - lapiUser, err = generateID("") - if err != nil { - return fmt.Errorf("unable to generate machine id: %w", err) - } - } - - password := strfmt.Password(generatePassword(passwordLength)) - - apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) - if err != nil { - return fmt.Errorf("parsing api url: %w", err) - } - - _, err = apiclient.RegisterClient(&apiclient.Config{ - MachineID: lapiUser, - Password: password, - UserAgent: cwversion.UserAgent(), - RegistrationToken: token, - URL: apiurl, - VersionPrefix: LAPIURLPrefix, - }, nil) - if err != nil { - return fmt.Errorf("api client register: %w", err) - } - - log.Printf("Successfully registered to Local API (LAPI)") - - var dumpFile string - - if outputFile != "" { - dumpFile = outputFile - } else if cfg.API.Client.CredentialsFilePath != "" { - dumpFile = cfg.API.Client.CredentialsFilePath - } else { - dumpFile = "" - } - - apiCfg := cfg.API.Client.Credentials - apiCfg.Login = lapiUser - apiCfg.Password = password.String() - - if apiURL != "" { - apiCfg.URL = apiURL - } - - apiConfigDump, err := yaml.Marshal(apiCfg) - if err != nil { - return 
fmt.Errorf("unable to marshal api credentials: %w", err) - } - - if dumpFile != "" { - err = os.WriteFile(dumpFile, apiConfigDump, 0o600) - if err != nil { - return fmt.Errorf("write api credentials to '%s' failed: %w", dumpFile, err) - } - - log.Printf("Local API credentials written to '%s'", dumpFile) - } else { - fmt.Printf("%s\n", string(apiConfigDump)) - } - - log.Warning(ReloadMessage()) - - return nil -} - -// prepareAPIURL checks/fixes a LAPI connection url (http, https or socket) and returns an URL struct -func prepareAPIURL(clientCfg *csconfig.LocalApiClientCfg, apiURL string) (*url.URL, error) { - if apiURL == "" { - if clientCfg == nil || clientCfg.Credentials == nil || clientCfg.Credentials.URL == "" { - return nil, errors.New("no Local API URL. Please provide it in your configuration or with the -u parameter") - } - - apiURL = clientCfg.Credentials.URL - } - - // URL needs to end with /, but user doesn't care - if !strings.HasSuffix(apiURL, "/") { - apiURL += "/" - } - - // URL needs to start with http://, but user doesn't care - if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") && !strings.HasPrefix(apiURL, "/") { - apiURL = "http://" + apiURL - } - - return url.Parse(apiURL) -} - -func (cli *cliLapi) newStatusCmd() *cobra.Command { - cmdLapiStatus := &cobra.Command{ - Use: "status", - Short: "Check authentication to Local API (LAPI)", - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.status() - }, - } - - return cmdLapiStatus -} - -func (cli *cliLapi) newRegisterCmd() *cobra.Command { - var ( - apiURL string - outputFile string - machine string - token string - ) - - cmd := &cobra.Command{ - Use: "register", - Short: "Register a machine to Local API (LAPI)", - Long: `Register your machine to the Local API (LAPI). 
-Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.register(apiURL, outputFile, machine, token) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)") - flags.StringVarP(&outputFile, "file", "f", "", "output file destination") - flags.StringVar(&machine, "machine", "", "Name of the machine to register with") - flags.StringVar(&token, "token", "", "Auto registration token to use") - - return cmd -} - -func (cli *cliLapi) NewCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "lapi [action]", - Short: "Manage interaction with Local API (LAPI)", - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - if err := cli.cfg().LoadAPIClient(); err != nil { - return fmt.Errorf("loading api client: %w", err) - } - return nil - }, - } - - cmd.AddCommand(cli.newRegisterCmd()) - cmd.AddCommand(cli.newStatusCmd()) - cmd.AddCommand(cli.newContextCmd()) - - return cmd -} - func (cli *cliLapi) addContext(key string, values []string) error { cfg := cli.cfg() @@ -511,14 +272,14 @@ func detectStaticField(grokStatics []parser.ExtraField) []string { for _, static := range grokStatics { if static.Parsed != "" { - fieldName := fmt.Sprintf("evt.Parsed.%s", static.Parsed) + fieldName := "evt.Parsed." + static.Parsed if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } } if static.Meta != "" { - fieldName := fmt.Sprintf("evt.Meta.%s", static.Meta) + fieldName := "evt.Meta." 
+ static.Meta if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -544,7 +305,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { if node.Grok.RunTimeRegexp != nil { for _, capturedField := range node.Grok.RunTimeRegexp.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -556,7 +317,7 @@ func detectNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { // ignore error (parser does not exist?) if err == nil { for _, capturedField := range grokCompiled.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -591,7 +352,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { for _, subnode := range node.LeavesNodes { if subnode.Grok.RunTimeRegexp != nil { for _, capturedField := range subnode.Grok.RunTimeRegexp.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." + capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } @@ -603,7 +364,7 @@ func detectSubNode(node parser.Node, parserCTX parser.UnixParserCtx) []string { if err == nil { // ignore error (parser does not exist?) for _, capturedField := range grokCompiled.Names() { - fieldName := fmt.Sprintf("evt.Parsed.%s", capturedField) + fieldName := "evt.Parsed." 
+ capturedField if !slices.Contains(ret, fieldName) { ret = append(ret, fieldName) } diff --git a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go new file mode 100644 index 00000000000..01341330ae8 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -0,0 +1,42 @@ +package clilapi + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" +) + +type configGetter = func() *csconfig.Config + +type cliLapi struct { + cfg configGetter +} + +func New(cfg configGetter) *cliLapi { + return &cliLapi{ + cfg: cfg, + } +} + +func (cli *cliLapi) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "lapi [action]", + Short: "Manage interaction with Local API (LAPI)", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) error { + if err := cli.cfg().LoadAPIClient(); err != nil { + return fmt.Errorf("loading api client: %w", err) + } + return nil + }, + } + + cmd.AddCommand(cli.newRegisterCmd()) + cmd.AddCommand(cli.newStatusCmd()) + cmd.AddCommand(cli.newContextCmd()) + + return cmd +} diff --git a/cmd/crowdsec-cli/clilapi/register.go b/cmd/crowdsec-cli/clilapi/register.go new file mode 100644 index 00000000000..4c9b0f39903 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/register.go @@ -0,0 +1,117 @@ +package clilapi + +import ( + "context" + "fmt" + "os" + + "github.com/go-openapi/strfmt" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" +) + +func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile string, machine string, token string) error { + var err error + + lapiUser := machine + cfg := cli.cfg() + + if lapiUser == "" { + lapiUser, err = idgen.GenerateMachineID("") + if err != nil { + return fmt.Errorf("unable to 
generate machine id: %w", err) + } + } + + password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) + + apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) + if err != nil { + return fmt.Errorf("parsing api url: %w", err) + } + + _, err = apiclient.RegisterClient(ctx, &apiclient.Config{ + MachineID: lapiUser, + Password: password, + RegistrationToken: token, + URL: apiurl, + VersionPrefix: LAPIURLPrefix, + }, nil) + if err != nil { + return fmt.Errorf("api client register: %w", err) + } + + log.Printf("Successfully registered to Local API (LAPI)") + + var dumpFile string + + if outputFile != "" { + dumpFile = outputFile + } else if cfg.API.Client.CredentialsFilePath != "" { + dumpFile = cfg.API.Client.CredentialsFilePath + } else { + dumpFile = "" + } + + apiCfg := cfg.API.Client.Credentials + apiCfg.Login = lapiUser + apiCfg.Password = password.String() + + if apiURL != "" { + apiCfg.URL = apiURL + } + + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + return fmt.Errorf("unable to serialize api credentials: %w", err) + } + + if dumpFile != "" { + err = os.WriteFile(dumpFile, apiConfigDump, 0o600) + if err != nil { + return fmt.Errorf("write api credentials to '%s' failed: %w", dumpFile, err) + } + + log.Printf("Local API credentials written to '%s'", dumpFile) + } else { + fmt.Printf("%s\n", string(apiConfigDump)) + } + + log.Warning(reload.Message) + + return nil +} + +func (cli *cliLapi) newRegisterCmd() *cobra.Command { + var ( + apiURL string + outputFile string + machine string + token string + ) + + cmd := &cobra.Command{ + Use: "register", + Short: "Register a machine to Local API (LAPI)", + Long: `Register your machine to the Local API (LAPI). 
+Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.register(cmd.Context(), apiURL, outputFile, machine, token) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&apiURL, "url", "u", "", "URL of the API (ie. http://127.0.0.1)") + flags.StringVarP(&outputFile, "file", "f", "", "output file destination") + flags.StringVar(&machine, "machine", "", "Name of the machine to register with") + flags.StringVar(&token, "token", "", "Auto registration token to use") + + return cmd +} diff --git a/cmd/crowdsec-cli/clilapi/status.go b/cmd/crowdsec-cli/clilapi/status.go new file mode 100644 index 00000000000..6ff88834602 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/status.go @@ -0,0 +1,115 @@ +package clilapi + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "strings" + + "github.com/fatih/color" + "github.com/go-openapi/strfmt" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +const LAPIURLPrefix = "v1" + +// queryLAPIStatus checks if the Local API is reachable, and if the credentials are correct. 
+func queryLAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, error) { + apiURL, err := url.Parse(credURL) + if err != nil { + return false, err + } + + client, err := apiclient.NewDefaultClient(apiURL, + LAPIURLPrefix, + "", + nil) + if err != nil { + return false, err + } + + pw := strfmt.Password(password) + + itemsForAPI := hub.GetInstalledListForAPI() + + t := models.WatcherAuthRequest{ + MachineID: &login, + Password: &pw, + Scenarios: itemsForAPI, + } + + _, _, err = client.Auth.AuthenticateWatcher(ctx, t) + if err != nil { + return false, err + } + + return true, nil +} + +func (cli *cliLapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { + cfg := cli.cfg() + + cred := cfg.API.Client.Credentials + + fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Client.CredentialsFilePath) + fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) + + _, err := queryLAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) + if err != nil { + return fmt.Errorf("failed to authenticate to Local API (LAPI): %w", err) + } + + fmt.Fprintf(out, "You can successfully interact with Local API (LAPI)\n") + + return nil +} + +// prepareAPIURL checks/fixes a LAPI connection url (http, https or socket) and returns an URL struct +func prepareAPIURL(clientCfg *csconfig.LocalApiClientCfg, apiURL string) (*url.URL, error) { + if apiURL == "" { + if clientCfg == nil || clientCfg.Credentials == nil || clientCfg.Credentials.URL == "" { + return nil, errors.New("no Local API URL. 
Please provide it in your configuration or with the -u parameter") + } + + apiURL = clientCfg.Credentials.URL + } + + // URL needs to end with /, but user doesn't care + if !strings.HasSuffix(apiURL, "/") { + apiURL += "/" + } + + // URL needs to start with http://, but user doesn't care + if !strings.HasPrefix(apiURL, "http://") && !strings.HasPrefix(apiURL, "https://") && !strings.HasPrefix(apiURL, "/") { + apiURL = "http://" + apiURL + } + + return url.Parse(apiURL) +} + +func (cli *cliLapi) newStatusCmd() *cobra.Command { + cmdLapiStatus := &cobra.Command{ + Use: "status", + Short: "Check authentication to Local API (LAPI)", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + hub, err := require.Hub(cli.cfg(), nil, nil) + if err != nil { + return err + } + + return cli.Status(cmd.Context(), color.Output, hub) + }, + } + + return cmdLapiStatus +} diff --git a/cmd/crowdsec-cli/lapi_test.go b/cmd/crowdsec-cli/clilapi/status_test.go similarity index 98% rename from cmd/crowdsec-cli/lapi_test.go rename to cmd/crowdsec-cli/clilapi/status_test.go index 018ecad8118..caf986d847a 100644 --- a/cmd/crowdsec-cli/lapi_test.go +++ b/cmd/crowdsec-cli/clilapi/status_test.go @@ -1,4 +1,4 @@ -package main +package clilapi import ( "testing" diff --git a/cmd/crowdsec-cli/clilapi/utils.go b/cmd/crowdsec-cli/clilapi/utils.go new file mode 100644 index 00000000000..e3ec65f2145 --- /dev/null +++ b/cmd/crowdsec-cli/clilapi/utils.go @@ -0,0 +1,24 @@ +package clilapi + +func removeFromSlice(val string, slice []string) []string { + var i int + var value string + + valueFound := false + + // get the index + for i, value = range slice { + if value == val { + valueFound = true + break + } + } + + if valueFound { + slice[i] = slice[len(slice)-1] + slice[len(slice)-1] = "" + slice = slice[:len(slice)-1] + } + + return slice +} diff --git a/cmd/crowdsec-cli/climachine/add.go b/cmd/crowdsec-cli/climachine/add.go new file mode 100644 
index 00000000000..afddb4e4b65 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/add.go @@ -0,0 +1,152 @@ +package climachine + +import ( + "context" + "errors" + "fmt" + "os" + + "github.com/AlecAivazis/survey/v2" + "github.com/go-openapi/strfmt" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { + var ( + err error + machineID string + ) + + // create machineID if not specified by user + if len(args) == 0 { + if !autoAdd { + return errors.New("please specify a machine name to add, or use --auto") + } + + machineID, err = idgen.GenerateMachineID("") + if err != nil { + return fmt.Errorf("unable to generate machine id: %w", err) + } + } else { + machineID = args[0] + } + + clientCfg := cli.cfg().API.Client + serverCfg := cli.cfg().API.Server + + /*check if file already exists*/ + if dumpFile == "" && clientCfg != nil && clientCfg.CredentialsFilePath != "" { + credFile := clientCfg.CredentialsFilePath + // use the default only if the file does not exist + _, err = os.Stat(credFile) + + switch { + case os.IsNotExist(err) || force: + dumpFile = credFile + case err != nil: + return fmt.Errorf("unable to stat '%s': %w", credFile, err) + default: + return fmt.Errorf(`credentials file '%s' already exists: please remove it, use "--force" or specify a different file with "-f" ("-f -" for standard output)`, credFile) + } + } + + if dumpFile == "" { + return errors.New(`please specify a file to dump credentials to, with -f ("-f -" for standard output)`) + } + + // create a password if it's not specified by user + if machinePassword == "" && !interactive { + if !autoAdd { + return errors.New("please specify a password with --password or use 
--auto") + } + + machinePassword = idgen.GeneratePassword(idgen.PasswordLength) + } else if machinePassword == "" && interactive { + qs := &survey.Password{ + Message: "Please provide a password for the machine:", + } + survey.AskOne(qs, &machinePassword) + } + + password := strfmt.Password(machinePassword) + + _, err = cli.db.CreateMachine(ctx, &machineID, &password, "", true, force, types.PasswordAuthType) + if err != nil { + return fmt.Errorf("unable to create machine: %w", err) + } + + fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID) + + if apiURL == "" { + if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" { + apiURL = clientCfg.Credentials.URL + } else if serverCfg.ClientURL() != "" { + apiURL = serverCfg.ClientURL() + } else { + return errors.New("unable to dump an api URL. Please provide it in your configuration or with the -u parameter") + } + } + + apiCfg := csconfig.ApiCredentialsCfg{ + Login: machineID, + Password: password.String(), + URL: apiURL, + } + + apiConfigDump, err := yaml.Marshal(apiCfg) + if err != nil { + return fmt.Errorf("unable to serialize api credentials: %w", err) + } + + if dumpFile != "" && dumpFile != "-" { + if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil { + return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) + } + + fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) + } else { + fmt.Print(string(apiConfigDump)) + } + + return nil +} + +func (cli *cliMachines) newAddCmd() *cobra.Command { + var ( + password MachinePassword + dumpFile string + apiURL string + interactive bool + autoAdd bool + force bool + ) + + cmd := &cobra.Command{ + Use: "add", + Short: "add a single machine to the database", + DisableAutoGenTag: true, + Long: `Register a new machine in the database. 
cscli should be on the same machine as LAPI.`, + Example: `cscli machines add --auto +cscli machines add MyTestMachine --auto +cscli machines add MyTestMachine --password MyPassword +cscli machines add -f- --auto > /tmp/mycreds.yaml`, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.add(cmd.Context(), args, string(password), dumpFile, apiURL, interactive, autoAdd, force) + }, + } + + flags := cmd.Flags() + flags.VarP(&password, "password", "p", "machine password to login to the API") + flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") + flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API") + flags.BoolVarP(&interactive, "interactive", "i", false, "interactive mode to enter the password") + flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)") + flags.BoolVar(&force, "force", false, "will force add the machine if it already exists") + + return cmd +} diff --git a/cmd/crowdsec-cli/climachine/delete.go b/cmd/crowdsec-cli/climachine/delete.go new file mode 100644 index 00000000000..644ce93c642 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/delete.go @@ -0,0 +1,52 @@ +package climachine + +import ( + "context" + "errors" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/database" +) + +func (cli *cliMachines) delete(ctx context.Context, machines []string, ignoreMissing bool) error { + for _, machineID := range machines { + if err := cli.db.DeleteWatcher(ctx, machineID); err != nil { + var notFoundErr *database.MachineNotFoundError + if ignoreMissing && errors.As(err, &notFoundErr) { + return nil + } + + log.Errorf("unable to delete machine: %s", err) + + return nil + } + + log.Infof("machine '%s' deleted successfully", machineID) + } + + return nil +} + +func (cli *cliMachines) newDeleteCmd() *cobra.Command { + var ignoreMissing bool
+ + cmd := &cobra.Command{ + Use: "delete [machine_name]...", + Short: "delete machine(s) by name", + Example: `cscli machines delete "machine1" "machine2"`, + Args: cobra.MinimumNArgs(1), + Aliases: []string{"remove"}, + DisableAutoGenTag: true, + ValidArgsFunction: cli.validMachineID, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.delete(cmd.Context(), args, ignoreMissing) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more machines don't exist") + + return cmd +} diff --git a/cmd/crowdsec-cli/flag.go b/cmd/crowdsec-cli/climachine/flag.go similarity index 96% rename from cmd/crowdsec-cli/flag.go rename to cmd/crowdsec-cli/climachine/flag.go index 1780d08e5f7..c3fefd896e1 100644 --- a/cmd/crowdsec-cli/flag.go +++ b/cmd/crowdsec-cli/climachine/flag.go @@ -1,4 +1,4 @@ -package main +package climachine // Custom types for flag validation and conversion. diff --git a/cmd/crowdsec-cli/climachine/inspect.go b/cmd/crowdsec-cli/climachine/inspect.go new file mode 100644 index 00000000000..b08f2f62794 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/inspect.go @@ -0,0 +1,184 @@ +package climachine + +import ( + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + + "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { + state := machine.Hubstate + + if len(state) == 0 { + fmt.Println("No hub items found for this machine") + return + } + + // group state rows by type for multiple tables + rowsByType := make(map[string][]table.Row) + + for itemType, items := range state { + for _, item := range items { + if _, ok := 
rowsByType[itemType]; !ok { + rowsByType[itemType] = make([]table.Row, 0) + } + + row := table.Row{item.Name, item.Status, item.Version} + rowsByType[itemType] = append(rowsByType[itemType], row) + } + } + + for itemType, rows := range rowsByType { + t := cstable.New(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Name", "Status", "Version"}) + t.SetTitle(itemType) + t.AppendRows(rows) + io.WriteString(out, t.Render()+"\n") + } +} + +func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { + t := cstable.New(out, cli.cfg().Cscli.Color).Writer + + t.SetTitle("Machine: " + machine.MachineId) + + t.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + t.AppendRows([]table.Row{ + {"IP Address", machine.IpAddress}, + {"Created At", machine.CreatedAt}, + {"Last Update", machine.UpdatedAt}, + {"Last Heartbeat", machine.LastHeartbeat}, + {"Validated?", machine.IsValidated}, + {"CrowdSec version", machine.Version}, + {"OS", clientinfo.GetOSNameAndVersion(machine)}, + {"Auth type", machine.AuthType}, + }) + + for dsName, dsCount := range machine.Datasources { + t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) + } + + for _, ff := range clientinfo.GetFeatureFlagList(machine) { + t.AppendRow(table.Row{"Feature Flags", ff}) + } + + for _, coll := range machine.Hubstate[cwhub.COLLECTIONS] { + t.AppendRow(table.Row{"Collections", coll.Name}) + } + + io.WriteString(out, t.Render()+"\n") +} + +func (cli *cliMachines) inspect(machine *ent.Machine) error { + out := color.Output + outputFormat := cli.cfg().Cscli.Output + + switch outputFormat { + case "human": + cli.inspectHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(newMachineInfo(machine)); err != nil { + return errors.New("failed to serialize") + } + + return nil + default: + return fmt.Errorf("output format '%s' not supported for this command", outputFormat) + } + + return 
nil +} + +func (cli *cliMachines) inspectHub(machine *ent.Machine) error { + out := color.Output + + switch cli.cfg().Cscli.Output { + case "human": + cli.inspectHubHuman(out, machine) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(machine.Hubstate); err != nil { + return errors.New("failed to serialize") + } + + return nil + case "raw": + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"type", "name", "status", "version"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + rows := make([][]string, 0) + + for itemType, items := range machine.Hubstate { + for _, item := range items { + rows = append(rows, []string{itemType, item.Name, item.Status, item.Version}) + } + } + + for _, row := range rows { + if err := csvwriter.Write(row); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } + } + + csvwriter.Flush() + } + + return nil +} + +func (cli *cliMachines) newInspectCmd() *cobra.Command { + var showHub bool + + cmd := &cobra.Command{ + Use: "inspect [machine_name]", + Short: "inspect a machine by name", + Example: `cscli machines inspect "machine1"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: cli.validMachineID, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + machineID := args[0] + + machine, err := cli.db.QueryMachineByID(ctx, machineID) + if err != nil { + return fmt.Errorf("unable to read machine data '%s': %w", machineID, err) + } + + if showHub { + return cli.inspectHub(machine) + } + + return cli.inspect(machine) + }, + } + + flags := cmd.Flags() + + flags.BoolVarP(&showHub, "hub", "H", false, "show hub state") + + return cmd +} diff --git a/cmd/crowdsec-cli/climachine/list.go b/cmd/crowdsec-cli/climachine/list.go new file mode 100644 index 00000000000..6bedb2ad807 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/list.go @@ -0,0 +1,137 @@ +package climachine + +import ( + 
"context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "time" + + "github.com/fatih/color" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +// getLastHeartbeat returns the last heartbeat timestamp of a machine +// and a boolean indicating if the machine is considered active or not. +func getLastHeartbeat(m *ent.Machine) (string, bool) { + if m.LastHeartbeat == nil { + return "-", false + } + + elapsed := time.Now().UTC().Sub(*m.LastHeartbeat) + + hb := elapsed.Truncate(time.Second).String() + if elapsed > 2*time.Minute { + return hb, false + } + + return hb, true +} + +func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) + + for _, m := range machines { + validated := emoji.Prohibited + if m.IsValidated { + validated = emoji.CheckMark + } + + hb, active := getLastHeartbeat(m) + if !active { + hb = emoji.Warning + " " + hb + } + + t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) + } + + io.WriteString(out, t.Render()+"\n") +} + +func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os"}) + if err != nil { + return fmt.Errorf("failed to write header: %w", err) + } + + for _, m := range machines { + validated := "false" + if m.IsValidated { + validated 
= "true" + } + + hb := "-" + if m.LastHeartbeat != nil { + hb = m.LastHeartbeat.Format(time.RFC3339) + } + + if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, m.Osversion)}); err != nil { + return fmt.Errorf("failed to write raw output: %w", err) + } + } + + csvwriter.Flush() + + return nil +} + +func (cli *cliMachines) List(ctx context.Context, out io.Writer, db *database.Client) error { + // XXX: must use the provided db object, the one in the struct might be nil + // (calling List directly skips the PersistentPreRunE) + + machines, err := db.ListMachines(ctx) + if err != nil { + return fmt.Errorf("unable to list machines: %w", err) + } + + switch cli.cfg().Cscli.Output { + case "human": + cli.listHuman(out, machines) + case "json": + info := make([]machineInfo, 0, len(machines)) + for _, m := range machines { + info = append(info, newMachineInfo(m)) + } + + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(info); err != nil { + return errors.New("failed to serialize") + } + + return nil + case "raw": + return cli.listCSV(out, machines) + } + + return nil +} + +func (cli *cliMachines) newListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "list all machines in the database", + Long: `list all machines in the database with their status and last heartbeat`, + Example: `cscli machines list`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.List(cmd.Context(), color.Output, cli.db) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/climachine/machines.go b/cmd/crowdsec-cli/climachine/machines.go new file mode 100644 index 00000000000..ad503c6e936 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/machines.go @@ -0,0 +1,132 @@ +package climachine + +import ( + "slices" + "strings" + "time" + + "github.com/spf13/cobra" + + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +type configGetter = func() *csconfig.Config + +type cliMachines struct { + db *database.Client + cfg configGetter +} + +func New(cfg configGetter) *cliMachines { + return &cliMachines{ + cfg: cfg, + } +} + +func (cli *cliMachines) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "machines [action]", + Short: "Manage local API machines [requires local API]", + Long: `To list/add/delete/validate/prune machines. +Note: This command requires database direct access, so is intended to be run on the local API machine. +`, + Example: `cscli machines [action]`, + DisableAutoGenTag: true, + Aliases: []string{"machine"}, + PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { + var err error + if err = require.LAPI(cli.cfg()); err != nil { + return err + } + cli.db, err = require.DBClient(cmd.Context(), cli.cfg().DbConfig) + if err != nil { + return err + } + + return nil + }, + } + + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newAddCmd()) + cmd.AddCommand(cli.newDeleteCmd()) + cmd.AddCommand(cli.newValidateCmd()) + cmd.AddCommand(cli.newPruneCmd()) + cmd.AddCommand(cli.newInspectCmd()) + + return cmd +} + +// machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. 
+type machineInfo struct { + CreatedAt time.Time `json:"created_at,omitempty"` + UpdatedAt time.Time `json:"updated_at,omitempty"` + LastPush *time.Time `json:"last_push,omitempty"` + LastHeartbeat *time.Time `json:"last_heartbeat,omitempty"` + MachineId string `json:"machineId,omitempty"` + IpAddress string `json:"ipAddress,omitempty"` + Version string `json:"version,omitempty"` + IsValidated bool `json:"isValidated,omitempty"` + AuthType string `json:"auth_type"` + OS string `json:"os,omitempty"` + Featureflags []string `json:"featureflags,omitempty"` + Datasources map[string]int64 `json:"datasources,omitempty"` +} + +func newMachineInfo(m *ent.Machine) machineInfo { + return machineInfo{ + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + LastPush: m.LastPush, + LastHeartbeat: m.LastHeartbeat, + MachineId: m.MachineId, + IpAddress: m.IpAddress, + Version: m.Version, + IsValidated: m.IsValidated, + AuthType: m.AuthType, + OS: clientinfo.GetOSNameAndVersion(m), + Featureflags: clientinfo.GetFeatureFlagList(m), + Datasources: m.Datasources, + } +} + +// validMachineID returns a list of machine IDs for command completion +func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var err error + + cfg := cli.cfg() + ctx := cmd.Context() + + // need to load config and db because PersistentPreRunE is not called for completions + + if err = require.LAPI(cfg); err != nil { + cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + cli.db, err = require.DBClient(ctx, cfg.DbConfig) + if err != nil { + cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + machines, err := cli.db.ListMachines(ctx) + if err != nil { + cobra.CompError("unable to list machines " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + ret := []string{} + + for _, machine := range 
machines { + if strings.Contains(machine.MachineId, toComplete) && !slices.Contains(args, machine.MachineId) { + ret = append(ret, machine.MachineId) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp +} diff --git a/cmd/crowdsec-cli/climachine/prune.go b/cmd/crowdsec-cli/climachine/prune.go new file mode 100644 index 00000000000..ed41ef0a736 --- /dev/null +++ b/cmd/crowdsec-cli/climachine/prune.go @@ -0,0 +1,96 @@ +package climachine + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/fatih/color" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" +) + +func (cli *cliMachines) prune(ctx context.Context, duration time.Duration, notValidOnly bool, force bool) error { + if duration < 2*time.Minute && !notValidOnly { + if yes, err := ask.YesNo( + "The duration you provided is less than 2 minutes. "+ + "This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. No changes were made.") + return nil + } + } + + machines := []*ent.Machine{} + if pending, err := cli.db.QueryPendingMachine(ctx); err == nil { + machines = append(machines, pending...) + } + + if !notValidOnly { + if pending, err := cli.db.QueryMachinesInactiveSince(ctx, time.Now().UTC().Add(-duration)); err == nil { + machines = append(machines, pending...) + } + } + + if len(machines) == 0 { + fmt.Println("No machines to prune.") + return nil + } + + cli.listHuman(color.Output, machines) + + if !force { + if yes, err := ask.YesNo( + "You are about to PERMANENTLY remove the above machines from the database. "+ + "These will NOT be recoverable. Continue?", false); err != nil { + return err + } else if !yes { + fmt.Println("User aborted prune. 
No changes were made.") + return nil + } + } + + deleted, err := cli.db.BulkDeleteWatchers(ctx, machines) + if err != nil { + return fmt.Errorf("unable to prune machines: %w", err) + } + + fmt.Fprintf(os.Stderr, "successfully deleted %d machines\n", deleted) + + return nil +} + +func (cli *cliMachines) newPruneCmd() *cobra.Command { + var ( + duration time.Duration + notValidOnly bool + force bool + ) + + const defaultDuration = 10 * time.Minute + + cmd := &cobra.Command{ + Use: "prune", + Short: "prune multiple machines from the database", + Long: `prune multiple machines that are not validated or have not connected to the local API in a given duration.`, + Example: `cscli machines prune +cscli machines prune --duration 1h +cscli machines prune --not-validated-only --force`, + Args: cobra.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + return cli.prune(cmd.Context(), duration, notValidOnly, force) + }, + } + + flags := cmd.Flags() + flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since validated machine last heartbeat") + flags.BoolVar(¬ValidOnly, "not-validated-only", false, "only prune machines that are not validated") + flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") + + return cmd +} diff --git a/cmd/crowdsec-cli/climachine/validate.go b/cmd/crowdsec-cli/climachine/validate.go new file mode 100644 index 00000000000..cba872aa05d --- /dev/null +++ b/cmd/crowdsec-cli/climachine/validate.go @@ -0,0 +1,35 @@ +package climachine + +import ( + "context" + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +func (cli *cliMachines) validate(ctx context.Context, machineID string) error { + if err := cli.db.ValidateMachine(ctx, machineID); err != nil { + return fmt.Errorf("unable to validate machine '%s': %w", machineID, err) + } + + log.Infof("machine '%s' validated successfully", machineID) + + return nil +} + +func (cli *cliMachines) 
newValidateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "validate", + Short: "validate a machine to access the local API", + Long: `validate a machine to access the local API.`, + Example: `cscli machines validate "machine_name"`, + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(cmd.Context(), args[0]) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index d3afbef0669..27fa99710c8 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -64,11 +64,11 @@ func (cli *cliMetrics) list() error { t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { - return fmt.Errorf("failed to marshal metric types: %w", err) + return fmt.Errorf("failed to serialize metric types: %w", err) } fmt.Println(string(x)) @@ -84,7 +84,7 @@ func (cli *cliMetrics) newListCmd() *cobra.Command { Use: "list", Short: "List available types of metrics.", Long: `List available types of metrics.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.list() diff --git a/cmd/crowdsec-cli/climetrics/metrics.go b/cmd/crowdsec-cli/climetrics/metrics.go index f3bc4874460..67bd7b6ad93 100644 --- a/cmd/crowdsec-cli/climetrics/metrics.go +++ b/cmd/crowdsec-cli/climetrics/metrics.go @@ -36,7 +36,7 @@ cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers # List available metric types cscli metrics list`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.show(cmd.Context(), nil, url, noUnit) diff --git a/cmd/crowdsec-cli/climetrics/show.go 
b/cmd/crowdsec-cli/climetrics/show.go index 7559463b66b..045959048f6 100644 --- a/cmd/crowdsec-cli/climetrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -5,9 +5,8 @@ import ( "errors" "fmt" - log "github.com/sirupsen/logrus" - "github.com/fatih/color" + log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" diff --git a/cmd/crowdsec-cli/climetrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go index 827dcf036c3..0af2e796f40 100644 --- a/cmd/crowdsec-cli/climetrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -37,8 +37,8 @@ func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go index e48dd6c924f..942eceaa75c 100644 --- a/cmd/crowdsec-cli/climetrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -38,8 +38,8 @@ func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go index 4a249e11687..d924375247f 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -34,8 +34,8 @@ func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, sh log.Warningf("while collecting appsec stats: %s", err) } 
else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go index 2f859d70cfb..e06a7c2e2b3 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -41,7 +41,7 @@ func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, show log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { io.WriteString(out, fmt.Sprintf("Appsec '%s' Rules Metrics:\n", appsecEngine)) - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index 62e68b6bc41..bc0da152d6d 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -129,7 +129,7 @@ func (*statBouncer) Description() (string, string) { func logWarningOnce(warningsLogged map[string]bool, msg string) { if _, ok := warningsLogged[msg]; !ok { - log.Warningf(msg) + log.Warning(msg) warningsLogged[msg] = true } diff --git a/cmd/crowdsec-cli/climetrics/statbucket.go b/cmd/crowdsec-cli/climetrics/statbucket.go index 507d9f3a476..1882fe21df1 100644 --- a/cmd/crowdsec-cli/climetrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -35,8 +35,8 @@ func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } 
diff --git a/cmd/crowdsec-cli/climetrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go index 145665cfba2..b862f49ff12 100644 --- a/cmd/crowdsec-cli/climetrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -53,8 +53,8 @@ func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go index 45b384708bf..9559eacf0f4 100644 --- a/cmd/crowdsec-cli/climetrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -49,8 +49,8 @@ func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go index 828ccb33413..5e5f63a79d3 100644 --- a/cmd/crowdsec-cli/climetrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -35,8 +35,8 @@ func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go index ffc999555c1..44f0e8f4b87 100644 --- a/cmd/crowdsec-cli/climetrics/statlapidecision.go +++ b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ 
-57,8 +57,8 @@ func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, sh if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go index 09abe2dd44b..0e6693bea82 100644 --- a/cmd/crowdsec-cli/climetrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -35,8 +35,8 @@ func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go index 0b3512052b9..520e68f9adf 100644 --- a/cmd/crowdsec-cli/climetrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -36,8 +36,8 @@ func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go index 5938ac05fc8..2729de931a1 100644 --- a/cmd/crowdsec-cli/climetrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -52,8 +52,8 @@ func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - 
io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go index ccb7e52153b..7f533b45b4b 100644 --- a/cmd/crowdsec-cli/climetrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -36,8 +36,8 @@ func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showE log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title + ":\n") - io.WriteString(out, t.Render() + "\n") + io.WriteString(out, title+":\n") + io.WriteString(out, t.Render()+"\n") io.WriteString(out, "\n") } } diff --git a/cmd/crowdsec-cli/climetrics/store.go b/cmd/crowdsec-cli/climetrics/store.go index 5de50558e89..55fab5dbd7f 100644 --- a/cmd/crowdsec-cli/climetrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -260,7 +260,7 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, case "json": x, err := json.MarshalIndent(want, "", " ") if err != nil { - return fmt.Errorf("failed to marshal metrics: %w", err) + return fmt.Errorf("failed to serialize metrics: %w", err) } out.Write(x) default: diff --git a/cmd/crowdsec-cli/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go similarity index 89% rename from cmd/crowdsec-cli/notifications.go rename to cmd/crowdsec-cli/clinotifications/notifications.go index 8c6b6631b33..baf899c10cf 100644 --- a/cmd/crowdsec-cli/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -1,4 +1,4 @@ -package main +package clinotifications import ( "context" @@ -29,7 +29,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csprofiles" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" 
"github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -40,11 +39,13 @@ type NotificationsCfg struct { ids []uint } +type configGetter func() *csconfig.Config + type cliNotifications struct { cfg configGetter } -func NewCLINotifications(cfg configGetter) *cliNotifications { +func New(cfg configGetter) *cliNotifications { return &cliNotifications{ cfg: cfg, } @@ -71,10 +72,10 @@ func (cli *cliNotifications) NewCommand() *cobra.Command { }, } - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewInspectCmd()) - cmd.AddCommand(cli.NewReinjectCmd()) - cmd.AddCommand(cli.NewTestCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newInspectCmd()) + cmd.AddCommand(cli.newReinjectCmd()) + cmd.AddCommand(cli.newTestCmd()) return cmd } @@ -151,13 +152,13 @@ func (cli *cliNotifications) getProfilesConfigs() (map[string]NotificationsCfg, return ncfgs, nil } -func (cli *cliNotifications) NewListCmd() *cobra.Command { +func (cli *cliNotifications) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list notifications plugins", Long: `list notifications plugins and their status (active or not)`, Example: `cscli notifications list`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -171,7 +172,7 @@ func (cli *cliNotifications) NewListCmd() *cobra.Command { } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(ncfgs, "", " ") if err != nil { - return fmt.Errorf("failed to marshal notification configuration: %w", err) + return fmt.Errorf("failed to serialize notification configuration: %w", err) } fmt.Printf("%s", string(x)) } else if cfg.Cscli.Output == "raw" { @@ -200,7 +201,7 @@ func (cli *cliNotifications) NewListCmd() *cobra.Command { return cmd } -func (cli *cliNotifications) NewInspectCmd() *cobra.Command { +func (cli *cliNotifications) newInspectCmd() *cobra.Command { cmd := 
&cobra.Command{ Use: "inspect", Short: "Inspect notifications plugin", @@ -230,7 +231,7 @@ func (cli *cliNotifications) NewInspectCmd() *cobra.Command { } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(cfg, "", " ") if err != nil { - return fmt.Errorf("failed to marshal notification configuration: %w", err) + return fmt.Errorf("failed to serialize notification configuration: %w", err) } fmt.Printf("%s", string(x)) } @@ -259,7 +260,7 @@ func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args [ return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli cliNotifications) NewTestCmd() *cobra.Command { +func (cli cliNotifications) newTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb @@ -274,7 +275,8 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, ValidArgsFunction: cli.notificationConfigFilter, - PreRunE: func(_ *cobra.Command, args []string) error { + PreRunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() cfg := cli.cfg() pconfigs, err := cli.getPluginConfigs() if err != nil { @@ -285,7 +287,7 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { return fmt.Errorf("plugin name: '%s' does not exist", args[0]) } // Create a single profile with plugin name as notification name - return pluginBroker.Init(cfg.PluginConfig, []*csconfig.ProfileCfg{ + return pluginBroker.Init(ctx, cfg.PluginConfig, []*csconfig.ProfileCfg{ { Notifications: []string{ pcfg.Name, @@ -330,7 +332,7 @@ func (cli cliNotifications) NewTestCmd() *cobra.Command { CreatedAt: time.Now().UTC().Format(time.RFC3339), } if err := yaml.Unmarshal([]byte(alertOverride), alert); err != nil { - return fmt.Errorf("failed to unmarshal alert override: %w", err) + return fmt.Errorf("failed to parse alert override: %w", err) } pluginBroker.PluginChannel <- csplugin.ProfileAlert{ @@ -350,7 +352,7 @@ func (cli cliNotifications) NewTestCmd() 
*cobra.Command { return cmd } -func (cli *cliNotifications) NewReinjectCmd() *cobra.Command { +func (cli *cliNotifications) newReinjectCmd() *cobra.Command { var ( alertOverride string alert *models.Alert @@ -367,30 +369,31 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not `, Args: cobra.ExactArgs(1), DisableAutoGenTag: true, - PreRunE: func(_ *cobra.Command, args []string) error { + PreRunE: func(cmd *cobra.Command, args []string) error { var err error - alert, err = cli.fetchAlertFromArgString(args[0]) + alert, err = cli.fetchAlertFromArgString(cmd.Context(), args[0]) if err != nil { return err } return nil }, - RunE: func(_ *cobra.Command, _ []string) error { + RunE: func(cmd *cobra.Command, _ []string) error { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb ) + ctx := cmd.Context() cfg := cli.cfg() if alertOverride != "" { if err := json.Unmarshal([]byte(alertOverride), alert); err != nil { - return fmt.Errorf("can't unmarshal data in the alert flag: %w", err) + return fmt.Errorf("can't parse data in the alert flag: %w", err) } } - err := pluginBroker.Init(cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) + err := pluginBroker.Init(ctx, cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) if err != nil { return fmt.Errorf("can't initialize plugins: %w", err) } @@ -446,7 +449,7 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not return cmd } -func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Alert, error) { +func (cli *cliNotifications) fetchAlertFromArgString(ctx context.Context, toParse string) (*models.Alert, error) { cfg := cli.cfg() id, err := strconv.Atoi(toParse) @@ -462,7 +465,6 @@ func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Al client, err := apiclient.NewClient(&apiclient.Config{ MachineID: cfg.API.Client.Credentials.Login, Password: strfmt.Password(cfg.API.Client.Credentials.Password), - UserAgent: 
cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -470,7 +472,7 @@ func (cli *cliNotifications) fetchAlertFromArgString(toParse string) (*models.Al return nil, fmt.Errorf("error creating the client for the API: %w", err) } - alert, _, err := client.Alerts.GetByID(context.Background(), id) + alert, _, err := client.Alerts.GetByID(ctx, id) if err != nil { return nil, fmt.Errorf("can't find alert with id %d: %w", id, err) } diff --git a/cmd/crowdsec-cli/notifications_table.go b/cmd/crowdsec-cli/clinotifications/notifications_table.go similarity index 97% rename from cmd/crowdsec-cli/notifications_table.go rename to cmd/crowdsec-cli/clinotifications/notifications_table.go index 2976797bd8a..0b6a3f58efc 100644 --- a/cmd/crowdsec-cli/notifications_table.go +++ b/cmd/crowdsec-cli/clinotifications/notifications_table.go @@ -1,4 +1,4 @@ -package main +package clinotifications import ( "io" diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go new file mode 100644 index 00000000000..7ac2455d28f --- /dev/null +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -0,0 +1,174 @@ +package clipapi + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/fatih/color" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/ptr" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/apiserver" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" +) + +type configGetter = func() *csconfig.Config + +type cliPapi struct { + cfg configGetter +} + +func New(cfg configGetter) *cliPapi { + return &cliPapi{ + cfg: cfg, + } +} + +func (cli *cliPapi) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "papi [action]", + Short: "Manage interaction with Polling API (PAPI)", + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + PersistentPreRunE: func(_ *cobra.Command, _ []string) 
error { + cfg := cli.cfg() + if err := require.LAPI(cfg); err != nil { + return err + } + if err := require.CAPI(cfg); err != nil { + return err + } + + return require.PAPI(cfg) + }, + } + + cmd.AddCommand(cli.newStatusCmd()) + cmd.AddCommand(cli.newSyncCmd()) + + return cmd +} + +func (cli *cliPapi) Status(ctx context.Context, out io.Writer, db *database.Client) error { + cfg := cli.cfg() + + apic, err := apiserver.NewAPIC(ctx, cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + if err != nil { + return fmt.Errorf("unable to initialize API client: %w", err) + } + + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) + if err != nil { + return fmt.Errorf("unable to initialize PAPI client: %w", err) + } + + perms, err := papi.GetPermissions(ctx) + if err != nil { + return fmt.Errorf("unable to get PAPI permissions: %w", err) + } + + lastTimestampStr, err := db.GetConfigItem(ctx, apiserver.PapiPullKey) + if err != nil { + lastTimestampStr = ptr.Of("never") + } + + // both can and did happen + if lastTimestampStr == nil || *lastTimestampStr == "0001-01-01T00:00:00Z" { + lastTimestampStr = ptr.Of("never") + } + + fmt.Fprint(out, "You can successfully interact with Polling API (PAPI)\n") + fmt.Fprintf(out, "Console plan: %s\n", perms.Plan) + fmt.Fprintf(out, "Last order received: %s\n", *lastTimestampStr) + fmt.Fprint(out, "PAPI subscriptions:\n") + + for _, sub := range perms.Categories { + fmt.Fprintf(out, " - %s\n", sub) + } + + return nil +} + +func (cli *cliPapi) newStatusCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "status", + Short: "Get status of the Polling API", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + cfg := cli.cfg() + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + return err + } + + return cli.Status(ctx, color.Output, db) + }, + } + + return cmd +} + 
+func (cli *cliPapi) sync(ctx context.Context, out io.Writer, db *database.Client) error { + cfg := cli.cfg() + t := tomb.Tomb{} + + apic, err := apiserver.NewAPIC(ctx, cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) + if err != nil { + return fmt.Errorf("unable to initialize API client: %w", err) + } + + t.Go(func() error { return apic.Push(ctx) }) + + papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) + if err != nil { + return fmt.Errorf("unable to initialize PAPI client: %w", err) + } + + t.Go(papi.SyncDecisions) + + err = papi.PullOnce(ctx, time.Time{}, true) + if err != nil { + return fmt.Errorf("unable to sync decisions: %w", err) + } + + log.Infof("Sending acknowledgements to CAPI") + + apic.Shutdown() + papi.Shutdown() + t.Wait() + time.Sleep(5 * time.Second) // FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done + + return nil +} + +func (cli *cliPapi) newSyncCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "sync", + Short: "Sync with the Polling API, pulling all non-expired orders for the instance", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + cfg := cli.cfg() + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + return err + } + + return cli.sync(ctx, color.Output, db) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go new file mode 100644 index 00000000000..269cdfb78e9 --- /dev/null +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -0,0 +1,307 @@ +package clisetup + +import ( + "bytes" + "context" + "errors" + "fmt" + "os" + "os/exec" + + goccyyaml "github.com/goccy/go-yaml" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + 
"github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/setup" +) + +type configGetter func() *csconfig.Config + +type cliSetup struct { + cfg configGetter +} + +func New(cfg configGetter) *cliSetup { + return &cliSetup{ + cfg: cfg, + } +} + +func (cli *cliSetup) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "setup", + Short: "Tools to configure crowdsec", + Long: "Manage hub configuration and service detection", + Args: cobra.MinimumNArgs(0), + DisableAutoGenTag: true, + } + + cmd.AddCommand(cli.newDetectCmd()) + cmd.AddCommand(cli.newInstallHubCmd()) + cmd.AddCommand(cli.newDataSourcesCmd()) + cmd.AddCommand(cli.newValidateCmd()) + + return cmd +} + +type detectFlags struct { + detectConfigFile string + listSupportedServices bool + forcedUnits []string + forcedProcesses []string + forcedOSFamily string + forcedOSID string + forcedOSVersion string + skipServices []string + snubSystemd bool + outYaml bool +} + +func (f *detectFlags) bind(cmd *cobra.Command) { + defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") + + flags := cmd.Flags() + flags.StringVar(&f.detectConfigFile, "detect-config", defaultServiceDetect, "path to service detection configuration") + flags.BoolVar(&f.listSupportedServices, "list-supported-services", false, "do not detect; only print supported services") + flags.StringSliceVar(&f.forcedUnits, "force-unit", nil, "force detection of a systemd unit (can be repeated)") + flags.StringSliceVar(&f.forcedProcesses, "force-process", nil, "force detection of a running process (can be repeated)") + flags.StringSliceVar(&f.skipServices, "skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") + flags.StringVar(&f.forcedOSFamily, "force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") + flags.StringVar(&f.forcedOSID, "force-os-id", "", "override OS.ID=[debian | ubuntu | , redhat...]") + 
flags.StringVar(&f.forcedOSVersion, "force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") + flags.BoolVar(&f.snubSystemd, "snub-systemd", false, "don't use systemd, even if available") + flags.BoolVar(&f.outYaml, "yaml", false, "output yaml, not json") +} + +func (cli *cliSetup) newDetectCmd() *cobra.Command { + f := detectFlags{} + + cmd := &cobra.Command{ + Use: "detect", + Short: "detect running services, generate a setup file", + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + return cli.detect(f) + }, + } + + f.bind(cmd) + + return cmd +} + +func (cli *cliSetup) newInstallHubCmd() *cobra.Command { + var dryRun bool + + cmd := &cobra.Command{ + Use: "install-hub [setup_file] [flags]", + Short: "install items from a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(cmd.Context(), dryRun, args[0]) + }, + } + + flags := cmd.Flags() + flags.BoolVar(&dryRun, "dry-run", false, "don't install anything; print out what would have been") + + return cmd +} + +func (cli *cliSetup) newDataSourcesCmd() *cobra.Command { + var toDir string + + cmd := &cobra.Command{ + Use: "datasources [setup_file] [flags]", + Short: "generate datasource (acquisition) configuration from a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.dataSources(args[0], toDir) + }, + } + + flags := cmd.Flags() + flags.StringVar(&toDir, "to-dir", "", "write the configuration to a directory, in multiple files") + + return cmd +} + +func (cli *cliSetup) newValidateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "validate [setup_file]", + Short: "validate a setup file", + Args: cobra.ExactArgs(1), + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.validate(args[0]) + }, + } + + return cmd +} + +func (cli *cliSetup) 
detect(f detectFlags) error { + var ( + detectReader *os.File + err error + ) + + switch f.detectConfigFile { + case "-": + log.Tracef("Reading detection rules from stdin") + + detectReader = os.Stdin + default: + log.Tracef("Reading detection rules: %s", f.detectConfigFile) + + detectReader, err = os.Open(f.detectConfigFile) + if err != nil { + return err + } + } + + if !f.snubSystemd { + _, err = exec.LookPath("systemctl") + if err != nil { + log.Debug("systemctl not available: snubbing systemd") + + f.snubSystemd = true + } + } + + if f.forcedOSFamily == "" && f.forcedOSID != "" { + log.Debug("force-os-id is set: force-os-family defaults to 'linux'") + + f.forcedOSFamily = "linux" + } + + if f.listSupportedServices { + supported, err := setup.ListSupported(detectReader) + if err != nil { + return err + } + + for _, svc := range supported { + fmt.Println(svc) + } + + return nil + } + + opts := setup.DetectOptions{ + ForcedUnits: f.forcedUnits, + ForcedProcesses: f.forcedProcesses, + ForcedOS: setup.ExprOS{ + Family: f.forcedOSFamily, + ID: f.forcedOSID, + RawVersion: f.forcedOSVersion, + }, + SkipServices: f.skipServices, + SnubSystemd: f.snubSystemd, + } + + hubSetup, err := setup.Detect(detectReader, opts) + if err != nil { + return fmt.Errorf("detecting services: %w", err) + } + + setup, err := setupAsString(hubSetup, f.outYaml) + if err != nil { + return err + } + + fmt.Println(setup) + + return nil +} + +func setupAsString(cs setup.Setup, outYaml bool) (string, error) { + var ( + ret []byte + err error + ) + + wrap := func(err error) error { + return fmt.Errorf("while serializing setup: %w", err) + } + + indentLevel := 2 + buf := &bytes.Buffer{} + enc := yaml.NewEncoder(buf) + enc.SetIndent(indentLevel) + + if err = enc.Encode(cs); err != nil { + return "", wrap(err) + } + + if err = enc.Close(); err != nil { + return "", wrap(err) + } + + ret = buf.Bytes() + + if !outYaml { + // take a general approach to output json, so we avoid the + // double tags in the 
structures and can use go-yaml features + // missing from the json package + ret, err = goccyyaml.YAMLToJSON(ret) + if err != nil { + return "", wrap(err) + } + } + + return string(ret), nil +} + +func (cli *cliSetup) dataSources(fromFile string, toDir string) error { + input, err := os.ReadFile(fromFile) + if err != nil { + return fmt.Errorf("while reading setup file: %w", err) + } + + output, err := setup.DataSources(input, toDir) + if err != nil { + return err + } + + if toDir == "" { + fmt.Println(output) + } + + return nil +} + +func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) error { + input, err := os.ReadFile(fromFile) + if err != nil { + return fmt.Errorf("while reading file %s: %w", fromFile, err) + } + + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) + if err != nil { + return err + } + + return setup.InstallHubItems(ctx, hub, input, dryRun) +} + +func (cli *cliSetup) validate(fromFile string) error { + input, err := os.ReadFile(fromFile) + if err != nil { + return fmt.Errorf("while reading stdin: %w", err) + } + + if err = setup.Validate(input); err != nil { + fmt.Printf("%v\n", err) + return errors.New("invalid setup file") + } + + return nil +} diff --git a/cmd/crowdsec-cli/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go similarity index 91% rename from cmd/crowdsec-cli/simulation.go rename to cmd/crowdsec-cli/clisimulation/simulation.go index f8d8a660b8c..8136aa213c3 100644 --- a/cmd/crowdsec-cli/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -1,4 +1,4 @@ -package main +package clisimulation import ( "errors" @@ -10,15 +10,19 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) +type configGetter func() 
*csconfig.Config + type cliSimulation struct { cfg configGetter } -func NewCLISimulation(cfg configGetter) *cliSimulation { +func New(cfg configGetter) *cliSimulation { return &cliSimulation{ cfg: cfg, } @@ -44,21 +48,21 @@ cscli simulation disable crowdsecurity/ssh-bf`, }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { if cmd.Name() != "status" { - log.Infof(ReloadMessage()) + log.Info(reload.Message) } }, } cmd.Flags().SortFlags = false cmd.PersistentFlags().SortFlags = false - cmd.AddCommand(cli.NewEnableCmd()) - cmd.AddCommand(cli.NewDisableCmd()) - cmd.AddCommand(cli.NewStatusCmd()) + cmd.AddCommand(cli.newEnableCmd()) + cmd.AddCommand(cli.newDisableCmd()) + cmd.AddCommand(cli.newStatusCmd()) return cmd } -func (cli *cliSimulation) NewEnableCmd() *cobra.Command { +func (cli *cliSimulation) newEnableCmd() *cobra.Command { var forceGlobalSimulation bool cmd := &cobra.Command{ @@ -118,7 +122,7 @@ func (cli *cliSimulation) NewEnableCmd() *cobra.Command { return cmd } -func (cli *cliSimulation) NewDisableCmd() *cobra.Command { +func (cli *cliSimulation) newDisableCmd() *cobra.Command { var forceGlobalSimulation bool cmd := &cobra.Command{ @@ -165,7 +169,7 @@ func (cli *cliSimulation) NewDisableCmd() *cobra.Command { return cmd } -func (cli *cliSimulation) NewStatusCmd() *cobra.Command { +func (cli *cliSimulation) newStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Show simulation mode status", @@ -216,7 +220,7 @@ func (cli *cliSimulation) dumpSimulationFile() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - return fmt.Errorf("unable to marshal simulation configuration: %w", err) + return fmt.Errorf("unable to serialize simulation configuration: %w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) @@ -238,7 +242,7 @@ func (cli *cliSimulation) disableGlobalSimulation() error { newConfigSim, err := yaml.Marshal(cfg.Cscli.SimulationConfig) if err != nil { - 
return fmt.Errorf("unable to marshal new simulation configuration: %w", err) + return fmt.Errorf("unable to serialize new simulation configuration: %w", err) } err = os.WriteFile(cfg.ConfigPaths.SimulationFilePath, newConfigSim, 0o644) diff --git a/cmd/crowdsec-cli/support.go b/cmd/crowdsec-cli/clisupport/support.go similarity index 82% rename from cmd/crowdsec-cli/support.go rename to cmd/crowdsec-cli/clisupport/support.go index ef14f90df17..4474f5c8f11 100644 --- a/cmd/crowdsec-cli/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -1,4 +1,4 @@ -package main +package clisupport import ( "archive/zip" @@ -22,7 +22,13 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -36,12 +42,13 @@ const ( SUPPORT_VERSION_PATH = "version.txt" SUPPORT_FEATURES_PATH = "features.txt" SUPPORT_OS_INFO_PATH = "osinfo.txt" - SUPPORT_HUB_DIR = "hub/" + SUPPORT_HUB = "hub.txt" SUPPORT_BOUNCERS_PATH = "lapi/bouncers.txt" SUPPORT_AGENTS_PATH = "lapi/agents.txt" SUPPORT_CROWDSEC_CONFIG_PATH = "config/crowdsec.yaml" SUPPORT_LAPI_STATUS_PATH = "lapi_status.txt" SUPPORT_CAPI_STATUS_PATH = "capi_status.txt" + SUPPORT_PAPI_STATUS_PATH = "papi_status.txt" SUPPORT_ACQUISITION_DIR = "config/acquis/" SUPPORT_CROWDSEC_PROFILE_PATH = "config/profiles.yaml" SUPPORT_CRASH_DIR = "crash/" @@ -161,31 +168,28 @@ func (cli *cliSupport) dumpOSInfo(zw *zip.Writer) error { return nil } -func (cli 
*cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub, itemType string) error { - var err error +func (cli *cliSupport) dumpHubItems(zw *zip.Writer, hub *cwhub.Hub) error { + log.Infof("Collecting hub") - out := new(bytes.Buffer) - - log.Infof("Collecting hub: %s", itemType) - - items := make(map[string][]*cwhub.Item) - - if items[itemType], err = selectItems(hub, itemType, nil, true); err != nil { - return fmt.Errorf("could not collect %s list: %w", itemType, err) + if hub == nil { + return errors.New("no hub connection") } - if err := listItems(out, cli.cfg().Cscli.Color, []string{itemType}, items, false, "human"); err != nil { - return fmt.Errorf("could not list %s: %w", itemType, err) + out := new(bytes.Buffer) + ch := clihub.New(cli.cfg) + + if err := ch.List(out, hub, false); err != nil { + return err } stripped := stripAnsiString(out.String()) - cli.writeToZip(zw, SUPPORT_HUB_DIR+itemType+".txt", time.Now(), strings.NewReader(stripped)) + cli.writeToZip(zw, SUPPORT_HUB, time.Now(), strings.NewReader(stripped)) return nil } -func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { +func (cli *cliSupport) dumpBouncers(ctx context.Context, zw *zip.Writer, db *database.Client) error { log.Info("Collecting bouncers") if db == nil { @@ -193,10 +197,11 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { } out := new(bytes.Buffer) + cb := clibouncer.New(cli.cfg) - // call the "cscli bouncers list" command directly, skip any preRun - cm := cliBouncers{db: db, cfg: cli.cfg} - cm.list(out) + if err := cb.List(ctx, out, db); err != nil { + return err + } stripped := stripAnsiString(out.String()) @@ -205,7 +210,7 @@ func (cli *cliSupport) dumpBouncers(zw *zip.Writer, db *database.Client) error { return nil } -func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { +func (cli *cliSupport) dumpAgents(ctx context.Context, zw *zip.Writer, db *database.Client) error { log.Info("Collecting 
agents") if db == nil { @@ -213,10 +218,11 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { } out := new(bytes.Buffer) + cm := climachine.New(cli.cfg) - // call the "cscli machines list" command directly, skip any preRun - cm := cliMachines{db: db, cfg: cli.cfg} - cm.list(out) + if err := cm.List(ctx, out, db); err != nil { + return err + } stripped := stripAnsiString(out.String()) @@ -225,54 +231,56 @@ func (cli *cliSupport) dumpAgents(zw *zip.Writer, db *database.Client) error { return nil } -func (cli *cliSupport) dumpLAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { +func (cli *cliSupport) dumpLAPIStatus(ctx context.Context, zw *zip.Writer, hub *cwhub.Hub) error { log.Info("Collecting LAPI status") - cfg := cli.cfg() - cred := cfg.API.Client.Credentials - out := new(bytes.Buffer) + cl := clilapi.New(cli.cfg) - fmt.Fprintf(out, "LAPI credentials file: %s\n", cfg.API.Client.CredentialsFilePath) - fmt.Fprintf(out, "LAPI URL: %s\n", cred.URL) - fmt.Fprintf(out, "LAPI username: %s\n", cred.Login) - - if err := QueryLAPIStatus(hub, cred.URL, cred.Login, cred.Password); err != nil { - return fmt.Errorf("could not authenticate to Local API (LAPI): %w", err) + err := cl.Status(ctx, out, hub) + if err != nil { + fmt.Fprintf(out, "%s\n", err) } - fmt.Fprintln(out, "You can successfully interact with Local API (LAPI)") + stripped := stripAnsiString(out.String()) - cli.writeToZip(zw, SUPPORT_LAPI_STATUS_PATH, time.Now(), out) + cli.writeToZip(zw, SUPPORT_LAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) return nil } -func (cli *cliSupport) dumpCAPIStatus(zw *zip.Writer, hub *cwhub.Hub) error { +func (cli *cliSupport) dumpCAPIStatus(ctx context.Context, zw *zip.Writer, hub *cwhub.Hub) error { log.Info("Collecting CAPI status") - cfg := cli.cfg() - cred := cfg.API.Server.OnlineClient.Credentials - out := new(bytes.Buffer) + cc := clicapi.New(cli.cfg) - fmt.Fprintf(out, "CAPI credentials file: %s\n", 
cfg.API.Server.OnlineClient.CredentialsFilePath) - fmt.Fprintf(out, "CAPI URL: %s\n", cred.URL) - fmt.Fprintf(out, "CAPI username: %s\n", cred.Login) - - auth, enrolled, err := QueryCAPIStatus(hub, cred.URL, cred.Login, cred.Password) + err := cc.Status(ctx, out, hub) if err != nil { - return fmt.Errorf("could not authenticate to Central API (CAPI): %w", err) - } - if auth { - fmt.Fprintln(out, "You can successfully interact with Central API (CAPI)") + fmt.Fprintf(out, "%s\n", err) } - if enrolled { - fmt.Fprintln(out, "Your instance is enrolled in the console") + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) + + return nil +} + +func (cli *cliSupport) dumpPAPIStatus(ctx context.Context, zw *zip.Writer, db *database.Client) error { + log.Info("Collecting PAPI status") + + out := new(bytes.Buffer) + cp := clipapi.New(cli.cfg) + + err := cp.Status(ctx, out, db) + if err != nil { + fmt.Fprintf(out, "%s\n", err) } - cli.writeToZip(zw, SUPPORT_CAPI_STATUS_PATH, time.Now(), out) + stripped := stripAnsiString(out.String()) + + cli.writeToZip(zw, SUPPORT_PAPI_STATUS_PATH, time.Now(), strings.NewReader(stripped)) return nil } @@ -385,11 +393,13 @@ func (cli *cliSupport) dumpCrash(zw *zip.Writer) error { return nil } +type configGetter func() *csconfig.Config + type cliSupport struct { cfg configGetter } -func NewCLISupport(cfg configGetter) *cliSupport { +func New(cfg configGetter) *cliSupport { return &cliSupport{ cfg: cfg, } @@ -511,30 +521,30 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { log.Warnf("could not collect main config file: %s", err) } - if hub != nil { - for _, itemType := range cwhub.ItemTypes { - if err = cli.dumpHubItems(zipWriter, hub, itemType); err != nil { - log.Warnf("could not collect %s information: %s", itemType, err) - } - } + if err = cli.dumpHubItems(zipWriter, hub); err != nil { + log.Warnf("could not collect hub information: %s", 
err) } - if err = cli.dumpBouncers(zipWriter, db); err != nil { + if err = cli.dumpBouncers(ctx, zipWriter, db); err != nil { log.Warnf("could not collect bouncers information: %s", err) } - if err = cli.dumpAgents(zipWriter, db); err != nil { + if err = cli.dumpAgents(ctx, zipWriter, db); err != nil { log.Warnf("could not collect agents information: %s", err) } if !skipCAPI { - if err = cli.dumpCAPIStatus(zipWriter, hub); err != nil { + if err = cli.dumpCAPIStatus(ctx, zipWriter, hub); err != nil { log.Warnf("could not collect CAPI status: %s", err) } + + if err = cli.dumpPAPIStatus(ctx, zipWriter, db); err != nil { + log.Warnf("could not collect PAPI status: %s", err) + } } if !skipLAPI { - if err = cli.dumpLAPIStatus(zipWriter, hub); err != nil { + if err = cli.dumpLAPIStatus(ctx, zipWriter, hub); err != nil { log.Warnf("could not collect LAPI status: %s", err) } diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/config.go index e88845798e2..4cf8916ad4b 100644 --- a/cmd/crowdsec-cli/config.go +++ b/cmd/crowdsec-cli/config.go @@ -18,7 +18,7 @@ func (cli *cliConfig) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "config [command]", Short: "Allows to view current config", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, } diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index e8ac6213530..d23aff80a78 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ b/cmd/crowdsec-cli/config_backup.go @@ -74,7 +74,7 @@ func (cli *cliConfig) backupHub(dirPath string) error { upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") if err != nil { - return fmt.Errorf("failed marshaling upstream parsers: %w", err) + return fmt.Errorf("failed to serialize upstream parsers: %w", err) } err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0o644) diff --git a/cmd/crowdsec-cli/config_feature_flags.go b/cmd/crowdsec-cli/config_feature_flags.go index d1dbe2b93b7..760e2194bb3 100644 
--- a/cmd/crowdsec-cli/config_feature_flags.go +++ b/cmd/crowdsec-cli/config_feature_flags.go @@ -121,7 +121,7 @@ func (cli *cliConfig) newFeatureFlagsCmd() *cobra.Command { Use: "feature-flags", Short: "Displays feature flag status", Long: `Displays the supported feature flags and their current status.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.featureFlags(showRetired) diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index fc3670165f8..c32328485ec 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -40,7 +40,7 @@ func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { err = json.Unmarshal(file, &upstreamList) if err != nil { - return fmt.Errorf("error unmarshaling %s: %w", upstreamListFN, err) + return fmt.Errorf("error parsing %s: %w", upstreamListFN, err) } for _, toinstall := range upstreamList { diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/config_show.go index e411f5a322b..3d17d264574 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/config_show.go @@ -50,7 +50,7 @@ func (cli *cliConfig) showKey(key string) error { case "json": data, err := json.MarshalIndent(output, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) @@ -212,14 +212,14 @@ func (cli *cliConfig) show() error { case "json": data, err := json.MarshalIndent(cfg, "", " ") if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } fmt.Println(string(data)) case "raw": data, err := yaml.Marshal(cfg) if err != nil { - return fmt.Errorf("failed to marshal configuration: %w", err) + return fmt.Errorf("failed to serialize configuration: %w", err) } 
fmt.Println(string(data)) @@ -235,7 +235,7 @@ func (cli *cliConfig) newShowCmd() *cobra.Command { Use: "show", Short: "Displays current config", Long: `Displays the current cli configuration.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { if err := cli.cfg().LoadAPIClient(); err != nil { diff --git a/cmd/crowdsec-cli/config_showyaml.go b/cmd/crowdsec-cli/config_showyaml.go index 52daee6a65e..10549648d09 100644 --- a/cmd/crowdsec-cli/config_showyaml.go +++ b/cmd/crowdsec-cli/config_showyaml.go @@ -15,7 +15,7 @@ func (cli *cliConfig) newShowYAMLCmd() *cobra.Command { cmd := &cobra.Command{ Use: "show-yaml", Short: "Displays merged config.yaml + config.yaml.local", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.showYAML() diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index c61fc8eeded..53a7dff85a0 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -20,9 +20,11 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/version" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" - "github.com/crowdsecurity/go-cs-lib/version" ) var ( @@ -127,7 +129,7 @@ func (cli *cliDashboard) newSetupCmd() *cobra.Command { Use: "setup", Short: "Setup a metabase container.", Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, Example: ` cscli dashboard setup @@ -142,7 +144,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if metabasePassword == "" { isValid := passwordIsValid(metabasePassword) for !isValid { - metabasePassword = generatePassword(16) + 
metabasePassword = idgen.GeneratePassword(16) isValid = passwordIsValid(metabasePassword) } } @@ -196,7 +198,7 @@ func (cli *cliDashboard) newStartCmd() *cobra.Command { Use: "start", Short: "Start the metabase container.", Long: `Stats the metabase container using docker.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID) @@ -227,7 +229,7 @@ func (cli *cliDashboard) newStopCmd() *cobra.Command { Use: "stop", Short: "Stops the metabase container.", Long: `Stops the metabase container using docker.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { if err := metabase.StopContainer(metabaseContainerID); err != nil { @@ -243,7 +245,7 @@ func (cli *cliDashboard) newStopCmd() *cobra.Command { func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { cmd := &cobra.Command{Use: "show-password", Short: "displays password of metabase.", - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { m := metabase.Metabase{} @@ -266,7 +268,7 @@ func (cli *cliDashboard) newRemoveCmd() *cobra.Command { Use: "remove", Short: "removes the metabase container.", Long: `removes the metabase container using docker.`, - Args: cobra.ExactArgs(0), + Args: cobra.NoArgs, DisableAutoGenTag: true, Example: ` cscli dashboard remove diff --git a/cmd/crowdsec-cli/hubtest.go b/cmd/crowdsec-cli/hubtest.go deleted file mode 100644 index 2a4635d39f1..00000000000 --- a/cmd/crowdsec-cli/hubtest.go +++ /dev/null @@ -1,746 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "os" - "path/filepath" - "strings" - "text/template" - - "github.com/AlecAivazis/survey/v2" - "github.com/fatih/color" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - - 
"github.com/crowdsecurity/crowdsec/pkg/dumps" - "github.com/crowdsecurity/crowdsec/pkg/emoji" - "github.com/crowdsecurity/crowdsec/pkg/hubtest" -) - -var ( - HubTest hubtest.HubTest - HubAppsecTests hubtest.HubTest - hubPtr *hubtest.HubTest - isAppsecTest bool -) - -type cliHubTest struct { - cfg configGetter -} - -func NewCLIHubTest(cfg configGetter) *cliHubTest { - return &cliHubTest{ - cfg: cfg, - } -} - -func (cli *cliHubTest) NewCommand() *cobra.Command { - var ( - hubPath string - crowdsecPath string - cscliPath string - ) - - cmd := &cobra.Command{ - Use: "hubtest", - Short: "Run functional tests on hub configurations", - Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", - Args: cobra.ExactArgs(0), - DisableAutoGenTag: true, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - var err error - HubTest, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, false) - if err != nil { - return fmt.Errorf("unable to load hubtest: %+v", err) - } - - HubAppsecTests, err = hubtest.NewHubTest(hubPath, crowdsecPath, cscliPath, true) - if err != nil { - return fmt.Errorf("unable to load appsec specific hubtest: %+v", err) - } - - // commands will use the hubPtr, will point to the default hubTest object, or the one dedicated to appsec tests - hubPtr = &HubTest - if isAppsecTest { - hubPtr = &HubAppsecTests - } - - return nil - }, - } - - cmd.PersistentFlags().StringVar(&hubPath, "hub", ".", "Path to hub folder") - cmd.PersistentFlags().StringVar(&crowdsecPath, "crowdsec", "crowdsec", "Path to crowdsec") - cmd.PersistentFlags().StringVar(&cscliPath, "cscli", "cscli", "Path to cscli") - cmd.PersistentFlags().BoolVar(&isAppsecTest, "appsec", false, "Command relates to appsec tests") - - cmd.AddCommand(cli.NewCreateCmd()) - cmd.AddCommand(cli.NewRunCmd()) - cmd.AddCommand(cli.NewCleanCmd()) - cmd.AddCommand(cli.NewInfoCmd()) - cmd.AddCommand(cli.NewListCmd()) - cmd.AddCommand(cli.NewCoverageCmd()) - 
cmd.AddCommand(cli.NewEvalCmd()) - cmd.AddCommand(cli.NewExplainCmd()) - - return cmd -} - -func (cli *cliHubTest) NewCreateCmd() *cobra.Command { - var ( - ignoreParsers bool - labels map[string]string - logType string - ) - - parsers := []string{} - postoverflows := []string{} - scenarios := []string{} - - cmd := &cobra.Command{ - Use: "create", - Short: "create [test_name]", - Example: `cscli hubtest create my-awesome-test --type syslog -cscli hubtest create my-nginx-custom-test --type nginx -cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios crowdsecurity/http-probing`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - testName := args[0] - testPath := filepath.Join(hubPtr.HubTestPath, testName) - if _, err := os.Stat(testPath); os.IsExist(err) { - return fmt.Errorf("test '%s' already exists in '%s', exiting", testName, testPath) - } - - if isAppsecTest { - logType = "appsec" - } - - if logType == "" { - return errors.New("please provide a type (--type) for the test") - } - - if err := os.MkdirAll(testPath, os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", testPath, err) - } - - configFilePath := filepath.Join(testPath, "config.yaml") - - configFileData := &hubtest.HubTestItemConfig{} - if logType == "appsec" { - // create empty nuclei template file - nucleiFileName := fmt.Sprintf("%s.yaml", testName) - nucleiFilePath := filepath.Join(testPath, nucleiFileName) - - nucleiFile, err := os.OpenFile(nucleiFilePath, os.O_RDWR|os.O_CREATE, 0o755) - if err != nil { - return err - } - - ntpl := template.Must(template.New("nuclei").Parse(hubtest.TemplateNucleiFile)) - if ntpl == nil { - return errors.New("unable to parse nuclei template") - } - ntpl.ExecuteTemplate(nucleiFile, "nuclei", struct{ TestName string }{TestName: testName}) - nucleiFile.Close() - configFileData.AppsecRules = []string{"./appsec-rules//your_rule_here.yaml"} - 
configFileData.NucleiTemplate = nucleiFileName - fmt.Println() - fmt.Printf(" Test name : %s\n", testName) - fmt.Printf(" Test path : %s\n", testPath) - fmt.Printf(" Config File : %s\n", configFilePath) - fmt.Printf(" Nuclei Template : %s\n", nucleiFilePath) - } else { - // create empty log file - logFileName := fmt.Sprintf("%s.log", testName) - logFilePath := filepath.Join(testPath, logFileName) - logFile, err := os.Create(logFilePath) - if err != nil { - return err - } - logFile.Close() - - // create empty parser assertion file - parserAssertFilePath := filepath.Join(testPath, hubtest.ParserAssertFileName) - parserAssertFile, err := os.Create(parserAssertFilePath) - if err != nil { - return err - } - parserAssertFile.Close() - // create empty scenario assertion file - scenarioAssertFilePath := filepath.Join(testPath, hubtest.ScenarioAssertFileName) - scenarioAssertFile, err := os.Create(scenarioAssertFilePath) - if err != nil { - return err - } - scenarioAssertFile.Close() - - parsers = append(parsers, "crowdsecurity/syslog-logs") - parsers = append(parsers, "crowdsecurity/dateparse-enrich") - - if len(scenarios) == 0 { - scenarios = append(scenarios, "") - } - - if len(postoverflows) == 0 { - postoverflows = append(postoverflows, "") - } - configFileData.Parsers = parsers - configFileData.Scenarios = scenarios - configFileData.PostOverflows = postoverflows - configFileData.LogFile = logFileName - configFileData.LogType = logType - configFileData.IgnoreParsers = ignoreParsers - configFileData.Labels = labels - fmt.Println() - fmt.Printf(" Test name : %s\n", testName) - fmt.Printf(" Test path : %s\n", testPath) - fmt.Printf(" Log file : %s (please fill it with logs)\n", logFilePath) - fmt.Printf(" Parser assertion file : %s (please fill it with assertion)\n", parserAssertFilePath) - fmt.Printf(" Scenario assertion file : %s (please fill it with assertion)\n", scenarioAssertFilePath) - fmt.Printf(" Configuration File : %s (please fill it with parsers, 
scenarios...)\n", configFilePath) - } - - fd, err := os.Create(configFilePath) - if err != nil { - return fmt.Errorf("open: %w", err) - } - data, err := yaml.Marshal(configFileData) - if err != nil { - return fmt.Errorf("marshal: %w", err) - } - _, err = fd.Write(data) - if err != nil { - return fmt.Errorf("write: %w", err) - } - if err := fd.Close(); err != nil { - return fmt.Errorf("close: %w", err) - } - - return nil - }, - } - - cmd.PersistentFlags().StringVarP(&logType, "type", "t", "", "Log type of the test") - cmd.Flags().StringSliceVarP(&parsers, "parsers", "p", parsers, "Parsers to add to test") - cmd.Flags().StringSliceVar(&postoverflows, "postoverflows", postoverflows, "Postoverflows to add to test") - cmd.Flags().StringSliceVarP(&scenarios, "scenarios", "s", scenarios, "Scenarios to add to test") - cmd.PersistentFlags().BoolVar(&ignoreParsers, "ignore-parsers", false, "Don't run test on parsers") - - return cmd -} - - -func (cli *cliHubTest) run(runAll bool, NucleiTargetHost string, AppSecHost string, args []string) error { - cfg := cli.cfg() - - if !runAll && len(args) == 0 { - return errors.New("please provide test to run or --all flag") - } - hubPtr.NucleiTargetHost = NucleiTargetHost - hubPtr.AppSecHost = AppSecHost - if runAll { - if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - } else { - for _, testName := range args { - _, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - } - } - - // set timezone to avoid DST issues - os.Setenv("TZ", "UTC") - for _, test := range hubPtr.Tests { - if cfg.Cscli.Output == "human" { - log.Infof("Running test '%s'", test.Name) - } - err := test.Run() - if err != nil { - log.Errorf("running test '%s' failed: %+v", test.Name, err) - } - } - - return nil -} - - -func (cli *cliHubTest) NewRunCmd() *cobra.Command { - var ( - noClean bool - runAll bool - forceClean bool - 
NucleiTargetHost string - AppSecHost string - ) - - cmd := &cobra.Command{ - Use: "run", - Short: "run [test_name]", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.run(runAll, NucleiTargetHost, AppSecHost, args) - }, - PersistentPostRunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - - success := true - testResult := make(map[string]bool) - for _, test := range hubPtr.Tests { - if test.AutoGen && !isAppsecTest { - if test.ParserAssert.AutoGenAssert { - log.Warningf("Assert file '%s' is empty, generating assertion:", test.ParserAssert.File) - fmt.Println() - fmt.Println(test.ParserAssert.AutoGenAssertData) - } - if test.ScenarioAssert.AutoGenAssert { - log.Warningf("Assert file '%s' is empty, generating assertion:", test.ScenarioAssert.File) - fmt.Println() - fmt.Println(test.ScenarioAssert.AutoGenAssertData) - } - if !noClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) - } - testResult[test.Name] = test.Success - if test.Success { - if cfg.Cscli.Output == "human" { - log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) - } - if !noClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - } else { - success = false - cleanTestEnv := false - if cfg.Cscli.Output == "human" { - if len(test.ParserAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Parser test '%s' failed (%d errors)\n", test.Name, len(test.ParserAssert.Fails)) - for _, fail := range test.ParserAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, 
"\n")) - } - fmt.Println() - } - } - if len(test.ScenarioAssert.Fails) > 0 { - fmt.Println() - log.Errorf("Scenario test '%s' failed (%d errors)\n", test.Name, len(test.ScenarioAssert.Fails)) - for _, fail := range test.ScenarioAssert.Fails { - fmt.Printf("(L.%d) %s => %s\n", fail.Line, emoji.RedCircle, fail.Expression) - fmt.Printf(" Actual expression values:\n") - for key, value := range fail.Debug { - fmt.Printf(" %s = '%s'\n", key, strings.TrimSuffix(value, "\n")) - } - fmt.Println() - } - } - if !forceClean && !noClean { - prompt := &survey.Confirm{ - Message: fmt.Sprintf("\nDo you want to remove runtime folder for test '%s'? (default: Yes)", test.Name), - Default: true, - } - if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { - return fmt.Errorf("unable to ask to remove runtime folder: %w", err) - } - } - } - - if cleanTestEnv || forceClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - } - } - - switch cfg.Cscli.Output { - case "human": - hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) - case "json": - jsonResult := make(map[string][]string, 0) - jsonResult["success"] = make([]string, 0) - jsonResult["fail"] = make([]string, 0) - for testName, success := range testResult { - if success { - jsonResult["success"] = append(jsonResult["success"], testName) - } else { - jsonResult["fail"] = append(jsonResult["fail"], testName) - } - } - jsonStr, err := json.Marshal(jsonResult) - if err != nil { - return fmt.Errorf("unable to json test result: %w", err) - } - fmt.Println(string(jsonStr)) - default: - return errors.New("only human/json output modes are supported") - } - - if !success { - return errors.New("some tests failed") - } - - return nil - }, - } - - cmd.Flags().BoolVar(&noClean, "no-clean", false, "Don't clean runtime environment if test succeed") - cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") - 
cmd.Flags().StringVar(&NucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") - cmd.Flags().StringVar(&AppSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") - cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") - - return cmd -} - -func (cli *cliHubTest) NewCleanCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "clean", - Short: "clean [test_name]", - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } - } - - return nil - }, - } - - return cmd -} - -func (cli *cliHubTest) NewInfoCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "info", - Short: "info [test_name]", - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) - } - fmt.Println() - fmt.Printf(" Test name : %s\n", test.Name) - fmt.Printf(" Test path : %s\n", test.Path) - if isAppsecTest { - fmt.Printf(" Nuclei Template : %s\n", test.Config.NucleiTemplate) - fmt.Printf(" Appsec Rules : %s\n", strings.Join(test.Config.AppsecRules, ", ")) - } else { - fmt.Printf(" Log file : %s\n", filepath.Join(test.Path, test.Config.LogFile)) - fmt.Printf(" Parser assertion file : %s\n", filepath.Join(test.Path, hubtest.ParserAssertFileName)) - fmt.Printf(" Scenario assertion file : %s\n", filepath.Join(test.Path, hubtest.ScenarioAssertFileName)) - } - fmt.Printf(" Configuration File : %s\n", filepath.Join(test.Path, "config.yaml")) - } - - return nil - }, - } - - return cmd -} - -func (cli 
*cliHubTest) NewListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "list", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - - if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %w", err) - } - - switch cfg.Cscli.Output { - case "human": - hubTestListTable(color.Output, cfg.Cscli.Color, hubPtr.Tests) - case "json": - j, err := json.MarshalIndent(hubPtr.Tests, " ", " ") - if err != nil { - return err - } - fmt.Println(string(j)) - default: - return errors.New("only human/json output modes are supported") - } - - return nil - }, - } - - return cmd -} - -func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAppsecCov bool, showOnlyPercent bool) error { - cfg := cli.cfg() - - // for this one we explicitly don't do for appsec - if err := HubTest.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) - } - - var err error - - scenarioCoverage := []hubtest.Coverage{} - parserCoverage := []hubtest.Coverage{} - appsecRuleCoverage := []hubtest.Coverage{} - scenarioCoveragePercent := 0 - parserCoveragePercent := 0 - appsecRuleCoveragePercent := 0 - - // if both are false (flag by default), show both - showAll := !showScenarioCov && !showParserCov && !showAppsecCov - - if showParserCov || showAll { - parserCoverage, err = HubTest.GetParsersCoverage() - if err != nil { - return fmt.Errorf("while getting parser coverage: %w", err) - } - - parserTested := 0 - - for _, test := range parserCoverage { - if test.TestsCount > 0 { - parserTested++ - } - } - - parserCoveragePercent = int(math.Round((float64(parserTested) / float64(len(parserCoverage)) * 100))) - } - - if showScenarioCov || showAll { - scenarioCoverage, err = HubTest.GetScenariosCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } - - scenarioTested := 0 - - for _, test := range scenarioCoverage { - if 
test.TestsCount > 0 { - scenarioTested++ - } - } - - scenarioCoveragePercent = int(math.Round((float64(scenarioTested) / float64(len(scenarioCoverage)) * 100))) - } - - if showAppsecCov || showAll { - appsecRuleCoverage, err = HubTest.GetAppsecCoverage() - if err != nil { - return fmt.Errorf("while getting scenario coverage: %w", err) - } - - appsecRuleTested := 0 - - for _, test := range appsecRuleCoverage { - if test.TestsCount > 0 { - appsecRuleTested++ - } - } - - appsecRuleCoveragePercent = int(math.Round((float64(appsecRuleTested) / float64(len(appsecRuleCoverage)) * 100))) - } - - if showOnlyPercent { - switch { - case showAll: - fmt.Printf("parsers=%d%%\nscenarios=%d%%\nappsec_rules=%d%%", parserCoveragePercent, scenarioCoveragePercent, appsecRuleCoveragePercent) - case showParserCov: - fmt.Printf("parsers=%d%%", parserCoveragePercent) - case showScenarioCov: - fmt.Printf("scenarios=%d%%", scenarioCoveragePercent) - case showAppsecCov: - fmt.Printf("appsec_rules=%d%%", appsecRuleCoveragePercent) - } - - return nil - } - - switch cfg.Cscli.Output { - case "human": - if showParserCov || showAll { - hubTestParserCoverageTable(color.Output, cfg.Cscli.Color, parserCoverage) - } - - if showScenarioCov || showAll { - hubTestScenarioCoverageTable(color.Output, cfg.Cscli.Color, scenarioCoverage) - } - - if showAppsecCov || showAll { - hubTestAppsecRuleCoverageTable(color.Output, cfg.Cscli.Color, appsecRuleCoverage) - } - - fmt.Println() - - if showParserCov || showAll { - fmt.Printf("PARSERS : %d%% of coverage\n", parserCoveragePercent) - } - - if showScenarioCov || showAll { - fmt.Printf("SCENARIOS : %d%% of coverage\n", scenarioCoveragePercent) - } - - if showAppsecCov || showAll { - fmt.Printf("APPSEC RULES : %d%% of coverage\n", appsecRuleCoveragePercent) - } - case "json": - dump, err := json.MarshalIndent(parserCoverage, "", " ") - if err != nil { - return err - } - - fmt.Printf("%s", dump) - - dump, err = json.MarshalIndent(scenarioCoverage, "", " ") - if err 
!= nil { - return err - } - - fmt.Printf("%s", dump) - - dump, err = json.MarshalIndent(appsecRuleCoverage, "", " ") - if err != nil { - return err - } - - fmt.Printf("%s", dump) - default: - return errors.New("only human/json output modes are supported") - } - - return nil -} - -func (cli *cliHubTest) NewCoverageCmd() *cobra.Command { - var ( - showParserCov bool - showScenarioCov bool - showOnlyPercent bool - showAppsecCov bool - ) - - cmd := &cobra.Command{ - Use: "coverage", - Short: "coverage", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.coverage(showScenarioCov, showParserCov, showAppsecCov, showOnlyPercent) - }, - } - - cmd.PersistentFlags().BoolVar(&showOnlyPercent, "percent", false, "Show only percentages of coverage") - cmd.PersistentFlags().BoolVar(&showParserCov, "parsers", false, "Show only parsers coverage") - cmd.PersistentFlags().BoolVar(&showScenarioCov, "scenarios", false, "Show only scenarios coverage") - cmd.PersistentFlags().BoolVar(&showAppsecCov, "appsec", false, "Show only appsec coverage") - - return cmd -} - -func (cli *cliHubTest) NewEvalCmd() *cobra.Command { - var evalExpression string - - cmd := &cobra.Command{ - Use: "eval", - Short: "eval [test_name]", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("can't load test: %+v", err) - } - - err = test.ParserAssert.LoadTest(test.ParserResultFile) - if err != nil { - return fmt.Errorf("can't load test results from '%s': %+v", test.ParserResultFile, err) - } - - output, err := test.ParserAssert.EvalExpression(evalExpression) - if err != nil { - return err - } - - fmt.Print(output) - } - - return nil - }, - } - - cmd.PersistentFlags().StringVarP(&evalExpression, "expr", "e", "", "Expression to eval") - - return cmd -} - -func (cli *cliHubTest) NewExplainCmd() 
*cobra.Command { - cmd := &cobra.Command{ - Use: "explain", - Short: "explain [test_name]", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := HubTest.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("can't load test: %+v", err) - } - err = test.ParserAssert.LoadTest(test.ParserResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { - return fmt.Errorf("unable to load parser result after run: %w", err) - } - } - - err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) - if err != nil { - if err = test.Run(); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) - } - - if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { - return fmt.Errorf("unable to load scenario result after run: %w", err) - } - } - opts := dumps.DumpOpts{} - dumps.DumpTree(*test.ParserAssert.TestData, *test.ScenarioAssert.PourData, opts) - } - - return nil - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/idgen/machineid.go b/cmd/crowdsec-cli/idgen/machineid.go new file mode 100644 index 00000000000..4bd356b3abc --- /dev/null +++ b/cmd/crowdsec-cli/idgen/machineid.go @@ -0,0 +1,48 @@ +package idgen + +import ( + "fmt" + "strings" + + "github.com/google/uuid" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/machineid" +) + +// Returns a unique identifier for each crowdsec installation, using an +// identifier of the OS installation where available, otherwise a random +// string. 
+func generateMachineIDPrefix() (string, error) { + prefix, err := machineid.ID() + if err == nil { + return prefix, nil + } + + log.Debugf("failed to get machine-id with usual files: %s", err) + + bID, err := uuid.NewRandom() + if err == nil { + return bID.String(), nil + } + + return "", fmt.Errorf("generating machine id: %w", err) +} + +// Generate a unique identifier, composed by a prefix and a random suffix. +// The prefix can be provided by a parameter to use in test environments. +func GenerateMachineID(prefix string) (string, error) { + var err error + if prefix == "" { + prefix, err = generateMachineIDPrefix() + } + + if err != nil { + return "", err + } + + prefix = strings.ReplaceAll(prefix, "-", "")[:32] + suffix := GeneratePassword(16) + + return prefix + suffix, nil +} diff --git a/cmd/crowdsec-cli/idgen/password.go b/cmd/crowdsec-cli/idgen/password.go new file mode 100644 index 00000000000..e0faa4daacc --- /dev/null +++ b/cmd/crowdsec-cli/idgen/password.go @@ -0,0 +1,32 @@ +package idgen + +import ( + saferand "crypto/rand" + "math/big" + + log "github.com/sirupsen/logrus" +) + +const PasswordLength = 64 + +func GeneratePassword(length int) string { + upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" + lower := "abcdefghijklmnopqrstuvwxyz" + digits := "0123456789" + + charset := upper + lower + digits + charsetLength := len(charset) + + buf := make([]byte, length) + + for i := range length { + rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) + if err != nil { + log.Fatalf("failed getting data from prng for password generation : %s", err) + } + + buf[i] = charset[rInt.Int64()] + } + + return string(buf) +} diff --git a/cmd/crowdsec-cli/machines.go b/cmd/crowdsec-cli/machines.go deleted file mode 100644 index dcdb1963b49..00000000000 --- a/cmd/crowdsec-cli/machines.go +++ /dev/null @@ -1,771 +0,0 @@ -package main - -import ( - saferand "crypto/rand" - "encoding/csv" - "encoding/json" - "errors" - "fmt" - "io" - "math/big" - "os" - 
"slices" - "strings" - "time" - - "github.com/AlecAivazis/survey/v2" - "github.com/fatih/color" - "github.com/go-openapi/strfmt" - "github.com/google/uuid" - "github.com/jedib0t/go-pretty/v6/table" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/machineid" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/database" - "github.com/crowdsecurity/crowdsec/pkg/database/ent" - "github.com/crowdsecurity/crowdsec/pkg/emoji" - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -const passwordLength = 64 - -func generatePassword(length int) string { - upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" - lower := "abcdefghijklmnopqrstuvwxyz" - digits := "0123456789" - - charset := upper + lower + digits - charsetLength := len(charset) - - buf := make([]byte, length) - - for i := range length { - rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) - if err != nil { - log.Fatalf("failed getting data from prng for password generation : %s", err) - } - - buf[i] = charset[rInt.Int64()] - } - - return string(buf) -} - -// Returns a unique identifier for each crowdsec installation, using an -// identifier of the OS installation where available, otherwise a random -// string. -func generateIDPrefix() (string, error) { - prefix, err := machineid.ID() - if err == nil { - return prefix, nil - } - - log.Debugf("failed to get machine-id with usual files: %s", err) - - bID, err := uuid.NewRandom() - if err == nil { - return bID.String(), nil - } - - return "", fmt.Errorf("generating machine id: %w", err) -} - -// Generate a unique identifier, composed by a prefix and a random suffix. -// The prefix can be provided by a parameter to use in test environments. 
-func generateID(prefix string) (string, error) { - var err error - if prefix == "" { - prefix, err = generateIDPrefix() - } - - if err != nil { - return "", err - } - - prefix = strings.ReplaceAll(prefix, "-", "")[:32] - suffix := generatePassword(16) - - return prefix + suffix, nil -} - -// getLastHeartbeat returns the last heartbeat timestamp of a machine -// and a boolean indicating if the machine is considered active or not. -func getLastHeartbeat(m *ent.Machine) (string, bool) { - if m.LastHeartbeat == nil { - return "-", false - } - - elapsed := time.Now().UTC().Sub(*m.LastHeartbeat) - - hb := elapsed.Truncate(time.Second).String() - if elapsed > 2*time.Minute { - return hb, false - } - - return hb, true -} - -type cliMachines struct { - db *database.Client - cfg configGetter -} - -func NewCLIMachines(cfg configGetter) *cliMachines { - return &cliMachines{ - cfg: cfg, - } -} - -func (cli *cliMachines) NewCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "machines [action]", - Short: "Manage local API machines [requires local API]", - Long: `To list/add/delete/validate/prune machines. -Note: This command requires database direct access, so is intended to be run on the local API machine. 
-`, - Example: `cscli machines [action]`, - DisableAutoGenTag: true, - Aliases: []string{"machine"}, - PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - var err error - if err = require.LAPI(cli.cfg()); err != nil { - return err - } - cli.db, err = require.DBClient(cmd.Context(), cli.cfg().DbConfig) - if err != nil { - return err - } - - return nil - }, - } - - cmd.AddCommand(cli.newListCmd()) - cmd.AddCommand(cli.newAddCmd()) - cmd.AddCommand(cli.newDeleteCmd()) - cmd.AddCommand(cli.newValidateCmd()) - cmd.AddCommand(cli.newPruneCmd()) - cmd.AddCommand(cli.newInspectCmd()) - - return cmd -} - -func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { - state := machine.Hubstate - - if len(state) == 0 { - fmt.Println("No hub items found for this machine") - return - } - - // group state rows by type for multiple tables - rowsByType := make(map[string][]table.Row) - - for itemType, items := range state { - for _, item := range items { - if _, ok := rowsByType[itemType]; !ok { - rowsByType[itemType] = make([]table.Row, 0) - } - - row := table.Row{item.Name, item.Status, item.Version} - rowsByType[itemType] = append(rowsByType[itemType], row) - } - } - - for itemType, rows := range rowsByType { - t := cstable.New(out, cli.cfg().Cscli.Color).Writer - t.AppendHeader(table.Row{"Name", "Status", "Version"}) - t.SetTitle(itemType) - t.AppendRows(rows) - io.WriteString(out, t.Render() + "\n") - } -} - -func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { - t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer - t.AppendHeader(table.Row{"Name", "IP Address", "Last Update", "Status", "Version", "OS", "Auth Type", "Last Heartbeat"}) - - for _, m := range machines { - validated := emoji.Prohibited - if m.IsValidated { - validated = emoji.CheckMark - } - - hb, active := getLastHeartbeat(m) - if !active { - hb = emoji.Warning + " " + hb - } - - t.AppendRow(table.Row{m.MachineId, m.IpAddress, 
m.UpdatedAt.Format(time.RFC3339), validated, m.Version, getOSNameAndVersion(m), m.AuthType, hb}) - } - - io.WriteString(out, t.Render() + "\n") -} - -// machineInfo contains only the data we want for inspect/list: no hub status, scenarios, edges, etc. -type machineInfo struct { - CreatedAt time.Time `json:"created_at,omitempty"` - UpdatedAt time.Time `json:"updated_at,omitempty"` - LastPush *time.Time `json:"last_push,omitempty"` - LastHeartbeat *time.Time `json:"last_heartbeat,omitempty"` - MachineId string `json:"machineId,omitempty"` - IpAddress string `json:"ipAddress,omitempty"` - Version string `json:"version,omitempty"` - IsValidated bool `json:"isValidated,omitempty"` - AuthType string `json:"auth_type"` - OS string `json:"os,omitempty"` - Featureflags []string `json:"featureflags,omitempty"` - Datasources map[string]int64 `json:"datasources,omitempty"` -} - -func newMachineInfo(m *ent.Machine) machineInfo { - return machineInfo{ - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, - LastPush: m.LastPush, - LastHeartbeat: m.LastHeartbeat, - MachineId: m.MachineId, - IpAddress: m.IpAddress, - Version: m.Version, - IsValidated: m.IsValidated, - AuthType: m.AuthType, - OS: getOSNameAndVersion(m), - Featureflags: getFeatureFlagList(m), - Datasources: m.Datasources, - } -} - -func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { - csvwriter := csv.NewWriter(out) - - err := csvwriter.Write([]string{"machine_id", "ip_address", "updated_at", "validated", "version", "auth_type", "last_heartbeat", "os"}) - if err != nil { - return fmt.Errorf("failed to write header: %w", err) - } - - for _, m := range machines { - validated := "false" - if m.IsValidated { - validated = "true" - } - - hb := "-" - if m.LastHeartbeat != nil { - hb = m.LastHeartbeat.Format(time.RFC3339) - } - - if err := csvwriter.Write([]string{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, m.AuthType, hb, fmt.Sprintf("%s/%s", m.Osname, 
m.Osversion)}); err != nil { - return fmt.Errorf("failed to write raw output: %w", err) - } - } - - csvwriter.Flush() - - return nil -} - -func (cli *cliMachines) list(out io.Writer) error { - machines, err := cli.db.ListMachines() - if err != nil { - return fmt.Errorf("unable to list machines: %w", err) - } - - switch cli.cfg().Cscli.Output { - case "human": - cli.listHuman(out, machines) - case "json": - info := make([]machineInfo, 0, len(machines)) - for _, m := range machines { - info = append(info, newMachineInfo(m)) - } - - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(info); err != nil { - return errors.New("failed to marshal") - } - - return nil - case "raw": - return cli.listCSV(out, machines) - } - - return nil -} - -func (cli *cliMachines) newListCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - Short: "list all machines in the database", - Long: `list all machines in the database with their status and last heartbeat`, - Example: `cscli machines list`, - Args: cobra.NoArgs, - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.list(color.Output) - }, - } - - return cmd -} - -func (cli *cliMachines) newAddCmd() *cobra.Command { - var ( - password MachinePassword - dumpFile string - apiURL string - interactive bool - autoAdd bool - force bool - ) - - cmd := &cobra.Command{ - Use: "add", - Short: "add a single machine to the database", - DisableAutoGenTag: true, - Long: `Register a new machine in the database. 
cscli should be on the same machine as LAPI.`, - Example: `cscli machines add --auto -cscli machines add MyTestMachine --auto -cscli machines add MyTestMachine --password MyPassword -cscli machines add -f- --auto > /tmp/mycreds.yaml`, - RunE: func(_ *cobra.Command, args []string) error { - return cli.add(args, string(password), dumpFile, apiURL, interactive, autoAdd, force) - }, - } - - flags := cmd.Flags() - flags.VarP(&password, "password", "p", "machine password to login to the API") - flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") - flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API") - flags.BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password") - flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)") - flags.BoolVar(&force, "force", false, "will force add the machine if it already exist") - - return cmd -} - -func (cli *cliMachines) add(args []string, machinePassword string, dumpFile string, apiURL string, interactive bool, autoAdd bool, force bool) error { - var ( - err error - machineID string - ) - - // create machineID if not specified by user - if len(args) == 0 { - if !autoAdd { - return errors.New("please specify a machine name to add, or use --auto") - } - - machineID, err = generateID("") - if err != nil { - return fmt.Errorf("unable to generate machine id: %w", err) - } - } else { - machineID = args[0] - } - - clientCfg := cli.cfg().API.Client - serverCfg := cli.cfg().API.Server - - /*check if file already exists*/ - if dumpFile == "" && clientCfg != nil && clientCfg.CredentialsFilePath != "" { - credFile := clientCfg.CredentialsFilePath - // use the default only if the file does not exist - _, err = os.Stat(credFile) - - switch { - case os.IsNotExist(err) || force: - dumpFile = credFile - case err != nil: - return fmt.Errorf("unable to stat 
'%s': %w", credFile, err) - default: - return fmt.Errorf(`credentials file '%s' already exists: please remove it, use "--force" or specify a different file with "-f" ("-f -" for standard output)`, credFile) - } - } - - if dumpFile == "" { - return errors.New(`please specify a file to dump credentials to, with -f ("-f -" for standard output)`) - } - - // create a password if it's not specified by user - if machinePassword == "" && !interactive { - if !autoAdd { - return errors.New("please specify a password with --password or use --auto") - } - - machinePassword = generatePassword(passwordLength) - } else if machinePassword == "" && interactive { - qs := &survey.Password{ - Message: "Please provide a password for the machine:", - } - survey.AskOne(qs, &machinePassword) - } - - password := strfmt.Password(machinePassword) - - _, err = cli.db.CreateMachine(&machineID, &password, "", true, force, types.PasswordAuthType) - if err != nil { - return fmt.Errorf("unable to create machine: %w", err) - } - - fmt.Fprintf(os.Stderr, "Machine '%s' successfully added to the local API.\n", machineID) - - if apiURL == "" { - if clientCfg != nil && clientCfg.Credentials != nil && clientCfg.Credentials.URL != "" { - apiURL = clientCfg.Credentials.URL - } else if serverCfg.ClientURL() != "" { - apiURL = serverCfg.ClientURL() - } else { - return errors.New("unable to dump an api URL. 
Please provide it in your configuration or with the -u parameter") - } - } - - apiCfg := csconfig.ApiCredentialsCfg{ - Login: machineID, - Password: password.String(), - URL: apiURL, - } - - apiConfigDump, err := yaml.Marshal(apiCfg) - if err != nil { - return fmt.Errorf("unable to marshal api credentials: %w", err) - } - - if dumpFile != "" && dumpFile != "-" { - if err = os.WriteFile(dumpFile, apiConfigDump, 0o600); err != nil { - return fmt.Errorf("write api credentials in '%s' failed: %w", dumpFile, err) - } - - fmt.Fprintf(os.Stderr, "API credentials written to '%s'.\n", dumpFile) - } else { - fmt.Print(string(apiConfigDump)) - } - - return nil -} - -// validMachineID returns a list of machine IDs for command completion -func (cli *cliMachines) validMachineID(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - var err error - - cfg := cli.cfg() - - // need to load config and db because PersistentPreRunE is not called for completions - - if err = require.LAPI(cfg); err != nil { - cobra.CompError("unable to list machines " + err.Error()) - return nil, cobra.ShellCompDirectiveNoFileComp - } - - cli.db, err = require.DBClient(cmd.Context(), cfg.DbConfig) - if err != nil { - cobra.CompError("unable to list machines " + err.Error()) - return nil, cobra.ShellCompDirectiveNoFileComp - } - - machines, err := cli.db.ListMachines() - if err != nil { - cobra.CompError("unable to list machines " + err.Error()) - return nil, cobra.ShellCompDirectiveNoFileComp - } - - ret := []string{} - - for _, machine := range machines { - if strings.Contains(machine.MachineId, toComplete) && !slices.Contains(args, machine.MachineId) { - ret = append(ret, machine.MachineId) - } - } - - return ret, cobra.ShellCompDirectiveNoFileComp -} - -func (cli *cliMachines) delete(machines []string, ignoreMissing bool) error { - for _, machineID := range machines { - if err := cli.db.DeleteWatcher(machineID); err != nil { - var notFoundErr 
*database.MachineNotFoundError - if ignoreMissing && errors.As(err, ¬FoundErr) { - return nil - } - - log.Errorf("unable to delete machine: %s", err) - - return nil - } - - log.Infof("machine '%s' deleted successfully", machineID) - } - - return nil -} - -func (cli *cliMachines) newDeleteCmd() *cobra.Command { - var ignoreMissing bool - - cmd := &cobra.Command{ - Use: "delete [machine_name]...", - Short: "delete machine(s) by name", - Example: `cscli machines delete "machine1" "machine2"`, - Args: cobra.MinimumNArgs(1), - Aliases: []string{"remove"}, - DisableAutoGenTag: true, - ValidArgsFunction: cli.validMachineID, - RunE: func(_ *cobra.Command, args []string) error { - return cli.delete(args, ignoreMissing) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&ignoreMissing, "ignore-missing", false, "don't print errors if one or more machines don't exist") - - return cmd -} - -func (cli *cliMachines) prune(duration time.Duration, notValidOnly bool, force bool) error { - if duration < 2*time.Minute && !notValidOnly { - if yes, err := askYesNo( - "The duration you provided is less than 2 minutes. "+ - "This can break installations if the machines are only temporarily disconnected. Continue?", false); err != nil { - return err - } else if !yes { - fmt.Println("User aborted prune. No changes were made.") - return nil - } - } - - machines := []*ent.Machine{} - if pending, err := cli.db.QueryPendingMachine(); err == nil { - machines = append(machines, pending...) - } - - if !notValidOnly { - if pending, err := cli.db.QueryMachinesInactiveSince(time.Now().UTC().Add(-duration)); err == nil { - machines = append(machines, pending...) - } - } - - if len(machines) == 0 { - fmt.Println("No machines to prune.") - return nil - } - - cli.listHuman(color.Output, machines) - - if !force { - if yes, err := askYesNo( - "You are about to PERMANENTLY remove the above machines from the database. "+ - "These will NOT be recoverable. 
Continue?", false); err != nil { - return err - } else if !yes { - fmt.Println("User aborted prune. No changes were made.") - return nil - } - } - - deleted, err := cli.db.BulkDeleteWatchers(machines) - if err != nil { - return fmt.Errorf("unable to prune machines: %w", err) - } - - fmt.Fprintf(os.Stderr, "successfully deleted %d machines\n", deleted) - - return nil -} - -func (cli *cliMachines) newPruneCmd() *cobra.Command { - var ( - duration time.Duration - notValidOnly bool - force bool - ) - - const defaultDuration = 10 * time.Minute - - cmd := &cobra.Command{ - Use: "prune", - Short: "prune multiple machines from the database", - Long: `prune multiple machines that are not validated or have not connected to the local API in a given duration.`, - Example: `cscli machines prune -cscli machines prune --duration 1h -cscli machines prune --not-validated-only --force`, - Args: cobra.NoArgs, - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, _ []string) error { - return cli.prune(duration, notValidOnly, force) - }, - } - - flags := cmd.Flags() - flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since validated machine last heartbeat") - flags.BoolVar(¬ValidOnly, "not-validated-only", false, "only prune machines that are not validated") - flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") - - return cmd -} - -func (cli *cliMachines) validate(machineID string) error { - if err := cli.db.ValidateMachine(machineID); err != nil { - return fmt.Errorf("unable to validate machine '%s': %w", machineID, err) - } - - log.Infof("machine '%s' validated successfully", machineID) - - return nil -} - -func (cli *cliMachines) newValidateCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "validate", - Short: "validate a machine to access the local API", - Long: `validate a machine to access the local API.`, - Example: `cscli machines validate "machine_name"`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: 
true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.validate(args[0]) - }, - } - - return cmd -} - -func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { - t := cstable.New(out, cli.cfg().Cscli.Color).Writer - - t.SetTitle("Machine: " + machine.MachineId) - - t.SetColumnConfigs([]table.ColumnConfig{ - {Number: 1, AutoMerge: true}, - }) - - t.AppendRows([]table.Row{ - {"IP Address", machine.IpAddress}, - {"Created At", machine.CreatedAt}, - {"Last Update", machine.UpdatedAt}, - {"Last Heartbeat", machine.LastHeartbeat}, - {"Validated?", machine.IsValidated}, - {"CrowdSec version", machine.Version}, - {"OS", getOSNameAndVersion(machine)}, - {"Auth type", machine.AuthType}, - }) - - for dsName, dsCount := range machine.Datasources { - t.AppendRow(table.Row{"Datasources", fmt.Sprintf("%s: %d", dsName, dsCount)}) - } - - for _, ff := range getFeatureFlagList(machine) { - t.AppendRow(table.Row{"Feature Flags", ff}) - } - - for _, coll := range machine.Hubstate[cwhub.COLLECTIONS] { - t.AppendRow(table.Row{"Collections", coll.Name}) - } - - io.WriteString(out, t.Render() + "\n") -} - -func (cli *cliMachines) inspect(machine *ent.Machine) error { - out := color.Output - outputFormat := cli.cfg().Cscli.Output - - switch outputFormat { - case "human": - cli.inspectHuman(out, machine) - case "json": - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(newMachineInfo(machine)); err != nil { - return errors.New("failed to marshal") - } - - return nil - default: - return fmt.Errorf("output format '%s' not supported for this command", outputFormat) - } - - return nil -} - -func (cli *cliMachines) inspectHub(machine *ent.Machine) error { - out := color.Output - - switch cli.cfg().Cscli.Output { - case "human": - cli.inspectHubHuman(out, machine) - case "json": - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - - if err := enc.Encode(machine.Hubstate); err != nil { - return errors.New("failed to marshal") - 
} - - return nil - case "raw": - csvwriter := csv.NewWriter(out) - - err := csvwriter.Write([]string{"type", "name", "status", "version"}) - if err != nil { - return fmt.Errorf("failed to write header: %w", err) - } - - rows := make([][]string, 0) - - for itemType, items := range machine.Hubstate { - for _, item := range items { - rows = append(rows, []string{itemType, item.Name, item.Status, item.Version}) - } - } - - for _, row := range rows { - if err := csvwriter.Write(row); err != nil { - return fmt.Errorf("failed to write raw output: %w", err) - } - } - - csvwriter.Flush() - } - - return nil -} - -func (cli *cliMachines) newInspectCmd() *cobra.Command { - var showHub bool - - cmd := &cobra.Command{ - Use: "inspect [machine_name]", - Short: "inspect a machine by name", - Example: `cscli machines inspect "machine1"`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: cli.validMachineID, - RunE: func(_ *cobra.Command, args []string) error { - machineID := args[0] - machine, err := cli.db.QueryMachineByID(machineID) - if err != nil { - return fmt.Errorf("unable to read machine data '%s': %w", machineID, err) - } - - if showHub { - return cli.inspectHub(machine) - } - - return cli.inspect(machine) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&showHub, "hub", "H", false, "show hub state") - - return cmd -} diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index d4046414030..1cca03b1d3d 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -14,8 +14,22 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clidecision" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihubtest" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliitem" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clilapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climachine" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/climetrics" - + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clinotifications" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clipapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisimulation" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisupport" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -152,14 +166,6 @@ func (cli *cliRoot) initialize() error { return nil } -// list of valid subcommands for the shell completion -var validArgs = []string{ - "alerts", "appsec-configs", "appsec-rules", "bouncers", "capi", "collections", - "completion", "config", "console", "contexts", "dashboard", "decisions", "explain", - "hub", "hubtest", "lapi", "machines", "metrics", "notifications", "parsers", - "postoverflows", "scenarios", "simulation", "support", "version", -} - func (cli *cliRoot) colorize(cmd *cobra.Command) { cc.Init(&cc.Config{ RootCmd: cmd, @@ -191,6 +197,14 @@ func (cli *cliRoot) NewCommand() (*cobra.Command, error) { return nil, fmt.Errorf("failed to set feature flags from env: %w", err) } + // list of valid subcommands for the shell completion + validArgs := []string{ + "alerts", "appsec-configs", "appsec-rules", "bouncers", "capi", "collections", + "completion", "config", "console", "contexts", "dashboard", "decisions", "explain", + "hub", "hubtest", "lapi", "machines", "metrics", "notifications", "parsers", + "postoverflows", "scenarios", "simulation", "support", "version", + } + cmd := &cobra.Command{ Use: "cscli", Short: "cscli allows you to manage crowdsec", @@ -238,6 +252,36 @@ It is meant to allow you to 
manage bans, parsers/scenarios/etc, api and generall return nil, err } + cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) + cmd.AddCommand(NewCLIVersion().NewCommand()) + cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) + cmd.AddCommand(clihub.New(cli.cfg).NewCommand()) + cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) + cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) + cmd.AddCommand(clidecision.New(cli.cfg).NewCommand()) + cmd.AddCommand(clialert.New(cli.cfg).NewCommand()) + cmd.AddCommand(clisimulation.New(cli.cfg).NewCommand()) + cmd.AddCommand(clibouncer.New(cli.cfg).NewCommand()) + cmd.AddCommand(climachine.New(cli.cfg).NewCommand()) + cmd.AddCommand(clicapi.New(cli.cfg).NewCommand()) + cmd.AddCommand(clilapi.New(cli.cfg).NewCommand()) + cmd.AddCommand(NewCompletionCmd()) + cmd.AddCommand(cliconsole.New(cli.cfg).NewCommand()) + cmd.AddCommand(cliexplain.New(cli.cfg, ConfigFilePath).NewCommand()) + cmd.AddCommand(clihubtest.New(cli.cfg).NewCommand()) + cmd.AddCommand(clinotifications.New(cli.cfg).NewCommand()) + cmd.AddCommand(clisupport.New(cli.cfg).NewCommand()) + cmd.AddCommand(clipapi.New(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewCollection(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewParser(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewScenario(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewPostOverflow(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewContext(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewAppsecConfig(cli.cfg).NewCommand()) + cmd.AddCommand(cliitem.NewAppsecRule(cli.cfg).NewCommand()) + + cli.addSetup(cmd) + if len(os.Args) > 1 { cobra.OnInitialize( func() { @@ -248,38 +292,6 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall ) } - cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) - cmd.AddCommand(NewCLIVersion().NewCommand()) - cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIHub(cli.cfg).NewCommand()) - 
cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIDecisions(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAlerts(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLISimulation(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIBouncers(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIMachines(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICapi(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLILapi(cli.cfg).NewCommand()) - cmd.AddCommand(NewCompletionCmd()) - cmd.AddCommand(NewCLIConsole(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIExplain(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIHubTest(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLINotifications(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLISupport(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIPapi(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLICollection(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIParser(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIScenario(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIPostOverflow(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIContext(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAppsecConfig(cli.cfg).NewCommand()) - cmd.AddCommand(NewCLIAppsecRule(cli.cfg).NewCommand()) - - if fflag.CscliSetup.IsEnabled() { - cmd.AddCommand(NewCLISetup(cli.cfg).NewCommand()) - } - return cmd, nil } diff --git a/cmd/crowdsec-cli/messages.go b/cmd/crowdsec-cli/messages.go deleted file mode 100644 index 02f051601e4..00000000000 --- a/cmd/crowdsec-cli/messages.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "fmt" - "runtime" -) - -// ReloadMessage returns a description of the task required to reload -// the crowdsec configuration, according to the operating system. 
-func ReloadMessage() string { - var msg string - - switch runtime.GOOS { - case "windows": - msg = "Please restart the crowdsec service" - case "freebsd": - msg = `Run 'sudo service crowdsec reload'` - default: - msg = `Run 'sudo systemctl reload crowdsec'` - } - - return fmt.Sprintf("%s for the new configuration to be effective.", msg) -} diff --git a/cmd/crowdsec-cli/papi.go b/cmd/crowdsec-cli/papi.go deleted file mode 100644 index a2fa0a90871..00000000000 --- a/cmd/crowdsec-cli/papi.go +++ /dev/null @@ -1,148 +0,0 @@ -package main - -import ( - "fmt" - "time" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "gopkg.in/tomb.v2" - - "github.com/crowdsecurity/go-cs-lib/ptr" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/apiserver" -) - -type cliPapi struct { - cfg configGetter -} - -func NewCLIPapi(cfg configGetter) *cliPapi { - return &cliPapi{ - cfg: cfg, - } -} - -func (cli *cliPapi) NewCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "papi [action]", - Short: "Manage interaction with Polling API (PAPI)", - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - PersistentPreRunE: func(_ *cobra.Command, _ []string) error { - cfg := cli.cfg() - if err := require.LAPI(cfg); err != nil { - return err - } - if err := require.CAPI(cfg); err != nil { - return err - } - - return require.PAPI(cfg) - }, - } - - cmd.AddCommand(cli.NewStatusCmd()) - cmd.AddCommand(cli.NewSyncCmd()) - - return cmd -} - -func (cli *cliPapi) NewStatusCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "status", - Short: "Get status of the Polling API", - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, _ []string) error { - var err error - cfg := cli.cfg() - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) - if err != nil { - return err - } - - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, 
cfg.API.Server.CapiWhitelists) - if err != nil { - return fmt.Errorf("unable to initialize API client: %w", err) - } - - papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) - if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %w", err) - } - - perms, err := papi.GetPermissions() - if err != nil { - return fmt.Errorf("unable to get PAPI permissions: %w", err) - } - var lastTimestampStr *string - lastTimestampStr, err = db.GetConfigItem(apiserver.PapiPullKey) - if err != nil { - lastTimestampStr = ptr.Of("never") - } - log.Infof("You can successfully interact with Polling API (PAPI)") - log.Infof("Console plan: %s", perms.Plan) - log.Infof("Last order received: %s", *lastTimestampStr) - - log.Infof("PAPI subscriptions:") - for _, sub := range perms.Categories { - log.Infof(" - %s", sub) - } - - return nil - }, - } - - return cmd -} - -func (cli *cliPapi) NewSyncCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "sync", - Short: "Sync with the Polling API, pulling all non-expired orders for the instance", - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, _ []string) error { - var err error - cfg := cli.cfg() - t := tomb.Tomb{} - - db, err := require.DBClient(cmd.Context(), cfg.DbConfig) - if err != nil { - return err - } - - apic, err := apiserver.NewAPIC(cfg.API.Server.OnlineClient, db, cfg.API.Server.ConsoleConfig, cfg.API.Server.CapiWhitelists) - if err != nil { - return fmt.Errorf("unable to initialize API client: %w", err) - } - - t.Go(apic.Push) - - papi, err := apiserver.NewPAPI(apic, db, cfg.API.Server.ConsoleConfig, log.GetLevel()) - if err != nil { - return fmt.Errorf("unable to initialize PAPI client: %w", err) - } - - t.Go(papi.SyncDecisions) - - err = papi.PullOnce(time.Time{}, true) - if err != nil { - return fmt.Errorf("unable to sync decisions: %w", err) - } - - log.Infof("Sending acknowledgements to CAPI") - - apic.Shutdown() - papi.Shutdown() - 
t.Wait() - time.Sleep(5 * time.Second) // FIXME: the push done by apic.Push is run inside a sub goroutine, sleep to make sure it's done - - return nil - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/reload/reload.go b/cmd/crowdsec-cli/reload/reload.go new file mode 100644 index 00000000000..fe03af1ea79 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload.go @@ -0,0 +1,6 @@ +//go:build !windows && !freebsd && !linux + +package reload + +// generic message since we don't know the platform +const Message = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_freebsd.go b/cmd/crowdsec-cli/reload/reload_freebsd.go new file mode 100644 index 00000000000..0dac99f2315 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_freebsd.go @@ -0,0 +1,4 @@ +package reload + +// actually sudo is not that popular on freebsd, but this will do +const Message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_linux.go b/cmd/crowdsec-cli/reload/reload_linux.go new file mode 100644 index 00000000000..fbe16e5f168 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_linux.go @@ -0,0 +1,4 @@ +package reload + +// assume systemd, although gentoo and others may differ +const Message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_windows.go b/cmd/crowdsec-cli/reload/reload_windows.go new file mode 100644 index 00000000000..88642425ae2 --- /dev/null +++ b/cmd/crowdsec-cli/reload/reload_windows.go @@ -0,0 +1,3 @@ +package reload + +const Message = "Please restart the crowdsec service for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 15d8bce682d..191eee55bc5 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -34,6 +34,14 @@ func CAPI(c *csconfig.Config) error { } func PAPI(c *csconfig.Config) error { + if err := CAPI(c); err != nil { + return err + } + + if err := CAPIRegistered(c); err != nil { + return err + } + if c.API.Server.OnlineClient.Credentials.PapiURL == "" { return errors.New("no PAPI URL in configuration") } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index d747af9225f..66c0d71e777 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -1,304 +1,18 @@ +//go:build !no_cscli_setup package main import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "os/exec" - - goccyyaml "github.com/goccy/go-yaml" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "gopkg.in/yaml.v3" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/setup" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clisetup" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" + "github.com/crowdsecurity/crowdsec/pkg/fflag" ) -type cliSetup struct { - cfg configGetter -} - -func NewCLISetup(cfg configGetter) *cliSetup { - return &cliSetup{ - cfg: cfg, - } -} - -func (cli *cliSetup) NewCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "setup", - Short: "Tools to configure crowdsec", - Long: "Manage hub configuration and service detection", - Args: cobra.MinimumNArgs(0), - DisableAutoGenTag: true, - } - - cmd.AddCommand(cli.NewDetectCmd()) - cmd.AddCommand(cli.NewInstallHubCmd()) - cmd.AddCommand(cli.NewDataSourcesCmd()) - cmd.AddCommand(cli.NewValidateCmd()) - - return cmd -} - -type detectFlags struct { - detectConfigFile string - listSupportedServices bool - forcedUnits []string - forcedProcesses []string - 
forcedOSFamily string - forcedOSID string - forcedOSVersion string - skipServices []string - snubSystemd bool - outYaml bool -} - -func (f *detectFlags) bind(cmd *cobra.Command) { - defaultServiceDetect := csconfig.DefaultConfigPath("hub", "detect.yaml") - - flags := cmd.Flags() - flags.StringVar(&f.detectConfigFile, "detect-config", defaultServiceDetect, "path to service detection configuration") - flags.BoolVar(&f.listSupportedServices, "list-supported-services", false, "do not detect; only print supported services") - flags.StringSliceVar(&f.forcedUnits, "force-unit", nil, "force detection of a systemd unit (can be repeated)") - flags.StringSliceVar(&f.forcedProcesses, "force-process", nil, "force detection of a running process (can be repeated)") - flags.StringSliceVar(&f.skipServices, "skip-service", nil, "ignore a service, don't recommend hub/datasources (can be repeated)") - flags.StringVar(&f.forcedOSFamily, "force-os-family", "", "override OS.Family: one of linux, freebsd, windows or darwin") - flags.StringVar(&f.forcedOSID, "force-os-id", "", "override OS.ID=[debian | ubuntu | , redhat...]") - flags.StringVar(&f.forcedOSVersion, "force-os-version", "", "override OS.RawVersion (of OS or Linux distribution)") - flags.BoolVar(&f.snubSystemd, "snub-systemd", false, "don't use systemd, even if available") - flags.BoolVar(&f.outYaml, "yaml", false, "output yaml, not json") -} - -func (cli *cliSetup) NewDetectCmd() *cobra.Command { - f := detectFlags{} - - cmd := &cobra.Command{ - Use: "detect", - Short: "detect running services, generate a setup file", - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.detect(f) - }, - } - - f.bind(cmd) - return cmd -} - -func (cli *cliSetup) NewInstallHubCmd() *cobra.Command { - var dryRun bool - - cmd := &cobra.Command{ - Use: "install-hub [setup_file] [flags]", - Short: "install items from a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(cmd 
*cobra.Command, args []string) error { - return cli.install(cmd.Context(), dryRun, args[0]) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&dryRun, "dry-run", false, "don't install anything; print out what would have been") - - return cmd -} - -func (cli *cliSetup) NewDataSourcesCmd() *cobra.Command { - var toDir string - - cmd := &cobra.Command{ - Use: "datasources [setup_file] [flags]", - Short: "generate datasource (acquisition) configuration from a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.dataSources(args[0], toDir) - }, - } - - flags := cmd.Flags() - flags.StringVar(&toDir, "to-dir", "", "write the configuration to a directory, in multiple files") - - return cmd -} - -func (cli *cliSetup) NewValidateCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: "validate [setup_file]", - Short: "validate a setup file", - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.validate(args[0]) - }, - } - - return cmd -} - -func (cli *cliSetup) detect(f detectFlags) error { - var ( - detectReader *os.File - err error - ) - - switch f.detectConfigFile { - case "-": - log.Tracef("Reading detection rules from stdin") - - detectReader = os.Stdin - default: - log.Tracef("Reading detection rules: %s", f.detectConfigFile) - - detectReader, err = os.Open(f.detectConfigFile) - if err != nil { - return err - } - } - - if !f.snubSystemd { - _, err := exec.LookPath("systemctl") - if err != nil { - log.Debug("systemctl not available: snubbing systemd") - - f.snubSystemd = true - } - } - - if f.forcedOSFamily == "" && f.forcedOSID != "" { - log.Debug("force-os-id is set: force-os-family defaults to 'linux'") - - f.forcedOSFamily = "linux" - } - - if f.listSupportedServices { - supported, err := setup.ListSupported(detectReader) - if err != nil { - return err - } - - for _, svc := range supported { - fmt.Println(svc) - } 
- - return nil - } - - opts := setup.DetectOptions{ - ForcedUnits: f.forcedUnits, - ForcedProcesses: f.forcedProcesses, - ForcedOS: setup.ExprOS{ - Family: f.forcedOSFamily, - ID: f.forcedOSID, - RawVersion: f.forcedOSVersion, - }, - SkipServices: f.skipServices, - SnubSystemd: f.snubSystemd, - } - - hubSetup, err := setup.Detect(detectReader, opts) - if err != nil { - return fmt.Errorf("detecting services: %w", err) - } - - setup, err := setupAsString(hubSetup, f.outYaml) - if err != nil { - return err - } - - fmt.Println(setup) - - return nil -} - -func setupAsString(cs setup.Setup, outYaml bool) (string, error) { - var ( - ret []byte - err error - ) - - wrap := func(err error) error { - return fmt.Errorf("while marshaling setup: %w", err) - } - - indentLevel := 2 - buf := &bytes.Buffer{} - enc := yaml.NewEncoder(buf) - enc.SetIndent(indentLevel) - - if err = enc.Encode(cs); err != nil { - return "", wrap(err) - } - - if err = enc.Close(); err != nil { - return "", wrap(err) - } - - ret = buf.Bytes() - - if !outYaml { - // take a general approach to output json, so we avoid the - // double tags in the structures and can use go-yaml features - // missing from the json package - ret, err = goccyyaml.YAMLToJSON(ret) - if err != nil { - return "", wrap(err) - } - } - - return string(ret), nil -} - -func (cli *cliSetup) dataSources(fromFile string, toDir string) error { - input, err := os.ReadFile(fromFile) - if err != nil { - return fmt.Errorf("while reading setup file: %w", err) - } - - output, err := setup.DataSources(input, toDir) - if err != nil { - return err - } - - if toDir == "" { - fmt.Println(output) - } - - return nil -} - -func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) error { - input, err := os.ReadFile(fromFile) - if err != nil { - return fmt.Errorf("while reading file %s: %w", fromFile, err) - } - - cfg := cli.cfg() - - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) - if err != nil { - 
return err - } - - return setup.InstallHubItems(ctx, hub, input, dryRun) -} - -func (cli *cliSetup) validate(fromFile string) error { - input, err := os.ReadFile(fromFile) - if err != nil { - return fmt.Errorf("while reading stdin: %w", err) - } - - if err = setup.Validate(input); err != nil { - fmt.Printf("%v\n", err) - return errors.New("invalid setup file") +func (cli *cliRoot) addSetup(cmd *cobra.Command) { + if fflag.CscliSetup.IsEnabled() { + cmd.AddCommand(clisetup.New(cli.cfg).NewCommand()) } - return nil + component.Register("cscli_setup") } diff --git a/cmd/crowdsec-cli/setup_stub.go b/cmd/crowdsec-cli/setup_stub.go new file mode 100644 index 00000000000..e001f93c797 --- /dev/null +++ b/cmd/crowdsec-cli/setup_stub.go @@ -0,0 +1,9 @@ +//go:build no_cscli_setup +package main + +import ( + "github.com/spf13/cobra" +) + +func (cli *cliRoot) addSetup(_ *cobra.Command) { +} diff --git a/cmd/crowdsec-cli/utils.go b/cmd/crowdsec-cli/utils.go deleted file mode 100644 index f6c32094958..00000000000 --- a/cmd/crowdsec-cli/utils.go +++ /dev/null @@ -1,63 +0,0 @@ -package main - -import ( - "fmt" - "net" - "strings" - - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -func manageCliDecisionAlerts(ip *string, ipRange *string, scope *string, value *string) error { - /*if a range is provided, change the scope*/ - if *ipRange != "" { - _, _, err := net.ParseCIDR(*ipRange) - if err != nil { - return fmt.Errorf("%s isn't a valid range", *ipRange) - } - } - - if *ip != "" { - ipRepr := net.ParseIP(*ip) - if ipRepr == nil { - return fmt.Errorf("%s isn't a valid ip", *ip) - } - } - - // avoid confusion on scope (ip vs Ip and range vs Range) - switch strings.ToLower(*scope) { - case "ip": - *scope = types.Ip - case "range": - *scope = types.Range - case "country": - *scope = types.Country - case "as": - *scope = types.AS - } - - return nil -} - -func removeFromSlice(val string, slice []string) []string { - var i int - var value string - - valueFound := false - - // get the 
index - for i, value = range slice { - if value == val { - valueFound = true - break - } - } - - if valueFound { - slice[i] = slice[len(slice)-1] - slice[len(slice)-1] = "" - slice = slice[:len(slice)-1] - } - - return slice -} diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index c57b8d87cff..ccb0acf0209 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -1,6 +1,7 @@ package main import ( + "context" "errors" "fmt" "runtime" @@ -14,12 +15,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { +func initAPIServer(ctx context.Context, cConfig *csconfig.Config) (*apiserver.APIServer, error) { if cConfig.API.Server.OnlineClient == nil || cConfig.API.Server.OnlineClient.Credentials == nil { log.Info("push and pull to Central API disabled") } - apiServer, err := apiserver.NewServer(cConfig.API.Server) + apiServer, err := apiserver.NewServer(ctx, cConfig.API.Server) if err != nil { return nil, fmt.Errorf("unable to run local API: %w", err) } @@ -39,7 +40,7 @@ func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { return nil, errors.New("plugins are enabled, but config_paths.plugin_dir is not defined") } - err = pluginBroker.Init(cConfig.PluginConfig, cConfig.API.Server.Profiles, cConfig.ConfigPaths) + err = pluginBroker.Init(ctx, cConfig.PluginConfig, cConfig.API.Server.Profiles, cConfig.ConfigPaths) if err != nil { return nil, fmt.Errorf("unable to run plugin broker: %w", err) } @@ -58,11 +59,14 @@ func initAPIServer(cConfig *csconfig.Config) (*apiserver.APIServer, error) { func serveAPIServer(apiServer *apiserver.APIServer) { apiReady := make(chan bool, 1) + apiTomb.Go(func() error { defer trace.CatchPanic("crowdsec/serveAPIServer") + go func() { defer trace.CatchPanic("crowdsec/runAPIServer") log.Debugf("serving API after %s ms", time.Since(crowdsecT0)) + if err := apiServer.Run(apiReady); err != nil { log.Fatal(err) } @@ -76,6 +80,7 @@ func 
serveAPIServer(apiServer *apiserver.APIServer) { <-apiTomb.Dying() // lock until go routine is dying pluginTomb.Kill(nil) log.Infof("serve: shutting down api server") + return apiServer.Shutdown() }) <-apiReady @@ -87,5 +92,6 @@ func hasPlugins(profiles []*csconfig.ProfileCfg) bool { return true } } + return false } diff --git a/cmd/crowdsec/appsec.go b/cmd/crowdsec/appsec.go new file mode 100644 index 00000000000..cb02b137dcd --- /dev/null +++ b/cmd/crowdsec/appsec.go @@ -0,0 +1,18 @@ +// +build !no_datasource_appsec + +package main + +import ( + "fmt" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func LoadAppsecRules(hub *cwhub.Hub) error { + if err := appsec.LoadAppsecRules(hub); err != nil { + return fmt.Errorf("while loading appsec rules: %w", err) + } + + return nil +} diff --git a/cmd/crowdsec/appsec_stub.go b/cmd/crowdsec/appsec_stub.go new file mode 100644 index 00000000000..4a65b32a9ad --- /dev/null +++ b/cmd/crowdsec/appsec_stub.go @@ -0,0 +1,11 @@ +//go:build no_datasource_appsec + +package main + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func LoadAppsecRules(hub *cwhub.Hub) error { + return nil +} diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 5aafc6b0dfe..db93992605d 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -14,7 +14,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" - "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -43,12 +42,13 @@ func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, [] return nil, nil, fmt.Errorf("while loading parsers: %w", err) } - if err := LoadBuckets(cConfig, hub); err != nil { + if err = 
LoadBuckets(cConfig, hub); err != nil { return nil, nil, fmt.Errorf("while loading scenarios: %w", err) } - if err := appsec.LoadAppsecRules(hub); err != nil { - return nil, nil, fmt.Errorf("while loading appsec rules: %w", err) + // can be nerfed by a build flag + if err = LoadAppsecRules(hub); err != nil { + return nil, nil, err } datasources, err := LoadAcquisition(cConfig) @@ -82,6 +82,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return nil }) } + parserWg.Done() return nil @@ -108,13 +109,14 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return runPour(inputEventChan, holders, buckets, cConfig) }) } + bucketWg.Done() return nil }) bucketWg.Wait() - apiClient, err := AuthenticatedLAPIClient(*cConfig.API.Client.Credentials, hub) + apiClient, err := AuthenticatedLAPIClient(context.TODO(), *cConfig.API.Client.Credentials, hub) if err != nil { return err } @@ -134,6 +136,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H return runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient) }) } + outputWg.Done() return nil @@ -166,7 +169,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H log.Info("Starting processing data") - if err := acquisition.StartAcquisition(dataSources, inputLineChan, &acquisTomb); err != nil { + if err := acquisition.StartAcquisition(context.TODO(), dataSources, inputLineChan, &acquisTomb); err != nil { return fmt.Errorf("starting acquisition error: %w", err) } diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go index 6cc0fba9515..6656ba6b4c2 100644 --- a/cmd/crowdsec/lapiclient.go +++ b/cmd/crowdsec/lapiclient.go @@ -11,25 +11,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" 
"github.com/crowdsecurity/crowdsec/pkg/models" ) -func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, fmt.Errorf("loading list of installed hub scenarios: %w", err) - } - - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, fmt.Errorf("loading list of installed hub appsec rules: %w", err) - } - - installedScenariosAndAppsecRules := make([]string, 0, len(scenarios)+len(appsecRules)) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, scenarios...) - installedScenariosAndAppsecRules = append(installedScenariosAndAppsecRules, appsecRules...) - +func AuthenticatedLAPIClient(ctx context.Context, credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { apiURL, err := url.Parse(credentials.URL) if err != nil { return nil, fmt.Errorf("parsing api url ('%s'): %w", credentials.URL, err) @@ -42,38 +27,27 @@ func AuthenticatedLAPIClient(credentials csconfig.ApiCredentialsCfg, hub *cwhub. password := strfmt.Password(credentials.Password) + itemsForAPI := hub.GetInstalledListForAPI() + client, err := apiclient.NewClient(&apiclient.Config{ MachineID: credentials.Login, Password: password, - Scenarios: installedScenariosAndAppsecRules, - UserAgent: cwversion.UserAgent(), + Scenarios: itemsForAPI, URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", - UpdateScenario: func() ([]string, error) { - scenarios, err := hub.GetInstalledNamesByType(cwhub.SCENARIOS) - if err != nil { - return nil, err - } - appsecRules, err := hub.GetInstalledNamesByType(cwhub.APPSEC_RULES) - if err != nil { - return nil, err - } - ret := make([]string, 0, len(scenarios)+len(appsecRules)) - ret = append(ret, scenarios...) - ret = append(ret, appsecRules...) 
- - return ret, nil + UpdateScenario: func(_ context.Context) ([]string, error) { + return itemsForAPI, nil }, }) if err != nil { return nil, fmt.Errorf("new client api: %w", err) } - authResp, _, err := client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + authResp, _, err := client.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &credentials.Login, Password: &password, - Scenarios: installedScenariosAndAppsecRules, + Scenarios: itemsForAPI, }) if err != nil { return nil, fmt.Errorf("authenticate watcher (%s): %w", credentials.Login, err) diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 0fd27054071..24842851294 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -7,7 +7,6 @@ import ( "time" "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -46,10 +45,8 @@ func getHubState(hub *cwhub.Hub) models.HubItems { for _, itemType := range cwhub.ItemTypes { ret[itemType] = []models.HubItem{} - items, _ := hub.GetInstalledItemsByType(itemType) - cwhub.SortItemSlice(items) - for _, item := range items { + for _, item := range hub.GetInstalledByType(itemType, true) { status := "official" if item.State.IsLocal() { status = "custom" @@ -90,7 +87,8 @@ func newStaticMetrics(consoleOptions []string, datasources []acquisition.DataSou } func NewMetricsProvider(apic *apiclient.ApiClient, interval time.Duration, logger *logrus.Entry, - consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub) *MetricsProvider { + consoleOptions []string, datasources []acquisition.DataSource, hub *cwhub.Hub, +) *MetricsProvider { return &MetricsProvider{ apic: apic, interval: interval, diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 18416e044e7..6d8ca24c335 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -91,10 +91,8 @@ func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { files []string ) - for _, 
hubScenarioItem := range hub.GetItemMap(cwhub.SCENARIOS) { - if hubScenarioItem.State.Installed { - files = append(files, hubScenarioItem.State.LocalPath) - } + for _, hubScenarioItem := range hub.GetInstalledByType(cwhub.SCENARIOS, false) { + files = append(files, hubScenarioItem.State.LocalPath) } buckets = leakybucket.NewBuckets() diff --git a/cmd/crowdsec/metrics.go b/cmd/crowdsec/metrics.go index d3c6e172091..ff280fc3512 100644 --- a/cmd/crowdsec/metrics.go +++ b/cmd/crowdsec/metrics.go @@ -118,7 +118,9 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha return } - decisions, err := dbClient.QueryDecisionCountByScenario() + ctx := r.Context() + + decisions, err := dbClient.QueryDecisionCountByScenario(ctx) if err != nil { log.Errorf("Error querying decisions for metrics: %v", err) next.ServeHTTP(w, r) @@ -138,7 +140,7 @@ func computeDynamicMetrics(next http.Handler, dbClient *database.Client) http.Ha "include_capi": {"false"}, } - alerts, err := dbClient.AlertsCountPerScenario(alertsFilter) + alerts, err := dbClient.AlertsCountPerScenario(ctx, alertsFilter) if err != nil { log.Errorf("Error querying alerts for metrics: %v", err) next.ServeHTTP(w, r) diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 388c7a6c1b3..2fc7d7e42c9 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -32,7 +32,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea if parsed.MarshaledTime != "" { z := &time.Time{} if err := z.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { - log.Warningf("Failed to unmarshal time from event '%s' : %s", parsed.MarshaledTime, err) + log.Warningf("Failed to parse time from event '%s' : %s", parsed.MarshaledTime, err) } else { log.Warning("Starting buckets garbage collection ...") @@ -59,9 +59,9 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea globalBucketPourKo.Inc() } - if len(parsed.MarshaledTime) != 0 { + if 
parsed.MarshaledTime != "" { if err := lastProcessedItem.UnmarshalText([]byte(parsed.MarshaledTime)); err != nil { - log.Warningf("failed to unmarshal time from event : %s", err) + log.Warningf("failed to parse time from event : %s", err) } } } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index f1a658e9512..14602c425fe 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -52,6 +52,8 @@ func debugHandler(sig os.Signal, cConfig *csconfig.Config) error { func reloadHandler(sig os.Signal) (*csconfig.Config, error) { var tmpFile string + ctx := context.TODO() + // re-initialize tombs acquisTomb = tomb.Tomb{} parsersTomb = tomb.Tomb{} @@ -74,7 +76,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { cConfig.API.Server.OnlineClient = nil } - apiServer, err := initAPIServer(cConfig) + apiServer, err := initAPIServer(ctx, cConfig) if err != nil { return nil, fmt.Errorf("unable to init api server: %w", err) } @@ -88,7 +90,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { return nil, err } - if err := hub.Load(); err != nil { + if err = hub.Load(); err != nil { return nil, err } @@ -374,7 +376,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { cConfig.API.Server.OnlineClient = nil } - apiServer, err := initAPIServer(cConfig) + apiServer, err := initAPIServer(ctx, cConfig) if err != nil { return fmt.Errorf("api server init: %w", err) } @@ -390,7 +392,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { return err } - if err := hub.Load(); err != nil { + if err = hub.Load(); err != nil { return err } diff --git a/cmd/notification-dummy/main.go b/cmd/notification-dummy/main.go index 024a1eb81ba..7fbb10d4fca 100644 --- a/cmd/notification-dummy/main.go +++ b/cmd/notification-dummy/main.go @@ -9,6 +9,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -19,6 
+20,7 @@ type PluginConfig struct { } type DummyPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -84,7 +86,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "dummy": &protobufs.NotifierPlugin{ + "dummy": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 3b535ae7ffa..5fc02cdd1d7 100644 --- a/cmd/notification-email/main.go +++ b/cmd/notification-email/main.go @@ -12,6 +12,7 @@ import ( mail "github.com/xhit/go-simple-mail/v2" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -55,6 +56,7 @@ type PluginConfig struct { } type EmailPlugin struct { + protobufs.UnimplementedNotifierServer ConfigByName map[string]PluginConfig } @@ -81,7 +83,7 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) ( return nil, errors.New("SMTP host is not set") } - if d.ReceiverEmails == nil || len(d.ReceiverEmails) == 0 { + if len(d.ReceiverEmails) == 0 { return nil, errors.New("receiver emails are not set") } @@ -170,7 +172,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "email": &protobufs.NotifierPlugin{ + "email": &csplugin.NotifierPlugin{ Impl: &EmailPlugin{ConfigByName: make(map[string]PluginConfig)}, }, }, diff --git a/cmd/notification-file/main.go b/cmd/notification-file/main.go index 7fc529cff41..a4dbb8ee5db 100644 --- a/cmd/notification-file/main.go +++ b/cmd/notification-file/main.go @@ -15,6 +15,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -52,6 +53,7 @@ type LogRotate struct { } type FilePlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName 
map[string]PluginConfig } @@ -210,7 +212,7 @@ func (s *FilePlugin) Configure(ctx context.Context, config *protobufs.Config) (* d := PluginConfig{} err := yaml.Unmarshal(config.Config, &d) if err != nil { - logger.Error("Failed to unmarshal config", "error", err) + logger.Error("Failed to parse config", "error", err) return &protobufs.Empty{}, err } FileWriteMutex = &sync.Mutex{} @@ -241,7 +243,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "file": &protobufs.NotifierPlugin{ + "file": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-http/main.go b/cmd/notification-http/main.go index 6b11a78ef86..3f84984315b 100644 --- a/cmd/notification-http/main.go +++ b/cmd/notification-http/main.go @@ -16,6 +16,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -34,6 +35,7 @@ type PluginConfig struct { } type HTTPPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -190,7 +192,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "http": &protobufs.NotifierPlugin{ + "http": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-sentinel/main.go b/cmd/notification-sentinel/main.go index a29e941f80c..0293d45b0a4 100644 --- a/cmd/notification-sentinel/main.go +++ b/cmd/notification-sentinel/main.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -27,6 +28,7 @@ type PluginConfig struct { } type SentinelPlugin struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig } @@ -122,7 +124,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, 
Plugins: map[string]plugin.Plugin{ - "sentinel": &protobufs.NotifierPlugin{ + "sentinel": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/cmd/notification-slack/main.go b/cmd/notification-slack/main.go index fba1b33e334..34c7c0df361 100644 --- a/cmd/notification-slack/main.go +++ b/cmd/notification-slack/main.go @@ -10,6 +10,7 @@ import ( "github.com/slack-go/slack" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -23,6 +24,7 @@ type PluginConfig struct { LogLevel *string `yaml:"log_level"` } type Notify struct { + protobufs.UnimplementedNotifierServer ConfigByName map[string]PluginConfig } @@ -84,7 +86,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "slack": &protobufs.NotifierPlugin{ + "slack": &csplugin.NotifierPlugin{ Impl: &Notify{ConfigByName: make(map[string]PluginConfig)}, }, }, diff --git a/cmd/notification-splunk/main.go b/cmd/notification-splunk/main.go index 26190c58a89..e18f416c14a 100644 --- a/cmd/notification-splunk/main.go +++ b/cmd/notification-splunk/main.go @@ -14,6 +14,7 @@ import ( plugin "github.com/hashicorp/go-plugin" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) @@ -32,6 +33,7 @@ type PluginConfig struct { } type Splunk struct { + protobufs.UnimplementedNotifierServer PluginConfigByName map[string]PluginConfig Client http.Client } @@ -117,7 +119,7 @@ func main() { plugin.Serve(&plugin.ServeConfig{ HandshakeConfig: handshake, Plugins: map[string]plugin.Plugin{ - "splunk": &protobufs.NotifierPlugin{ + "splunk": &csplugin.NotifierPlugin{ Impl: sp, }, }, diff --git a/debian/rules b/debian/rules index c11771282ea..5b8d6fc51f8 100755 --- a/debian/rules +++ b/debian/rules @@ -13,7 +13,7 @@ override_dh_auto_build: override_dh_auto_install: # just use the prebuilt binaries, otherwise: - # make build BUILD_RE_WASM=0 
BUILD_STATIC=1 + # make build BUILD_STATIC=1 mkdir -p debian/crowdsec/usr/bin mkdir -p debian/crowdsec/etc/crowdsec diff --git a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock index 2cb587b6b88..99184d9f2a2 100644 --- a/docker/test/Pipfile.lock +++ b/docker/test/Pipfile.lock @@ -18,69 +18,84 @@ "default": { "certifi": { "hashes": [ - "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", - "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90" + "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", + "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" ], "markers": "python_version >= '3.6'", - "version": "==2024.7.4" + "version": "==2024.8.30" }, "cffi": { "hashes": [ - "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", - "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", - "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", - "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", - "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", - "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", - "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", - "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", - "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", - "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", - "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", - "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", - "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", - "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", - "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", - "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", - 
"sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", - "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", - "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", - "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", - "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", - "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", - "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", - "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", - "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", - "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", - "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", - "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", - "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", - "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", - "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", - "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", - "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", - "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", - "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", - "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", - "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", - "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", - "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", - "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", - "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", - "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", 
- "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", - "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", - "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", - "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", - "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", - "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", - "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", - "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", - "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", - "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" + "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", + "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", + "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1", + "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", + "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", + "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", + "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", + "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", + "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", + "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", + "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc", + "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", + "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", + "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", + "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", + 
"sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", + "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", + "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", + "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", + "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b", + "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", + "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", + "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c", + "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", + "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", + "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", + "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8", + "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1", + "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", + "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", + "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", + "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", + "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", + "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", + "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", + "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", + "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", + "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", + "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", + "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", + "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", 
+ "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", + "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", + "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964", + "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", + "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", + "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", + "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", + "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", + "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", + "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", + "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", + "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", + "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", + "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", + "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", + "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", + "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9", + "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", + "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", + "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", + "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", + "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", + "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", + "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", + "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", + 
"sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b" ], "markers": "platform_python_implementation != 'PyPy'", - "version": "==1.16.0" + "version": "==1.17.1" }, "charset-normalizer": { "hashes": [ @@ -180,36 +195,36 @@ }, "cryptography": { "hashes": [ - "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709", - "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069", - "sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2", - "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b", - "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e", - "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70", - "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778", - "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22", - "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895", - "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf", - "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431", - "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f", - "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947", - "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74", - "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc", - "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66", - "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66", - "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf", - "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f", - "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5", - "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e", - "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f", - 
"sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55", - "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1", - "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47", - "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5", - "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0" + "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494", + "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806", + "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d", + "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062", + "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2", + "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4", + "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1", + "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85", + "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84", + "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042", + "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d", + "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962", + "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2", + "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa", + "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d", + "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365", + "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96", + "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47", + "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d", + "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d", + "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c", 
+ "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb", + "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277", + "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172", + "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034", + "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a", + "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289" ], "markers": "python_version >= '3.7'", - "version": "==43.0.0" + "version": "==43.0.1" }, "docker": { "hashes": [ @@ -229,11 +244,11 @@ }, "idna": { "hashes": [ - "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", - "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0" + "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", + "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" ], - "markers": "python_version >= '3.5'", - "version": "==3.7" + "markers": "python_version >= '3.6'", + "version": "==3.10" }, "iniconfig": { "hashes": [ @@ -292,11 +307,11 @@ }, "pytest": { "hashes": [ - "sha256:7e8e5c5abd6e93cb1cc151f23e57adc31fcf8cfd2a3ff2da63e23f732de35db6", - "sha256:e9600ccf4f563976e2c99fa02c7624ab938296551f280835ee6516df8bc4ae8c" + "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181", + "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2" ], "markers": "python_version >= '3.8'", - "version": "==8.3.1" + "version": "==8.3.3" }, "pytest-cs": { "git": "https://github.com/crowdsecurity/pytest-cs.git", @@ -337,60 +352,62 @@ }, "pyyaml": { "hashes": [ - "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", - "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", - "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", - "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", - 
"sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", - "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", - "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", - "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", - "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", - "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", - "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", - "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", - "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", - "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", - "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", - "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", - "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", - "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", - "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", - "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", - "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", - "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", - "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", - "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", - "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", - "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", - "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", - "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", - "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", - "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", 
- "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", - "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", - "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", - "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", - "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", - "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", - "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", - "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", - "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", - "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", - "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", - "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", - "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", - "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", - "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", - "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", - "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", - "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", - "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", - "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", - "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" + "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", + "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", + "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", + "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", + 
"sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", + "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", + "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", + "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", + "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", + "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", + "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", + "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", + "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", + "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", + "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", + "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", + "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", + "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", + "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", + "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", + "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", + "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", + "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", + "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", + "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", + "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", + "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", + "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", + "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", + "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", 
+ "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", + "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", + "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", + "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", + "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", + "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", + "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", + "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", + "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", + "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", + "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", + "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", + "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", + "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", + "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", + "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", + "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", + "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", + "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", + "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", + "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", + "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", + "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" ], - "markers": "python_version >= '3.6'", - "version": "==6.0.1" + "markers": "python_version >= '3.8'", + "version": "==6.0.2" }, "requests": { "hashes": [ @@ -410,11 +427,11 @@ }, "urllib3": { "hashes": [ - 
"sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472", - "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168" + "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", + "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" ], "markers": "python_version >= '3.8'", - "version": "==2.2.2" + "version": "==2.2.3" } }, "develop": { @@ -435,11 +452,11 @@ }, "executing": { "hashes": [ - "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147", - "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc" + "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", + "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab" ], - "markers": "python_version >= '3.5'", - "version": "==2.0.1" + "markers": "python_version >= '3.8'", + "version": "==2.1.0" }, "gnureadline": { "hashes": [ @@ -485,11 +502,11 @@ }, "ipython": { "hashes": [ - "sha256:1cec0fbba8404af13facebe83d04436a7434c7400e59f47acf467c64abd0956c", - "sha256:e6b347c27bdf9c32ee9d31ae85defc525755a1869f14057e900675b9e8d6e6ff" + "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a", + "sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35" ], "markers": "python_version >= '3.11'", - "version": "==8.26.0" + "version": "==8.28.0" }, "jedi": { "hashes": [ @@ -525,11 +542,11 @@ }, "prompt-toolkit": { "hashes": [ - "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10", - "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360" + "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90", + "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e" ], "markers": "python_full_version >= '3.7.0'", - "version": "==3.0.47" + "version": "==3.0.48" }, "ptyprocess": { "hashes": [ diff --git a/go.mod b/go.mod index ec8566db84a..f4bd9379a2d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ 
module github.com/crowdsecurity/crowdsec -go 1.22 +go 1.23.3 // Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). @@ -16,12 +16,12 @@ require ( github.com/appleboy/gin-jwt/v2 v2.9.2 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 - github.com/beevik/etree v1.3.0 + github.com/beevik/etree v1.4.1 github.com/blackfireio/osinfo v1.0.5 github.com/bluele/gcache v0.0.2 github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.8 - github.com/cespare/xxhash/v2 v2.2.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/corazawaf/libinjection-go v0.1.2 github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 @@ -82,12 +82,12 @@ require ( github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.22.0 - golang.org/x/mod v0.15.0 + golang.org/x/crypto v0.26.0 + golang.org/x/mod v0.17.0 golang.org/x/sys v0.24.0 - golang.org/x/text v0.14.0 - google.golang.org/grpc v1.56.3 - google.golang.org/protobuf v1.33.0 + golang.org/x/text v0.17.0 + google.golang.org/grpc v1.67.1 + google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 @@ -128,7 +128,7 @@ require ( github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -201,14 +201,14 @@ require ( go.mongodb.org/mongo-driver v1.9.4 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/net v0.24.0 // 
indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.19.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/term v0.23.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.18.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gotest.tools/v3 v3.5.0 // indirect diff --git a/go.sum b/go.sum index ff73dc56332..b2bd77c9915 100644 --- a/go.sum +++ b/go.sum @@ -58,6 +58,8 @@ github.com/aws/aws-sdk-go v1.52.0 h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU= github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= +github.com/beevik/etree v1.4.1 h1:PmQJDDYahBGNKDcpdX8uPy1xRCwoCGVUiW669MEirVI= +github.com/beevik/etree v1.4.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -74,8 +76,8 @@ github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZF github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= github.com/c-robinson/iplib v1.0.8/go.mod 
h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= @@ -294,8 +296,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -763,8 +765,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod 
h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -772,8 +774,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -797,8 +799,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod 
h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -808,8 +810,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -854,8 +856,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term 
v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -868,8 +870,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -893,8 +895,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 
-golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -906,14 +908,14 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 634557021f1..ef5a413b91f 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -1,6 +1,7 @@ package acquisition import ( + "context" "errors" "fmt" "io" @@ -18,19 +19,8 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - appsecacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec" - cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch" - dockeracquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/docker" - fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" - journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl" - kafkaacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kafka" - kinesisacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kinesis" - k8sauditacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kubernetesaudit" - lokiacquisition 
"github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" - s3acquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/s3" - syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog" - wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -50,43 +40,76 @@ func (e *DataSourceUnavailableError) Unwrap() error { // The interface each datasource must implement type DataSource interface { - GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module - GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality) - UnmarshalConfig([]byte) error // Decode and pre-validate the YAML datasource - anything that can be checked before runtime - Configure([]byte, *log.Entry, int) error // Complete the YAML datasource configuration and perform runtime checks. 
- ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource - GetMode() string // Get the mode (TAIL, CAT or SERVER) - GetName() string // Get the name of the module - OneShotAcquisition(chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file) - StreamingAcquisition(chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file) - CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense) - GetUuid() string // Get the unique identifier of the datasource + GetMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module + GetAggregMetrics() []prometheus.Collector // Returns pointers to metrics that are managed by the module (aggregated mode, limits cardinality) + UnmarshalConfig([]byte) error // Decode and pre-validate the YAML datasource - anything that can be checked before runtime + Configure([]byte, *log.Entry, int) error // Complete the YAML datasource configuration and perform runtime checks. 
+ ConfigureByDSN(string, map[string]string, *log.Entry, string) error // Configure the datasource + GetMode() string // Get the mode (TAIL, CAT or SERVER) + GetName() string // Get the name of the module + OneShotAcquisition(context.Context, chan types.Event, *tomb.Tomb) error // Start one shot acquisition(eg, cat a file) + StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error // Start live acquisition (eg, tail a file) + CanRun() error // Whether the datasource can run or not (eg, journalctl on BSD is a non-sense) + GetUuid() string // Get the unique identifier of the datasource Dump() interface{} } -var AcquisitionSources = map[string]func() DataSource{ - "file": func() DataSource { return &fileacquisition.FileSource{} }, - "journalctl": func() DataSource { return &journalctlacquisition.JournalCtlSource{} }, - "cloudwatch": func() DataSource { return &cloudwatchacquisition.CloudwatchSource{} }, - "syslog": func() DataSource { return &syslogacquisition.SyslogSource{} }, - "docker": func() DataSource { return &dockeracquisition.DockerSource{} }, - "kinesis": func() DataSource { return &kinesisacquisition.KinesisSource{} }, - "wineventlog": func() DataSource { return &wineventlogacquisition.WinEventLogSource{} }, - "kafka": func() DataSource { return &kafkaacquisition.KafkaSource{} }, - "k8s-audit": func() DataSource { return &k8sauditacquisition.KubernetesAuditSource{} }, - "loki": func() DataSource { return &lokiacquisition.LokiSource{} }, - "s3": func() DataSource { return &s3acquisition.S3Source{} }, - "appsec": func() DataSource { return &appsecacquisition.AppsecSource{} }, +var ( + // We declare everything here so we can tell if they are unsupported, or excluded from the build + AcquisitionSources = map[string]func() DataSource{} + transformRuntimes = map[string]*vm.Program{} +) + +func GetDataSourceIface(dataSourceType string) (DataSource, error) { + source, registered := AcquisitionSources[dataSourceType] + if registered { + return 
source(), nil + } + + built, known := component.Built["datasource_"+dataSourceType] + + if !known { + return nil, fmt.Errorf("unknown data source %s", dataSourceType) + } + + if built { + panic("datasource " + dataSourceType + " is built but not registered") + } + + return nil, fmt.Errorf("data source %s is not built in this version of crowdsec", dataSourceType) } -var transformRuntimes = map[string]*vm.Program{} +// registerDataSource registers a datasource in the AcquisitionSources map. +// It must be called in the init() function of the datasource package, and the datasource name +// must be declared with a nil value in the map, to allow for conditional compilation. +func registerDataSource(dataSourceType string, dsGetter func() DataSource) { + component.Register("datasource_" + dataSourceType) -func GetDataSourceIface(dataSourceType string) DataSource { - source := AcquisitionSources[dataSourceType] - if source == nil { - return nil + AcquisitionSources[dataSourceType] = dsGetter +} + +// setupLogger creates a logger for the datasource to use at runtime. 
+func setupLogger(source, name string, level *log.Level) (*log.Entry, error) { + clog := log.New() + if err := types.ConfigureLogger(clog); err != nil { + return nil, fmt.Errorf("while configuring datasource logger: %w", err) + } + + if level != nil { + clog.SetLevel(*level) } - return source() + + fields := log.Fields{ + "type": source, + } + + if name != "" { + fields["name"] = name + } + + subLogger := clog.WithFields(fields) + + return subLogger, nil } // DataSourceConfigure creates and returns a DataSource object from a configuration, @@ -98,35 +121,29 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics // once to DataSourceCommonCfg, and then later to the dedicated type of the datasource yamlConfig, err := yaml.Marshal(commonConfig) if err != nil { - return nil, fmt.Errorf("unable to marshal back interface: %w", err) + return nil, fmt.Errorf("unable to serialize back interface: %w", err) } - if dataSrc := GetDataSourceIface(commonConfig.Source); dataSrc != nil { - /* this logger will then be used by the datasource at runtime */ - clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { - return nil, fmt.Errorf("while configuring datasource logger: %w", err) - } - if commonConfig.LogLevel != nil { - clog.SetLevel(*commonConfig.LogLevel) - } - customLog := log.Fields{ - "type": commonConfig.Source, - } - if commonConfig.Name != "" { - customLog["name"] = commonConfig.Name - } - subLogger := clog.WithFields(customLog) - /* check eventual dependencies are satisfied (ie. 
journald will check journalctl availability) */ - if err := dataSrc.CanRun(); err != nil { - return nil, &DataSourceUnavailableError{Name: commonConfig.Source, Err: err} - } - /* configure the actual datasource */ - if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { - return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) - } - return &dataSrc, nil + + dataSrc, err := GetDataSourceIface(commonConfig.Source) + if err != nil { + return nil, err } - return nil, fmt.Errorf("cannot find source %s", commonConfig.Source) + + subLogger, err := setupLogger(commonConfig.Source, commonConfig.Name, commonConfig.LogLevel) + if err != nil { + return nil, err + } + + /* check eventual dependencies are satisfied (ie. journald will check journalctl availability) */ + if err := dataSrc.CanRun(); err != nil { + return nil, &DataSourceUnavailableError{Name: commonConfig.Source, Err: err} + } + /* configure the actual datasource */ + if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { + return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) + } + + return &dataSrc, nil } // detectBackwardCompatAcquis: try to magically detect the type for backward compat (type was not mandatory then) @@ -134,12 +151,15 @@ func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string { if _, ok := sub.Config["filename"]; ok { return "file" } + if _, ok := sub.Config["filenames"]; ok { return "file" } + if _, ok := sub.Config["journalctl_filter"]; ok { return "journalctl" } + return "" } @@ -150,29 +170,35 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr if len(frags) == 1 { return nil, fmt.Errorf("%s isn't valid dsn (no protocol)", dsn) } - dataSrc := GetDataSourceIface(frags[0]) - if dataSrc == nil { - return nil, fmt.Errorf("no acquisition for protocol %s://", frags[0]) + + dataSrc, err := GetDataSourceIface(frags[0]) + if err != 
nil { + return nil, fmt.Errorf("no acquisition for protocol %s:// - %w", frags[0], err) } - /* this logger will then be used by the datasource at runtime */ - clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { - return nil, fmt.Errorf("while configuring datasource logger: %w", err) + + subLogger, err := setupLogger(dsn, "", nil) + if err != nil { + return nil, err } - subLogger := clog.WithField("type", dsn) + uniqueId := uuid.NewString() + if transformExpr != "" { vm, err := expr.Compile(transformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { return nil, fmt.Errorf("while compiling transform expression '%s': %w", transformExpr, err) } + transformRuntimes[uniqueId] = vm } - err := dataSrc.ConfigureByDSN(dsn, labels, subLogger, uniqueId) + + err = dataSrc.ConfigureByDSN(dsn, labels, subLogger, uniqueId) if err != nil { return nil, fmt.Errorf("while configuration datasource for %s: %w", dsn, err) } + sources = append(sources, dataSrc) + return sources, nil } @@ -180,9 +206,11 @@ func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { if prom == nil { return configuration.METRICS_FULL } + if !prom.Enabled { return configuration.METRICS_NONE } + if prom.Level == configuration.CFG_METRICS_AGGREGATE { return configuration.METRICS_AGGREGATE } @@ -190,6 +218,7 @@ func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { if prom.Level == configuration.CFG_METRICS_FULL { return configuration.METRICS_FULL } + return configuration.METRICS_FULL } @@ -198,50 +227,66 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig var sources []DataSource metrics_level := GetMetricsLevelFromPromCfg(prom) + for _, acquisFile := range config.AcquisitionFiles { log.Infof("loading acquisition file : %s", acquisFile) + yamlFile, err := os.Open(acquisFile) if err != nil { return nil, err } + dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) + idx := -1 + for { var sub 
configuration.DataSourceCommonCfg - err = dec.Decode(&sub) + idx += 1 + + err = dec.Decode(&sub) if err != nil { if !errors.Is(err, io.EOF) { return nil, fmt.Errorf("failed to yaml decode %s: %w", acquisFile, err) } + log.Tracef("End of yaml file") + break } - //for backward compat ('type' was not mandatory, detect it) + // for backward compat ('type' was not mandatory, detect it) if guessType := detectBackwardCompatAcquis(sub); guessType != "" { sub.Source = guessType } - //it's an empty item, skip it + // it's an empty item, skip it if len(sub.Labels) == 0 { if sub.Source == "" { log.Debugf("skipping empty item in %s", acquisFile) continue } + if sub.Source != "docker" { - //docker is the only source that can be empty + // docker is the only source that can be empty return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) } } + if sub.Source == "" { return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx) } - if GetDataSourceIface(sub.Source) == nil { - return nil, fmt.Errorf("unknown data source %s in %s (position: %d)", sub.Source, acquisFile, idx) + + // pre-check that the source is valid + _, err := GetDataSourceIface(sub.Source) + if err != nil { + return nil, fmt.Errorf("in file %s (position: %d) - %w", acquisFile, idx, err) } + uniqueId := uuid.NewString() sub.UniqueId = uniqueId + src, err := DataSourceConfigure(sub, metrics_level) if err != nil { var dserr *DataSourceUnavailableError @@ -249,29 +294,36 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig log.Error(err) continue } + return nil, fmt.Errorf("while configuring datasource of type %s from %s (position: %d): %w", sub.Source, acquisFile, idx, err) } + if sub.TransformExpr != "" { vm, err := expr.Compile(sub.TransformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position: %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) } + transformRuntimes[uniqueId] = vm } + sources = append(sources, *src) } } + return sources, nil } func GetMetrics(sources []DataSource, aggregated bool) error { var metrics []prometheus.Collector - for i := range len(sources) { + + for i := range sources { if aggregated { metrics = sources[i].GetMetrics() } else { metrics = sources[i].GetAggregMetrics() } + for _, metric := range metrics { if err := prometheus.Register(metric); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { @@ -281,12 +333,28 @@ func GetMetrics(sources []DataSource, aggregated bool) error { } } } + return nil } +// There's no need for an actual deep copy +// The event is almost empty, we are mostly interested in allocating new maps for Parsed/Meta/... +func copyEvent(evt types.Event, line string) types.Event { + evtCopy := types.MakeEvent(evt.ExpectMode == types.TIMEMACHINE, evt.Type, evt.Process) + evtCopy.Line = evt.Line + evtCopy.Line.Raw = line + evtCopy.Line.Labels = make(map[string]string) + for k, v := range evt.Line.Labels { + evtCopy.Line.Labels[k] = v + } + + return evtCopy +} + func transform(transformChan chan types.Event, output chan types.Event, AcquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { defer trace.CatchPanic("crowdsec/acquis") logger.Infof("transformer started") + for { select { case <-AcquisTomb.Dying(): @@ -294,22 +362,25 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo return case evt := <-transformChan: logger.Tracef("Received event %s", evt.Line.Raw) + out, err := expr.Run(transformRuntime, map[string]interface{}{"evt": &evt}) if err != nil { logger.Errorf("while running transform expression: %s, sending event as-is", err) output <- evt } + if out == nil { logger.Errorf("transform expression returned nil, 
sending event as-is") output <- evt } + switch v := out.(type) { case string: logger.Tracef("transform expression returned %s", v) - evt.Line.Raw = v - output <- evt + output <- copyEvent(evt, v) case []interface{}: logger.Tracef("transform expression returned %v", v) //nolint:asasalint // We actually want to log the slice content + for _, line := range v { l, ok := line.(string) if !ok { @@ -317,14 +388,14 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo output <- evt continue } - evt.Line.Raw = l - output <- evt + + output <- copyEvent(evt, l) } case []string: logger.Tracef("transform expression returned %v", v) + for _, line := range v { - evt.Line.Raw = line - output <- evt + output <- copyEvent(evt, line) } default: logger.Errorf("transform expression returned an invalid type %T, sending event as-is", out) @@ -334,49 +405,58 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo } } -func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { +func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { // Don't wait if we have no sources, as it will hang forever if len(sources) == 0 { return nil } - for i := range len(sources) { - subsrc := sources[i] //ensure its a copy + for i := range sources { + subsrc := sources[i] // ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) AcquisTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis") + var err error outChan := output + log.Debugf("datasource %s UUID: %s", subsrc.GetName(), subsrc.GetUuid()) + if transformRuntime, ok := transformRuntimes[subsrc.GetUuid()]; ok { log.Infof("transform expression found for datasource %s", subsrc.GetName()) + transformChan := make(chan types.Event) outChan = transformChan transformLogger := log.WithFields(log.Fields{ "component": "transform", "datasource": subsrc.GetName(), 
}) + AcquisTomb.Go(func() error { transform(outChan, output, AcquisTomb, transformRuntime, transformLogger) return nil }) } + if subsrc.GetMode() == configuration.TAIL_MODE { - err = subsrc.StreamingAcquisition(outChan, AcquisTomb) + err = subsrc.StreamingAcquisition(ctx, outChan, AcquisTomb) } else { - err = subsrc.OneShotAcquisition(outChan, AcquisTomb) + err = subsrc.OneShotAcquisition(ctx, outChan, AcquisTomb) } + if err != nil { - //if one of the acqusition returns an error, we kill the others to properly shutdown + // if one of the acqusition returns an error, we kill the others to properly shutdown AcquisTomb.Kill(err) } + return nil }) } /*return only when acquisition is over (cat) or never (tail)*/ err := AcquisTomb.Wait() + return err } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index a5eecbc20ed..dd70172cf62 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -1,6 +1,7 @@ package acquisition import ( + "context" "errors" "fmt" "strings" @@ -56,14 +57,19 @@ func (f *MockSource) Configure(cfg []byte, logger *log.Entry, metricsLevel int) return nil } -func (f *MockSource) GetMode() string { return f.Mode } -func (f *MockSource) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSource) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSource) CanRun() error { return nil } -func (f *MockSource) GetMetrics() []prometheus.Collector { return nil } -func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockSource) Dump() interface{} { return f } -func (f *MockSource) GetName() string { return "mock" } +func (f *MockSource) GetMode() string { return f.Mode } +func (f *MockSource) OneShotAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} + +func (f *MockSource) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return 
nil +} +func (f *MockSource) CanRun() error { return nil } +func (f *MockSource) GetMetrics() []prometheus.Collector { return nil } +func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockSource) Dump() interface{} { return f } +func (f *MockSource) GetName() string { return "mock" } func (f *MockSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { return errors.New("not supported") } @@ -79,13 +85,8 @@ func (f *MockSourceCantRun) GetName() string { return "mock_cant_run" } // appendMockSource is only used to add mock source for tests func appendMockSource() { - if GetDataSourceIface("mock") == nil { - AcquisitionSources["mock"] = func() DataSource { return &MockSource{} } - } - - if GetDataSourceIface("mock_cant_run") == nil { - AcquisitionSources["mock_cant_run"] = func() DataSource { return &MockSourceCantRun{} } - } + AcquisitionSources["mock"] = func() DataSource { return &MockSource{} } + AcquisitionSources["mock_cant_run"] = func() DataSource { return &MockSourceCantRun{} } } func TestDataSourceConfigure(t *testing.T) { @@ -150,7 +151,7 @@ labels: log_level: debug source: tutu `, - ExpectedError: "cannot find source tutu", + ExpectedError: "unknown data source tutu", }, { TestName: "mismatch_config", @@ -184,6 +185,7 @@ wowo: ajsajasjas yaml.Unmarshal([]byte(tc.String), &common) ds, err := DataSourceConfigure(common, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.ExpectedError) + if tc.ExpectedError != "" { return } @@ -270,7 +272,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ AcquisitionFiles: []string{"test_files/bad_source.yaml"}, }, - ExpectedError: "unknown data source does_not_exist in test_files/bad_source.yaml", + ExpectedError: "in file test_files/bad_source.yaml (position: 0) - unknown data source does_not_exist", }, { TestName: "invalid_filetype_config", @@ -284,6 +286,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { 
t.Run(tc.TestName, func(t *testing.T) { dss, err := LoadAcquisitionFromFile(&tc.Config, nil) cstest.RequireErrorContains(t, err, tc.ExpectedError) + if tc.ExpectedError != "" { return } @@ -320,7 +323,7 @@ func (f *MockCat) Configure(cfg []byte, logger *log.Entry, metricsLevel int) err func (f *MockCat) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockCat) GetName() string { return "mock_cat" } func (f *MockCat) GetMode() string { return "cat" } -func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { +func (f *MockCat) OneShotAcquisition(ctx context.Context, out chan types.Event, tomb *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" @@ -329,7 +332,8 @@ func (f *MockCat) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) erro return nil } -func (f *MockCat) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { + +func (f *MockCat) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { return errors.New("can't run in tail") } func (f *MockCat) CanRun() error { return nil } @@ -364,15 +368,17 @@ func (f *MockTail) Configure(cfg []byte, logger *log.Entry, metricsLevel int) er func (f *MockTail) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockTail) GetName() string { return "mock_tail" } func (f *MockTail) GetMode() string { return "tail" } -func (f *MockTail) OneShotAcquisition(out chan types.Event, tomb *tomb.Tomb) error { +func (f *MockTail) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("can't run in cat mode") } -func (f *MockTail) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { + +func (f *MockTail) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt } + <-t.Dying() return nil @@ -386,9 +392,10 @@ func (f *MockTail) ConfigureByDSN(string, map[string]string, *log.Entry, string) } 
func (f *MockTail) GetUuid() string { return "" } -//func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { +// func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { func TestStartAcquisitionCat(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockCat{}, } @@ -396,7 +403,7 @@ func TestStartAcquisitionCat(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil { + if err := StartAcquisition(ctx, sources, out, &acquisTomb); err != nil { t.Errorf("unexpected error") } }() @@ -416,6 +423,7 @@ READLOOP: } func TestStartAcquisitionTail(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockTail{}, } @@ -423,7 +431,7 @@ func TestStartAcquisitionTail(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil { + if err := StartAcquisition(ctx, sources, out, &acquisTomb); err != nil { t.Errorf("unexpected error") } }() @@ -450,18 +458,20 @@ type MockTailError struct { MockTail } -func (f *MockTailError) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (f *MockTailError) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { for range 10 { evt := types.Event{} evt.Line.Src = "test" out <- evt } + t.Kill(errors.New("got error (tomb)")) return errors.New("got error") } func TestStartAcquisitionTailError(t *testing.T) { + ctx := context.Background() sources := []DataSource{ &MockTailError{}, } @@ -469,7 +479,7 @@ func TestStartAcquisitionTailError(t *testing.T) { acquisTomb := tomb.Tomb{} go func() { - if err := StartAcquisition(sources, out, &acquisTomb); err != nil && err.Error() != "got error (tomb)" { + if err := StartAcquisition(ctx, sources, out, &acquisTomb); err != nil && err.Error() != "got error (tomb)" { t.Errorf("expected error, got '%s'", err) } }() @@ 
-485,7 +495,7 @@ READLOOP: } } assert.Equal(t, 10, count) - //acquisTomb.Kill(nil) + // acquisTomb.Kill(nil) time.Sleep(1 * time.Second) cstest.RequireErrorContains(t, acquisTomb.Err(), "got error (tomb)") } @@ -500,14 +510,19 @@ func (f *MockSourceByDSN) UnmarshalConfig(cfg []byte) error { return nil } func (f *MockSourceByDSN) Configure(cfg []byte, logger *log.Entry, metricsLevel int) error { return nil } -func (f *MockSourceByDSN) GetMode() string { return f.Mode } -func (f *MockSourceByDSN) OneShotAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSourceByDSN) StreamingAcquisition(chan types.Event, *tomb.Tomb) error { return nil } -func (f *MockSourceByDSN) CanRun() error { return nil } -func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil } -func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockSourceByDSN) Dump() interface{} { return f } -func (f *MockSourceByDSN) GetName() string { return "mockdsn" } +func (f *MockSourceByDSN) GetMode() string { return f.Mode } +func (f *MockSourceByDSN) OneShotAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} + +func (f *MockSourceByDSN) StreamingAcquisition(context.Context, chan types.Event, *tomb.Tomb) error { + return nil +} +func (f *MockSourceByDSN) CanRun() error { return nil } +func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil } +func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil } +func (f *MockSourceByDSN) Dump() interface{} { return f } +func (f *MockSourceByDSN) GetName() string { return "mockdsn" } func (f *MockSourceByDSN) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { dsn = strings.TrimPrefix(dsn, "mockdsn://") if dsn != "test_expect" { @@ -542,9 +557,7 @@ func TestConfigureByDSN(t *testing.T) { }, } - if GetDataSourceIface("mockdsn") == nil { - AcquisitionSources["mockdsn"] = func() 
DataSource { return &MockSourceByDSN{} } - } + AcquisitionSources["mockdsn"] = func() DataSource { return &MockSourceByDSN{} } for _, tc := range tests { t.Run(tc.dsn, func(t *testing.T) { diff --git a/pkg/acquisition/appsec.go b/pkg/acquisition/appsec.go new file mode 100644 index 00000000000..81616d3d2b8 --- /dev/null +++ b/pkg/acquisition/appsec.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_appsec + +package acquisition + +import ( + appsecacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("appsec", func() DataSource { return &appsecacquisition.AppsecSource{} }) +} diff --git a/pkg/acquisition/cloudwatch.go b/pkg/acquisition/cloudwatch.go new file mode 100644 index 00000000000..e6b3d3e3e53 --- /dev/null +++ b/pkg/acquisition/cloudwatch.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_cloudwatch + +package acquisition + +import ( + cloudwatchacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/cloudwatch" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("cloudwatch", func() DataSource { return &cloudwatchacquisition.CloudwatchSource{} }) +} diff --git a/pkg/acquisition/docker.go b/pkg/acquisition/docker.go new file mode 100644 index 00000000000..3bf792a039a --- /dev/null +++ b/pkg/acquisition/docker.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_docker + +package acquisition + +import ( + dockeracquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/docker" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("docker", func() DataSource { return &dockeracquisition.DockerSource{} }) +} diff --git a/pkg/acquisition/file.go b/pkg/acquisition/file.go new file mode 100644 index 00000000000..1ff2e4a3c0e --- /dev/null +++ b/pkg/acquisition/file.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_file + +package acquisition + +import ( + fileacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/file" +) + 
+//nolint:gochecknoinits +func init() { + registerDataSource("file", func() DataSource { return &fileacquisition.FileSource{} }) +} diff --git a/pkg/acquisition/http.go b/pkg/acquisition/http.go new file mode 100644 index 00000000000..59745772b62 --- /dev/null +++ b/pkg/acquisition/http.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_http + +package acquisition + +import ( + httpacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/http" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("http", func() DataSource { return &httpacquisition.HTTPSource{} }) +} diff --git a/pkg/acquisition/journalctl.go b/pkg/acquisition/journalctl.go new file mode 100644 index 00000000000..691f961ae77 --- /dev/null +++ b/pkg/acquisition/journalctl.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_journalctl + +package acquisition + +import ( + journalctlacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/journalctl" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("journalctl", func() DataSource { return &journalctlacquisition.JournalCtlSource{} }) +} diff --git a/pkg/acquisition/k8s.go b/pkg/acquisition/k8s.go new file mode 100644 index 00000000000..cb9446be285 --- /dev/null +++ b/pkg/acquisition/k8s.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_k8saudit + +package acquisition + +import ( + k8sauditacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kubernetesaudit" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("k8s-audit", func() DataSource { return &k8sauditacquisition.KubernetesAuditSource{} }) +} diff --git a/pkg/acquisition/kafka.go b/pkg/acquisition/kafka.go new file mode 100644 index 00000000000..7d315d87feb --- /dev/null +++ b/pkg/acquisition/kafka.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_kafka + +package acquisition + +import ( + kafkaacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kafka" +) + +//nolint:gochecknoinits +func init() { + 
registerDataSource("kafka", func() DataSource { return &kafkaacquisition.KafkaSource{} }) +} diff --git a/pkg/acquisition/kinesis.go b/pkg/acquisition/kinesis.go new file mode 100644 index 00000000000..b41372e7fb9 --- /dev/null +++ b/pkg/acquisition/kinesis.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_kinesis + +package acquisition + +import ( + kinesisacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/kinesis" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("kinesis", func() DataSource { return &kinesisacquisition.KinesisSource{} }) +} diff --git a/pkg/acquisition/loki.go b/pkg/acquisition/loki.go new file mode 100644 index 00000000000..1eed6686591 --- /dev/null +++ b/pkg/acquisition/loki.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_loki + +package acquisition + +import ( + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/loki" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("loki", func() DataSource { return &loki.LokiSource{} }) +} diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 5b0661a21b7..2f7861b32ff 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -41,6 +41,7 @@ type AppsecSourceConfig struct { Path string `yaml:"path"` Routines int `yaml:"routines"` AppsecConfig string `yaml:"appsec_config"` + AppsecConfigs []string `yaml:"appsec_configs"` AppsecConfigPath string `yaml:"appsec_config_path"` AuthCacheDuration *time.Duration `yaml:"auth_cache_duration"` configuration.DataSourceCommonCfg `yaml:",inline"` @@ -59,7 +60,7 @@ type AppsecSource struct { AppsecConfigs map[string]appsec.AppsecConfig lapiURL string AuthCache AuthCache - AppsecRunners []AppsecRunner //one for each go-routine + AppsecRunners []AppsecRunner // one for each go-routine } // Struct to handle cache of authentication @@ -85,6 +86,7 @@ func (ac *AuthCache) Get(apiKey string) (time.Time, bool) { ac.mu.RLock() 
expiration, exists := ac.APIKeys[apiKey] ac.mu.RUnlock() + return expiration, exists } @@ -120,14 +122,19 @@ func (w *AppsecSource) UnmarshalConfig(yamlConfig []byte) error { w.config.Routines = 1 } - if w.config.AppsecConfig == "" && w.config.AppsecConfigPath == "" { + if w.config.AppsecConfig == "" && w.config.AppsecConfigPath == "" && len(w.config.AppsecConfigs) == 0 { return errors.New("appsec_config or appsec_config_path must be set") } + if (w.config.AppsecConfig != "" || w.config.AppsecConfigPath != "") && len(w.config.AppsecConfigs) != 0 { + return errors.New("appsec_config and appsec_config_path are mutually exclusive with appsec_configs") + } + if w.config.Name == "" { if w.config.ListenSocket != "" && w.config.ListenAddr == "" { w.config.Name = w.config.ListenSocket } + if w.config.ListenSocket == "" { w.config.Name = fmt.Sprintf("%s%s", w.config.ListenAddr, w.config.Path) } @@ -153,6 +160,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe if err != nil { return fmt.Errorf("unable to parse appsec configuration: %w", err) } + w.logger = logger w.metricsLevel = MetricsLevel w.logger.Tracef("Appsec configuration: %+v", w.config) @@ -172,7 +180,10 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.InChan = make(chan appsec.ParsedRequest) appsecCfg := appsec.AppsecConfig{Logger: w.logger.WithField("component", "appsec_config")} - //let's load the associated appsec_config: + //we keep the datasource name + appsecCfg.Name = w.config.Name + + // let's load the associated appsec_config: if w.config.AppsecConfigPath != "" { err := appsecCfg.LoadByPath(w.config.AppsecConfigPath) if err != nil { @@ -183,10 +194,20 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe if err != nil { return fmt.Errorf("unable to load appsec_config: %w", err) } + } else if len(w.config.AppsecConfigs) > 0 { + for _, appsecConfig := range w.config.AppsecConfigs { + err := 
appsecCfg.Load(appsecConfig) + if err != nil { + return fmt.Errorf("unable to load appsec_config: %w", err) + } + } } else { return errors.New("no appsec_config provided") } + // Now we can set up the logger + appsecCfg.SetUpLogger() + w.AppsecRuntime, err = appsecCfg.Build() if err != nil { return fmt.Errorf("unable to build appsec_config: %w", err) @@ -201,7 +222,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe for nbRoutine := range w.config.Routines { appsecRunnerUUID := uuid.New().String() - //we copy AppsecRutime for each runner + // we copy AppsecRutime for each runner wrt := *w.AppsecRuntime wrt.Logger = w.logger.Dup().WithField("runner_uuid", appsecRunnerUUID) runner := AppsecRunner{ @@ -211,17 +232,20 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe AppsecRuntime: &wrt, Labels: w.config.Labels, } + err := runner.Init(appsecCfg.GetDataDir()) if err != nil { return fmt.Errorf("unable to initialize runner: %w", err) } + w.AppsecRunners[nbRoutine] = runner } w.logger.Infof("Created %d appsec runners", len(w.AppsecRunners)) - //We don´t use the wrapper provided by coraza because we want to fully control what happens when a rule match to send the information in crowdsec + // We don´t use the wrapper provided by coraza because we want to fully control what happens when a rule match to send the information in crowdsec w.mux.HandleFunc(w.config.Path, w.appsecHandler) + return nil } @@ -237,16 +261,18 @@ func (w *AppsecSource) GetName() string { return "appsec" } -func (w *AppsecSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *AppsecSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("AppSec datasource does not support command line acquisition") } -func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan 
types.Event, t *tomb.Tomb) error { w.outChan = out + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live") w.logger.Infof("%d appsec runner to start", len(w.AppsecRunners)) + for _, runner := range w.AppsecRunners { runner.outChan = out t.Go(func() error { @@ -254,6 +280,7 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) return runner.Run(t) }) } + t.Go(func() error { if w.config.ListenSocket != "" { w.logger.Infof("creating unix socket %s", w.config.ListenSocket) @@ -268,10 +295,11 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } else { err = w.server.Serve(listener) } - if err != nil && err != http.ErrServerClosed { + if err != nil && !errors.Is(err, http.ErrServerClosed) { return fmt.Errorf("appsec server failed: %w", err) } } + return nil }) t.Go(func() error { @@ -288,15 +316,17 @@ func (w *AppsecSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) return fmt.Errorf("appsec server failed: %w", err) } } + return nil }) <-t.Dying() w.logger.Info("Shutting down Appsec server") - //xx let's clean up the appsec runners :) + // xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) - w.server.Shutdown(context.TODO()) + w.server.Shutdown(ctx) return nil }) + return nil } @@ -391,9 +421,10 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { logger.Debugf("Response: %+v", appsecResponse) rw.WriteHeader(statusCode) + body, err := json.Marshal(appsecResponse) if err != nil { - logger.Errorf("unable to marshal response: %s", err) + logger.Errorf("unable to serialize response: %s", err) rw.WriteHeader(http.StatusInternalServerError) } else { rw.Write(body) diff --git a/pkg/acquisition/modules/appsec/appsec_lnx_test.go b/pkg/acquisition/modules/appsec/appsec_lnx_test.go index 3e40a1f970c..61dfc536f5e 100644 --- a/pkg/acquisition/modules/appsec/appsec_lnx_test.go +++ 
b/pkg/acquisition/modules/appsec/appsec_lnx_test.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package appsecacquisition @@ -16,6 +15,7 @@ import ( func TestAppsecRuleTransformsOthers(t *testing.T) { log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ { name: "normalizepath", diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go b/pkg/acquisition/modules/appsec/appsec_rules_test.go index 909f16357ed..00093c5a5ad 100644 --- a/pkg/acquisition/modules/appsec/appsec_rules_test.go +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -28,7 +28,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, @@ -59,7 +60,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"tutu"}}, @@ -84,7 +86,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, @@ -110,7 +113,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, @@ -136,7 +140,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"toto"}}, @@ -165,7 +170,8 @@ func TestAppsecRuleMatches(t *testing.T) { {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')"}}, }, input_request: 
appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"bla"}}, @@ -192,7 +198,8 @@ func TestAppsecRuleMatches(t *testing.T) { {Filter: "IsInBand == true", Apply: []string{"SetReturnCode(418)"}}, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"bla"}}, @@ -219,7 +226,8 @@ func TestAppsecRuleMatches(t *testing.T) { {Filter: "IsInBand == true", Apply: []string{"SetRemediationByName('rule42', 'captcha')"}}, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Args: url.Values{"foo": []string{"bla"}}, @@ -243,7 +251,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Headers: http.Header{"Cookie": []string{"foo=toto"}}, @@ -273,7 +282,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Headers: http.Header{"Cookie": []string{"foo=toto; bar=tutu"}}, @@ -303,7 +313,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Headers: http.Header{"Cookie": []string{"bar=tutu; tututata=toto"}}, @@ -333,7 +344,8 @@ func TestAppsecRuleMatches(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/urllll", Headers: http.Header{"Content-Type": []string{"multipart/form-data; boundary=boundary"}}, @@ -354,6 +366,32 @@ toto require.Len(t, 
events[1].Appsec.MatchedRules, 1) require.Equal(t, "rule1", events[1].Appsec.MatchedRules[0]["msg"]) + require.Len(t, responses, 1) + require.True(t, responses[0].InBandInterrupt) + }, + }, + { + name: "Basic matching IP address", + expected_load_ok: true, + inband_native_rules: []string{ + "SecRule REMOTE_ADDR \"@ipMatch 1.2.3.4\" \"id:1,phase:1,log,deny,msg: 'block ip'\"", + }, + input_request: appsec.ParsedRequest{ + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", + Method: "GET", + URI: "/urllll", + Headers: http.Header{"Content-Type": []string{"multipart/form-data; boundary=boundary"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 2) + require.Equal(t, types.APPSEC, events[0].Type) + + require.Equal(t, types.LOG, events[1].Type) + require.True(t, events[1].Appsec.HasInBandMatches) + require.Len(t, events[1].Appsec.MatchedRules, 1) + require.Equal(t, "block ip", events[1].Appsec.MatchedRules[0]["msg"]) + require.Len(t, responses, 1) require.True(t, responses[0].InBandInterrupt) }, @@ -381,7 +419,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/toto", }, @@ -404,7 +443,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/TOTO", }, @@ -427,7 +467,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/toto", }, @@ -451,7 +492,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=dG90bw", }, @@ 
-475,7 +517,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=dG90bw===", }, @@ -499,7 +542,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=toto", }, @@ -523,7 +567,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=%42%42%2F%41", }, @@ -547,7 +592,8 @@ func TestAppsecRuleTransforms(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/?foo=%20%20%42%42%2F%41%20%20", }, @@ -585,7 +631,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/foobar?something=toto&foobar=smth", }, @@ -612,7 +659,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/foobar?something=toto&foobar=smth", }, @@ -639,7 +687,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Body: []byte("smth=toto&foobar=other"), @@ -668,7 +717,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Body: []byte("smth=toto&foobar=other"), @@ -697,7 +747,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: 
"1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Headers: http.Header{"foobar": []string{"toto"}}, @@ -725,7 +776,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Headers: http.Header{"foobar": []string{"toto"}}, @@ -748,7 +800,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", }, @@ -770,7 +823,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Proto: "HTTP/3.1", @@ -793,7 +847,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/foobar", }, @@ -815,7 +870,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/foobar?a=b", }, @@ -837,7 +893,8 @@ func TestAppsecRuleZones(t *testing.T) { }, }, input_request: appsec.ParsedRequest{ - RemoteAddr: "1.2.3.4", + ClientIP: "1.2.3.4", + RemoteAddr: "127.0.0.1", Method: "GET", URI: "/", Body: []byte("foobar=42421"), diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index ed49d6a7b41..7ce43779591 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "slices" + "strings" "time" "github.com/prometheus/client_golang/prometheus" @@ -31,23 +32,38 @@ type AppsecRunner struct { logger *log.Entry } +func (r *AppsecRunner) MergeDedupRules(collections []appsec.AppsecCollection, 
logger *log.Entry) string { + var rulesArr []string + dedupRules := make(map[string]struct{}) + + for _, collection := range collections { + for _, rule := range collection.Rules { + if _, ok := dedupRules[rule]; !ok { + rulesArr = append(rulesArr, rule) + dedupRules[rule] = struct{}{} + } else { + logger.Debugf("Discarding duplicate rule : %s", rule) + } + } + } + if len(rulesArr) != len(dedupRules) { + logger.Warningf("%d rules were discarded as they were duplicates", len(rulesArr)-len(dedupRules)) + } + + return strings.Join(rulesArr, "\n") +} + func (r *AppsecRunner) Init(datadir string) error { var err error fs := os.DirFS(datadir) - inBandRules := "" - outOfBandRules := "" - - for _, collection := range r.AppsecRuntime.InBandRules { - inBandRules += collection.String() - } - - for _, collection := range r.AppsecRuntime.OutOfBandRules { - outOfBandRules += collection.String() - } inBandLogger := r.logger.Dup().WithField("band", "inband") outBandLogger := r.logger.Dup().WithField("band", "outband") + //While loading rules, we dedup rules based on their content, while keeping the order + inBandRules := r.MergeDedupRules(r.AppsecRuntime.InBandRules, inBandLogger) + outOfBandRules := r.MergeDedupRules(r.AppsecRuntime.OutOfBandRules, outBandLogger) + //setting up inband engine inbandCfg := coraza.NewWAFConfig().WithDirectives(inBandRules).WithRootFS(fs).WithDebugLogger(appsec.NewCrzLogger(inBandLogger)) if !r.AppsecRuntime.Config.InbandOptions.DisableBodyInspection { @@ -135,7 +151,7 @@ func (r *AppsecRunner) processRequest(tx appsec.ExtendedTransaction, request *ap //FIXME: should we abort here ? 
} - request.Tx.ProcessConnection(request.RemoteAddr, 0, "", 0) + request.Tx.ProcessConnection(request.ClientIP, 0, "", 0) for k, v := range request.Args { for _, vv := range v { @@ -167,7 +183,7 @@ func (r *AppsecRunner) processRequest(tx appsec.ExtendedTransaction, request *ap return nil } - if request.Body != nil && len(request.Body) > 0 { + if len(request.Body) > 0 { in, _, err = request.Tx.WriteRequestBody(request.Body) if err != nil { r.logger.Errorf("unable to write request body : %s", err) @@ -249,7 +265,7 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { // Should the in band match trigger an overflow ? if r.AppsecRuntime.Response.SendAlert { - appsecOvlfw, err := AppsecEventGeneration(evt) + appsecOvlfw, err := AppsecEventGeneration(evt, request.HTTPRequest) if err != nil { r.logger.Errorf("unable to generate appsec event : %s", err) return @@ -293,7 +309,7 @@ func (r *AppsecRunner) handleOutBandInterrupt(request *appsec.ParsedRequest) { // Should the match trigger an overflow ? 
if r.AppsecRuntime.Response.SendAlert { - appsecOvlfw, err := AppsecEventGeneration(evt) + appsecOvlfw, err := AppsecEventGeneration(evt, request.HTTPRequest) if err != nil { r.logger.Errorf("unable to generate appsec event : %s", err) return diff --git a/pkg/acquisition/modules/appsec/appsec_runner_test.go b/pkg/acquisition/modules/appsec/appsec_runner_test.go new file mode 100644 index 00000000000..2027cf1d2c0 --- /dev/null +++ b/pkg/acquisition/modules/appsec/appsec_runner_test.go @@ -0,0 +1,139 @@ +package appsecacquisition + +import ( + "testing" + + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" +) + +func TestAppsecRuleLoad(t *testing.T) { + log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ + { + name: "simple rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) + }, + }, + { + name: "simple native rule load", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) + }, + }, + { + name: "simple native rule load (2)", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + }, + afterload_asserts: func(runner AppsecRunner) 
{ + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "simple native rule load + dedup", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "multi simple rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "multi simple rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule2", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "imbricated rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + + Or: []appsec_rule.CustomRule{ + { + //Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + 
//Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "tutu"}, + }, + { + //Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "tata"}, + }, { + //Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "titi"}, + }, + }, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 4) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index d2079b43726..1534f5cb7fa 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -18,6 +18,8 @@ type appsecRuleTest struct { expected_load_ok bool inband_rules []appsec_rule.CustomRule outofband_rules []appsec_rule.CustomRule + inband_native_rules []string + outofband_native_rules []string on_load []appsec.Hook pre_eval []appsec.Hook post_eval []appsec.Hook @@ -28,6 +30,7 @@ type appsecRuleTest struct { DefaultRemediation string DefaultPassAction string input_request appsec.ParsedRequest + afterload_asserts func(runner AppsecRunner) output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) } @@ -53,6 +56,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { inbandRules = append(inbandRules, strRule) } + inbandRules = append(inbandRules, test.inband_native_rules...) + outofbandRules = append(outofbandRules, test.outofband_native_rules...) 
for ridx, rule := range test.outofband_rules { strRule, _, err := rule.Convert(appsec_rule.ModsecurityRuleType, rule.Name) if err != nil { @@ -94,6 +99,13 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { t.Fatalf("unable to initialize runner : %s", err) } + if test.afterload_asserts != nil { + //afterload asserts are just to evaluate the state of the runner after the rules have been loaded + //if it's present, don't try to process requests + test.afterload_asserts(runner) + return + } + input := test.input_request input.ResponseChannel = make(chan appsec.AppsecTempResponse) OutputEvents := make([]types.Event, 0) diff --git a/pkg/acquisition/modules/appsec/appsec_win_test.go b/pkg/acquisition/modules/appsec/appsec_win_test.go index e85d75df251..a6b8f3a0340 100644 --- a/pkg/acquisition/modules/appsec/appsec_win_test.go +++ b/pkg/acquisition/modules/appsec/appsec_win_test.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows package appsecacquisition diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 15de8046716..8995b305680 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -1,10 +1,10 @@ package appsecacquisition import ( + "errors" "fmt" "net" - "slices" - "strconv" + "net/http" "time" "github.com/oschwald/geoip2-golang" @@ -22,32 +22,49 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var appsecMetaKeys = []string{ - "id", - "name", - "method", - "uri", - "matched_zones", - "msg", -} +func AppsecEventGenerationGeoIPEnrich(src *models.Source) error { -func appendMeta(meta models.Meta, key string, value string) models.Meta { - if value == "" { - return meta + if src == nil || src.Scope == nil || *src.Scope != types.Ip { + return errors.New("source is nil or not an IP") } - meta = append(meta, &models.MetaItems0{ - Key: key, - Value: value, - }) - return meta + //GeoIP enrich + asndata, err := exprhelpers.GeoIPASNEnrich(src.IP) + + if err 
!= nil { + return err + } else if asndata != nil { + record := asndata.(*geoip2.ASN) + src.AsName = record.AutonomousSystemOrganization + src.AsNumber = fmt.Sprintf("%d", record.AutonomousSystemNumber) + } + + cityData, err := exprhelpers.GeoIPEnrich(src.IP) + if err != nil { + return err + } else if cityData != nil { + record := cityData.(*geoip2.City) + src.Cn = record.Country.IsoCode + src.Latitude = float32(record.Location.Latitude) + src.Longitude = float32(record.Location.Longitude) + } + + rangeData, err := exprhelpers.GeoIPRangeEnrich(src.IP) + if err != nil { + return err + } else if rangeData != nil { + record := rangeData.(*net.IPNet) + src.Range = record.String() + } + return nil } -func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { - //if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI +func AppsecEventGeneration(inEvt types.Event, request *http.Request) (*types.Event, error) { + // if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI if !inEvt.Appsec.HasInBandMatches { return nil, nil } + evt := types.Event{} evt.Type = types.APPSEC evt.Process = true @@ -58,34 +75,12 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { Scope: ptr.Of(types.Ip), } - asndata, err := exprhelpers.GeoIPASNEnrich(sourceIP) - - if err != nil { - log.Errorf("Unable to enrich ip '%s' for ASN: %s", sourceIP, err) - } else if asndata != nil { - record := asndata.(*geoip2.ASN) - source.AsName = record.AutonomousSystemOrganization - source.AsNumber = fmt.Sprintf("%d", record.AutonomousSystemNumber) - } - - cityData, err := exprhelpers.GeoIPEnrich(sourceIP) - if err != nil { - log.Errorf("Unable to enrich ip '%s' for geo data: %s", sourceIP, err) - } else if cityData != nil { - record := cityData.(*geoip2.City) - source.Cn = record.Country.IsoCode - source.Latitude = float32(record.Location.Latitude) - source.Longitude = float32(record.Location.Longitude) - } - - 
rangeData, err := exprhelpers.GeoIPRangeEnrich(sourceIP) - if err != nil { - log.Errorf("Unable to enrich ip '%s' for range: %s", sourceIP, err) - } else if rangeData != nil { - record := rangeData.(*net.IPNet) - source.Range = record.String() + // Enrich source with GeoIP data + if err := AppsecEventGenerationGeoIPEnrich(&source); err != nil { + log.Errorf("unable to enrich source with GeoIP data : %s", err) } + // Build overflow evt.Overflow.Sources = make(map[string]models.Source) evt.Overflow.Sources[sourceIP] = source @@ -93,80 +88,11 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { alert.Capacity = ptr.Of(int32(1)) alert.Events = make([]*models.Event, len(evt.Appsec.GetRuleIDs())) - now := ptr.Of(time.Now().UTC().Format(time.RFC3339)) - - tmpAppsecContext := make(map[string][]string) - - for _, matched_rule := range inEvt.Appsec.MatchedRules { - evtRule := models.Event{} - - evtRule.Timestamp = now - - evtRule.Meta = make(models.Meta, 0) - - for _, key := range appsecMetaKeys { - - if tmpAppsecContext[key] == nil { - tmpAppsecContext[key] = make([]string, 0) - } - - switch value := matched_rule[key].(type) { - case string: - evtRule.Meta = appendMeta(evtRule.Meta, key, value) - if value != "" && !slices.Contains(tmpAppsecContext[key], value) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], value) - } - case int: - val := strconv.Itoa(value) - evtRule.Meta = appendMeta(evtRule.Meta, key, val) - if val != "" && !slices.Contains(tmpAppsecContext[key], val) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], val) - } - case []string: - for _, v := range value { - evtRule.Meta = appendMeta(evtRule.Meta, key, v) - if v != "" && !slices.Contains(tmpAppsecContext[key], v) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], v) - } - } - case []int: - for _, v := range value { - val := strconv.Itoa(v) - evtRule.Meta = appendMeta(evtRule.Meta, key, val) - if val != "" && !slices.Contains(tmpAppsecContext[key], val) { - 
tmpAppsecContext[key] = append(tmpAppsecContext[key], val) - } - - } - default: - val := fmt.Sprintf("%v", value) - evtRule.Meta = appendMeta(evtRule.Meta, key, val) - if val != "" && !slices.Contains(tmpAppsecContext[key], val) { - tmpAppsecContext[key] = append(tmpAppsecContext[key], val) - } - - } + metas, errors := alertcontext.AppsecEventToContext(inEvt.Appsec, request) + if len(errors) > 0 { + for _, err := range errors { + log.Errorf("failed to generate appsec context: %s", err) } - alert.Events = append(alert.Events, &evtRule) - } - - metas := make([]*models.MetaItems0, 0) - - for key, values := range tmpAppsecContext { - if len(values) == 0 { - continue - } - - valueStr, err := alertcontext.TruncateContext(values, alertcontext.MaxContextValueLen) - if err != nil { - log.Warningf(err.Error()) - } - - meta := models.MetaItems0{ - Key: key, - Value: valueStr, - } - metas = append(metas, &meta) } alert.Meta = metas @@ -185,15 +111,13 @@ func AppsecEventGeneration(inEvt types.Event) (*types.Event, error) { alert.StopAt = ptr.Of(time.Now().UTC().Format(time.RFC3339)) evt.Overflow.APIAlerts = []models.Alert{alert} evt.Overflow.Alert = &alert + return &evt, nil } func EventFromRequest(r *appsec.ParsedRequest, labels map[string]string) (types.Event, error) { - evt := types.Event{} - //we might want to change this based on in-band vs out-of-band ? - evt.Type = types.LOG - evt.ExpectMode = types.LIVE - //def needs fixing + evt := types.MakeEvent(false, types.LOG, true) + // def needs fixing evt.Stage = "s00-raw" evt.Parsed = map[string]string{ "source_ip": r.ClientIP, @@ -203,19 +127,19 @@ func EventFromRequest(r *appsec.ParsedRequest, labels map[string]string) (types. "req_uuid": r.Tx.ID(), "source": "crowdsec-appsec", "remediation_cmpt_ip": r.RemoteAddrNormalized, - //TBD: - //http_status - //user_agent + // TBD: + // http_status + // user_agent } evt.Line = types.Line{ Time: time.Now(), - //should we add some info like listen addr/port/path ? 
+ // should we add some info like listen addr/port/path ? Labels: labels, Process: true, Module: "appsec", Src: "appsec", - Raw: "dummy-appsec-data", //we discard empty Line.Raw items :) + Raw: "dummy-appsec-data", // we discard empty Line.Raw items :) } evt.Appsec = types.AppsecEvent{} @@ -247,29 +171,29 @@ func LogAppsecEvent(evt *types.Event, logger *log.Entry) { "target_uri": req, }).Debugf("%s triggered non-blocking rules on %s (%d rules) [%v]", evt.Parsed["source_ip"], req, len(evt.Appsec.MatchedRules), evt.Appsec.GetRuleIDs()) } - } func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedRequest) error { - if evt == nil { - //an error was already emitted, let's not spam the logs + // an error was already emitted, let's not spam the logs return nil } if !req.Tx.IsInterrupted() { - //if the phase didn't generate an interruption, we don't have anything to add to the event + // if the phase didn't generate an interruption, we don't have anything to add to the event return nil } - //if one interruption was generated, event is good for processing :) + // if one interruption was generated, event is good for processing :) evt.Process = true if evt.Meta == nil { evt.Meta = map[string]string{} } + if evt.Parsed == nil { evt.Parsed = map[string]string{} } + if req.IsInBand { evt.Meta["appsec_interrupted"] = "true" evt.Meta["appsec_action"] = req.Tx.Interruption().Action @@ -290,9 +214,11 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR if variable.Key() != "" { key += "." 
+ variable.Key() } + if variable.Value() == "" { continue } + for _, collectionToKeep := range r.AppsecRuntime.CompiledVariablesTracking { match := collectionToKeep.MatchString(key) if match { @@ -303,6 +229,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR } } } + return true }) @@ -325,11 +252,12 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR ruleNameProm := fmt.Sprintf("%d", rule.Rule().ID()) if details, ok := appsec.AppsecRulesDetails[rule.Rule().ID()]; ok { - //Only set them for custom rules, not for rules written in seclang + // Only set them for custom rules, not for rules written in seclang name = details.Name version = details.Version hash = details.Hash ruleNameProm = details.Name + r.logger.Debugf("custom rule for event, setting name: %s, version: %s, hash: %s", name, version, hash) } else { name = fmt.Sprintf("native_rule:%d", rule.Rule().ID()) @@ -338,12 +266,15 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR AppsecRuleHits.With(prometheus.Labels{"rule_name": ruleNameProm, "type": kind, "source": req.RemoteAddrNormalized, "appsec_engine": req.AppsecEngine}).Inc() matchedZones := make([]string, 0) + for _, matchData := range rule.MatchedDatas() { zone := matchData.Variable().Name() + varName := matchData.Key() if varName != "" { zone += "." 
+ varName } + matchedZones = append(matchedZones, zone) } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index 1a78ae6fa7a..ba267c9050b 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -57,16 +57,16 @@ type CloudwatchSource struct { // CloudwatchSourceConfiguration allows user to define one or more streams to monitor within a cloudwatch log group type CloudwatchSourceConfiguration struct { configuration.DataSourceCommonCfg `yaml:",inline"` - GroupName string `yaml:"group_name"` //the group name to be monitored - StreamRegexp *string `yaml:"stream_regexp,omitempty"` //allow to filter specific streams + GroupName string `yaml:"group_name"` // the group name to be monitored + StreamRegexp *string `yaml:"stream_regexp,omitempty"` // allow to filter specific streams StreamName *string `yaml:"stream_name,omitempty"` StartTime, EndTime *time.Time `yaml:"-"` - DescribeLogStreamsLimit *int64 `yaml:"describelogstreams_limit,omitempty"` //batch size for DescribeLogStreamsPagesWithContext + DescribeLogStreamsLimit *int64 `yaml:"describelogstreams_limit,omitempty"` // batch size for DescribeLogStreamsPagesWithContext GetLogEventsPagesLimit *int64 `yaml:"getlogeventspages_limit,omitempty"` - PollNewStreamInterval *time.Duration `yaml:"poll_new_stream_interval,omitempty"` //frequency at which we poll for new streams within the log group - MaxStreamAge *time.Duration `yaml:"max_stream_age,omitempty"` //monitor only streams that have been updated within $duration - PollStreamInterval *time.Duration `yaml:"poll_stream_interval,omitempty"` //frequency at which we poll each stream - StreamReadTimeout *time.Duration `yaml:"stream_read_timeout,omitempty"` //stop monitoring streams that haven't been updated within $duration, might be reopened later tho + PollNewStreamInterval *time.Duration `yaml:"poll_new_stream_interval,omitempty"` // frequency at 
which we poll for new streams within the log group + MaxStreamAge *time.Duration `yaml:"max_stream_age,omitempty"` // monitor only streams that have been updated within $duration + PollStreamInterval *time.Duration `yaml:"poll_stream_interval,omitempty"` // frequency at which we poll each stream + StreamReadTimeout *time.Duration `yaml:"stream_read_timeout,omitempty"` // stop monitoring streams that haven't been updated within $duration, might be reopened later tho AwsApiCallTimeout *time.Duration `yaml:"aws_api_timeout,omitempty"` AwsProfile *string `yaml:"aws_profile,omitempty"` PrependCloudwatchTimestamp *bool `yaml:"prepend_cloudwatch_timestamp,omitempty"` @@ -86,7 +86,7 @@ type LogStreamTailConfig struct { logger *log.Entry ExpectMode int t tomb.Tomb - StartTime, EndTime time.Time //only used for CatMode + StartTime, EndTime time.Time // only used for CatMode } var ( @@ -111,7 +111,7 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { return fmt.Errorf("cannot parse CloudwatchSource configuration: %w", err) } - if len(cw.Config.GroupName) == 0 { + if cw.Config.GroupName == "" { return errors.New("group_name is mandatory for CloudwatchSource") } @@ -159,6 +159,7 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr if err != nil { return err } + cw.metricsLevel = MetricsLevel cw.logger = logger.WithField("group", cw.Config.GroupName) @@ -175,16 +176,18 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr if *cw.Config.MaxStreamAge > *cw.Config.StreamReadTimeout { cw.logger.Warningf("max_stream_age > stream_read_timeout, stream might keep being opened/closed") } + cw.logger.Tracef("aws_config_dir set to %s", *cw.Config.AwsConfigDir) if *cw.Config.AwsConfigDir != "" { _, err := os.Stat(*cw.Config.AwsConfigDir) if err != nil { cw.logger.Errorf("can't read aws_config_dir '%s' got err %s", *cw.Config.AwsConfigDir, err) - return fmt.Errorf("can't read aws_config_dir %s got err %s ", 
*cw.Config.AwsConfigDir, err) + return fmt.Errorf("can't read aws_config_dir %s got err %w ", *cw.Config.AwsConfigDir, err) } + os.Setenv("AWS_SDK_LOAD_CONFIG", "1") - //as aws sdk relies on $HOME, let's allow the user to override it :) + // as aws sdk relies on $HOME, let's allow the user to override it :) os.Setenv("AWS_CONFIG_FILE", fmt.Sprintf("%s/config", *cw.Config.AwsConfigDir)) os.Setenv("AWS_SHARED_CREDENTIALS_FILE", fmt.Sprintf("%s/credentials", *cw.Config.AwsConfigDir)) } else { @@ -192,25 +195,30 @@ func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, Metr cw.logger.Errorf("aws_region is not specified, specify it or aws_config_dir") return errors.New("aws_region is not specified, specify it or aws_config_dir") } + os.Setenv("AWS_REGION", *cw.Config.AwsRegion) } if err := cw.newClient(); err != nil { return err } + cw.streamIndexes = make(map[string]string) targetStream := "*" + if cw.Config.StreamRegexp != nil { if _, err := regexp.Compile(*cw.Config.StreamRegexp); err != nil { return fmt.Errorf("while compiling regexp '%s': %w", *cw.Config.StreamRegexp, err) } + targetStream = *cw.Config.StreamRegexp } else if cw.Config.StreamName != nil { targetStream = *cw.Config.StreamName } cw.logger.Infof("Adding cloudwatch group '%s' (stream:%s) to datasources", cw.Config.GroupName, targetStream) + return nil } @@ -231,25 +239,30 @@ func (cw *CloudwatchSource) newClient() error { if sess == nil { return errors.New("failed to create aws session") } + if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { cw.logger.Debugf("[testing] overloading endpoint with %s", v) cw.cwClient = cloudwatchlogs.New(sess, aws.NewConfig().WithEndpoint(v)) } else { cw.cwClient = cloudwatchlogs.New(sess) } + if cw.cwClient == nil { return errors.New("failed to create cloudwatch client") } + return nil } -func (cw *CloudwatchSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (cw *CloudwatchSource) StreamingAcquisition(ctx context.Context, 
out chan types.Event, t *tomb.Tomb) error { cw.t = t monitChan := make(chan LogStreamTailConfig) + t.Go(func() error { - return cw.LogStreamManager(monitChan, out) + return cw.LogStreamManager(ctx, monitChan, out) }) - return cw.WatchLogGroupForStreams(monitChan) + + return cw.WatchLogGroupForStreams(ctx, monitChan) } func (cw *CloudwatchSource) GetMetrics() []prometheus.Collector { @@ -276,9 +289,10 @@ func (cw *CloudwatchSource) Dump() interface{} { return cw } -func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig) error { +func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out chan LogStreamTailConfig) error { cw.logger.Debugf("Starting to watch group (interval:%s)", cw.Config.PollNewStreamInterval) ticker := time.NewTicker(*cw.Config.PollNewStreamInterval) + var startFrom *string for { @@ -289,11 +303,11 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig case <-ticker.C: hasMoreStreams := true startFrom = nil + for hasMoreStreams { cw.logger.Tracef("doing the call to DescribeLogStreamsPagesWithContext") - ctx := context.Background() - //there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime + // there can be a lot of streams in a group, and we're only interested in those recently written to, so we sort by LastEventTime err := cw.cwClient.DescribeLogStreamsPagesWithContext( ctx, &cloudwatchlogs.DescribeLogStreamsInput{ @@ -305,13 +319,14 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig }, func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool { cw.logger.Tracef("in helper of DescribeLogStreamsPagesWithContext") + for _, event := range page.LogStreams { startFrom = page.NextToken - //we check if the stream has been written to recently enough to be monitored + // we check if the stream has been written to recently enough to be monitored if 
event.LastIngestionTime != nil { - //aws uses millisecond since the epoch + // aws uses millisecond since the epoch oldest := time.Now().UTC().Add(-*cw.Config.MaxStreamAge) - //TBD : verify that this is correct : Unix 2nd arg expects Nanoseconds, and have a code that is more explicit. + // TBD : verify that this is correct : Unix 2nd arg expects Nanoseconds, and have a code that is more explicit. LastIngestionTime := time.Unix(0, *event.LastIngestionTime*int64(time.Millisecond)) if LastIngestionTime.Before(oldest) { cw.logger.Tracef("stop iteration, %s reached oldest age, stop (%s < %s)", *event.LogStreamName, LastIngestionTime, time.Now().UTC().Add(-*cw.Config.MaxStreamAge)) @@ -319,7 +334,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig return false } cw.logger.Tracef("stream %s is elligible for monitoring", *event.LogStreamName) - //the stream has been updated recently, check if we should monitor it + // the stream has been updated recently, check if we should monitor it var expectMode int if !cw.Config.UseTimeMachine { expectMode = types.LIVE @@ -356,8 +371,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(out chan LogStreamTailConfig } // LogStreamManager receives the potential streams to monitor, and starts a go routine when needed -func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outChan chan types.Event) error { - +func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStreamTailConfig, outChan chan types.Event) error { cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName) pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval) @@ -384,7 +398,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha for idx, stream := range cw.monitoredStreams { if newStream.GroupName == stream.GroupName && newStream.StreamName == stream.StreamName { - //stream exists, but is dead, remove it from list + // stream exists, 
but is dead, remove it from list if !stream.t.Alive() { cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName) cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...) @@ -398,7 +412,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha } } - //let's start watching this stream + // let's start watching this stream if shouldCreate { if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() @@ -407,7 +421,7 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha newStream.logger = cw.logger.WithField("stream", newStream.StreamName) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) newStream.t.Go(func() error { - return cw.TailLogStream(&newStream, outChan) + return cw.TailLogStream(ctx, &newStream, outChan) }) cw.monitoredStreams = append(cw.monitoredStreams, &newStream) } @@ -442,11 +456,11 @@ func (cw *CloudwatchSource) LogStreamManager(in chan LogStreamTailConfig, outCha } } -func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error { +func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string lastReadMessage := time.Now().UTC() ticker := time.NewTicker(cfg.PollStreamInterval) - //resume at existing index if we already had + // resume at existing index if we already had streamIndexMutex.Lock() v := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] streamIndexMutex.Unlock() @@ -464,7 +478,6 @@ func (cw *CloudwatchSource) TailLogStream(cfg *LogStreamTailConfig, outChan chan for hasMorePages { /*for the first call, we only consume the last item*/ cfg.logger.Tracef("calling GetLogEventsPagesWithContext") - ctx := context.Background() err := cw.cwClient.GetLogEventsPagesWithContext(ctx, &cloudwatchlogs.GetLogEventsInput{ Limit: 
aws.Int64(cfg.GetLogEventsPagesLimit), @@ -567,7 +580,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'start_date'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton of date formats are supported strdate, startDate := parser.GenDateParse(v[0]) cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate) cw.Config.StartTime = &startDate @@ -575,7 +588,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'end_date'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton of date formats are supported strdate, endDate := parser.GenDateParse(v[0]) cw.logger.Debugf("parsed '%s' as '%s'", v[0], strdate) cw.Config.EndTime = &endDate @@ -583,7 +596,7 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, if len(v) != 1 { return errors.New("expected zero or one argument for 'backlog'") } - //let's reuse our parser helper so that a ton of date formats are supported + // let's reuse our parser helper so that a ton of date formats are supported duration, err := time.ParseDuration(v[0]) if err != nil { return fmt.Errorf("unable to parse '%s' as duration: %w", v[0], err) @@ -618,8 +631,8 @@ func (cw *CloudwatchSource) ConfigureByDSN(dsn string, labels map[string]string, return nil } -func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { - //StreamName string, Start time.Time, End time.Time +func (cw *CloudwatchSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + // StreamName string, Start time.Time, End time.Time config := LogStreamTailConfig{ GroupName: cw.Config.GroupName, StreamName: *cw.Config.StreamName, @@ -633,12 
+646,12 @@ func (cw *CloudwatchSource) OneShotAcquisition(out chan types.Event, t *tomb.Tom Labels: cw.Config.Labels, ExpectMode: types.TIMEMACHINE, } - return cw.CatLogStream(&config, out) + return cw.CatLogStream(ctx, &config, out) } -func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan types.Event) error { +func (cw *CloudwatchSource) CatLogStream(ctx context.Context, cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string - var head = true + head := true /*convert the times*/ startTime := cfg.StartTime.UTC().Unix() * 1000 endTime := cfg.EndTime.UTC().Unix() * 1000 @@ -652,7 +665,6 @@ func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan if startFrom != nil { cfg.logger.Tracef("next_token: %s", *startFrom) } - ctx := context.Background() err := cw.cwClient.GetLogEventsPagesWithContext(ctx, &cloudwatchlogs.GetLogEventsInput{ Limit: aws.Int64(10), @@ -698,7 +710,7 @@ func (cw *CloudwatchSource) CatLogStream(cfg *LogStreamTailConfig, outChan chan func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig) (types.Event, error) { l := types.Line{} - evt := types.Event{} + evt := types.MakeEvent(cfg.ExpectMode == types.TIMEMACHINE, types.LOG, true) if log.Message == nil { return evt, errors.New("nil message") } @@ -714,9 +726,6 @@ func cwLogToEvent(log *cloudwatchlogs.OutputLogEvent, cfg *LogStreamTailConfig) l.Process = true l.Module = "cloudwatch" evt.Line = l - evt.Process = true - evt.Type = types.LOG - evt.ExpectMode = cfg.ExpectMode cfg.logger.Debugf("returned event labels : %+v", evt.Line.Labels) return evt, nil } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index bab7593f26f..3d638896537 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -1,6 +1,7 @@ package cloudwatchacquisition import ( + "context" "errors" 
"fmt" "net" @@ -34,6 +35,7 @@ func deleteAllLogGroups(t *testing.T, cw *CloudwatchSource) { input := &cloudwatchlogs.DescribeLogGroupsInput{} result, err := cw.cwClient.DescribeLogGroups(input) require.NoError(t, err) + for _, group := range result.LogGroups { _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ LogGroupName: group.LogGroupName, @@ -62,18 +64,22 @@ func TestMain(m *testing.M) { if runtime.GOOS == "windows" { os.Exit(0) } + if err := checkForLocalStackAvailability(); err != nil { log.Fatalf("local stack error : %s", err) } + def_PollNewStreamInterval = 1 * time.Second def_PollStreamInterval = 1 * time.Second def_StreamReadTimeout = 10 * time.Second def_MaxStreamAge = 5 * time.Second def_PollDeadStreamInterval = 5 * time.Second + os.Exit(m.Run()) } func TestWatchLogGroupForStreams(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -447,7 +453,7 @@ stream_name: test_stream`), dbgLogger.Infof("running StreamingAcquisition") actmb := tomb.Tomb{} actmb.Go(func() error { - err := cw.StreamingAcquisition(out, &actmb) + err := cw.StreamingAcquisition(ctx, out, &actmb) dbgLogger.Infof("acquis done") cstest.RequireErrorContains(t, err, tc.expectedStartErr) return nil @@ -503,7 +509,6 @@ stream_name: test_stream`), if len(res) != 0 { t.Fatalf("leftover unmatched results : %v", res) } - } if tc.teardown != nil { tc.teardown(t, &cw) @@ -513,6 +518,7 @@ stream_name: test_stream`), } func TestConfiguration(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -571,9 +577,9 @@ stream_name: test_stream`), switch cw.GetMode() { case "tail": - err = cw.StreamingAcquisition(out, &tmb) + err = cw.StreamingAcquisition(ctx, out, &tmb) case "cat": - err = cw.OneShotAcquisition(out, &tmb) + err = cw.OneShotAcquisition(ctx, out, &tmb) } cstest.RequireErrorContains(t, err, tc.expectedStartErr) @@ -631,6 +637,8 @@ func 
TestConfigureByDSN(t *testing.T) { } func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -762,7 +770,7 @@ func TestOneShotAcquisition(t *testing.T) { var rcvdEvts []types.Event dbgLogger.Infof("running StreamingAcquisition") - err = cw.OneShotAcquisition(out, &tmb) + err = cw.OneShotAcquisition(ctx, out, &tmb) dbgLogger.Infof("acquis done") cstest.RequireErrorContains(t, err, tc.expectedStartErr) close(out) @@ -798,7 +806,6 @@ func TestOneShotAcquisition(t *testing.T) { if len(res) != 0 { t.Fatalf("leftover unmatched results : %v", res) } - } if tc.teardown != nil { tc.teardown(t, &cw) diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 9a6e13feee4..b27255ec13f 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -286,9 +286,9 @@ func (d *DockerSource) SupportedModes() []string { } // OneShotAcquisition reads a set of file and returns when done -func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { d.logger.Debug("In oneshot") - runningContainer, err := d.Client.ContainerList(context.Background(), dockerTypes.ContainerListOptions{}) + runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) if err != nil { return err } @@ -298,10 +298,10 @@ func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er d.logger.Debugf("container with id %s is already being read from", container.ID) continue } - if containerConfig := d.EvalContainer(container); containerConfig != nil { + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { d.logger.Infof("reading logs from container %s", containerConfig.Name) d.logger.Debugf("logs options: %+v", *d.containerLogsOptions) - 
dockerReader, err := d.Client.ContainerLogs(context.Background(), containerConfig.ID, *d.containerLogsOptions) + dockerReader, err := d.Client.ContainerLogs(ctx, containerConfig.ID, *d.containerLogsOptions) if err != nil { d.logger.Errorf("unable to read logs from container: %+v", err) return err @@ -334,7 +334,10 @@ func (d *DockerSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) er if d.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() } - evt := types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} + evt := types.MakeEvent(true, types.LOG, true) + evt.Line = l + evt.Process = true + evt.Type = types.LOG out <- evt d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) } @@ -372,58 +375,56 @@ func (d *DockerSource) CanRun() error { return nil } -func (d *DockerSource) getContainerTTY(containerId string) bool { - containerDetails, err := d.Client.ContainerInspect(context.Background(), containerId) +func (d *DockerSource) getContainerTTY(ctx context.Context, containerId string) bool { + containerDetails, err := d.Client.ContainerInspect(ctx, containerId) if err != nil { return false } return containerDetails.Config.Tty } -func (d *DockerSource) getContainerLabels(containerId string) map[string]interface{} { - containerDetails, err := d.Client.ContainerInspect(context.Background(), containerId) +func (d *DockerSource) getContainerLabels(ctx context.Context, containerId string) map[string]interface{} { + containerDetails, err := d.Client.ContainerInspect(ctx, containerId) if err != nil { return map[string]interface{}{} } return parseLabels(containerDetails.Config.Labels) } -func (d *DockerSource) EvalContainer(container dockerTypes.Container) *ContainerConfig { +func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes.Container) *ContainerConfig { for _, containerID := range d.Config.ContainerID { if containerID == container.ID { - 
return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } } for _, containerName := range d.Config.ContainerName { for _, name := range container.Names { - if strings.HasPrefix(name, "/") && len(name) > 0 { + if strings.HasPrefix(name, "/") && name != "" { name = name[1:] } if name == containerName { - return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } } - } for _, cont := range d.compiledContainerID { if matched := cont.MatchString(container.ID); matched { - return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } } for _, cont := range d.compiledContainerName { for _, name := range container.Names { if matched := cont.MatchString(name); matched { - return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } } - } if d.Config.UseContainerLabels { - parsedLabels := d.getContainerLabels(container.ID) + parsedLabels := d.getContainerLabels(ctx, container.ID) if len(parsedLabels) == 0 { d.logger.Tracef("container has no 'crowdsec' labels set, ignoring container: %s", container.ID) return nil @@ -460,13 +461,13 @@ func (d *DockerSource) EvalContainer(container dockerTypes.Container) *Container } d.logger.Errorf("label %s is not a string", k) } - return &ContainerConfig{ID: 
container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(container.ID)} + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(ctx, container.ID)} } return nil } -func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { +func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { ticker := time.NewTicker(d.CheckIntervalDuration) d.logger.Infof("Container watcher started, interval: %s", d.CheckIntervalDuration.String()) for { @@ -477,7 +478,7 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha case <-ticker.C: // to track for garbage collection runningContainersID := make(map[string]bool) - runningContainer, err := d.Client.ContainerList(context.Background(), dockerTypes.ContainerListOptions{}) + runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) if err != nil { if strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon at") { for idx, container := range d.runningContainerState { @@ -503,7 +504,7 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha if _, ok := d.runningContainerState[container.ID]; ok { continue } - if containerConfig := d.EvalContainer(container); containerConfig != nil { + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { monitChan <- containerConfig } } @@ -520,16 +521,16 @@ func (d *DockerSource) WatchContainer(monitChan chan *ContainerConfig, deleteCha } } -func (d *DockerSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (d *DockerSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { d.t = t monitChan := make(chan *ContainerConfig) deleteChan := make(chan *ContainerConfig) d.logger.Infof("Starting docker acquisition") 
t.Go(func() error { - return d.DockerManager(monitChan, deleteChan, out) + return d.DockerManager(ctx, monitChan, deleteChan, out) }) - return d.WatchContainer(monitChan, deleteChan) + return d.WatchContainer(ctx, monitChan, deleteChan) } func (d *DockerSource) Dump() interface{} { @@ -543,9 +544,9 @@ func ReadTailScanner(scanner *bufio.Scanner, out chan string, t *tomb.Tomb) erro return scanner.Err() } -func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types.Event, deleteChan chan *ContainerConfig) error { +func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfig, outChan chan types.Event, deleteChan chan *ContainerConfig) error { container.logger.Infof("start tail for container %s", container.Name) - dockerReader, err := d.Client.ContainerLogs(context.Background(), container.ID, *d.containerLogsOptions) + dockerReader, err := d.Client.ContainerLogs(ctx, container.ID, *d.containerLogsOptions) if err != nil { container.logger.Errorf("unable to read logs from container: %+v", err) return err @@ -581,21 +582,17 @@ func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types l.Src = container.Name l.Process = true l.Module = d.GetName() - var evt types.Event - if !d.Config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(d.Config.UseTimeMachine, types.LOG, true) + evt.Line = l linesRead.With(prometheus.Labels{"source": container.Name}).Inc() outChan <- evt d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) case <-readerTomb.Dying(): - //This case is to handle temporarily losing the connection to the docker socket - //The only known case currently is when using docker-socket-proxy (and maybe a docker daemon restart) + // This case is to handle temporarily losing the connection to the docker socket + // The only 
known case currently is when using docker-socket-proxy (and maybe a docker daemon restart) d.logger.Debugf("readerTomb dying for container %s, removing it from runningContainerState", container.Name) deleteChan <- container - //Also reset the Since to avoid re-reading logs + // Also reset the Since to avoid re-reading logs d.Config.Since = time.Now().UTC().Format(time.RFC3339) d.containerLogsOptions.Since = d.Config.Since return nil @@ -603,7 +600,7 @@ func (d *DockerSource) TailDocker(container *ContainerConfig, outChan chan types } } -func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan *ContainerConfig, outChan chan types.Event) error { +func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConfig, deleteChan chan *ContainerConfig, outChan chan types.Event) error { d.logger.Info("DockerSource Manager started") for { select { @@ -612,7 +609,7 @@ func (d *DockerSource) DockerManager(in chan *ContainerConfig, deleteChan chan * newContainer.t = &tomb.Tomb{} newContainer.logger = d.logger.WithField("container_name", newContainer.Name) newContainer.t.Go(func() error { - return d.TailDocker(newContainer, outChan, deleteChan) + return d.TailDocker(ctx, newContainer, outChan, deleteChan) }) d.runningContainerState[newContainer.ID] = newContainer } diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index e332569fb3a..5d8208637e8 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -120,6 +120,7 @@ type mockDockerCli struct { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) log.Info("Test 'TestStreamingAcquisition'") @@ -185,7 +186,7 @@ container_name_regexp: readerTomb := &tomb.Tomb{} streamTomb := tomb.Tomb{} streamTomb.Go(func() error { - return dockerSource.StreamingAcquisition(out, &dockerTomb) + return 
dockerSource.StreamingAcquisition(ctx, out, &dockerTomb) }) readerTomb.Go(func() error { time.Sleep(1 * time.Second) @@ -245,7 +246,7 @@ func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, o for _, line := range data { startLineByte := make([]byte, 8) - binary.LittleEndian.PutUint32(startLineByte, 1) //stdout stream + binary.LittleEndian.PutUint32(startLineByte, 1) // stdout stream binary.BigEndian.PutUint32(startLineByte[4:], uint32(len(line))) ret += fmt.Sprintf("%s%s", startLineByte, line) } @@ -266,6 +267,8 @@ func (cli *mockDockerCli) ContainerInspect(ctx context.Context, c string) (docke } func TestOneShot(t *testing.T) { + ctx := context.Background() + log.Infof("Test 'TestOneShot'") tests := []struct { @@ -320,7 +323,7 @@ func TestOneShot(t *testing.T) { dockerClient.Client = new(mockDockerCli) out := make(chan types.Event, 100) tomb := tomb.Tomb{} - err := dockerClient.OneShotAcquisition(out, &tomb) + err := dockerClient.OneShotAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) // else we do the check before actualLines is incremented ... 
diff --git a/pkg/acquisition/modules/docker/utils.go b/pkg/acquisition/modules/docker/utils.go index c724f581194..6a0d494097f 100644 --- a/pkg/acquisition/modules/docker/utils.go +++ b/pkg/acquisition/modules/docker/utils.go @@ -22,7 +22,7 @@ func parseKeyToMap(m map[string]interface{}, key string, value string) { return } - for i := range len(parts) { + for i := range parts { if parts[i] == "" { return } diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index c36672507db..9f439b0c82e 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -3,6 +3,7 @@ package fileacquisition import ( "bufio" "compress/gzip" + "context" "errors" "fmt" "io" @@ -73,7 +74,7 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { f.logger.Tracef("FileAcquisition configuration: %+v", f.config) } - if len(f.config.Filename) != 0 { + if f.config.Filename != "" { f.config.Filenames = append(f.config.Filenames, f.config.Filename) } @@ -202,11 +203,11 @@ func (f *FileSource) ConfigureByDSN(dsn string, labels map[string]string, logger args := strings.Split(dsn, "?") - if len(args[0]) == 0 { + if args[0] == "" { return errors.New("empty file:// DSN") } - if len(args) == 2 && len(args[1]) != 0 { + if len(args) == 2 && args[1] != "" { params, err := url.ParseQuery(args[1]) if err != nil { return fmt.Errorf("could not parse file args: %w", err) @@ -279,7 +280,7 @@ func (f *FileSource) SupportedModes() []string { } // OneShotAcquisition reads a set of file and returns when done -func (f *FileSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (f *FileSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { f.logger.Debug("In oneshot") for _, file := range f.files { @@ -320,7 +321,7 @@ func (f *FileSource) CanRun() error { return nil } -func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (f *FileSource) 
StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { f.logger.Debug("Starting live acquisition") t.Go(func() error { return f.monitorNewFiles(out, t) @@ -385,7 +386,6 @@ func (f *FileSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) er } filink, err := os.Lstat(file) - if err != nil { f.logger.Errorf("Could not lstat() new file %s, ignoring it : %s", file, err) continue @@ -427,118 +427,122 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { return nil } - if event.Op&fsnotify.Create == fsnotify.Create { - fi, err := os.Stat(event.Name) - if err != nil { - logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) - continue - } + if event.Op&fsnotify.Create != fsnotify.Create { + continue + } - if fi.IsDir() { - continue - } + fi, err := os.Stat(event.Name) + if err != nil { + logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) + continue + } - logger.Debugf("Detected new file %s", event.Name) + if fi.IsDir() { + continue + } - matched := false + logger.Debugf("Detected new file %s", event.Name) - for _, pattern := range f.config.Filenames { - logger.Debugf("Matching %s with %s", pattern, event.Name) + matched := false - matched, err = filepath.Match(pattern, event.Name) - if err != nil { - logger.Errorf("Could not match pattern : %s", err) - continue - } + for _, pattern := range f.config.Filenames { + logger.Debugf("Matching %s with %s", pattern, event.Name) - if matched { - logger.Debugf("Matched %s with %s", pattern, event.Name) - break - } + matched, err = filepath.Match(pattern, event.Name) + if err != nil { + logger.Errorf("Could not match pattern : %s", err) + continue } - if !matched { - continue + if matched { + logger.Debugf("Matched %s with %s", pattern, event.Name) + break } + } - // before opening the file, check if we need to specifically avoid it. 
(XXX) - skip := false + if !matched { + continue + } - for _, pattern := range f.exclude_regexps { - if pattern.MatchString(event.Name) { - f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) + // before opening the file, check if we need to specifically avoid it. (XXX) + skip := false - skip = true + for _, pattern := range f.exclude_regexps { + if pattern.MatchString(event.Name) { + f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) - break - } - } + skip = true - if skip { - continue + break } + } - f.tailMapMutex.RLock() - if f.tails[event.Name] { - f.tailMapMutex.RUnlock() - // we already have a tail on it, do not start a new one - logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) + if skip { + continue + } - break - } + f.tailMapMutex.RLock() + if f.tails[event.Name] { f.tailMapMutex.RUnlock() - // cf. https://github.com/crowdsecurity/crowdsec/issues/1168 - // do not rely on stat, reclose file immediately as it's opened by Tail - fd, err := os.Open(event.Name) - if err != nil { - f.logger.Errorf("unable to read %s : %s", event.Name, err) - continue - } - if err := fd.Close(); err != nil { - f.logger.Errorf("unable to close %s : %s", event.Name, err) - continue - } + // we already have a tail on it, do not start a new one + logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) - pollFile := false - if f.config.PollWithoutInotify != nil { - pollFile = *f.config.PollWithoutInotify - } else { - networkFS, fsType, err := types.IsNetworkFS(event.Name) - if err != nil { - f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) - } - f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) - if networkFS { - pollFile = true - } - } + break + } + f.tailMapMutex.RUnlock() + // cf. 
https://github.com/crowdsecurity/crowdsec/issues/1168 + // do not rely on stat, reclose file immediately as it's opened by Tail + fd, err := os.Open(event.Name) + if err != nil { + f.logger.Errorf("unable to read %s : %s", event.Name, err) + continue + } - filink, err := os.Lstat(event.Name) + if err = fd.Close(); err != nil { + f.logger.Errorf("unable to close %s : %s", event.Name, err) + continue + } + pollFile := false + if f.config.PollWithoutInotify != nil { + pollFile = *f.config.PollWithoutInotify + } else { + networkFS, fsType, err := types.IsNetworkFS(event.Name) if err != nil { - logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) - continue + f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) } - if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { - logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", event.Name) - } + f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) - //Slightly different parameters for Location, as we want to read the first lines of the newly created file - tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) - if err != nil { - logger.Errorf("Could not start tailing file %s : %s", event.Name, err) - break + if networkFS { + pollFile = true } + } - f.tailMapMutex.Lock() - f.tails[event.Name] = true - f.tailMapMutex.Unlock() - t.Go(func() error { - defer trace.CatchPanic("crowdsec/acquis/tailfile") - return f.tailFile(out, t, tail) - }) + filink, err := os.Lstat(event.Name) + if err != nil { + logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) + continue } + + if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { + logger.Warnf("File %s is a symlink, but inotify polling is 
enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", event.Name) + } + + // Slightly different parameters for Location, as we want to read the first lines of the newly created file + tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) + if err != nil { + logger.Errorf("Could not start tailing file %s : %s", event.Name, err) + break + } + + f.tailMapMutex.Lock() + f.tails[event.Name] = true + f.tailMapMutex.Unlock() + t.Go(func() error { + defer trace.CatchPanic("crowdsec/acquis/tailfile") + return f.tailFile(out, t, tail) + }) case err, ok := <-f.watcher.Errors: if !ok { return nil @@ -572,13 +576,14 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai return nil case <-tail.Dying(): // our tailer is dying - err := tail.Err() errMsg := fmt.Sprintf("file reader of %s died", tail.Filename) + + err := tail.Err() if err != nil { errMsg = fmt.Sprintf(errMsg+" : %s", err) } - logger.Warningf(errMsg) + logger.Warning(errMsg) return nil case line := <-tail.Lines: @@ -616,11 +621,9 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai // we're tailing, it must be real time logs logger.Debugf("pushing %+v", l) - expectMode := types.LIVE - if f.config.UseTimeMachine { - expectMode = types.TIMEMACHINE - } - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: expectMode} + evt := types.MakeEvent(f.config.UseTimeMachine, types.LOG, true) + evt.Line = l + out <- evt } } } @@ -629,8 +632,8 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom var scanner *bufio.Scanner logger := f.logger.WithField("oneshot", filename) - fd, err := os.Open(filename) + fd, err := os.Open(filename) if err != nil { return fmt.Errorf("failed opening %s: %w", filename, err) } @@ -679,7 +682,7 @@ func (f *FileSource) 
readFile(filename string, out chan types.Event, t *tomb.Tom linesRead.With(prometheus.Labels{"source": filename}).Inc() // we're reading logs at once, it must be time-machine buckets - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE, Unmarshaled: make(map[string]interface{})} } } diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 5d38552b3c5..a26e44cc9c7 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -1,6 +1,7 @@ package fileacquisition_test import ( + "context" "fmt" "os" "runtime" @@ -100,6 +101,8 @@ func TestConfigureDSN(t *testing.T) { } func TestOneShot(t *testing.T) { + ctx := context.Background() + permDeniedFile := "/etc/shadow" permDeniedError := "failed opening /etc/shadow: open /etc/shadow: permission denied" @@ -223,7 +226,7 @@ filename: test_files/test_delete.log`, if tc.afterConfigure != nil { tc.afterConfigure() } - err = f.OneShotAcquisition(out, &tomb) + err = f.OneShotAcquisition(ctx, out, &tomb) actualLines := len(out) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -243,6 +246,7 @@ filename: test_files/test_delete.log`, } func TestLiveAcquisition(t *testing.T) { + ctx := context.Background() permDeniedFile := "/etc/shadow" permDeniedError := "unable to read /etc/shadow : open /etc/shadow: permission denied" testPattern := "test_files/*.log" @@ -394,7 +398,7 @@ force_inotify: true`, testPattern), }() } - err = f.StreamingAcquisition(out, &tomb) + err = f.StreamingAcquisition(ctx, out, &tomb) cstest.RequireErrorContains(t, err, tc.expectedErr) if tc.expectedLines != 0 { diff --git a/pkg/acquisition/modules/http/http.go b/pkg/acquisition/modules/http/http.go new file mode 100644 index 00000000000..98af134c84e --- /dev/null +++ b/pkg/acquisition/modules/http/http.go @@ -0,0 +1,416 @@ +package 
httpacquisition + +import ( + "compress/gzip" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "os" + "time" + + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/trace" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +var ( + dataSourceName = "http" +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_httpsource_hits_total", + Help: "Total lines that were read from http source", + }, + []string{"path", "src"}) + +type HttpConfiguration struct { + //IPFilter []string `yaml:"ip_filter"` + //ChunkSize *int64 `yaml:"chunk_size"` + ListenAddr string `yaml:"listen_addr"` + Path string `yaml:"path"` + AuthType string `yaml:"auth_type"` + BasicAuth *BasicAuthConfig `yaml:"basic_auth"` + Headers *map[string]string `yaml:"headers"` + TLS *TLSConfig `yaml:"tls"` + CustomStatusCode *int `yaml:"custom_status_code"` + CustomHeaders *map[string]string `yaml:"custom_headers"` + MaxBodySize *int64 `yaml:"max_body_size"` + Timeout *time.Duration `yaml:"timeout"` + configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type BasicAuthConfig struct { + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +type TLSConfig struct { + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + ServerCert string `yaml:"server_cert"` + ServerKey string `yaml:"server_key"` + CaCert string `yaml:"ca_cert"` +} + +type HTTPSource struct { + metricsLevel int + Config HttpConfiguration + logger *log.Entry + Server *http.Server +} + +func (h *HTTPSource) GetUuid() string { + return h.Config.UniqueId +} + +func (h *HTTPSource) UnmarshalConfig(yamlConfig []byte) error { + h.Config = HttpConfiguration{} + err := yaml.Unmarshal(yamlConfig, &h.Config) + if err != nil { + return fmt.Errorf("cannot 
parse %s datasource configuration: %w", dataSourceName, err) + } + + if h.Config.Mode == "" { + h.Config.Mode = configuration.TAIL_MODE + } + + return nil +} + +func (hc *HttpConfiguration) Validate() error { + if hc.ListenAddr == "" { + return errors.New("listen_addr is required") + } + + if hc.Path == "" { + hc.Path = "/" + } + if hc.Path[0] != '/' { + return errors.New("path must start with /") + } + + switch hc.AuthType { + case "basic_auth": + baseErr := "basic_auth is selected, but" + if hc.BasicAuth == nil { + return errors.New(baseErr + " basic_auth is not provided") + } + if hc.BasicAuth.Username == "" { + return errors.New(baseErr + " username is not provided") + } + if hc.BasicAuth.Password == "" { + return errors.New(baseErr + " password is not provided") + } + case "headers": + if hc.Headers == nil { + return errors.New("headers is selected, but headers is not provided") + } + case "mtls": + if hc.TLS == nil || hc.TLS.CaCert == "" { + return errors.New("mtls is selected, but ca_cert is not provided") + } + default: + return errors.New("invalid auth_type: must be one of basic_auth, headers, mtls") + } + + if hc.TLS != nil { + if hc.TLS.ServerCert == "" { + return errors.New("server_cert is required") + } + if hc.TLS.ServerKey == "" { + return errors.New("server_key is required") + } + } + + if hc.MaxBodySize != nil && *hc.MaxBodySize <= 0 { + return errors.New("max_body_size must be positive") + } + + /* + if hc.ChunkSize != nil && *hc.ChunkSize <= 0 { + return errors.New("chunk_size must be positive") + } + */ + + if hc.CustomStatusCode != nil { + statusText := http.StatusText(*hc.CustomStatusCode) + if statusText == "" { + return errors.New("invalid HTTP status code") + } + } + + return nil +} + +func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { + h.logger = logger + h.metricsLevel = MetricsLevel + err := h.UnmarshalConfig(yamlConfig) + if err != nil { + return err + } + + if err := h.Config.Validate(); err 
!= nil { + return fmt.Errorf("invalid configuration: %w", err) + } + + return nil +} + +func (h *HTTPSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { + return fmt.Errorf("%s datasource does not support command-line acquisition", dataSourceName) +} + +func (h *HTTPSource) GetMode() string { + return h.Config.Mode +} + +func (h *HTTPSource) GetName() string { + return dataSourceName +} + +func (h *HTTPSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + return fmt.Errorf("%s datasource does not support one-shot acquisition", dataSourceName) +} + +func (h *HTTPSource) CanRun() error { + return nil +} + +func (h *HTTPSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (h *HTTPSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (h *HTTPSource) Dump() interface{} { + return h +} + +func (hc *HttpConfiguration) NewTLSConfig() (*tls.Config, error) { + tlsConfig := tls.Config{ + InsecureSkipVerify: hc.TLS.InsecureSkipVerify, + } + + if hc.TLS.ServerCert != "" && hc.TLS.ServerKey != "" { + cert, err := tls.LoadX509KeyPair(hc.TLS.ServerCert, hc.TLS.ServerKey) + if err != nil { + return nil, fmt.Errorf("failed to load server cert/key: %w", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + + if hc.AuthType == "mtls" && hc.TLS.CaCert != "" { + caCert, err := os.ReadFile(hc.TLS.CaCert) + if err != nil { + return nil, fmt.Errorf("failed to read ca cert: %w", err) + } + + caCertPool, err := x509.SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to load system cert pool: %w", err) + } + + if caCertPool == nil { + caCertPool = x509.NewCertPool() + } + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.ClientCAs = caCertPool + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + } + + return &tlsConfig, nil +} + +func authorizeRequest(r *http.Request, hc *HttpConfiguration) error 
{ + if hc.AuthType == "basic_auth" { + username, password, ok := r.BasicAuth() + if !ok { + return errors.New("missing basic auth") + } + if username != hc.BasicAuth.Username || password != hc.BasicAuth.Password { + return errors.New("invalid basic auth") + } + } + if hc.AuthType == "headers" { + for key, value := range *hc.Headers { + if r.Header.Get(key) != value { + return errors.New("invalid headers") + } + } + } + return nil +} + +func (h *HTTPSource) processRequest(w http.ResponseWriter, r *http.Request, hc *HttpConfiguration, out chan types.Event) error { + if hc.MaxBodySize != nil && r.ContentLength > *hc.MaxBodySize { + w.WriteHeader(http.StatusRequestEntityTooLarge) + return fmt.Errorf("body size exceeds max body size: %d > %d", r.ContentLength, *hc.MaxBodySize) + } + + srcHost, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return err + } + + defer r.Body.Close() + + reader := r.Body + + if r.Header.Get("Content-Encoding") == "gzip" { + reader, err = gzip.NewReader(r.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return fmt.Errorf("failed to create gzip reader: %w", err) + } + defer reader.Close() + } + + decoder := json.NewDecoder(reader) + for { + var message json.RawMessage + + if err := decoder.Decode(&message); err != nil { + if err == io.EOF { + break + } + w.WriteHeader(http.StatusBadRequest) + return fmt.Errorf("failed to decode: %w", err) + } + + line := types.Line{ + Raw: string(message), + Src: srcHost, + Time: time.Now().UTC(), + Labels: hc.Labels, + Process: true, + Module: h.GetName(), + } + + if h.metricsLevel == configuration.METRICS_AGGREGATE { + line.Src = hc.Path + } + + evt := types.MakeEvent(h.Config.UseTimeMachine, types.LOG, true) + evt.Line = line + + if h.metricsLevel == configuration.METRICS_AGGREGATE { + linesRead.With(prometheus.Labels{"path": hc.Path, "src": ""}).Inc() + } else if h.metricsLevel == configuration.METRICS_FULL { + linesRead.With(prometheus.Labels{"path": hc.Path, "src": 
srcHost}).Inc() + } + + h.logger.Tracef("line to send: %+v", line) + out <- evt + } + + return nil +} + +func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { + mux := http.NewServeMux() + mux.HandleFunc(h.Config.Path, func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + h.logger.Errorf("method not allowed: %s", r.Method) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return + } + if err := authorizeRequest(r, &h.Config); err != nil { + h.logger.Errorf("failed to authorize request from '%s': %s", r.RemoteAddr, err) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + err := h.processRequest(w, r, &h.Config, out) + if err != nil { + h.logger.Errorf("failed to process request from '%s': %s", r.RemoteAddr, err) + return + } + + if h.Config.CustomHeaders != nil { + for key, value := range *h.Config.CustomHeaders { + w.Header().Set(key, value) + } + } + if h.Config.CustomStatusCode != nil { + w.WriteHeader(*h.Config.CustomStatusCode) + } else { + w.WriteHeader(http.StatusOK) + } + + w.Write([]byte("OK")) + }) + + h.Server = &http.Server{ + Addr: h.Config.ListenAddr, + Handler: mux, + } + + if h.Config.Timeout != nil { + h.Server.ReadTimeout = *h.Config.Timeout + } + + if h.Config.TLS != nil { + tlsConfig, err := h.Config.NewTLSConfig() + if err != nil { + return fmt.Errorf("failed to create tls config: %w", err) + } + h.logger.Tracef("tls config: %+v", tlsConfig) + h.Server.TLSConfig = tlsConfig + } + + t.Go(func() error { + defer trace.CatchPanic("crowdsec/acquis/http/server") + if h.Config.TLS != nil { + h.logger.Infof("start https server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServeTLS(h.Config.TLS.ServerCert, h.Config.TLS.ServerKey) + if err != nil && err != http.ErrServerClosed { + return fmt.Errorf("https server failed: %w", err) + } + } else { + h.logger.Infof("start http server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServe() + if err != 
nil && err != http.ErrServerClosed { + return fmt.Errorf("http server failed: %w", err) + } + } + return nil + }) + + //nolint //fp + for { + select { + case <-t.Dying(): + h.logger.Infof("%s datasource stopping", dataSourceName) + if err := h.Server.Close(); err != nil { + return fmt.Errorf("while closing %s server: %w", dataSourceName, err) + } + return nil + } + } +} + +func (h *HTTPSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + h.logger.Debugf("start http server on %s", h.Config.ListenAddr) + + t.Go(func() error { + defer trace.CatchPanic("crowdsec/acquis/http/live") + return h.RunServer(out, t) + }) + + return nil +} diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go new file mode 100644 index 00000000000..f89ba7aa8ba --- /dev/null +++ b/pkg/acquisition/modules/http/http_test.go @@ -0,0 +1,785 @@ +package httpacquisition + +import ( + "compress/gzip" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + testHTTPServerAddr = "http://127.0.0.1:8080" + testHTTPServerAddrTLS = "https://127.0.0.1:8080" +) + +func TestConfigure(t *testing.T) { + tests := []struct { + config string + expectedErr string + }{ + { + config: ` +foobar: bla`, + expectedErr: "invalid configuration: listen_addr is required", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: wrongpath`, + expectedErr: "invalid configuration: path must start with /", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth`, + expectedErr: "invalid configuration: basic_auth is selected, but 
basic_auth is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers`, + expectedErr: "invalid configuration: headers is selected, but headers is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + username: 132`, + expectedErr: "invalid configuration: basic_auth is selected, but password is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + password: 132`, + expectedErr: "invalid configuration: basic_auth is selected, but username is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers:`, + expectedErr: "invalid configuration: headers is selected, but headers is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: toto`, + expectedErr: "invalid configuration: invalid auth_type: must be one of basic_auth, headers, mtls", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +tls: + server_key: key`, + expectedErr: "invalid configuration: server_cert is required", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +tls: + server_cert: cert`, + expectedErr: "invalid configuration: server_key is required", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: mtls +tls: + server_cert: cert + server_key: key`, + expectedErr: "invalid configuration: mtls is selected, but ca_cert is not provided", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +max_body_size: 0`, + expectedErr: "invalid configuration: max_body_size must be positive", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: 
/test +auth_type: headers +headers: + key: value +timeout: toto`, + expectedErr: "cannot parse http datasource configuration: yaml: unmarshal errors:\n line 8: cannot unmarshal !!str `toto` into time.Duration", + }, + { + config: ` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: value +custom_status_code: 999`, + expectedErr: "invalid configuration: invalid HTTP status code", + }, + } + + subLogger := log.WithFields(log.Fields{ + "type": "http", + }) + + for _, test := range tests { + h := HTTPSource{} + err := h.Configure([]byte(test.config), subLogger, 0) + cstest.AssertErrorContains(t, err, test.expectedErr) + } +} + +func TestGetUuid(t *testing.T) { + h := HTTPSource{} + h.Config.UniqueId = "test" + assert.Equal(t, "test", h.GetUuid()) +} + +func TestUnmarshalConfig(t *testing.T) { + h := HTTPSource{} + err := h.UnmarshalConfig([]byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: 15 + auth_type: headers`)) + cstest.AssertErrorMessage(t, err, "cannot parse http datasource configuration: yaml: line 4: found a tab character that violates indentation") +} + +func TestConfigureByDSN(t *testing.T) { + h := HTTPSource{} + err := h.ConfigureByDSN("http://localhost:8080/test", map[string]string{}, log.WithFields(log.Fields{ + "type": "http", + }), "test") + cstest.AssertErrorMessage( + t, + err, + "http datasource does not support command-line acquisition", + ) +} + +func TestGetMode(t *testing.T) { + h := HTTPSource{} + h.Config.Mode = "test" + assert.Equal(t, "test", h.GetMode()) +} + +func TestGetName(t *testing.T) { + h := HTTPSource{} + assert.Equal(t, "http", h.GetName()) +} + +func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *tomb.Tomb) { + ctx := context.Background() + subLogger := log.WithFields(log.Fields{ + "type": "http", + }) + err := h.Configure(config, subLogger, metricLevel) + require.NoError(t, err) + tomb := tomb.Tomb{} + out := make(chan 
types.Event) + err = h.StreamingAcquisition(ctx, out, &tomb) + require.NoError(t, err) + + for _, metric := range h.GetMetrics() { + prometheus.Register(metric) + } + + return out, &tomb +} + +func TestStreamingAcquisitionWrongHTTPMethod(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + username: test + password: test`), 0) + + time.Sleep(1 * time.Second) + + res, err := http.Get(fmt.Sprintf("%s/test", testHTTPServerAddr)) + require.NoError(t, err) + assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() + +} + +func TestStreamingAcquisitionUnknownPath(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + username: test + password: test`), 0) + + time.Sleep(1 * time.Second) + + res, err := http.Get(fmt.Sprintf("%s/unknown", testHTTPServerAddr)) + require.NoError(t, err) + assert.Equal(t, http.StatusNotFound, res.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionBasicAuth(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: basic_auth +basic_auth: + username: test + password: test`), 0) + + time.Sleep(1 * time.Second) + + client := &http.Client{} + + resp, err := http.Post(fmt.Sprintf("%s/test", testHTTPServerAddr), "application/json", strings.NewReader("test")) + require.NoError(t, err) + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("test")) + require.NoError(t, err) + req.SetBasicAuth("test", "WrongPassword") + + resp, err = client.Do(req) + require.NoError(t, err) + assert.Equal(t, 
http.StatusUnauthorized, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionBadHeaders(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test`), 0) + + time.Sleep(1 * time.Second) + + client := &http.Client{} + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("test")) + require.NoError(t, err) + + req.Header.Add("Key", "wrong") + resp, err := client.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionMaxBodySize(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test +max_body_size: 5`), 0) + + time.Sleep(1 * time.Second) + + client := &http.Client{} + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("testtest")) + require.NoError(t, err) + + req.Header.Add("Key", "test") + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusRequestEntityTooLarge, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionSuccess(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test`), 2) + + time.Sleep(1 * time.Second) + rawEvt := `{"test": "test"}` + + errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) + + client := &http.Client{} + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(rawEvt)) + require.NoError(t, err) + + req.Header.Add("Key", 
"test") + resp, err := client.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 1) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionCustomStatusCodeAndCustomHeaders(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test +custom_status_code: 201 +custom_headers: + success: true`), 2) + + time.Sleep(1 * time.Second) + + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(rawEvt)) + require.NoError(t, err) + + req.Header.Add("Key", "test") + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusCreated, resp.StatusCode) + assert.Equal(t, "true", resp.Header.Get("Success")) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 1) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +type slowReader struct { + delay time.Duration + body []byte + index int +} + +func (sr *slowReader) Read(p []byte) (int, error) { + if sr.index >= len(sr.body) { + return 0, io.EOF + } + time.Sleep(sr.delay) // Simulate a delay in reading + n := copy(p, sr.body[sr.index:]) + sr.index += n + return n, nil +} + +func assertEvents(out chan types.Event, expected []string, errChan chan error) { + readLines := []types.Event{} + + for i := 0; i < len(expected); i++ { + select { + case event := <-out: + readLines = append(readLines, event) + case <-time.After(2 * time.Second): + errChan <- errors.New("timeout waiting for event") + return + } + } + + if len(readLines) != len(expected) { + errChan <- fmt.Errorf("expected %d lines, got %d", len(expected), len(readLines)) + return 
+ } + + for i, evt := range readLines { + if evt.Line.Raw != expected[i] { + errChan <- fmt.Errorf(`expected %s, got '%+v'`, expected, evt.Line.Raw) + return + } + if evt.Line.Src != "127.0.0.1" { + errChan <- fmt.Errorf("expected '127.0.0.1', got '%s'", evt.Line.Src) + return + } + if evt.Line.Module != "http" { + errChan <- fmt.Errorf("expected 'http', got '%s'", evt.Line.Module) + return + } + } + errChan <- nil +} + +func TestStreamingAcquisitionTimeout(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test +timeout: 1s`), 0) + + time.Sleep(1 * time.Second) + + slow := &slowReader{ + delay: 2 * time.Second, + body: []byte(`{"test": "delayed_payload"}`), + } + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), slow) + require.NoError(t, err) + + req.Header.Add("Key", "test") + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionTLSHTTPRequest(t *testing.T) { + h := &HTTPSource{} + _, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +auth_type: mtls +path: /test +tls: + server_cert: testdata/server.crt + server_key: testdata/server.key + ca_cert: testdata/ca.crt`), 0) + + time.Sleep(1 * time.Second) + + resp, err := http.Post(fmt.Sprintf("%s/test", testHTTPServerAddr), "application/json", strings.NewReader("test")) + require.NoError(t, err) + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionTLSWithHeadersAuthSuccess(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 
127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test +tls: + server_cert: testdata/server.crt + server_key: testdata/server.key +`), 0) + + time.Sleep(1 * time.Second) + + caCert, err := os.ReadFile("testdata/server.crt") + require.NoError(t, err) + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tlsConfig := &tls.Config{ + RootCAs: caCertPool, + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } + + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddrTLS), strings.NewReader(rawEvt)) + require.NoError(t, err) + + req.Header.Add("Key", "test") + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 0) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionMTLS(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: mtls +tls: + server_cert: testdata/server.crt + server_key: testdata/server.key + ca_cert: testdata/ca.crt`), 0) + + time.Sleep(1 * time.Second) + + // init client cert + cert, err := tls.LoadX509KeyPair("testdata/client.crt", "testdata/client.key") + require.NoError(t, err) + + caCert, err := os.ReadFile("testdata/ca.crt") + require.NoError(t, err) + + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + RootCAs: caCertPool, + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } + + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) + + req, err := 
http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddrTLS), strings.NewReader(rawEvt)) + require.NoError(t, err) + + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 0) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionGzipData(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test`), 2) + + time.Sleep(1 * time.Second) + + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, []string{rawEvt, rawEvt}, errChan) + + var b strings.Builder + gz := gzip.NewWriter(&b) + + _, err := gz.Write([]byte(rawEvt)) + require.NoError(t, err) + + _, err = gz.Write([]byte(rawEvt)) + require.NoError(t, err) + + err = gz.Close() + require.NoError(t, err) + + // send gzipped compressed data + client := &http.Client{} + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(b.String())) + require.NoError(t, err) + + req.Header.Add("Key", "test") + req.Header.Add("Content-Encoding", "gzip") + req.Header.Add("Content-Type", "application/json") + + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 2) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func TestStreamingAcquisitionNDJson(t *testing.T) { + h := &HTTPSource{} + out, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_addr: 127.0.0.1:8080 +path: /test +auth_type: headers +headers: + key: test`), 2) + + time.Sleep(1 * time.Second) + rawEvt := `{"test": "test"}` + + errChan := make(chan error) + go assertEvents(out, []string{rawEvt, rawEvt}, errChan) + + client := 
&http.Client{} + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(fmt.Sprintf("%s\n%s\n", rawEvt, rawEvt))) + + require.NoError(t, err) + + req.Header.Add("Key", "test") + req.Header.Add("Content-Type", "application/x-ndjson") + + resp, err := client.Do(req) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, h.GetMetrics(), 2) + + h.Server.Close() + tomb.Kill(nil) + tomb.Wait() +} + +func assertMetrics(t *testing.T, metrics []prometheus.Collector, expected int) { + promMetrics, err := prometheus.DefaultGatherer.Gather() + require.NoError(t, err) + + isExist := false + for _, metricFamily := range promMetrics { + if metricFamily.GetName() == "cs_httpsource_hits_total" { + isExist = true + assert.Len(t, metricFamily.GetMetric(), 1) + for _, metric := range metricFamily.GetMetric() { + assert.InDelta(t, float64(expected), metric.GetCounter().GetValue(), 0.000001) + labels := metric.GetLabel() + assert.Len(t, labels, 2) + assert.Equal(t, "path", labels[0].GetName()) + assert.Equal(t, "/test", labels[0].GetValue()) + assert.Equal(t, "src", labels[1].GetName()) + assert.Equal(t, "127.0.0.1", labels[1].GetValue()) + } + } + } + if !isExist && expected > 0 { + t.Fatalf("expected metric cs_httpsource_hits_total not found") + } + + for _, metric := range metrics { + metric.(*prometheus.CounterVec).Reset() + } +} diff --git a/pkg/acquisition/modules/http/testdata/ca.crt b/pkg/acquisition/modules/http/testdata/ca.crt new file mode 100644 index 00000000000..ac81b9db8a6 --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/ca.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIIDvzCCAqegAwIBAgIUHQfsFpWkCy7gAmDa3A6O+y5CvAswDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCRlIxFjAUBgNVBAgTDUlsZS1kZS1GcmFuY2UxDjAMBgNV +BAcTBVBhcmlzMREwDwYDVQQKEwhDcm93ZHNlYzERMA8GA1UECxMIQ3Jvd2RzZWMx 
+EjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0yNDEwMjMxMDAxMDBaFw0yOTEwMjIxMDAx +MDBaMG8xCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMQ4wDAYD +VQQHEwVQYXJpczERMA8GA1UEChMIQ3Jvd2RzZWMxETAPBgNVBAsTCENyb3dkc2Vj +MRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCZSR2/A24bpVHSiEeSlelfdA32uhk9wHkauwy2qxos/G/UmKG/dgWrHzRh +LawlFVHtVn4u7Hjqz2y2EsH3bX42jC5NMVARgXIOBr1dE6F5/bPqA6SoVgkDm9wh +ZBigyAMxYsR4+3ahuf0pQflBShKrLZ1UYoe6tQXob7l3x5vThEhNkBawBkLfWpj7 +7Imm1tGyEZdxCMkT400KRtSmJRrnpiOCUosnacwgp7MCbKWOIOng07Eh16cVUiuI +BthWU/LycIuac2xaD9PFpeK/MpwASRRPXZgPUhiZuaa7vttD0phCdDaS46Oln5/7 +tFRZH0alBZmkpVZJCWAP4ujIA3vLAgMBAAGjUzBRMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTwpg+WN1nZJs4gj5hfoa+fMSZjGTAP +BgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAZuOWT8zHcwbWvC6Jm +/ccgB/U7SbeIYFJrCZd9mTyqsgnkFNH8yJ5F4dXXtPXr+SO/uWWa3G5hams3qVFf +zWzzPDQdyhUhfh5fjUHR2RsSGBmCxcapYHpVvAP5aY1/ujYrXMvAJV0hfDO2tGHb +rveuJxhe8ymQ1Yb2u9NcmI1HG9IVt3Airz4gAIUJWbFvRigky0bukfddOkfiUiaF +DMPJQO6HAj8d8ctSHHVZWzhAInZ1pDg6HIHYF44m1tT27pSQoi0ZFocskDi/fC2f +EIF0nu5fRLUS6BZEfpnDi9U0lbJ/kUrgT5IFHMFqXdRpDqcnXpJZhYtp5l6GoqjM +gT33 +-----END CERTIFICATE----- diff --git a/pkg/acquisition/modules/http/testdata/client.crt b/pkg/acquisition/modules/http/testdata/client.crt new file mode 100644 index 00000000000..55efdddad09 --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/client.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID7jCCAtagAwIBAgIUJMTPh3oPJLPgsnb9T85ieb4EuOQwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCRlIxFjAUBgNVBAgTDUlsZS1kZS1GcmFuY2UxDjAMBgNV +BAcTBVBhcmlzMREwDwYDVQQKEwhDcm93ZHNlYzERMA8GA1UECxMIQ3Jvd2RzZWMx +EjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0yNDEwMjMxMDQ2MDBaFw0yNTEwMjMxMDQ2 +MDBaMHIxCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMQ4wDAYD +VQQHEwVQYXJpczERMA8GA1UEChMIQ3Jvd2RzZWMxFzAVBgNVBAsTDlRlc3Rpbmcg +Y2xpZW50MQ8wDQYDVQQDEwZjbGllbnQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDAUOdpRieRrrH6krUjgcjLgJg6TzoWAb/iv6rfcioX1L9bj9fZSkwu 
+GqKzXX/PceIXElzQgiGJZErbJtnTzhGS80QgtAB8BwWQIT2zgoGcYJf7pPFvmcMM +qMGFwK0dMC+LHPk+ePtFz8dskI2XJ8jgBdtuZcnDblMuVGtjYT6n0rszvRdo118+ +mlGCLPzOfsO1JdOqLWAR88yZfqCFt1TrwmzpRT1crJQeM6i7muw4aO0L7uSek9QM +6APHz0QexSq7/zHOtRjA4jnJbDzZJHRlwOdlsNU9cmTz6uWIQXlg+2ovD55YurNy ++jYfmfDYpimhoeGf54zaETp1fTuTJYpxAgMBAAGjfzB9MA4GA1UdDwEB/wQEAwIF +oDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAd +BgNVHQ4EFgQUmH0/7RuKnoW7sEK4Cr8eVNGbb8swHwYDVR0jBBgwFoAU8KYPljdZ +2SbOII+YX6GvnzEmYxkwDQYJKoZIhvcNAQELBQADggEBAHVn9Zuoyxu9iTFoyJ50 +e/XKcmt2uK2M1x+ap2Av7Wb/Omikx/R2YPq7994BfiUCAezY2YtreZzkE6Io1wNM +qApijEJnlqEmOXiYJqlF89QrCcsAsz6lfaqitYBZSL3o4KT+7/uUDVxgNEjEksRz +9qy6DFBLvyhxbOM2zDEV+MVfemBWSvNiojHqXzDBkZnBHHclJLuIKsXDZDGhKbNd +hsoGU00RLevvcUpUJ3a68ekgwiYFJifm0uyfmao9lmiB3i+8ZW3Q4rbwHtD+U7U2 +3n+U5PkhiUAveuMfrvUMzsTolZiop9ZLtcALDUFaqyr4tjfVOf5+CGjiluio7oE1 +UYg= +-----END CERTIFICATE----- diff --git a/pkg/acquisition/modules/http/testdata/client.key b/pkg/acquisition/modules/http/testdata/client.key new file mode 100644 index 00000000000..f8ef2efbd58 --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwFDnaUYnka6x+pK1I4HIy4CYOk86FgG/4r+q33IqF9S/W4/X +2UpMLhqis11/z3HiFxJc0IIhiWRK2ybZ084RkvNEILQAfAcFkCE9s4KBnGCX+6Tx +b5nDDKjBhcCtHTAvixz5Pnj7Rc/HbJCNlyfI4AXbbmXJw25TLlRrY2E+p9K7M70X +aNdfPppRgiz8zn7DtSXTqi1gEfPMmX6ghbdU68Js6UU9XKyUHjOou5rsOGjtC+7k +npPUDOgDx89EHsUqu/8xzrUYwOI5yWw82SR0ZcDnZbDVPXJk8+rliEF5YPtqLw+e +WLqzcvo2H5nw2KYpoaHhn+eM2hE6dX07kyWKcQIDAQABAoIBAQChriKuza0MfBri +9x3UCRN/is/wDZVe1P+2KL8F9ZvPxytNVeP4qM7c38WzF8MQ6sRR8z0WiqCZOjj4 +f3QX7iG2MlAvUkUqAFk778ZIuUov5sE/bU8RLOrfJKz1vqOLa2w8/xHH5LwS1/jn +m6t9zZPCSwpMiMSUSZci1xQlS6b6POZMjeqLPqv9cP8PJNv9UNrHFcQnQi1iwKJH +MJ7CQI3R8FSeGad3P7tB9YDaBm7hHmd/TevuFkymcKNT44XBSgddPDfgKui6sHTY +QQWgWI9VGVO350ZBLRLkrk8wboY4vc15qbBzYFG66WiR/tNdLt3rDYxpcXaDvcQy +e47mYNVxAoGBAMFsUmPDssqzmOkmZxHDM+VmgHYPXjDqQdE299FtuClobUW4iU4g 
+By7o84aCIBQz2sp9f1KM+10lr+Bqw3s7QBbR5M67PA8Zm45DL9t70NR/NZHGzFRD +BR/NMbwzCqNtY2UGDhYQLGhW8heAwsYwir8ZqmOfKTd9aY1pu/S8m9AlAoGBAP6I +483EIN8R5y+beGcGynYeIrH5Gc+W2FxWIW9jh/G7vRbhMlW4z0GxV3uEAYmOlBH2 +AqUkV6+uzU0P4B/m3vCYqLycBVDwifJazDj9nskVL5kGMxia62iwDMXs5nqNS4WJ +ZM5Gl2xIiwmgWnYnujM3eKF2wbm439wj4na80SldAoGANdIqatA9o+GtntKsw2iJ +vD91Z2SHVR0aC1k8Q+4/3GXOYiQjMLYAybDQcpEq0/RJ4SZik1nfZ9/gvJV4p4Wp +I7Br9opq/9ikTEWtv2kIhtiO02151ciAWIUEXdXmE+uQSMASk1kUwkPPQXL2v6cq +NFqz6tyS33nqMQtG3abNxHECgYA4AEA2nmcpDRRTSh50dG8JC9pQU+EU5jhWIHEc +w8Y+LjMNHKDpcU7QQkdgGowICsGTLhAo61ULhycORGboPfBg+QVu8djNlQ6Urttt +0ocj8LBXN6D4UeVnVAyLY3LWFc4+5Bq0s51PKqrEhG5Cvrzd1d+JjspSpVVDZvXF +cAeI1QKBgC/cMN3+2Sc+2biu46DnkdYpdF/N0VGMOgzz+unSVD4RA2mEJ9UdwGga +feshtrtcroHtEmc+WDYgTTnAq1MbsVFQYIwZ5fL/GJ1R8ccaWiPuX2HrKALKG4Y3 +CMFpDUWhRgtaBsmuOpUq3FeS5cyPNMHk6axL1KyFoJk9AgfhqhTp +-----END RSA PRIVATE KEY----- diff --git a/pkg/acquisition/modules/http/testdata/server.crt b/pkg/acquisition/modules/http/testdata/server.crt new file mode 100644 index 00000000000..7a02c606c9d --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/server.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIUU3F6URi0oTe9ontkf7JqXOo89QYwDQYJKoZIhvcNAQEL +BQAwbzELMAkGA1UEBhMCRlIxFjAUBgNVBAgTDUlsZS1kZS1GcmFuY2UxDjAMBgNV +BAcTBVBhcmlzMREwDwYDVQQKEwhDcm93ZHNlYzERMA8GA1UECxMIQ3Jvd2RzZWMx +EjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0yNDEwMjMxMDAzMDBaFw0yNTEwMjMxMDAz +MDBaMG8xCzAJBgNVBAYTAkZSMRYwFAYDVQQIEw1JbGUtZGUtRnJhbmNlMQ4wDAYD +VQQHEwVQYXJpczERMA8GA1UEChMIQ3Jvd2RzZWMxETAPBgNVBAsTCENyb3dkc2Vj +MRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC/lnUubjBGe5x0LgIE5GeG52LRzj99iLWuvey4qbSwFZ07ECgv+JttVwDm +AjEeakj2ZR46WHvHAR9eBNkRCORyWX0iKVIzm09PXYi80KtwGLaA8YMEio9/08Cc ++LS0TuP0yiOcw+btrhmvvauDzcQhA6u55q8anCZiF2BlHfX9Sh6QKewA3NhOkzbU +VTxqrOqfcRsGNub7dheqfP5bfrPkF6Y6l/0Fhyx0NMsu1zaQ0hCls2hkTf0Y3XGt +IQNWoN22seexR3qRmPf0j3jBa0qOmGgd6kAd+YpsjDblgCNUIJZiVj51fVb0sGRx 
+ShkfKGU6t0eznTWPCqswujO/sn+pAgMBAAGjejB4MA4GA1UdDwEB/wQEAwIFoDAd +BgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNV +HQ4EFgQUOiIF+7Wzx1J8Ki3DiBfx+E6zlSUwGgYDVR0RBBMwEYIJbG9jYWxob3N0 +hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQA0dzlhBr/0wXPyj/iWxMOXxZ1FNJ9f +lxBMhLAgX0WrT2ys+284J7Hcn0lJeqelluYpmeKn9vmCAEj3MmUmHzZyf//lhuUJ +0DlYWIHUsGaJHJ7A+1hQqrcXHhkcRy5WGIM9VoddKbBbg2b6qzTSvxn8EnuD7H4h +28wLyGLCzsSXoVcAB8u+svYt29TPuy6xmMAokyIShV8FsE77fjVTgtCuxmx1PKv3 +zd6+uEae7bbZ+GJH1zKF0vokejQvmByt+YuIXlNbMseaMUeDdpy+6qlRvbbN1dyp +rkQXfWvidMfSue5nH/akAn83v/CdKxG6tfW83d9Rud3naabUkywALDng +-----END CERTIFICATE----- diff --git a/pkg/acquisition/modules/http/testdata/server.key b/pkg/acquisition/modules/http/testdata/server.key new file mode 100644 index 00000000000..4d0ee53b4c2 --- /dev/null +++ b/pkg/acquisition/modules/http/testdata/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAv5Z1Lm4wRnucdC4CBORnhudi0c4/fYi1rr3suKm0sBWdOxAo +L/ibbVcA5gIxHmpI9mUeOlh7xwEfXgTZEQjkcll9IilSM5tPT12IvNCrcBi2gPGD +BIqPf9PAnPi0tE7j9MojnMPm7a4Zr72rg83EIQOrueavGpwmYhdgZR31/UoekCns +ANzYTpM21FU8aqzqn3EbBjbm+3YXqnz+W36z5BemOpf9BYcsdDTLLtc2kNIQpbNo +ZE39GN1xrSEDVqDdtrHnsUd6kZj39I94wWtKjphoHepAHfmKbIw25YAjVCCWYlY+ +dX1W9LBkcUoZHyhlOrdHs501jwqrMLozv7J/qQIDAQABAoIBAF1Vd/rJlV0Q5RQ4 +QaWOe9zdpmedeZK3YgMh5UvE6RCLRxC5+0n7bASlSPvEf5dYofjfJA26g3pcUqKj +6/d/hIMsk2hsBu67L7TzVSTe51XxxB8nCPPSaLwWNZSDGM1qTWU4gIbjbQHHOh5C +YWcRfAW1WxhyiEWHYq+QwdYg9XCRrSg1UzvVvW1Yt2wDGcSZP5whbXipfw3BITDs +XU7ODYNkU1sjIzQZwzVGxOf9qKdhZFZ26Vhoz8OJNMLyJxY7EspuwR7HbDGt11Pb +CxOt/BV44LwdVYeqh57oIKtckQW33W/6EeaWr7GfMzyH5WSrsOJoK5IJVrZaPTcS +QiMYLA0CgYEA9vMVsGshBl3TeRGaU3XLHqooXD4kszbdnjfPrwGlfCO/iybhDqo5 +WFypM/bYcIWzbTez/ihufHEHPSCUbFEcN4B+oczGcuxTcZjFyvJYvq2ycxPUiDIi +JnVUcVxgh1Yn39+CsQ/b6meP7MumTD2P3I87CeQGlWTO5Ys9mdw0BjcCgYEAxpv1 +64l5UoFJGr4yElNKDIKnhEFbJZsLGKiiuVXcS1QVHW5Az5ar9fPxuepyHpz416l3 +ppncuhJiUIP+jbu5e0s0LsN46mLS3wkHLgYJj06CNT3uOSLSg1iFl7DusdbyiaA7 
+wEJ/aotS1NZ4XaeryAWHwYJ6Kag3nz6NV3ZYuR8CgYEAxAFCuMj+6F+2RsTa+d1n +v8oMyNImLPyiQD9KHzyuTW7OTDMqtIoVg/Xf8re9KOpl9I0e1t7eevT3auQeCi8C +t2bMm7290V+UB3jbnO5n08hn+ADIUuV/x4ie4m8QyrpuYbm0sLbGtTFHwgoNzzuZ +oNUqZfpP42mk8fpnhWSLAlcCgYEAgpY7XRI4HkJ5ocbav2faMV2a7X/XgWNvKViA +HeJRhYoUlBRRMuz7xi0OjFKVlIFbsNlxna5fDk1WLWCMd/6tl168Qd8u2tX9lr6l +5OH9WSeiv4Un5JN73PbQaAvi9jXBpTIg92oBwzk2TlFyNQoxDcRtHZQ/5LIBWIhV +gOOEtLsCgYEA1wbGc4XlH+/nXVsvx7gmfK8pZG8XA4/ToeIEURwPYrxtQZLB4iZs +aqWGgIwiB4F4UkuKZIjMrgInU9y0fG6EL96Qty7Yjh7dGy1vJTZl6C+QU6o4sEwl +r5Id5BNLEaqISWQ0LvzfwdfABYlvFfBdaGbzUzLEitD79eyhxuNEOBw= +-----END RSA PRIVATE KEY----- diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 762dfe9ba12..27f20b9f446 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -65,8 +65,8 @@ func readLine(scanner *bufio.Scanner, out chan string, errChan chan error) error return nil } -func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) error { - ctx, cancel := context.WithCancel(context.Background()) +func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + ctx, cancel := context.WithCancel(ctx) cmd := exec.CommandContext(ctx, journalctlCmd, j.args...) 
stdout, err := cmd.StdoutPipe() @@ -113,7 +113,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err return readLine(stdoutscanner, stdoutChan, errChan) }) t.Go(func() error { - //looks like journalctl closes stderr quite early, so ignore its status (but not its output) + // looks like journalctl closes stderr quite early, so ignore its status (but not its output) return readLine(stderrScanner, stderrChan, nil) }) @@ -122,7 +122,7 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err case <-t.Dying(): logger.Infof("journalctl datasource %s stopping", j.src) cancel() - cmd.Wait() //avoid zombie process + cmd.Wait() // avoid zombie process return nil case stdoutLine := <-stdoutChan: l := types.Line{} @@ -136,12 +136,9 @@ func (j *JournalCtlSource) runJournalCtl(out chan types.Event, t *tomb.Tomb) err if j.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": j.src}).Inc() } - var evt types.Event - if !j.config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + + evt := types.MakeEvent(j.config.UseTimeMachine, types.LOG, true) + evt.Line = l out <- evt case stderrLine := <-stderrChan: logger.Warnf("Got stderr message : %s", stderrLine) @@ -217,13 +214,13 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, j.config.Labels = labels j.config.UniqueId = uuid - //format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2 + // format for the DSN is : journalctl://filters=FILTER1&filters=FILTER2 if !strings.HasPrefix(dsn, "journalctl://") { return fmt.Errorf("invalid DSN %s for journalctl source, must start with journalctl://", dsn) } qs := strings.TrimPrefix(dsn, "journalctl://") - if len(qs) == 0 { + if qs == "" { return errors.New("empty journalctl:// DSN") } @@ -262,26 +259,27 @@ 
func (j *JournalCtlSource) GetName() string { return "journalctl" } -func (j *JournalCtlSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (j *JournalCtlSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { defer trace.CatchPanic("crowdsec/acquis/journalctl/oneshot") - err := j.runJournalCtl(out, t) + err := j.runJournalCtl(ctx, out, t) j.logger.Debug("Oneshot journalctl acquisition is done") return err - } -func (j *JournalCtlSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (j *JournalCtlSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/journalctl/streaming") - return j.runJournalCtl(out, t) + return j.runJournalCtl(ctx, out, t) }) return nil } + func (j *JournalCtlSource) CanRun() error { - //TODO: add a more precise check on version or something ? + // TODO: add a more precise check on version or something ? 
_, err := exec.LookPath(journalctlCmd) return err } + func (j *JournalCtlSource) Dump() interface{} { return j } diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 53e2d0802ad..687067c1881 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -1,6 +1,7 @@ package journalctlacquisition import ( + "context" "os" "os/exec" "path/filepath" @@ -106,6 +107,8 @@ func TestConfigureDSN(t *testing.T) { } func TestOneShot(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -164,7 +167,7 @@ journalctl_filter: t.Fatalf("Unexpected error : %s", err) } - err = j.OneShotAcquisition(out, &tomb) + err = j.OneShotAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) if err != nil { @@ -187,6 +190,7 @@ journalctl_filter: } func TestStreaming(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -250,7 +254,7 @@ journalctl_filter: }() } - err = j.StreamingAcquisition(out, &tomb) + err = j.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) if err != nil { diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index ca0a7556fca..77fc44e310d 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -23,9 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - dataSourceName = "kafka" -) +var dataSourceName = "kafka" var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -82,7 +80,7 @@ func (k *KafkaSource) UnmarshalConfig(yamlConfig []byte) error { k.Config.Mode = configuration.TAIL_MODE } - k.logger.Debugf("successfully unmarshaled kafka configuration : %+v", k.Config) + k.logger.Debugf("successfully parsed kafka configuration : %+v", 
k.Config) return err } @@ -129,7 +127,7 @@ func (k *KafkaSource) GetName() string { return dataSourceName } -func (k *KafkaSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KafkaSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return fmt.Errorf("%s datasource does not support one-shot acquisition", dataSourceName) } @@ -149,12 +147,12 @@ func (k *KafkaSource) Dump() interface{} { return k } -func (k *KafkaSource) ReadMessage(out chan types.Event) error { +func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) error { // Start processing from latest Offset - k.Reader.SetOffsetAt(context.Background(), time.Now()) + k.Reader.SetOffsetAt(ctx, time.Now()) for { k.logger.Tracef("reading message from topic '%s'", k.Config.Topic) - m, err := k.Reader.ReadMessage(context.Background()) + m, err := k.Reader.ReadMessage(ctx) if err != nil { if errors.Is(err, io.EOF) { return nil @@ -175,21 +173,16 @@ func (k *KafkaSource) ReadMessage(out chan types.Event) error { if k.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() } - var evt types.Event - - if !k.Config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) + evt.Line = l out <- evt } } -func (k *KafkaSource) RunReader(out chan types.Event, t *tomb.Tomb) error { +func (k *KafkaSource) RunReader(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { k.logger.Debugf("starting %s datasource reader goroutine with configuration %+v", dataSourceName, k.Config) t.Go(func() error { - return k.ReadMessage(out) + return k.ReadMessage(ctx, out) }) //nolint //fp for { @@ -204,12 +197,12 @@ func (k *KafkaSource) RunReader(out chan types.Event, t *tomb.Tomb) 
error { } } -func (k *KafkaSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KafkaSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { k.logger.Infof("start reader on brokers '%+v' with topic '%s'", k.Config.Brokers, k.Config.Topic) t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kafka/live") - return k.RunReader(out, t) + return k.RunReader(ctx, out, t) }) return nil diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 7b467142cc9..d796166a6ca 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -80,9 +80,9 @@ group_id: crowdsec`, } } -func writeToKafka(w *kafka.Writer, logs []string) { +func writeToKafka(ctx context.Context, w *kafka.Writer, logs []string) { for idx, log := range logs { - err := w.WriteMessages(context.Background(), kafka.Message{ + err := w.WriteMessages(ctx, kafka.Message{ Key: []byte(strconv.Itoa(idx)), // create an arbitrary message payload for the value Value: []byte(log), @@ -128,6 +128,7 @@ func createTopic(topic string, broker string) { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -176,12 +177,12 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) tomb := tomb.Tomb{} out := make(chan types.Event) - err = k.StreamingAcquisition(out, &tomb) + err = k.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 - go writeToKafka(w, ts.logs) + go writeToKafka(ctx, w, ts.logs) READLOOP: for { select { @@ -199,6 +200,7 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } func TestStreamingAcquisitionWithSSL(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -252,12 +254,12 @@ tls: tomb := tomb.Tomb{} 
out := make(chan types.Event) - err = k.StreamingAcquisition(out, &tomb) + err = k.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) actualLines := 0 - go writeToKafka(w2, ts.logs) + go writeToKafka(ctx, w2, ts.logs) READLOOP: for { select { diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 0e6c1980fa9..3744e43f38d 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -3,6 +3,7 @@ package kinesisacquisition import ( "bytes" "compress/gzip" + "context" "encoding/json" "errors" "fmt" @@ -29,7 +30,7 @@ type KinesisConfiguration struct { configuration.DataSourceCommonCfg `yaml:",inline"` StreamName string `yaml:"stream_name"` StreamARN string `yaml:"stream_arn"` - UseEnhancedFanOut bool `yaml:"use_enhanced_fanout"` //Use RegisterStreamConsumer and SubscribeToShard instead of GetRecords + UseEnhancedFanOut bool `yaml:"use_enhanced_fanout"` // Use RegisterStreamConsumer and SubscribeToShard instead of GetRecords AwsProfile *string `yaml:"aws_profile"` AwsRegion string `yaml:"aws_region"` AwsEndpoint string `yaml:"aws_endpoint"` @@ -114,8 +115,8 @@ func (k *KinesisSource) newClient() error { func (k *KinesisSource) GetMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, linesReadShards} - } + func (k *KinesisSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, linesReadShards} } @@ -181,14 +182,13 @@ func (k *KinesisSource) GetName() string { return "kinesis" } -func (k *KinesisSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KinesisSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("kinesis datasource does not support one-shot acquisition") } func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { b := bytes.NewBuffer(record) 
r, err := gzip.NewReader(b) - if err != nil { k.logger.Error(err) return nil, err @@ -299,8 +299,8 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan var data []CloudwatchSubscriptionLogEvent var err error if k.Config.FromSubscription { - //The AWS docs says that the data is base64 encoded - //but apparently GetRecords decodes it for us ? + // The AWS docs says that the data is base64 encoded + // but apparently GetRecords decodes it for us ? data, err = k.decodeFromSubscription(record.Data) if err != nil { logger.Errorf("Cannot decode data: %s", err) @@ -322,12 +322,8 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan } else { l.Src = k.Config.StreamName } - var evt types.Event - if !k.Config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) + evt.Line = l out <- evt } } @@ -335,9 +331,9 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEventStreamReader, out chan types.Event, shardId string, streamName string) error { logger := k.logger.WithField("shard_id", shardId) - //ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately - //and we won't be able to start a new one if this is the first one started by the tomb - //TODO: look into parent shards to see if a shard is closed before starting to read it ? + // ghetto sync, kinesis allows to subscribe to a closed shard, which will make the goroutine exit immediately + // and we won't be able to start a new one if this is the first one started by the tomb + // TODO: look into parent shards to see if a shard is closed before starting to read it ? 
time.Sleep(time.Second) for { select { @@ -420,7 +416,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { case <-t.Dying(): k.logger.Infof("Kinesis source is dying") k.shardReaderTomb.Kill(nil) - _ = k.shardReaderTomb.Wait() //we don't care about the error as we kill the tomb ourselves + _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) @@ -431,7 +427,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { if k.shardReaderTomb.Err() != nil { return k.shardReaderTomb.Err() } - //All goroutines have exited without error, so a resharding event, start again + // All goroutines have exited without error, so a resharding event, start again k.logger.Debugf("All reader goroutines have exited, resharding event or periodic resubscribe") continue } @@ -441,15 +437,17 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { logger := k.logger.WithField("shard", shardId) logger.Debugf("Starting to read shard") - sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ShardId: aws.String(shardId), + sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ + ShardId: aws.String(shardId), StreamName: &k.Config.StreamName, - ShardIteratorType: aws.String(kinesis.ShardIteratorTypeLatest)}) + ShardIteratorType: aws.String(kinesis.ShardIteratorTypeLatest), + }) if err != nil { logger.Errorf("Cannot get shard iterator: %s", err) return fmt.Errorf("cannot get shard iterator: %w", err) } it := sharIt.ShardIterator - //AWS recommends to wait for a second between calls to GetRecords for a given shard + // AWS recommends to wait for a second between calls to GetRecords for a given shard ticker := time.NewTicker(time.Second) for { select { @@ 
-460,7 +458,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro switch err.(type) { case *kinesis.ProvisionedThroughputExceededException: logger.Warn("Provisioned throughput exceeded") - //TODO: implement exponential backoff + // TODO: implement exponential backoff continue case *kinesis.ExpiredIteratorException: logger.Warn("Expired iterator") @@ -506,7 +504,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error case <-t.Dying(): k.logger.Info("kinesis source is dying") k.shardReaderTomb.Kill(nil) - _ = k.shardReaderTomb.Wait() //we don't care about the error as we kill the tomb ourselves + _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves return nil case <-k.shardReaderTomb.Dying(): reason := k.shardReaderTomb.Err() @@ -520,7 +518,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error } } -func (k *KinesisSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (k *KinesisSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming") if k.Config.UseEnhancedFanOut { diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 46e404aa49b..027cbde9240 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -3,6 +3,7 @@ package kinesisacquisition import ( "bytes" "compress/gzip" + "context" "encoding/json" "fmt" "net" @@ -60,8 +61,8 @@ func GenSubObject(i int) []byte { gz := gzip.NewWriter(&b) gz.Write(body) gz.Close() - //AWS actually base64 encodes the data, but it looks like kinesis automatically decodes it at some point - //localstack does not do it, so let's just write a raw gzipped stream + // AWS actually base64 encodes the data, but it looks like kinesis automatically decodes 
it at some point + // localstack does not do it, so let's just write a raw gzipped stream return b.Bytes() } @@ -99,10 +100,10 @@ func TestMain(m *testing.M) { os.Setenv("AWS_ACCESS_KEY_ID", "foobar") os.Setenv("AWS_SECRET_ACCESS_KEY", "foobar") - //delete_streams() - //create_streams() + // delete_streams() + // create_streams() code := m.Run() - //delete_streams() + // delete_streams() os.Exit(code) } @@ -149,6 +150,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } func TestReadFromStream(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -176,11 +178,11 @@ stream_name: stream-1-shard`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) for i := range test.count { @@ -193,6 +195,7 @@ stream_name: stream-1-shard`, } func TestReadFromMultipleShards(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -220,11 +223,11 @@ stream_name: stream-2-shards`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, false) c := 0 @@ -239,6 +242,7 @@ stream_name: stream-2-shards`, } func TestFromSubscription(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } 
@@ -267,11 +271,11 @@ from_subscription: true`, } tomb := &tomb.Tomb{} out := make(chan types.Event) - err = f.StreamingAcquisition(out, tomb) + err = f.StreamingAcquisition(ctx, out, tomb) if err != nil { t.Fatalf("Error starting source: %s", err) } - //Allow the datasource to start listening to the stream + // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(f.Config.StreamName, test.count, test.shards, true) for i := range test.count { diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index e48a074b764..1fa6c894a32 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -131,11 +131,11 @@ func (ka *KubernetesAuditSource) GetName() string { return "k8s-audit" } -func (ka *KubernetesAuditSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (ka *KubernetesAuditSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("k8s-audit datasource does not support one-shot acquisition") } -func (ka *KubernetesAuditSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { ka.outChan = out t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/k8s-audit/live") @@ -149,7 +149,7 @@ func (ka *KubernetesAuditSource) StreamingAcquisition(out chan types.Event, t *t }) <-t.Dying() ka.logger.Infof("Stopping k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) - ka.server.Shutdown(context.TODO()) + ka.server.Shutdown(ctx) return nil }) return nil @@ -164,7 +164,6 @@ func (ka *KubernetesAuditSource) Dump() interface{} { } func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.Request) { - if ka.metricsLevel != 
configuration.METRICS_NONE { requestCount.WithLabelValues(ka.addr).Inc() } @@ -196,7 +195,7 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R } bytesEvent, err := json.Marshal(auditEvent) if err != nil { - ka.logger.Errorf("Error marshaling audit event: %s", err) + ka.logger.Errorf("Error serializing audit event: %s", err) continue } ka.logger.Tracef("Got audit event: %s", string(bytesEvent)) @@ -208,11 +207,8 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R Process: true, Module: ka.GetName(), } - ka.outChan <- types.Event{ - Line: l, - Process: true, - Type: types.LOG, - ExpectMode: types.LIVE, - } + evt := types.MakeEvent(ka.config.UseTimeMachine, types.LOG, true) + evt.Line = l + ka.outChan <- evt } } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index 020bd4c91a0..a086a756e4a 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -1,6 +1,7 @@ package kubernetesauditacquisition import ( + "context" "net/http/httptest" "strings" "testing" @@ -52,6 +53,7 @@ listen_addr: 0.0.0.0`, } func TestInvalidConfig(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -83,7 +85,7 @@ webhook_path: /k8s-audit`, err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) - f.StreamingAcquisition(out, tb) + f.StreamingAcquisition(ctx, out, tb) time.Sleep(1 * time.Second) tb.Kill(nil) @@ -98,6 +100,7 @@ webhook_path: /k8s-audit`, } func TestHandler(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -257,14 +260,14 @@ webhook_path: /k8s-audit`, req := httptest.NewRequest(test.method, "/k8s-audit", strings.NewReader(test.body)) w := httptest.NewRecorder() - f.StreamingAcquisition(out, tb) + f.StreamingAcquisition(ctx, out, 
tb) f.webhookHandler(w, req) res := w.Result() assert.Equal(t, test.expectedStatusCode, res.StatusCode) - //time.Sleep(1 * time.Second) + // time.Sleep(1 * time.Second) require.NoError(t, err) tb.Kill(nil) diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 420da6e391c..fce199c5708 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -16,7 +16,7 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) type LokiClient struct { @@ -119,7 +119,7 @@ func (lc *LokiClient) queryRange(ctx context.Context, uri string, c chan *LokiQu case <-lc.t.Dying(): return lc.t.Err() case <-ticker.C: - resp, err := lc.Get(uri) + resp, err := lc.Get(ctx, uri) if err != nil { if ok := lc.shouldRetry(); !ok { return fmt.Errorf("error querying range: %w", err) @@ -215,7 +215,7 @@ func (lc *LokiClient) Ready(ctx context.Context) error { return lc.t.Err() case <-tick.C: lc.Logger.Debug("Checking if Loki is ready") - resp, err := lc.Get(url) + resp, err := lc.Get(ctx, url) if err != nil { lc.Logger.Warnf("Error checking if Loki is ready: %s", err) continue @@ -300,8 +300,8 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ } // Create a wrapper for http.Get to be able to set headers and auth -func (lc *LokiClient) Get(url string) (*http.Response, error) { - request, err := http.NewRequest(http.MethodGet, url, nil) +func (lc *LokiClient) Get(ctx context.Context, url string) (*http.Response, error) { + request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err } @@ -319,6 +319,6 @@ func NewLokiClient(config Config) *LokiClient { if config.Username != "" || config.Password != "" { headers["Authorization"] 
= "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) } - headers["User-Agent"] = cwversion.UserAgent() + headers["User-Agent"] = useragent.Default() return &LokiClient{Logger: log.WithField("component", "lokiclient"), config: config, requestHeaders: headers} } diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index 15c454723ee..c57e6a67c94 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -53,6 +53,7 @@ type LokiConfiguration struct { WaitForReady time.Duration `yaml:"wait_for_ready"` // Retry interval, default is 10 seconds Auth LokiAuthConfiguration `yaml:"auth"` MaxFailureDuration time.Duration `yaml:"max_failure_duration"` // Max duration of failure before stopping the source + NoReadyCheck bool `yaml:"no_ready_check"` // Bypass /ready check before starting configuration.DataSourceCommonCfg `yaml:",inline"` } @@ -229,6 +230,14 @@ func (l *LokiSource) ConfigureByDSN(dsn string, labels map[string]string, logger l.logger.Logger.SetLevel(level) } + if noReadyCheck := params.Get("no_ready_check"); noReadyCheck != "" { + noReadyCheck, err := strconv.ParseBool(noReadyCheck) + if err != nil { + return fmt.Errorf("invalid no_ready_check in dsn: %w", err) + } + l.Config.NoReadyCheck = noReadyCheck + } + l.Config.URL = fmt.Sprintf("%s://%s", scheme, u.Host) if u.User != nil { l.Config.Auth.Username = u.User.Username() @@ -261,29 +270,31 @@ func (l *LokiSource) GetName() string { } // OneShotAcquisition reads a set of file and returns when done -func (l *LokiSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (l *LokiSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { l.logger.Debug("Loki one shot acquisition") l.Client.SetTomb(t) - readyCtx, cancel := context.WithTimeout(context.Background(), l.Config.WaitForReady) - defer cancel() - err := l.Client.Ready(readyCtx) - if err != nil { - 
return fmt.Errorf("loki is not ready: %w", err) + + if !l.Config.NoReadyCheck { + readyCtx, readyCancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer readyCancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("loki is not ready: %w", err) + } } - ctx, cancel := context.WithCancel(context.Background()) - c := l.Client.QueryRange(ctx, false) + lokiCtx, cancel := context.WithCancel(ctx) + defer cancel() + c := l.Client.QueryRange(lokiCtx, false) for { select { case <-t.Dying(): l.logger.Debug("Loki one shot acquisition stopped") - cancel() return nil case resp, ok := <-c: if !ok { l.logger.Info("Loki acquisition done, chan closed") - cancel() return nil } for _, stream := range resp.Data.Result { @@ -307,41 +318,33 @@ func (l *LokiSource) readOneEntry(entry lokiclient.Entry, labels map[string]stri if l.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() } - expectMode := types.LIVE - if l.Config.UseTimeMachine { - expectMode = types.TIMEMACHINE - } - out <- types.Event{ - Line: ll, - Process: true, - Type: types.LOG, - ExpectMode: expectMode, - } + evt := types.MakeEvent(l.Config.UseTimeMachine, types.LOG, true) + evt.Line = ll + out <- evt } -func (l *LokiSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (l *LokiSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { l.Client.SetTomb(t) - readyCtx, cancel := context.WithTimeout(context.Background(), l.Config.WaitForReady) - defer cancel() - err := l.Client.Ready(readyCtx) - if err != nil { - return fmt.Errorf("loki is not ready: %w", err) + + if !l.Config.NoReadyCheck { + readyCtx, readyCancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer readyCancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("loki is not ready: %w", err) + } } ll := l.logger.WithField("websocket_url", l.lokiWebsocket) t.Go(func() error { - ctx, 
cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() respChan := l.Client.QueryRange(ctx, true) - if err != nil { - ll.Errorf("could not start loki tail: %s", err) - return fmt.Errorf("while starting loki tail: %w", err) - } for { select { case resp, ok := <-respChan: if !ok { ll.Warnf("loki channel closed") - return err + return errors.New("loki channel closed") } for _, stream := range resp.Data.Result { for _, entry := range stream.Entries { diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 5f41cd4c62e..643aefad715 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -34,6 +34,7 @@ func TestConfiguration(t *testing.T) { password string waitForReady time.Duration delayFor time.Duration + noReadyCheck bool testName string }{ { @@ -95,7 +96,19 @@ query: > delayFor: 1 * time.Second, }, { - + config: ` +mode: tail +source: loki +url: http://localhost:3100/ +no_ready_check: true +query: > + {server="demo"} +`, + expectedErr: "", + testName: "Correct config with no_ready_check", + noReadyCheck: true, + }, + { config: ` mode: tail source: loki @@ -111,7 +124,6 @@ query: > testName: "Correct config with password", }, { - config: ` mode: tail source: loki @@ -150,6 +162,8 @@ query: > t.Fatalf("Wrong DelayFor %v != %v", lokiSource.Config.DelayFor, test.delayFor) } } + + assert.Equal(t, test.noReadyCheck, lokiSource.Config.NoReadyCheck) }) } } @@ -166,6 +180,7 @@ func TestConfigureDSN(t *testing.T) { scheme string waitForReady time.Duration delayFor time.Duration + noReadyCheck bool }{ { name: "Wrong scheme", @@ -204,10 +219,11 @@ func TestConfigureDSN(t *testing.T) { }, { name: "Correct DSN", - dsn: `loki://localhost:3100/?query={server="demo"}&wait_for_ready=5s&delay_for=1s`, + dsn: `loki://localhost:3100/?query={server="demo"}&wait_for_ready=5s&delay_for=1s&no_ready_check=true`, expectedErr: "", waitForReady: 5 
* time.Second, delayFor: 1 * time.Second, + noReadyCheck: true, }, { name: "SSL DSN", @@ -258,10 +274,13 @@ func TestConfigureDSN(t *testing.T) { t.Fatalf("Wrong DelayFor %v != %v", lokiSource.Config.DelayFor, test.delayFor) } } + + assert.Equal(t, test.noReadyCheck, lokiSource.Config.NoReadyCheck) + } } -func feedLoki(logger *log.Entry, n int, title string) error { +func feedLoki(ctx context.Context, logger *log.Entry, n int, title string) error { streams := LogStreams{ Streams: []LogStream{ { @@ -286,7 +305,7 @@ func feedLoki(logger *log.Entry, n int, title string) error { return err } - req, err := http.NewRequest(http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://127.0.0.1:3100/loki/api/v1/push", bytes.NewBuffer(buff)) if err != nil { return err } @@ -314,6 +333,8 @@ func feedLoki(logger *log.Entry, n int, title string) error { } func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -344,12 +365,11 @@ since: 1h subLogger := logger.WithField("type", "loki") lokiSource := loki.LokiSource{} err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) - if err != nil { t.Fatalf("Unexpected error : %s", err) } - err = feedLoki(subLogger, 20, title) + err = feedLoki(ctx, subLogger, 20, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -367,7 +387,7 @@ since: 1h lokiTomb := tomb.Tomb{} - err = lokiSource.OneShotAcquisition(out, &lokiTomb) + err = lokiSource.OneShotAcquisition(ctx, out, &lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -421,6 +441,8 @@ query: > }, } + ctx := context.Background() + for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { logger := log.New() @@ -438,7 +460,7 @@ query: > t.Fatalf("Unexpected error : %s", err) } - err = lokiSource.StreamingAcquisition(out, &lokiTomb) + err = 
lokiSource.StreamingAcquisition(ctx, out, &lokiTomb) cstest.AssertErrorContains(t, err, ts.streamErr) if ts.streamErr != "" { @@ -448,7 +470,7 @@ query: > time.Sleep(time.Second * 2) // We need to give time to start reading from the WS readTomb := tomb.Tomb{} - readCtx, cancel := context.WithTimeout(context.Background(), time.Second*10) + readCtx, cancel := context.WithTimeout(ctx, time.Second*10) count := 0 readTomb.Go(func() error { @@ -472,7 +494,7 @@ query: > } }) - err = feedLoki(subLogger, ts.expectedLines, title) + err = feedLoki(ctx, subLogger, ts.expectedLines, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -491,6 +513,7 @@ query: > } func TestStopStreaming(t *testing.T) { + ctx := context.Background() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -518,14 +541,14 @@ query: > lokiTomb := &tomb.Tomb{} - err = lokiSource.StreamingAcquisition(out, lokiTomb) + err = lokiSource.StreamingAcquisition(ctx, out, lokiTomb) if err != nil { t.Fatalf("Unexpected error : %s", err) } time.Sleep(time.Second * 2) - err = feedLoki(subLogger, 1, title) + err = feedLoki(ctx, subLogger, 1, title) if err != nil { t.Fatalf("Unexpected error : %s", err) } diff --git a/pkg/acquisition/modules/s3/s3.go b/pkg/acquisition/modules/s3/s3.go index 9ef4d2ba757..cdc84a8a3ca 100644 --- a/pkg/acquisition/modules/s3/s3.go +++ b/pkg/acquisition/modules/s3/s3.go @@ -38,7 +38,7 @@ type S3Configuration struct { AwsEndpoint string `yaml:"aws_endpoint"` BucketName string `yaml:"bucket_name"` Prefix string `yaml:"prefix"` - Key string `yaml:"-"` //Only for DSN acquisition + Key string `yaml:"-"` // Only for DSN acquisition PollingMethod string `yaml:"polling_method"` PollingInterval int `yaml:"polling_interval"` SQSName string `yaml:"sqs_name"` @@ -93,10 +93,12 @@ type S3Event struct { } `json:"detail"` } -const PollMethodList = "list" -const PollMethodSQS = "sqs" -const SQSFormatEventBridge = "eventbridge" -const SQSFormatS3Notification = 
"s3notification" +const ( + PollMethodList = "list" + PollMethodSQS = "sqs" + SQSFormatEventBridge = "eventbridge" + SQSFormatS3Notification = "s3notification" +) var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -336,7 +338,7 @@ func (s *S3Source) sqsPoll() error { out, err := s.sqsClient.ReceiveMessageWithContext(s.ctx, &sqs.ReceiveMessageInput{ QueueUrl: aws.String(s.Config.SQSName), MaxNumberOfMessages: aws.Int64(10), - WaitTimeSeconds: aws.Int64(20), //Probably no need to make it configurable ? + WaitTimeSeconds: aws.Int64(20), // Probably no need to make it configurable ? }) if err != nil { logger.Errorf("Error while polling SQS: %s", err) @@ -351,7 +353,7 @@ func (s *S3Source) sqsPoll() error { bucket, key, err := s.extractBucketAndPrefix(message.Body) if err != nil { logger.Errorf("Error while parsing SQS message: %s", err) - //Always delete the message to avoid infinite loop + // Always delete the message to avoid infinite loop _, err = s.sqsClient.DeleteMessage(&sqs.DeleteMessageInput{ QueueUrl: aws.String(s.Config.SQSName), ReceiptHandle: message.ReceiptHandle, @@ -377,7 +379,7 @@ func (s *S3Source) sqsPoll() error { } func (s *S3Source) readFile(bucket string, key string) error { - //TODO: Handle SSE-C + // TODO: Handle SSE-C var scanner *bufio.Scanner logger := s.logger.WithFields(log.Fields{ @@ -390,14 +392,13 @@ func (s *S3Source) readFile(bucket string, key string) error { Bucket: aws.String(bucket), Key: aws.String(key), }) - if err != nil { return fmt.Errorf("failed to get object %s/%s: %w", bucket, key, err) } defer output.Body.Close() if strings.HasSuffix(key, ".gz") { - //This *might* be a gzipped file, but sometimes the SDK will decompress the data for us (it's not clear when it happens, only had the issue with cloudtrail logs) + // This *might* be a gzipped file, but sometimes the SDK will decompress the data for us (it's not clear when it happens, only had the issue with cloudtrail logs) header := make([]byte, 2) _, err := 
output.Body.Read(header) if err != nil { @@ -442,12 +443,8 @@ func (s *S3Source) readFile(bucket string, key string) error { } else if s.MetricsLevel == configuration.METRICS_AGGREGATE { l.Src = bucket } - var evt types.Event - if !s.Config.UseTimeMachine { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - evt = types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(s.Config.UseTimeMachine, types.LOG, true) + evt.Line = l s.out <- evt } } @@ -467,6 +464,7 @@ func (s *S3Source) GetUuid() string { func (s *S3Source) GetMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, objectsRead, sqsMessagesReceived} } + func (s *S3Source) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead, objectsRead, sqsMessagesReceived} } @@ -567,11 +565,11 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger * }) dsn = strings.TrimPrefix(dsn, "s3://") args := strings.Split(dsn, "?") - if len(args[0]) == 0 { + if args[0] == "" { return errors.New("empty s3:// DSN") } - if len(args) == 2 && len(args[1]) != 0 { + if len(args) == 2 && args[1] != "" { params, err := url.ParseQuery(args[1]) if err != nil { return fmt.Errorf("could not parse s3 args: %w", err) @@ -610,7 +608,7 @@ func (s *S3Source) ConfigureByDSN(dsn string, labels map[string]string, logger * pathParts := strings.Split(args[0], "/") s.logger.Debugf("pathParts: %v", pathParts) - //FIXME: handle s3://bucket/ + // FIXME: handle s3://bucket/ if len(pathParts) == 1 { s.Config.BucketName = pathParts[0] s.Config.Prefix = "" @@ -641,10 +639,10 @@ func (s *S3Source) GetName() string { return "s3" } -func (s *S3Source) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *S3Source) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { s.logger.Infof("starting acquisition of %s/%s/%s", s.Config.BucketName, 
s.Config.Prefix, s.Config.Key) s.out = out - s.ctx, s.cancel = context.WithCancel(context.Background()) + s.ctx, s.cancel = context.WithCancel(ctx) s.Config.UseTimeMachine = true s.t = t if s.Config.Key != "" { @@ -653,7 +651,7 @@ func (s *S3Source) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error return err } } else { - //No key, get everything in the bucket based on the prefix + // No key, get everything in the bucket based on the prefix objects, err := s.getBucketContent() if err != nil { return err @@ -669,11 +667,11 @@ func (s *S3Source) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error return nil } -func (s *S3Source) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *S3Source) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { s.t = t s.out = out - s.readerChan = make(chan S3Object, 100) //FIXME: does this needs to be buffered? - s.ctx, s.cancel = context.WithCancel(context.Background()) + s.readerChan = make(chan S3Object, 100) // FIXME: does this needs to be buffered? 
+ s.ctx, s.cancel = context.WithCancel(ctx) s.logger.Infof("starting acquisition of %s/%s", s.Config.BucketName, s.Config.Prefix) t.Go(func() error { s.readManager() diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index 93e166dfec5..367048aa33a 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -208,6 +208,7 @@ func (msqs mockSQSClientNotif) DeleteMessage(input *sqs.DeleteMessageInput) (*sq } func TestDSNAcquis(t *testing.T) { + ctx := context.Background() tests := []struct { name string dsn string @@ -260,7 +261,7 @@ func TestDSNAcquis(t *testing.T) { f.s3Client = mockS3Client{} tmb := tomb.Tomb{} - err = f.OneShotAcquisition(out, &tmb) + err = f.OneShotAcquisition(ctx, out, &tmb) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -272,6 +273,7 @@ func TestDSNAcquis(t *testing.T) { } func TestListPolling(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -331,7 +333,7 @@ prefix: foo/ } }() - err = f.StreamingAcquisition(out, &tb) + err = f.StreamingAcquisition(ctx, out, &tb) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } @@ -348,6 +350,7 @@ prefix: foo/ } func TestSQSPoll(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -411,7 +414,7 @@ sqs_name: test } }() - err = f.StreamingAcquisition(out, &tb) + err = f.StreamingAcquisition(ctx, out, &tb) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) } diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go index 8fb5089a61f..3af6614bce6 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse_test.go @@ -4,6 +4,10 @@ import ( "fmt" "testing" "time" + + "github.com/stretchr/testify/assert" + + 
"github.com/crowdsecurity/go-cs-lib/cstest" ) func TestPri(t *testing.T) { @@ -26,28 +30,20 @@ func TestPri(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parsePRI() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.PRI != test.expected { - t.Errorf("expected %d, got %d", test.expected, r.PRI) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.PRI) }) } } func TestTimestamp(t *testing.T) { - tests := []struct { input string expected string @@ -68,25 +64,19 @@ func TestTimestamp(t *testing.T) { if test.currentYear { opts = append(opts, WithCurrentYear()) } + r := NewRFC3164Parser(opts...) r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseTimestamp() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Timestamp.Format(time.RFC3339) != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Timestamp.Format(time.RFC3339)) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Timestamp.Format(time.RFC3339)) }) } } @@ -121,25 +111,19 @@ func TestHostname(t *testing.T) { if test.strictHostname { opts = append(opts, WithStrictHostname()) } + r := NewRFC3164Parser(opts...) 
r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseHostname() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Hostname != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Hostname) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Hostname) }) } } @@ -164,27 +148,16 @@ func TestTag(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseTag() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else { - if r.Tag != test.expected { - t.Errorf("expected %s, got %s", test.expected, r.Tag) - } - if r.PID != test.expectedPID { - t.Errorf("expected %s, got %s", test.expected, r.Message) - } - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Tag) + assert.Equal(t, test.expectedPID, r.PID) }) } } @@ -207,22 +180,15 @@ func TestMessage(t *testing.T) { r := &RFC3164{} r.buf = []byte(test.input) r.len = len(r.buf) + err := r.parseMessage() - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error %s, got %s", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: %s", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error %s, got no error", test.expectedErr) - } else if r.Message != test.expected { - 
t.Errorf("expected message %s, got %s", test.expected, r.Tag) - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected, r.Message) }) } } @@ -236,6 +202,7 @@ func TestParse(t *testing.T) { Message string PRI int } + tests := []struct { input string expected expected @@ -326,39 +293,20 @@ func TestParse(t *testing.T) { for _, test := range tests { t.Run(test.input, func(t *testing.T) { r := NewRFC3164Parser(test.opts...) + err := r.Parse([]byte(test.input)) - if err != nil { - if test.expectedErr != "" { - if err.Error() != test.expectedErr { - t.Errorf("expected error '%s', got '%s'", test.expectedErr, err) - } - } else { - t.Errorf("unexpected error: '%s'", err) - } - } else { - if test.expectedErr != "" { - t.Errorf("expected error '%s', got no error", test.expectedErr) - } else { - if r.Timestamp != test.expected.Timestamp { - t.Errorf("expected timestamp '%s', got '%s'", test.expected.Timestamp, r.Timestamp) - } - if r.Hostname != test.expected.Hostname { - t.Errorf("expected hostname '%s', got '%s'", test.expected.Hostname, r.Hostname) - } - if r.Tag != test.expected.Tag { - t.Errorf("expected tag '%s', got '%s'", test.expected.Tag, r.Tag) - } - if r.PID != test.expected.PID { - t.Errorf("expected pid '%s', got '%s'", test.expected.PID, r.PID) - } - if r.Message != test.expected.Message { - t.Errorf("expected message '%s', got '%s'", test.expected.Message, r.Message) - } - if r.PRI != test.expected.PRI { - t.Errorf("expected pri '%d', got '%d'", test.expected.PRI, r.PRI) - } - } + cstest.RequireErrorContains(t, err, test.expectedErr) + + if test.expectedErr != "" { + return } + + assert.Equal(t, test.expected.Timestamp, r.Timestamp) + assert.Equal(t, test.expected.Hostname, r.Hostname) + assert.Equal(t, test.expected.Tag, r.Tag) + assert.Equal(t, test.expected.PID, r.PID) + assert.Equal(t, test.expected.Message, r.Message) + assert.Equal(t, test.expected.PRI, r.PRI) }) } } diff 
--git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index 06c32e62f77..fb6a04600c1 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "context" "errors" "fmt" "net" @@ -83,7 +84,7 @@ func (s *SyslogSource) ConfigureByDSN(dsn string, labels map[string]string, logg return errors.New("syslog datasource does not support one shot acquisition") } -func (s *SyslogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *SyslogSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return errors.New("syslog datasource does not support one shot acquisition") } @@ -105,7 +106,7 @@ func (s *SyslogSource) UnmarshalConfig(yamlConfig []byte) error { } if s.config.Addr == "" { - s.config.Addr = "127.0.0.1" //do we want a usable or secure default ? + s.config.Addr = "127.0.0.1" // do we want a usable or secure default ? 
} if s.config.Port == 0 { s.config.Port = 514 @@ -135,7 +136,7 @@ func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe return nil } -func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (s *SyslogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { c := make(chan syslogserver.SyslogMessage) s.server = &syslogserver.SyslogServer{Logger: s.logger.WithField("syslog", "internal"), MaxMessageLen: s.config.MaxMessageLen} s.server.SetChannel(c) @@ -152,7 +153,8 @@ func (s *SyslogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) } func (s *SyslogSource) buildLogFromSyslog(ts time.Time, hostname string, - appname string, pid string, msg string) string { + appname string, pid string, msg string, +) string { ret := "" if !ts.IsZero() { ret += ts.Format("Jan 2 15:04:05") @@ -178,7 +180,6 @@ func (s *SyslogSource) buildLogFromSyslog(ts time.Time, hostname string, ret += msg } return ret - } func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c chan syslogserver.SyslogMessage) error { @@ -234,11 +235,9 @@ func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c cha l.Time = ts l.Src = syslogLine.Client l.Process = true - if !s.config.UseTimeMachine { - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} - } else { - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} - } + evt := types.MakeEvent(s.config.UseTimeMachine, types.LOG, true) + evt.Line = l + out <- evt } } } diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 1750f375138..57fa3e8747b 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "context" "fmt" "net" "runtime" @@ -80,6 +81,7 @@ func writeToSyslog(logs 
[]string) { } func TestStreamingAcquisition(t *testing.T) { + ctx := context.Background() tests := []struct { name string config string @@ -100,8 +102,10 @@ listen_addr: 127.0.0.1`, listen_port: 4242 listen_addr: 127.0.0.1`, expectedLines: 2, - logs: []string{`<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, - `<13>1 2021-05-18T12:12:37.560695+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla2[foobar]`}, + logs: []string{ + `<13>1 2021-05-18T11:58:40.828081+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, + `<13>1 2021-05-18T12:12:37.560695+02:00 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla2[foobar]`, + }, }, { name: "RFC3164", @@ -109,10 +113,12 @@ listen_addr: 127.0.0.1`, listen_port: 4242 listen_addr: 127.0.0.1`, expectedLines: 3, - logs: []string{`<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`, + logs: []string{ + `<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`, `<13>May 18 12:37:56 mantis sshd[49340]: blabla2`, `<13>May 18 12:37:56 mantis sshd: blabla2`, - `<13>May 18 12:37:56 mantis sshd`}, + `<13>May 18 12:37:56 mantis sshd`, + }, }, } if runtime.GOOS != "windows" { @@ -139,7 +145,7 @@ listen_addr: 127.0.0.1`, } tomb := tomb.Tomb{} out := make(chan types.Event) - err = s.StreamingAcquisition(out, &tomb) + err = s.StreamingAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) if ts.expectedErr != "" { return diff --git a/pkg/acquisition/modules/wineventlog/test_files/Setup.evtx b/pkg/acquisition/modules/wineventlog/test_files/Setup.evtx new file mode 100644 index 0000000000000000000000000000000000000000..2c4f8b0f680d0cf86be8d25825fc6a94576a5fe2 GIT binary patch literal 69632 zcmeI43yf6N8OOhwogHQ$%J2&c39?Ah6Pcbrlt@+rTV43oH9TccCb? 
zZm3#g+G-n3Oi8MtPZMiss|_XAXlrX~2@un?#zNYrG-~=turW3EvHkzg+yl#H=q||Y zEb0GD?%vnA_k8#F{qJ{P7glz6wsdu8ESxS14h&))u_Cj|NI1&}+UMQgSyOb&2fB$a z76B0u0TB=Z5fA|p5CIVo0TB=Z5f~MLmEE1oH}`HqA3vL>2kY?uAdWx2#O$r5W@~pJ zxF^VV&MV&kdvo7>aQF>o#?6jQF#CHk%83m>V-`!8EyXaz*`|XdTyrnVWSWUGnf?*v z-xD0?vE4Y<8M5ap?7tV~S+fs?%IFF1#dA*u+t(s*3+}l;bRXs!o<0)XbNDfCKL;-7 znl8Wm%va8zZ?+Hj_Wr)|jz=1jt6q;(L?Tumxp;Ew32za%7bN%p@Ur)he)WrAX}#?M zJPC6xtFpVX$p>WGUtVl0ZM$Wx*9Nfvc`WhUgOkr!6x$B#v2L8~$BE)dy=_Le0o!U< z+7sqV9FNLidYr@~miQ@ZD z*^5SohF*oQyx;n6jN4kf3wIuHC7MN-4GrB2ySzt_?@U;$D}6i4>OihLakc?Vix=bS z$71#g7~6xBX33;Q;G;Y_y zfpiNIvA78DRBUswOdZchi?%z_0gTBr+Cm`1Uzx8DvFdsEhl41OVPw(v~R!-X)-Ozl8+`!t(3Oh zkBfXj*z-=AEqB>yN4K-YV{JpemX_cRFFjsr%W%g57*4ee;7POhO-`_iID-P+UjMTy zW6{|=@tMwW(L~B+Xs8Ee_#m?Hxe`3U4rll_S5Vfyt7r$jX|$#EHzk2)HzH#{et{JU zOT-yk>Dv376Ck`fVCw-SODs_osdRE;FlPE3B)S|q(-2axvztz{pUxw9$I&E zDaj0g%vOZst6ux&CSMV7JGr|nwd)nTcvt4H|1O6m49D;ajbc5@v|<+}=b^qBq7Pp)o^|jU;ffJlZbI1|?o`ITl>^Xj#-bDP@O0(8;4YV79nW6F zBJaeHFBjh(Muq6ah$WbRVUYinGEU}S3lpe0;74V4Ad>MKmIL?2 zVsJJd5+vw!uAg_Vy2$1X8uYy!)lFN-2W{}f$;zM()k+(Dvkgbn2qPKy-p#h^6IXjd z_9LHHcpD(nqrG6%hwB*R!sRsJZj0gZ)poVjp~|YmuNkE^Xqr?eTH{)$P{mt+thTn{uq}hfkCoQwFd^5sc|H-)ZIvXtX zdkS9P4)4?VP4NB*zIV=s)9L>N)_6x4K`z_XvG7>!`fk>`8Reyk*jn>nZ+k9kKVY z9{#6J8lVCG+MRCF&<0za>~GY#o=FFCHM^}^mzR2?-ZtWj4*a-9buAvPj@TYS!_TEp z&Hwgom3fOtx!R+?#ITMYM?5+Lv%F;m)zSY&dz5jdB+$W2zrE(``O`tJddZg<*1-Ti zXwn0F^iz*xJko(2HCUQ)MGcl( zr@k~4HA)?|A|9bZ>e6HH?%bO{9WBk#PWB~+b@U=S#%o}fx2&K#nl`T5$!FG4e&W$6 z$6Tl*wwJj^+I!cd-$vcxyL%M3H4k9uIX^OMw|~C-d@6>t4nJi+57Ra z)sZhTtfTud2sH(rcyF0>lp`*Mb(F2=IF{1@y=2#Hc{f8DY&m7yMk$N)JXP@@{MD;1N_SH}T=i;kFGv2JHI18O^g*UqBlfvB7Y}|XS=g7}I zT6(H=ex7ta6wST?3ynS6uQXhIRA}Xy`jwyk!N|k>b)QzAf?N9PC$QzmhatwD#Fc z^QWg={Ze0ISWhWTGpL%YD7L)InQcoB(cj=jX_s!0qj&j8%Ut(BC z_hBOFQ7qoFg6e3z#-*{D4?2hT5l7}ir8w6q3`x2&Lg8n1C_ zZ1i*%acNpsM%L21<+A+gC|6waC5Cl$4C5ScVeys~R7V#|T*}Y<;#tNe)^q;aR{_2ZW0m}m4QZrxiPH7-9MEKS(U_Bs^aib)}FSwZzQWn9fO@?T-_y9bUiHM+lx zF*?NW@x!C}9J}WjDrcfoSUAeWu~?29ai9P2kY7UZ>pbSA9+{V;PyCl89Es&O6dQ4T 
zgy@u=U-aWl{@>&F{ql)Fj;)@2iD5lW$AnNDKI-+BNlz!n^T)BCf@)4H5fA|p5CIVo z0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p z5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo z0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p5CIVo0TB=Z5fA|p a5CIVo0TB=Z5fA|p5CIVo0TCE?1pW&`k>URU literal 0 HcmV?d00001 diff --git a/pkg/acquisition/modules/wineventlog/wineventlog.go b/pkg/acquisition/modules/wineventlog/wineventlog.go index 44035d0a708..3023a371576 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog.go @@ -3,6 +3,7 @@ package wineventlogacquisition import ( + "context" "errors" "github.com/prometheus/client_golang/prometheus" @@ -39,7 +40,7 @@ func (w *WinEventLogSource) SupportedModes() []string { return []string{configuration.TAIL_MODE, configuration.CAT_MODE} } -func (w *WinEventLogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { return nil } @@ -59,7 +60,7 @@ func (w *WinEventLogSource) CanRun() error { return errors.New("windows event log acquisition is only supported on Windows") } -func (w *WinEventLogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { return nil } diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index c6b10b7c38c..8283bcc21a2 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -1,10 +1,13 @@ package wineventlogacquisition import ( + "context" "encoding/xml" "errors" "fmt" + "net/url" 
"runtime" + "strconv" "strings" "syscall" "time" @@ -29,7 +32,7 @@ type WinEventLogConfiguration struct { EventLevel string `yaml:"event_level"` EventIDs []int `yaml:"event_ids"` XPathQuery string `yaml:"xpath_query"` - EventFile string `yaml:"event_file"` + EventFile string PrettyName string `yaml:"pretty_name"` } @@ -47,10 +50,13 @@ type QueryList struct { } type Select struct { - Path string `xml:"Path,attr"` + Path string `xml:"Path,attr,omitempty"` Query string `xml:",chardata"` } +// 0 identifies the local machine in windows APIs +const localMachine = 0 + var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_winevtlogsource_hits_total", @@ -77,7 +83,7 @@ func logLevelToInt(logLevel string) ([]string, error) { // This is lifted from winops/winlog, but we only want to render the basic XML string, we don't need the extra fluff func (w *WinEventLogSource) getXMLEvents(config *winlog.SubscribeConfig, publisherCache map[string]windows.Handle, resultSet windows.Handle, maxEvents int) ([]string, error) { - var events = make([]windows.Handle, maxEvents) + events := make([]windows.Handle, maxEvents) var returned uint32 // Get handles to events from the result set. @@ -88,7 +94,7 @@ func (w *WinEventLogSource) getXMLEvents(config *winlog.SubscribeConfig, publish 2000, // Timeout in milliseconds to wait. 0, // Reserved. Must be zero. &returned) // The number of handles in the array that are set by the API. 
- if err == windows.ERROR_NO_MORE_ITEMS { + if errors.Is(err, windows.ERROR_NO_MORE_ITEMS) { return nil, err } else if err != nil { return nil, fmt.Errorf("wevtapi.EvtNext failed: %v", err) @@ -149,7 +155,7 @@ func (w *WinEventLogSource) buildXpathQuery() (string, error) { queryList := QueryList{Select: Select{Path: w.config.EventChannel, Query: query}} xpathQuery, err := xml.Marshal(queryList) if err != nil { - w.logger.Errorf("Marshal failed: %v", err) + w.logger.Errorf("Serialize failed: %v", err) return "", err } w.logger.Debugf("xpathQuery: %s", xpathQuery) @@ -182,7 +188,7 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error } if status == syscall.WAIT_OBJECT_0 { renderedEvents, err := w.getXMLEvents(w.evtConfig, publisherCache, subscription, 500) - if err == windows.ERROR_NO_MORE_ITEMS { + if errors.Is(err, windows.ERROR_NO_MORE_ITEMS) { windows.ResetEvent(w.evtConfig.SignalEvent) } else if err != nil { w.logger.Errorf("getXMLEvents failed: %v", err) @@ -200,9 +206,9 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error l.Src = w.name l.Process = true if !w.config.UseTimeMachine { - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE} + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.LIVE, Unmarshaled: make(map[string]interface{})} } else { - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE} + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE, Unmarshaled: make(map[string]interface{})} } } } @@ -211,20 +217,28 @@ func (w *WinEventLogSource) getEvents(out chan types.Event, t *tomb.Tomb) error } } -func (w *WinEventLogSource) generateConfig(query string) (*winlog.SubscribeConfig, error) { +func (w *WinEventLogSource) generateConfig(query string, live bool) (*winlog.SubscribeConfig, error) { var config winlog.SubscribeConfig var err error - // Create a 
subscription signaler. - config.SignalEvent, err = windows.CreateEvent( - nil, // Default security descriptor. - 1, // Manual reset. - 1, // Initial state is signaled. - nil) // Optional name. - if err != nil { - return &config, fmt.Errorf("windows.CreateEvent failed: %v", err) + if live { + // Create a subscription signaler. + config.SignalEvent, err = windows.CreateEvent( + nil, // Default security descriptor. + 1, // Manual reset. + 1, // Initial state is signaled. + nil) // Optional name. + if err != nil { + return &config, fmt.Errorf("windows.CreateEvent failed: %v", err) + } + config.Flags = wevtapi.EvtSubscribeToFutureEvents + } else { + config.ChannelPath, err = syscall.UTF16PtrFromString(w.config.EventFile) + if err != nil { + return &config, fmt.Errorf("syscall.UTF16PtrFromString failed: %v", err) + } + config.Flags = wevtapi.EvtQueryFilePath | wevtapi.EvtQueryForwardDirection } - config.Flags = wevtapi.EvtSubscribeToFutureEvents config.Query, err = syscall.UTF16PtrFromString(query) if err != nil { return &config, fmt.Errorf("syscall.UTF16PtrFromString failed: %v", err) @@ -282,7 +296,7 @@ func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, Metr return err } - w.evtConfig, err = w.generateConfig(w.query) + w.evtConfig, err = w.generateConfig(w.query, true) if err != nil { return err } @@ -291,6 +305,78 @@ func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, Metr } func (w *WinEventLogSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { + if !strings.HasPrefix(dsn, "wineventlog://") { + return fmt.Errorf("invalid DSN %s for wineventlog source, must start with wineventlog://", dsn) + } + + w.logger = logger + w.config = WinEventLogConfiguration{} + + dsn = strings.TrimPrefix(dsn, "wineventlog://") + + args := strings.Split(dsn, "?") + + if args[0] == "" { + return errors.New("empty wineventlog:// DSN") + } + + if len(args) > 2 { + return errors.New("too many 
arguments in DSN") + } + + w.config.EventFile = args[0] + + if len(args) == 2 && args[1] != "" { + params, err := url.ParseQuery(args[1]) + if err != nil { + return fmt.Errorf("failed to parse DSN parameters: %w", err) + } + + for key, value := range params { + switch key { + case "log_level": + if len(value) != 1 { + return errors.New("log_level must be a single value") + } + lvl, err := log.ParseLevel(value[0]) + if err != nil { + return fmt.Errorf("failed to parse log_level: %s", err) + } + w.logger.Logger.SetLevel(lvl) + case "event_id": + for _, id := range value { + evtid, err := strconv.Atoi(id) + if err != nil { + return fmt.Errorf("failed to parse event_id: %s", err) + } + w.config.EventIDs = append(w.config.EventIDs, evtid) + } + case "event_level": + if len(value) != 1 { + return errors.New("event_level must be a single value") + } + w.config.EventLevel = value[0] + } + } + } + + var err error + + // FIXME: handle custom xpath query + w.query, err = w.buildXpathQuery() + + if err != nil { + return fmt.Errorf("buildXpathQuery failed: %w", err) + } + + w.logger.Debugf("query: %s\n", w.query) + + w.evtConfig, err = w.generateConfig(w.query, false) + + if err != nil { + return fmt.Errorf("generateConfig failed: %w", err) + } + return nil } @@ -299,10 +385,58 @@ func (w *WinEventLogSource) GetMode() string { } func (w *WinEventLogSource) SupportedModes() []string { - return []string{configuration.TAIL_MODE} + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} } -func (w *WinEventLogSource) OneShotAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + handle, err := wevtapi.EvtQuery(localMachine, w.evtConfig.ChannelPath, w.evtConfig.Query, w.evtConfig.Flags) + if err != nil { + return fmt.Errorf("EvtQuery failed: %v", err) + } + + defer winlog.Close(handle) + + publisherCache := make(map[string]windows.Handle) + defer func() { + for 
_, h := range publisherCache { + winlog.Close(h) + } + }() + +OUTER_LOOP: + for { + select { + case <-t.Dying(): + w.logger.Infof("wineventlog is dying") + return nil + default: + evts, err := w.getXMLEvents(w.evtConfig, publisherCache, handle, 500) + if errors.Is(err, windows.ERROR_NO_MORE_ITEMS) { + log.Info("No more items") + break OUTER_LOOP + } else if err != nil { + return fmt.Errorf("getXMLEvents failed: %v", err) + } + w.logger.Debugf("Got %d events", len(evts)) + for _, evt := range evts { + w.logger.Tracef("Event: %s", evt) + if w.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": w.name}).Inc() + } + l := types.Line{} + l.Raw = evt + l.Module = w.GetName() + l.Labels = w.config.Labels + l.Time = time.Now() + l.Src = w.name + l.Process = true + csevt := types.MakeEvent(w.config.UseTimeMachine, types.LOG, true) + csevt.Line = l + out <- csevt + } + } + } + return nil } @@ -325,7 +459,7 @@ func (w *WinEventLogSource) CanRun() error { return nil } -func (w *WinEventLogSource) StreamingAcquisition(out chan types.Event, t *tomb.Tomb) error { +func (w *WinEventLogSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/wineventlog/streaming") return w.getEvents(out, t) diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go similarity index 71% rename from pkg/acquisition/modules/wineventlog/wineventlog_test.go rename to pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index 2ea0e365be5..2f6fe15450f 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -3,7 +3,7 @@ package wineventlogacquisition import ( - "runtime" + "context" "testing" "time" @@ -18,9 +18,8 @@ import ( ) func TestBadConfiguration(t *testing.T) { - if runtime.GOOS != "windows" { - 
t.Skip("Skipping test on non-windows OS") - } + exprhelpers.Init(nil) + tests := []struct { config string expectedErr string @@ -63,9 +62,8 @@ xpath_query: test`, } func TestQueryBuilder(t *testing.T) { - if runtime.GOOS != "windows" { - t.Skip("Skipping test on non-windows OS") - } + exprhelpers.Init(nil) + tests := []struct { config string expectedQuery string @@ -129,9 +127,8 @@ event_level: bla`, } func TestLiveAcquisition(t *testing.T) { - if runtime.GOOS != "windows" { - t.Skip("Skipping test on non-windows OS") - } + exprhelpers.Init(nil) + ctx := context.Background() tests := []struct { config string @@ -180,7 +177,6 @@ event_ids: subLogger := log.WithField("type", "windowseventlog") evthandler, err := eventlog.Open("Application") - if err != nil { t.Fatalf("failed to open event log: %s", err) } @@ -190,7 +186,7 @@ event_ids: c := make(chan types.Event) f := WinEventLogSource{} f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - f.StreamingAcquisition(c, to) + f.StreamingAcquisition(ctx, c, to) time.Sleep(time.Second) lines := test.expectedLines go func() { @@ -225,3 +221,83 @@ event_ids: to.Wait() } } + +func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() + tests := []struct { + name string + dsn string + expectedCount int + expectedErr string + expectedConfigureErr string + }{ + { + name: "non-existing file", + dsn: `wineventlog://foo.evtx`, + expectedCount: 0, + expectedErr: "The system cannot find the file specified.", + }, + { + name: "empty DSN", + dsn: `wineventlog://`, + expectedCount: 0, + expectedConfigureErr: "empty wineventlog:// DSN", + }, + { + name: "existing file", + dsn: `wineventlog://test_files/Setup.evtx`, + expectedCount: 24, + expectedErr: "", + }, + { + name: "filter on event_id", + dsn: `wineventlog://test_files/Setup.evtx?event_id=2`, + expectedCount: 1, + }, + { + name: "filter on event_id", + dsn: `wineventlog://test_files/Setup.evtx?event_id=2&event_id=3`, + expectedCount: 24, + }, + } 
+ + exprhelpers.Init(nil) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lineCount := 0 + to := &tomb.Tomb{} + c := make(chan types.Event) + f := WinEventLogSource{} + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + + if test.expectedConfigureErr != "" { + assert.Contains(t, err.Error(), test.expectedConfigureErr) + return + } + + require.NoError(t, err) + + go func() { + for { + select { + case <-c: + lineCount++ + case <-to.Dying(): + return + } + } + }() + + err = f.OneShotAcquisition(ctx, c, to) + if test.expectedErr != "" { + assert.Contains(t, err.Error(), test.expectedErr) + } else { + require.NoError(t, err) + + time.Sleep(2 * time.Second) + assert.Equal(t, test.expectedCount, lineCount) + } + }) + } +} diff --git a/pkg/acquisition/s3.go b/pkg/acquisition/s3.go new file mode 100644 index 00000000000..73343b0408d --- /dev/null +++ b/pkg/acquisition/s3.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_s3 + +package acquisition + +import ( + s3acquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/s3" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("s3", func() DataSource { return &s3acquisition.S3Source{} }) +} diff --git a/pkg/acquisition/syslog.go b/pkg/acquisition/syslog.go new file mode 100644 index 00000000000..f62cc23b916 --- /dev/null +++ b/pkg/acquisition/syslog.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_syslog + +package acquisition + +import ( + syslogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/syslog" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("syslog", func() DataSource { return &syslogacquisition.SyslogSource{} }) +} diff --git a/pkg/acquisition/wineventlog.go b/pkg/acquisition/wineventlog.go new file mode 100644 index 00000000000..0c4889a3f5c --- /dev/null +++ b/pkg/acquisition/wineventlog.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_wineventlog + +package 
acquisition + +import ( + wineventlogacquisition "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/wineventlog" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("wineventlog", func() DataSource { return &wineventlogacquisition.WinEventLogSource{} }) +} diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index c502def32cd..1b7d1e20018 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -3,6 +3,7 @@ package alertcontext import ( "encoding/json" "fmt" + "net/http" "slices" "strconv" @@ -30,9 +31,12 @@ type Context struct { func ValidateContextExpr(key string, expressions []string) error { for _, expression := range expressions { - _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{ + "evt": &types.Event{}, + "match": &types.MatchedRule{}, + "req": &http.Request{}})...) if err != nil { - return fmt.Errorf("compilation of '%s' failed: %v", expression, err) + return fmt.Errorf("compilation of '%s' failed: %w", expression, err) } } @@ -72,9 +76,12 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { } for _, value := range values { - valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{ + "evt": &types.Event{}, + "match": &types.MatchedRule{}, + "req": &http.Request{}})...) 
if err != nil { - return fmt.Errorf("compilation of '%s' context value failed: %v", value, err) + return fmt.Errorf("compilation of '%s' context value failed: %w", value, err) } alertContext.ContextToSendCompiled[key] = append(alertContext.ContextToSendCompiled[key], valueCompiled) @@ -85,6 +92,32 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { return nil } +// Truncate the context map to fit in the context value length +func TruncateContextMap(contextMap map[string][]string, contextValueLen int) ([]*models.MetaItems0, []error) { + metas := make([]*models.MetaItems0, 0) + errors := make([]error, 0) + + for key, values := range contextMap { + if len(values) == 0 { + continue + } + + valueStr, err := TruncateContext(values, alertContext.ContextValueLen) + if err != nil { + errors = append(errors, fmt.Errorf("error truncating content for %s: %w", key, err)) + continue + } + + meta := models.MetaItems0{ + Key: key, + Value: valueStr, + } + metas = append(metas, &meta) + } + return metas, errors +} + +// Truncate an individual []string to fit in the context value length func TruncateContext(values []string, contextValueLen int) (string, error) { valueByte, err := json.Marshal(values) if err != nil { @@ -116,61 +149,102 @@ func TruncateContext(values []string, contextValueLen int) (string, error) { return ret, nil } -func EventToContext(events []types.Event) (models.Meta, []error) { +func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *http.Request, tmpContext map[string][]string) []error { + var errors []error - metas := make([]*models.MetaItems0, 0) - tmpContext := make(map[string][]string) + //if we're evaluating context for appsec event, match and request will be present. + //otherwise, only evt will be. 
+ if match == nil { + match = types.NewMatchedRule() + } + if request == nil { + request = &http.Request{} + } - for _, evt := range events { - for key, values := range alertContext.ContextToSendCompiled { - if _, ok := tmpContext[key]; !ok { - tmpContext[key] = make([]string, 0) - } + for key, values := range alertContext.ContextToSendCompiled { - for _, value := range values { - var val string + if _, ok := tmpContext[key]; !ok { + tmpContext[key] = make([]string, 0) + } - output, err := expr.Run(value, map[string]interface{}{"evt": evt}) - if err != nil { - errors = append(errors, fmt.Errorf("failed to get value for %s : %v", key, err)) - continue - } + for _, value := range values { + var val string - switch out := output.(type) { - case string: - val = out - case int: - val = strconv.Itoa(out) - default: - errors = append(errors, fmt.Errorf("unexpected return type for %s : %T", key, output)) - continue + output, err := expr.Run(value, map[string]interface{}{"match": match, "evt": evt, "req": request}) + if err != nil { + errors = append(errors, fmt.Errorf("failed to get value for %s: %w", key, err)) + continue + } + switch out := output.(type) { + case string: + val = out + if val != "" && !slices.Contains(tmpContext[key], val) { + tmpContext[key] = append(tmpContext[key], val) } - + case []string: + for _, v := range out { + if v != "" && !slices.Contains(tmpContext[key], v) { + tmpContext[key] = append(tmpContext[key], v) + } + } + case int: + val = strconv.Itoa(out) + if val != "" && !slices.Contains(tmpContext[key], val) { + tmpContext[key] = append(tmpContext[key], val) + } + case []int: + for _, v := range out { + val = strconv.Itoa(v) + if val != "" && !slices.Contains(tmpContext[key], val) { + tmpContext[key] = append(tmpContext[key], val) + } + } + default: + val := fmt.Sprintf("%v", output) if val != "" && !slices.Contains(tmpContext[key], val) { tmpContext[key] = append(tmpContext[key], val) } } } } + return errors +} - for key, values := range 
tmpContext { - if len(values) == 0 { - continue - } +// Iterate over the individual appsec matched rules to create the needed alert context. +func AppsecEventToContext(event types.AppsecEvent, request *http.Request) (models.Meta, []error) { + var errors []error - valueStr, err := TruncateContext(values, alertContext.ContextValueLen) - if err != nil { - log.Warningf(err.Error()) - } + tmpContext := make(map[string][]string) - meta := models.MetaItems0{ - Key: key, - Value: valueStr, - } - metas = append(metas, &meta) + evt := types.MakeEvent(false, types.LOG, false) + for _, matched_rule := range event.MatchedRules { + tmpErrors := EvalAlertContextRules(evt, &matched_rule, request, tmpContext) + errors = append(errors, tmpErrors...) } + metas, truncErrors := TruncateContextMap(tmpContext, alertContext.ContextValueLen) + errors = append(errors, truncErrors...) + + ret := models.Meta(metas) + + return ret, errors +} + +// Iterate over the individual events to create the needed alert context. +func EventToContext(events []types.Event) (models.Meta, []error) { + var errors []error + + tmpContext := make(map[string][]string) + + for _, evt := range events { + tmpErrors := EvalAlertContextRules(evt, nil, nil, tmpContext) + errors = append(errors, tmpErrors...) + } + + metas, truncErrors := TruncateContextMap(tmpContext, alertContext.ContextValueLen) + errors = append(errors, truncErrors...) 
+ ret := models.Meta(metas) return ret, errors diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index c111d1bbcfb..dc752ba8b09 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -2,6 +2,7 @@ package alertcontext import ( "fmt" + "net/http" "testing" "github.com/stretchr/testify/assert" @@ -9,6 +10,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestNewAlertContext(t *testing.T) { @@ -200,3 +202,163 @@ func TestEventToContext(t *testing.T) { assert.ElementsMatch(t, test.expectedResult, metas) } } + +func TestValidateContextExpr(t *testing.T) { + tests := []struct { + name string + key string + exprs []string + expectedErr *string + }{ + { + name: "basic config", + key: "source_ip", + exprs: []string{ + "evt.Parsed.source_ip", + }, + expectedErr: nil, + }, + { + name: "basic config with non existent field", + key: "source_ip", + exprs: []string{ + "evt.invalid.source_ip", + }, + expectedErr: ptr.Of("compilation of 'evt.invalid.source_ip' failed: type types.Event has no field invalid"), + }, + } + for _, test := range tests { + fmt.Printf("Running test '%s'\n", test.name) + err := ValidateContextExpr(test.key, test.exprs) + if test.expectedErr == nil { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, *test.expectedErr) + } + } +} + +func TestAppsecEventToContext(t *testing.T) { + + tests := []struct { + name string + contextToSend map[string][]string + match types.AppsecEvent + req *http.Request + expectedResult models.Meta + expectedErrLen int + }{ + { + name: "basic test on match", + contextToSend: map[string][]string{ + "id": {"match.id"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{}, + expectedResult: []*models.MetaItems0{ + { + Key: "id", + Value: "[\"test\"]", + }, + 
}, + expectedErrLen: 0, + }, + { + name: "basic test on req", + contextToSend: map[string][]string{ + "ua": {"req.UserAgent()"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{ + Header: map[string][]string{ + "User-Agent": {"test"}, + }, + }, + expectedResult: []*models.MetaItems0{ + { + Key: "ua", + Value: "[\"test\"]", + }, + }, + expectedErrLen: 0, + }, + { + name: "test on req -> []string", + contextToSend: map[string][]string{ + "foobarxx": {"req.Header.Values('Foobar')"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{ + Header: map[string][]string{ + "User-Agent": {"test"}, + "Foobar": {"test1", "test2"}, + }, + }, + expectedResult: []*models.MetaItems0{ + { + Key: "foobarxx", + Value: "[\"test1\",\"test2\"]", + }, + }, + expectedErrLen: 0, + }, + { + name: "test on type int", + contextToSend: map[string][]string{ + "foobarxx": {"len(req.Header.Values('Foobar'))"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{ + Header: map[string][]string{ + "User-Agent": {"test"}, + "Foobar": {"test1", "test2"}, + }, + }, + expectedResult: []*models.MetaItems0{ + { + Key: "foobarxx", + Value: "[\"2\"]", + }, + }, + expectedErrLen: 0, + }, + } + + for _, test := range tests { + //reset cache + alertContext = Context{} + //compile + if err := NewAlertContext(test.contextToSend, 100); err != nil { + t.Fatalf("failed to compile %s: %s", test.name, err) + } + //run + + metas, errors := AppsecEventToContext(test.match, test.req) + assert.Len(t, errors, test.expectedErrLen) + assert.ElementsMatch(t, test.expectedResult, metas) + } +} diff --git a/pkg/alertcontext/config.go b/pkg/alertcontext/config.go index 21d16db3972..6ef877619e4 100644 --- a/pkg/alertcontext/config.go +++ b/pkg/alertcontext/config.go @@ -98,20 +98,14 @@ func 
addContextFromFile(toSend map[string][]string, filePath string) error { return nil } - // LoadConsoleContext loads the context from the hub (if provided) and the file console_context_path. func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error { c.Crowdsec.ContextToSend = make(map[string][]string, 0) if hub != nil { - items, err := hub.GetInstalledItemsByType(cwhub.CONTEXTS) - if err != nil { - return err - } - - for _, item := range items { + for _, item := range hub.GetInstalledByType(cwhub.CONTEXTS, true) { // context in item files goes under the key 'context' - if err = addContextFromItem(c.Crowdsec.ContextToSend, item); err != nil { + if err := addContextFromItem(c.Crowdsec.ContextToSend, item); err != nil { return err } } @@ -139,7 +133,7 @@ func LoadConsoleContext(c *csconfig.Config, hub *cwhub.Hub) error { feedback, err := json.Marshal(c.Crowdsec.ContextToSend) if err != nil { - return fmt.Errorf("marshaling console context: %s", err) + return fmt.Errorf("serializing console context: %s", err) } log.Debugf("console context to send: %s", feedback) diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 12ef2d295f4..0d1ff41685f 100644 --- a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -14,7 +14,6 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -35,7 +34,6 @@ func TestAlertsListAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -180,16 +178,16 @@ func TestAlertsListAsMachine(t *testing.T) { }, } - //log.Debugf("data : -> %s", spew.Sdump(alerts)) - //log.Debugf("resp : -> %s", spew.Sdump(resp)) - //log.Debugf("expected : -> %s", spew.Sdump(expected)) - //first one returns data + // 
log.Debugf("data : -> %s", spew.Sdump(alerts)) + // log.Debugf("resp : -> %s", spew.Sdump(resp)) + // log.Debugf("expected : -> %s", spew.Sdump(expected)) + // first one returns data alerts, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, expected, *alerts) - //this one doesn't + // this one doesn't filter := AlertsListOpts{IPEquals: ptr.Of("1.2.3.4")} alerts, resp, err = client.Alerts.List(context.Background(), filter) @@ -214,7 +212,6 @@ func TestAlertsGetAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -360,7 +357,7 @@ func TestAlertsGetAsMachine(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *alerts) - //fail + // fail _, _, err = client.Alerts.GetByID(context.Background(), 2) cstest.RequireErrorMessage(t, err, "API error: object not found") } @@ -388,7 +385,6 @@ func TestAlertsCreateAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -430,7 +426,6 @@ func TestAlertsDeleteAsMachine(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index b202e382842..193486ff065 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -2,6 +2,7 @@ package apiclient import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -30,15 +31,17 @@ type JWTTransport struct { // Transport is the underlying HTTP transport to use when making requests. // It will default to http.DefaultTransport if nil. 
Transport http.RoundTripper - UpdateScenario func() ([]string, error) + UpdateScenario func(context.Context) ([]string, error) refreshTokenMutex sync.Mutex } func (t *JWTTransport) refreshJwtToken() error { var err error + ctx := context.TODO() + if t.UpdateScenario != nil { - t.Scenarios, err = t.UpdateScenario() + t.Scenarios, err = t.UpdateScenario(ctx) if err != nil { return fmt.Errorf("can't update scenario list: %w", err) } diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index 3e887149a98..d22c9394014 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -14,7 +14,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -36,11 +35,13 @@ func initBasicMuxMock(t *testing.T, mux *http.ServeMux, path string) { mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) newStr := buf.String() var payload BasicMockPayload + err := json.Unmarshal([]byte(newStr), &payload) if err != nil || payload.MachineID == "" || payload.Password == "" { log.Printf("Bad payload") @@ -48,8 +49,8 @@ func initBasicMuxMock(t *testing.T, mux *http.ServeMux, path string) { } var responseBody string - responseCode, hasFoundErrorMock := loginsForMockErrorCases[payload.MachineID] + responseCode, hasFoundErrorMock := loginsForMockErrorCases[payload.MachineID] if !hasFoundErrorMock { responseCode = http.StatusOK responseBody = `{"code":200,"expire":"2029-11-30T14:14:24+01:00","token":"toto"}` @@ -76,7 +77,7 @@ func TestWatcherRegister(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} 
initBasicMuxMock(t, mux, "/watchers") log.Printf("URL is %s", urlx) @@ -87,12 +88,13 @@ func TestWatcherRegister(t *testing.T) { clientconfig := Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", } - client, err := RegisterClient(&clientconfig, &http.Client{}) + ctx := context.Background() + + client, err := RegisterClient(ctx, &clientconfig, &http.Client{}) require.NoError(t, err) log.Printf("->%T", client) @@ -102,7 +104,7 @@ func TestWatcherRegister(t *testing.T) { for _, errorCodeToTest := range errorCodesToTest { clientconfig.MachineID = fmt.Sprintf("login_%d", errorCodeToTest) - client, err = RegisterClient(&clientconfig, &http.Client{}) + client, err = RegisterClient(ctx, &clientconfig, &http.Client{}) require.Nil(t, client, "nil expected for the response code %d", errorCodeToTest) require.Error(t, err, "error expected for the response code %d", errorCodeToTest) } @@ -113,7 +115,7 @@ func TestWatcherAuth(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} initBasicMuxMock(t, mux, "/watchers/login") log.Printf("URL is %s", urlx) @@ -121,11 +123,10 @@ func TestWatcherAuth(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok auth + // ok auth clientConfig := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -161,7 +162,7 @@ func TestWatcherAuth(t *testing.T) { bodyBytes, err := io.ReadAll(resp.Response.Body) require.NoError(t, err) - log.Printf(string(bodyBytes)) + log.Print(string(bodyBytes)) t.Fatalf("The AuthenticateWatcher function should have returned an error for the response code %d", errorCodeToTest) } @@ 
-174,7 +175,7 @@ func TestWatcherUnregister(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} + // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") @@ -184,6 +185,7 @@ func TestWatcherUnregister(t *testing.T) { mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) @@ -206,7 +208,6 @@ func TestWatcherUnregister(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, @@ -229,6 +230,7 @@ func TestWatcherEnroll(t *testing.T) { mux.HandleFunc("/watchers/enroll", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") + buf := new(bytes.Buffer) _, _ = buf.ReadFrom(r.Body) newStr := buf.String() @@ -260,7 +262,6 @@ func TestWatcherEnroll(t *testing.T) { mycfg := &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", Scenarios: []string{"crowdsecurity/test"}, diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 2cb68f597f3..47d97a28344 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -12,6 +12,7 @@ import ( "github.com/golang-jwt/jwt/v4" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" "github.com/crowdsecurity/crowdsec/pkg/models" ) @@ -66,11 +67,16 @@ type service struct { } func NewClient(config *Config) (*ApiClient, error) { + userAgent := config.UserAgent + if userAgent == "" { + userAgent = useragent.Default() + } + t := &JWTTransport{ MachineID: &config.MachineID, Password: &config.Password, Scenarios: config.Scenarios, 
- UserAgent: config.UserAgent, + UserAgent: userAgent, VersionPrefix: config.VersionPrefix, UpdateScenario: config.UpdateScenario, RetryConfig: NewRetryConfig( @@ -105,7 +111,7 @@ func NewClient(config *Config) (*ApiClient, error) { t.Transport.(*http.Transport).TLSClientConfig = &tlsconfig } - c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} + c := &ApiClient{client: t.Client(), BaseURL: baseURL, UserAgent: userAgent, URLPrefix: config.VersionPrefix, PapiURL: config.PapiURL} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) @@ -143,6 +149,10 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt } } + if userAgent == "" { + userAgent = useragent.Default() + } + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: userAgent, URLPrefix: prefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) @@ -157,7 +167,7 @@ func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *htt return c, nil } -func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { +func RegisterClient(ctx context.Context, config *Config, client *http.Client) (*ApiClient, error) { transport, baseURL := createTransport(config.URL) if client == nil { @@ -178,15 +188,20 @@ func RegisterClient(config *Config, client *http.Client) (*ApiClient, error) { client.Transport = transport } - c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: config.UserAgent, URLPrefix: config.VersionPrefix} + userAgent := config.UserAgent + if userAgent == "" { + userAgent = useragent.Default() + } + + c := &ApiClient{client: client, BaseURL: baseURL, UserAgent: userAgent, URLPrefix: config.VersionPrefix} c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) c.Auth = (*AuthService)(&c.common) - resp, err := 
c.Auth.RegisterWatcher(context.Background(), models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) - /*if we have http status, return it*/ + resp, err := c.Auth.RegisterWatcher(ctx, models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password, RegistrationToken: config.RegistrationToken}) if err != nil { + /*if we have http status, return it*/ if resp != nil && resp.Response != nil { return nil, fmt.Errorf("api register (%s) http %s: %w", c.BaseURL, resp.Response.Status, err) } diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go index 0240618f535..eeca929ea6e 100644 --- a/pkg/apiclient/client_http.go +++ b/pkg/apiclient/client_http.go @@ -61,9 +61,7 @@ func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (* req.Header.Add("User-Agent", c.UserAgent) } - if log.GetLevel() >= log.DebugLevel { - log.Debugf("[URL] %s %s", req.Method, req.URL) - } + log.Debugf("[URL] %s %s", req.Method, req.URL) resp, err := c.client.Do(req) if resp != nil && resp.Body != nil { diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index 4bdfe1d0da5..45cd8410a8e 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -10,22 +10,19 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestNewRequestInvalid(t *testing.T) { mux, urlx, teardown := setup() defer teardown() - //missing slash in uri + // missing slash in uri apiURL, err := url.Parse(urlx) require.NoError(t, err) client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -57,7 +54,6 @@ func TestNewRequestTimeout(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: 
"test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index bd83e512afc..d1f58f33ad2 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -17,8 +17,6 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) /*this is a ripoff of google/go-github approach : @@ -97,7 +95,6 @@ func TestNewClientOk(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -134,7 +131,6 @@ func TestNewClientOk_UnixSocket(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -172,7 +168,6 @@ func TestNewClientKo(t *testing.T) { client, err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -247,10 +242,11 @@ func TestNewClientRegisterKO(t *testing.T) { apiURL, err := url.Parse("http://127.0.0.1:4242/") require.NoError(t, err) - _, err = RegisterClient(&Config{ + ctx := context.Background() + + _, err = RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -278,10 +274,11 @@ func TestNewClientRegisterOK(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - client, err := RegisterClient(&Config{ + ctx := context.Background() + + client, err := RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -311,10 +308,11 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { 
t.Fatalf("parsing api url: %s", apiURL) } - client, err := RegisterClient(&Config{ + ctx := context.Background() + + client, err := RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) @@ -341,10 +339,11 @@ func TestNewClientBadAnswer(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - _, err = RegisterClient(&Config{ + ctx := context.Background() + + _, err = RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }, &http.Client{}) diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go index b08452e74e0..29a8acf185e 100644 --- a/pkg/apiclient/config.go +++ b/pkg/apiclient/config.go @@ -1,6 +1,7 @@ package apiclient import ( + "context" "net/url" "github.com/go-openapi/strfmt" @@ -15,5 +16,5 @@ type Config struct { VersionPrefix string UserAgent string RegistrationToken string - UpdateScenario func() ([]string, error) + UpdateScenario func(context.Context) ([]string, error) } diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go index 388a870f999..fea2f39072d 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -31,6 +31,8 @@ type DecisionsListOpts struct { type DecisionsStreamOpts struct { Startup bool `url:"startup,omitempty"` + CommunityPull bool `url:"community_pull"` + AdditionalPull bool `url:"additional_pull"` Scopes string `url:"scopes,omitempty"` ScenariosContaining string `url:"scenarios_containing,omitempty"` ScenariosNotContaining string `url:"scenarios_not_containing,omitempty"` @@ -43,6 +45,17 @@ func (o *DecisionsStreamOpts) addQueryParamsToURL(url string) (string, error) { return "", err } + //Those 2 are a bit different + //They default to true, and we only want to include them if they are false + + if params.Get("community_pull") == 
"true" { + params.Del("community_pull") + } + + if params.Get("additional_pull") == "true" { + params.Del("additional_pull") + } + return fmt.Sprintf("%s?%s", url, params.Encode()), nil } @@ -144,7 +157,7 @@ func (s *DecisionsService) FetchV3Decisions(ctx context.Context, url string) (*m partialDecisions := make([]*models.Decision, len(decisionsGroup.Decisions)) for idx, decision := range decisionsGroup.Decisions { - decision := decision // fix exportloopref linter message + decision := decision //nolint:copyloopvar // fix exportloopref linter message partialDecisions[idx] = &models.Decision{ Scenario: &scenarioDeleted, Scope: decisionsGroup.Scope, diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index 6942cfc9d85..942d14689ff 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -4,6 +4,7 @@ import ( "context" "net/http" "net/url" + "strings" "testing" log "github.com/sirupsen/logrus" @@ -13,7 +14,6 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/modelscapi" ) @@ -26,6 +26,7 @@ func TestDecisionsList(t *testing.T) { mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "GET") + if r.URL.RawQuery == "ip=1.2.3.4" { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) @@ -34,14 +35,14 @@ func TestDecisionsList(t *testing.T) { } else { w.WriteHeader(http.StatusOK) w.Write([]byte(`null`)) - //no results + // no results } }) apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -68,7 +69,7 @@ func TestDecisionsList(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) - 
//Empty return + // Empty return decisionsFilter = DecisionsListOpts{IPEquals: ptr.Of("1.2.3.5")} decisions, resp, err = newcli.Decisions.List(context.Background(), decisionsFilter) require.NoError(t, err) @@ -85,8 +86,9 @@ func TestDecisionsStream(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { - if r.URL.RawQuery == "startup=true" { + if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) } else { @@ -99,6 +101,7 @@ func TestDecisionsStream(t *testing.T) { mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodDelete) + if r.Method == http.MethodDelete { w.WriteHeader(http.StatusOK) } @@ -107,7 +110,7 @@ func TestDecisionsStream(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -134,14 +137,14 @@ func TestDecisionsStream(t *testing.T) { assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) - //and second call, we get empty lists + // and second call, we get empty lists decisions, resp, err = newcli.Decisions.GetStream(context.Background(), DecisionsStreamOpts{Startup: false}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Empty(t, decisions.New) assert.Empty(t, decisions.Deleted) - //delete stream + // delete stream resp, err = newcli.Decisions.StopStream(context.Background()) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) @@ -156,8 
+159,9 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { - if r.URL.RawQuery == "startup=true" { + if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}],"new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}]}`)) } else { @@ -170,7 +174,7 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -220,6 +224,7 @@ func TestDecisionsStreamV3(t *testing.T) { mux.HandleFunc("/decisions/stream", func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) testMethod(t, r, http.MethodGet) + if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], @@ -231,7 +236,7 @@ func TestDecisionsStreamV3(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -305,7 +310,7 @@ func TestDecisionsFromBlocklist(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - //ok answer + // ok answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -391,7 +396,7 @@ func TestDeleteDecisions(t *testing.T) { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) w.Write([]byte(`{"nbDeleted":"1"}`)) - //w.Write([]byte(`{"message":"0 deleted alerts"}`)) + // w.Write([]byte(`{"message":"0 deleted alerts"}`)) }) log.Printf("URL is %s", urlx) @@ -402,7 +407,6 @@ func TestDeleteDecisions(t *testing.T) { client, 
err := NewClient(&Config{ MachineID: "test_login", Password: "test_password", - UserAgent: cwversion.UserAgent(), URL: apiURL, VersionPrefix: "v1", }) @@ -426,6 +430,8 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { Scopes string ScenariosContaining string ScenariosNotContaining string + CommunityPull bool + AdditionalPull bool } tests := []struct { @@ -437,11 +443,17 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { { name: "no filter", expected: baseURLString + "?", + fields: fields{ + CommunityPull: true, + AdditionalPull: true, + }, }, { name: "startup=true", fields: fields{ - Startup: true, + Startup: true, + CommunityPull: true, + AdditionalPull: true, }, expected: baseURLString + "?startup=true", }, @@ -452,9 +464,19 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { Scopes: "ip,range", ScenariosContaining: "ssh", ScenariosNotContaining: "bf", + CommunityPull: true, + AdditionalPull: true, }, expected: baseURLString + "?scenarios_containing=ssh&scenarios_not_containing=bf&scopes=ip%2Crange&startup=true", }, + { + name: "pull options", + fields: fields{ + CommunityPull: false, + AdditionalPull: false, + }, + expected: baseURLString + "?additional_pull=false&community_pull=false", + }, } for _, tt := range tests { @@ -464,10 +486,13 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { Scopes: tt.fields.Scopes, ScenariosContaining: tt.fields.ScenariosContaining, ScenariosNotContaining: tt.fields.ScenariosNotContaining, + CommunityPull: tt.fields.CommunityPull, + AdditionalPull: tt.fields.AdditionalPull, } got, err := o.addQueryParamsToURL(baseURLString) cstest.RequireErrorContains(t, err, tt.expectedErr) + if tt.expectedErr != "" { return } @@ -502,7 +527,6 @@ func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { // client, err := NewClient(&Config{ // MachineID: "test_login", // Password: "test_password", -// UserAgent: cwversion.UserAgent(), // URL: apiURL, // VersionPrefix: 
"v1", // }) diff --git a/pkg/apiclient/resperr.go b/pkg/apiclient/resperr.go index 00689147332..1b0786f9882 100644 --- a/pkg/apiclient/resperr.go +++ b/pkg/apiclient/resperr.go @@ -19,7 +19,7 @@ func (e *ErrorResponse) Error() string { message := ptr.OrEmpty(e.Message) errors := "" - if len(e.Errors) > 0 { + if e.Errors != "" { errors = fmt.Sprintf(" (%s)", e.Errors) } @@ -51,7 +51,7 @@ func CheckResponse(r *http.Response) error { // try to unmarshal and if there are no 'message' or 'errors' fields, display the body as is, // the API is following a different convention err := json.Unmarshal(data, ret) - if err != nil || (ret.Message == nil && len(ret.Errors) == 0) { + if err != nil || (ret.Message == nil && ret.Errors == "") { ret.Message = ptr.Of(fmt.Sprintf("http code %d, response: %s", r.StatusCode, string(data))) return ret } diff --git a/pkg/apiclient/useragent/useragent.go b/pkg/apiclient/useragent/useragent.go new file mode 100644 index 00000000000..5a62ce1ac06 --- /dev/null +++ b/pkg/apiclient/useragent/useragent.go @@ -0,0 +1,9 @@ +package useragent + +import ( + "github.com/crowdsecurity/go-cs-lib/version" +) + +func Default() string { + return "crowdsec/" + version.String() + "-" + version.System +} diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 891eb3a8f4a..d86234e4813 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "fmt" "net/http" @@ -25,11 +26,11 @@ type LAPI struct { DBConfig *csconfig.DatabaseCfg } -func SetupLAPITest(t *testing.T) LAPI { +func SetupLAPITest(t *testing.T, ctx context.Context) LAPI { t.Helper() - router, loginResp, config := InitMachineTest(t) + router, loginResp, config := InitMachineTest(t, ctx) - APIKey := CreateTestBouncer(t, config.API.Server.DbConfig) + APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) return LAPI{ router: router, @@ -39,14 +40,14 @@ func SetupLAPITest(t *testing.T) 
LAPI { } } -func (l *LAPI) InsertAlertFromFile(t *testing.T, path string) *httptest.ResponseRecorder { +func (l *LAPI) InsertAlertFromFile(t *testing.T, ctx context.Context, path string) *httptest.ResponseRecorder { alertReader := GetAlertReaderFromFile(t, path) - return l.RecordResponse(t, http.MethodPost, "/v1/alerts", alertReader, "password") + return l.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertReader, "password") } -func (l *LAPI) RecordResponse(t *testing.T, verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { +func (l *LAPI) RecordResponse(t *testing.T, ctx context.Context, verb string, url string, body *strings.Reader, authType string) *httptest.ResponseRecorder { w := httptest.NewRecorder() - req, err := http.NewRequest(verb, url, body) + req, err := http.NewRequestWithContext(ctx, verb, url, body) require.NoError(t, err) switch authType { @@ -58,24 +59,27 @@ func (l *LAPI) RecordResponse(t *testing.T, verb string, url string, body *strin t.Fatal("auth type not supported") } + // Port is required for gin to properly parse the client IP + req.RemoteAddr = "127.0.0.1:1234" + l.router.ServeHTTP(w, req) return w } -func InitMachineTest(t *testing.T) (*gin.Engine, models.WatcherAuthResponse, csconfig.Config) { - router, config := NewAPITest(t) - loginResp := LoginToTestAPI(t, router, config) +func InitMachineTest(t *testing.T, ctx context.Context) (*gin.Engine, models.WatcherAuthResponse, csconfig.Config) { + router, config := NewAPITest(t, ctx) + loginResp := LoginToTestAPI(t, ctx, router, config) return router, loginResp, config } -func LoginToTestAPI(t *testing.T, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { - body := CreateTestMachine(t, router, "") - ValidateMachine(t, "test", config.API.Server.DbConfig) +func LoginToTestAPI(t *testing.T, ctx context.Context, router *gin.Engine, config csconfig.Config) models.WatcherAuthResponse { + body := CreateTestMachine(t, ctx, router, 
"") + ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -92,50 +96,55 @@ func AddAuthHeaders(request *http.Request, authResponse models.WatcherAuthRespon } func TestSimulatedAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk+simul.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk+simul.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_minibulk+simul.json") - //exclude decision in simulation mode + // exclude decision in simulation mode - w := lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", alertContent, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) - //include decision in simulation mode + // include decision in simulation mode - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", alertContent, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", alertContent, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) } func TestCreateAlert(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // 
Create Alert with invalid format - w := lapi.RecordResponse(t, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") assert.Equal(t, 400, w.Code) assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create Alert with invalid input alertContent := GetAlertReaderFromFile(t, "./tests/invalidAlert_sample.json") - w = lapi.RecordResponse(t, http.MethodPost, "/v1/alerts", alertContent, "password") + w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) + assert.Equal(t, + `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, + w.Body.String()) // Create Valid Alert - w = lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + w = lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assert.Equal(t, 201, w.Code) assert.Equal(t, `["1"]`, w.Body.String()) } func TestCreateAlertChannels(t *testing.T) { - apiServer, config := NewAPIServer(t) + ctx := context.Background() + apiServer, config := NewAPIServer(t, ctx) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) apiServer.InitController() - loginResp := LoginToTestAPI(t, apiServer.router, config) + loginResp := LoginToTestAPI(t, ctx, apiServer.router, config) lapi := LAPI{router: apiServer.router, loginResp: loginResp} var ( @@ -151,221 +160,225 @@ func TestCreateAlertChannels(t *testing.T) { wg.Done() }() - lapi.InsertAlertFromFile(t, 
"./tests/alert_ssh-bf.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_ssh-bf.json") wg.Wait() assert.Len(t, pd.Alert.Decisions, 1) apiServer.Close() } func TestAlertListFilters(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_ssh-bf.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_ssh-bf.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_ssh-bf.json") - //bad filter + // bad filter - w := lapi.RecordResponse(t, "GET", "/v1/alerts?test=test", alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", alertContent, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) - //get without filters + // get without filters - w = lapi.RecordResponse(t, "GET", "/v1/alerts", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") assert.Equal(t, 200, w.Code) - //check alert and decision + // check alert and decision assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test decision_type filter (ok) + // test decision_type filter (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test decision_type filter (bad value) + // test decision_type filter (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?decision_type=ratata", emptyBody, 
"password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test scope (ok) + // test scope (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test scope (bad value) + // test scope (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test scenario (ok) + // test scenario (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test scenario (bad value) + // test scenario (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test ip (ok) + // test ip (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") 
assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test ip (bad value) + // test ip (bad value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test ip (invalid value) + // test ip (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) - //test range (ok) + // test range (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test range + // test range - w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test range (invalid value) + // test range (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?range=ratata", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=ratata", emptyBody, "password") 
assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) - //test since (ok) + // test since (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1h", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1h", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test since (ok but yields no results) + // test since (ok but yields no results) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1ns", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test since (invalid value) + // test since (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) - //test until (ok) + // test until (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1ns", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1ns", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test until (ok but no return) + // test until (ok but no return) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1m", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1m", emptyBody, "password") 
assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test until (invalid value) + // test until (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) - //test simulated (ok) + // test simulated (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=true", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test simulated (ok) + // test simulated (ok) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?simulated=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test has active decision + // test has active decision - w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) - //test has active decision + // test has active decision - w = lapi.RecordResponse(t, "GET", 
"/v1/alerts?has_active_decision=false", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Equal(t, "null", w.Body.String()) - //test has active decision (invalid value) + // test has active decision (invalid value) - w = lapi.RecordResponse(t, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) } func TestAlertBulkInsert(t *testing.T) { - lapi := SetupLAPITest(t) - //insert a bulk of 20 alerts to trigger bulk insert - lapi.InsertAlertFromFile(t, "./tests/alert_bulk.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + // insert a bulk of 20 alerts to trigger bulk insert + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_bulk.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_bulk.json") - w := lapi.RecordResponse(t, "GET", "/v1/alerts", alertContent, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", alertContent, "password") assert.Equal(t, 200, w.Code) } func TestListAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // List Alert with invalid filter - w := lapi.RecordResponse(t, "GET", "/v1/alerts?test=test", emptyBody, "password") + w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", emptyBody, "password") assert.Equal(t, 500, w.Code) assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // List Alert - w = lapi.RecordResponse(t, "GET", 
"/v1/alerts", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") assert.Equal(t, 200, w.Code) assert.Contains(t, w.Body.String(), "crowdsecurity/test") } func TestCreateAlertErrors(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) alertContent := GetAlertReaderFromFile(t, "./tests/alert_sample.json") - //test invalid bearer + // test invalid bearer w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/alerts", alertContent) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/alerts", alertContent) req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", "ratata")) lapi.router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - //test invalid bearer + // test invalid bearer w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/alerts", alertContent) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/alerts", alertContent) req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", lapi.loginResp.Token+"s")) lapi.router.ServeHTTP(w, req) @@ -373,12 +386,13 @@ func TestCreateAlertErrors(t *testing.T) { } func TestDeleteAlert(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Fail Delete Alert w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) @@ -387,7 +401,7 @@ func TestDeleteAlert(t *testing.T) { // Delete Alert w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodDelete, 
"/v1/alerts", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) @@ -396,12 +410,13 @@ func TestDeleteAlert(t *testing.T) { } func TestDeleteAlertByID(t *testing.T) { - lapi := SetupLAPITest(t) - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Fail Delete Alert w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) @@ -410,7 +425,7 @@ func TestDeleteAlertByID(t *testing.T) { // Delete Alert w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts/1", strings.NewReader("")) AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) @@ -419,12 +434,13 @@ func TestDeleteAlertByID(t *testing.T) { } func TestDeleteAlertTrustedIPS(t *testing.T) { + ctx := context.Background() cfg := LoadTestConfig(t) // IPv6 mocking doesn't seem to work. 
// cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24", "::"} cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24"} cfg.API.Server.ListenURI = "::8080" - server, err := NewServer(cfg.API.Server) + server, err := NewServer(ctx, cfg.API.Server) require.NoError(t, err) err = server.InitController() @@ -433,7 +449,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { router, err := server.Router() require.NoError(t, err) - loginResp := LoginToTestAPI(t, router, cfg) + loginResp := LoginToTestAPI(t, ctx, router, cfg) lapi := LAPI{ router: router, loginResp: loginResp, @@ -441,7 +457,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { assertAlertDeleteFailedFromIP := func(ip string) { w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, loginResp) req.RemoteAddr = ip + ":1234" @@ -453,7 +469,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { assertAlertDeletedFromIP := func(ip string) { w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodDelete, "/v1/alerts", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodDelete, "/v1/alerts", strings.NewReader("")) AddAuthHeaders(req, loginResp) req.RemoteAddr = ip + ":1234" @@ -462,17 +478,17 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) } - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeleteFailedFromIP("4.3.2.1") assertAlertDeletedFromIP("1.2.3.4") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("1.2.4.0") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") 
assertAlertDeletedFromIP("1.2.4.1") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("1.2.4.255") - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") assertAlertDeletedFromIP("127.0.0.1") } diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 883ff21298d..45c02c806e7 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "net/http" "net/http/httptest" "strings" @@ -10,36 +11,83 @@ import ( ) func TestAPIKey(t *testing.T) { - router, config := NewAPITest(t) + ctx := context.Background() + router, config := NewAPITest(t, ctx) - APIKey := CreateTestBouncer(t, config.API.Server.DbConfig) + APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) // Login with empty token w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) + req.RemoteAddr = "127.0.0.1:1234" router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden"}`, w.Body.String()) + assert.Equal(t, http.StatusForbidden, w.Code) + assert.JSONEq(t, `{"message":"access forbidden"}`, w.Body.String()) // Login with invalid token w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Api-Key", "a1b2c3d4e5f6") + req.RemoteAddr = "127.0.0.1:1234" router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden"}`, w.Body.String()) + assert.Equal(t, 
http.StatusForbidden, w.Code) + assert.JSONEq(t, `{"message":"access forbidden"}`, w.Body.String()) // Login with valid token w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodGet, "/v1/decisions", strings.NewReader("")) + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Api-Key", APIKey) + req.RemoteAddr = "127.0.0.1:1234" router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) + + // Login with valid token from another IP + w = httptest.NewRecorder() + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + req.RemoteAddr = "4.3.2.1:1234" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "null", w.Body.String()) + + // Make the requests multiple times to make sure we only create one + w = httptest.NewRecorder() + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + req.RemoteAddr = "4.3.2.1:1234" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "null", w.Body.String()) + + // Use the original bouncer again + w = httptest.NewRecorder() + req, _ = http.NewRequestWithContext(ctx, http.MethodGet, "/v1/decisions", strings.NewReader("")) + req.Header.Add("User-Agent", UserAgent) + req.Header.Add("X-Api-Key", APIKey) + req.RemoteAddr = "127.0.0.1:1234" + router.ServeHTTP(w, req) + + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "null", w.Body.String()) + + // Check if our second bouncer was properly created + bouncers := GetBouncers(t, config.API.Server.DbConfig) + + assert.Len(t, bouncers, 2) + assert.Equal(t, "test@4.3.2.1", 
bouncers[1].Name) + assert.Equal(t, bouncers[0].APIKey, bouncers[1].APIKey) + assert.Equal(t, bouncers[0].AuthType, bouncers[1].AuthType) + assert.False(t, bouncers[0].AutoCreated) + assert.True(t, bouncers[1].AutoCreated) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 5b850cbff0d..51a85b1ea23 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -23,7 +23,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" @@ -70,6 +69,10 @@ type apic struct { consoleConfig *csconfig.ConsoleConfig isPulling chan bool whitelists *csconfig.CapiWhitelist + + pullBlocklists bool + pullCommunity bool + shareSignals bool } // randomDuration returns a duration value between d-delta and d+delta @@ -83,10 +86,10 @@ func randomDuration(d time.Duration, delta time.Duration) time.Duration { return ret } -func (a *apic) FetchScenariosListFromDB() ([]string, error) { +func (a *apic) FetchScenariosListFromDB(ctx context.Context) ([]string, error) { scenarios := make([]string, 0) - machines, err := a.dbClient.ListMachines() + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, fmt.Errorf("while listing machines: %w", err) } @@ -175,7 +178,7 @@ func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool) return signal } -func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, apicWhitelist *csconfig.CapiWhitelist) (*apic, error) { +func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, apicWhitelist *csconfig.CapiWhitelist) (*apic, error) { var err error ret := &apic{ @@ -199,6 +202,9 @@ func NewAPIC(config 
*csconfig.OnlineApiClientCfg, dbClient *database.Client, con usageMetricsIntervalFirst: randomDuration(usageMetricsInterval, usageMetricsIntervalDelta), isPulling: make(chan bool, 1), whitelists: apicWhitelist, + pullBlocklists: *config.PullConfig.Blocklists, + pullCommunity: *config.PullConfig.Community, + shareSignals: *config.Sharing, } password := strfmt.Password(config.Credentials.Password) @@ -213,7 +219,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con return nil, fmt.Errorf("while parsing '%s': %w", config.Credentials.PapiURL, err) } - ret.scenarioList, err = ret.FetchScenariosListFromDB() + ret.scenarioList, err = ret.FetchScenariosListFromDB(ctx) if err != nil { return nil, fmt.Errorf("while fetching scenarios from db: %w", err) } @@ -221,7 +227,6 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ MachineID: config.Credentials.Login, Password: password, - UserAgent: cwversion.UserAgent(), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v3", @@ -234,12 +239,12 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con // The watcher will be authenticated by the RoundTripper the first time it will call CAPI // Explicit authentication will provoke a useless supplementary call to CAPI - scenarios, err := ret.FetchScenariosListFromDB() + scenarios, err := ret.FetchScenariosListFromDB(ctx) if err != nil { return ret, fmt.Errorf("get scenario in db: %w", err) } - authResp, _, err := ret.apiClient.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + authResp, _, err := ret.apiClient.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &config.Credentials.Login, Password: &password, Scenarios: scenarios, @@ -258,7 +263,7 @@ func NewAPIC(config *csconfig.OnlineApiClientCfg, dbClient *database.Client, con } // keep track of all alerts in cache and push it to CAPI every 
PushInterval. -func (a *apic) Push() error { +func (a *apic) Push(ctx context.Context) error { defer trace.CatchPanic("lapi/pushToAPIC") var cache models.AddSignalsRequest @@ -278,7 +283,7 @@ func (a *apic) Push() error { return nil } - go a.Send(&cache) + go a.Send(ctx, &cache) return nil case <-ticker.C: @@ -291,13 +296,13 @@ func (a *apic) Push() error { a.mu.Unlock() log.Infof("Signal push: %d signals to push", len(cacheCopy)) - go a.Send(&cacheCopy) + go a.Send(ctx, &cacheCopy) } case alerts := <-a.AlertsAddChan: var signals []*models.AddSignalsRequestItem for _, alert := range alerts { - if ok := shouldShareAlert(alert, a.consoleConfig); ok { + if ok := shouldShareAlert(alert, a.consoleConfig, a.shareSignals); ok { signals = append(signals, alertToSignal(alert, getScenarioTrustOfAlert(alert), *a.consoleConfig.ShareContext)) } } @@ -326,7 +331,13 @@ func getScenarioTrustOfAlert(alert *models.Alert) string { return scenarioTrust } -func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig) bool { +func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig, shareSignals bool) bool { + + if !shareSignals { + log.Debugf("sharing signals is disabled") + return false + } + if *alert.Simulated { log.Debugf("simulation enabled for alert (id:%d), will not be sent to CAPI", alert.ID) return false @@ -353,7 +364,7 @@ func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig return true } -func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { +func (a *apic) Send(ctx context.Context, cacheOrig *models.AddSignalsRequest) { /*we do have a problem with this : The apic.Push background routine reads from alertToPush chan. 
This chan is filled by Controller.CreateAlert @@ -377,7 +388,7 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { for { if pageEnd >= len(cache) { send = cache[pageStart:] - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -391,7 +402,7 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { } send = cache[pageStart:pageEnd] - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -406,13 +417,13 @@ func (a *apic) Send(cacheOrig *models.AddSignalsRequest) { } } -func (a *apic) CAPIPullIsOld() (bool, error) { +func (a *apic) CAPIPullIsOld(ctx context.Context) (bool, error) { /*only pull community blocklist if it's older than 1h30 */ alerts := a.dbClient.Ent.Alert.Query() alerts = alerts.Where(alert.HasDecisionsWith(decision.OriginEQ(database.CapiMachineID))) alerts = alerts.Where(alert.CreatedAtGTE(time.Now().UTC().Add(-time.Duration(1*time.Hour + 30*time.Minute)))) //nolint:unconvert - count, err := alerts.Count(a.dbClient.CTX) + count, err := alerts.Count(ctx) if err != nil { return false, fmt.Errorf("while looking for CAPI alert: %w", err) } @@ -425,37 +436,7 @@ func (a *apic) CAPIPullIsOld() (bool, error) { return true, nil } -func (a *apic) HandleDeletedDecisions(deletedDecisions []*models.Decision, deleteCounters map[string]map[string]int) (int, error) { - nbDeleted := 0 - - for _, decision := range deletedDecisions { - filter := map[string][]string{ - "value": {*decision.Value}, - "origin": {*decision.Origin}, - } - if strings.ToLower(*decision.Scope) != "ip" { - filter["type"] = []string{*decision.Type} - filter["scopes"] = []string{*decision.Scope} - } - - dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(filter) - if err != nil { - return 0, fmt.Errorf("expiring decisions error: %w", err) - } - - dbCliDel, err := strconv.Atoi(dbCliRet) - if err 
!= nil { - return 0, fmt.Errorf("converting db ret %d: %w", dbCliDel, err) - } - - updateCounterForDecision(deleteCounters, decision.Origin, decision.Scenario, dbCliDel) - nbDeleted += dbCliDel - } - - return nbDeleted, nil -} - -func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { +func (a *apic) HandleDeletedDecisionsV3(ctx context.Context, deletedDecisions []*modelscapi.GetDecisionsStreamResponseDeletedItem, deleteCounters map[string]map[string]int) (int, error) { var nbDeleted int for _, decisions := range deletedDecisions { @@ -470,7 +451,7 @@ func (a *apic) HandleDeletedDecisionsV3(deletedDecisions []*modelscapi.GetDecisi filter["scopes"] = []string{*scope} } - dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(filter) + dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return 0, fmt.Errorf("expiring decisions error: %w", err) } @@ -616,7 +597,7 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio // we receive a list of decisions and links for blocklist and we need to create a list of alerts : // one alert for "community blocklist" // one alert per list we're subscribed to -func (a *apic) PullTop(forcePull bool) error { +func (a *apic) PullTop(ctx context.Context, forcePull bool) error { var err error // A mutex with TryLock would be a bit simpler @@ -631,7 +612,7 @@ func (a *apic) PullTop(forcePull bool) error { } if !forcePull { - if lastPullIsOld, err := a.CAPIPullIsOld(); err != nil { + if lastPullIsOld, err := a.CAPIPullIsOld(ctx); err != nil { return err } else if !lastPullIsOld { return nil @@ -640,7 +621,7 @@ func (a *apic) PullTop(forcePull bool) error { log.Debug("Acquiring lock for pullCAPI") - err = a.dbClient.AcquirePullCAPILock() + err = a.dbClient.AcquirePullCAPILock(ctx) if a.dbClient.IsLocked(err) { log.Info("PullCAPI is already running, skipping") return 
nil @@ -650,14 +631,16 @@ func (a *apic) PullTop(forcePull bool) error { defer func() { log.Debug("Releasing lock for pullCAPI") - if err := a.dbClient.ReleasePullCAPILock(); err != nil { + if err := a.dbClient.ReleasePullCAPILock(ctx); err != nil { log.Errorf("while releasing lock: %v", err) } }() log.Infof("Starting community-blocklist update") - data, _, err := a.apiClient.Decisions.GetStreamV3(context.Background(), apiclient.DecisionsStreamOpts{Startup: a.startup}) + log.Debugf("Community pull: %t | Blocklist pull: %t", a.pullCommunity, a.pullBlocklists) + + data, _, err := a.apiClient.Decisions.GetStreamV3(ctx, apiclient.DecisionsStreamOpts{Startup: a.startup, CommunityPull: a.pullCommunity, AdditionalPull: a.pullBlocklists}) if err != nil { return fmt.Errorf("get stream: %w", err) } @@ -675,34 +658,37 @@ func (a *apic) PullTop(forcePull bool) error { addCounters, deleteCounters := makeAddAndDeleteCounters() // process deleted decisions - nbDeleted, err := a.HandleDeletedDecisionsV3(data.Deleted, deleteCounters) + nbDeleted, err := a.HandleDeletedDecisionsV3(ctx, data.Deleted, deleteCounters) if err != nil { return err } log.Printf("capi/community-blocklist : %d explicit deletions", nbDeleted) - if len(data.New) == 0 { - log.Infof("capi/community-blocklist : received 0 new entries (expected if you just installed crowdsec)") - return nil - } + if len(data.New) > 0 { + // create one alert for community blocklist using the first decision + decisions := a.apiClient.Decisions.GetDecisionsFromGroups(data.New) + // apply APIC specific whitelists + decisions = a.ApplyApicWhitelists(decisions) - // create one alert for community blocklist using the first decision - decisions := a.apiClient.Decisions.GetDecisionsFromGroups(data.New) - // apply APIC specific whitelists - decisions = a.ApplyApicWhitelists(decisions) + alert := createAlertForDecision(decisions[0]) + alertsFromCapi := []*models.Alert{alert} + alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, 
decisions, addCounters) - alert := createAlertForDecision(decisions[0]) - alertsFromCapi := []*models.Alert{alert} - alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) - - err = a.SaveAlerts(alertsFromCapi, addCounters, deleteCounters) - if err != nil { - return fmt.Errorf("while saving alerts: %w", err) + err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, deleteCounters) + if err != nil { + return fmt.Errorf("while saving alerts: %w", err) + } + } else { + if a.pullCommunity { + log.Info("capi/community-blocklist : received 0 new entries (expected if you just installed crowdsec)") + } else { + log.Debug("capi/community-blocklist : community blocklist pull is disabled") + } } // update blocklists - if err := a.UpdateBlocklists(data.Links, addCounters, forcePull); err != nil { + if err := a.UpdateBlocklists(ctx, data.Links, addCounters, forcePull); err != nil { return fmt.Errorf("while updating blocklists: %w", err) } @@ -710,9 +696,9 @@ func (a *apic) PullTop(forcePull bool) error { } // we receive a link to a blocklist, we pull the content of the blocklist and we create one alert -func (a *apic) PullBlocklist(blocklist *modelscapi.BlocklistLink, forcePull bool) error { +func (a *apic) PullBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink, forcePull bool) error { addCounters, _ := makeAddAndDeleteCounters() - if err := a.UpdateBlocklists(&modelscapi.GetDecisionsStreamResponseLinks{ + if err := a.UpdateBlocklists(ctx, &modelscapi.GetDecisionsStreamResponseLinks{ Blocklists: []*modelscapi.BlocklistLink{blocklist}, }, addCounters, forcePull); err != nil { return fmt.Errorf("while pulling blocklist: %w", err) @@ -765,7 +751,7 @@ func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decis return decisions[:outIdx] } -func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) error { +func (a *apic) SaveAlerts(ctx 
context.Context, alertsFromCapi []*models.Alert, addCounters map[string]map[string]int, deleteCounters map[string]map[string]int) error { for _, alert := range alertsFromCapi { setAlertScenario(alert, addCounters, deleteCounters) log.Debugf("%s has %d decisions", *alert.Source.Scope, len(alert.Decisions)) @@ -774,7 +760,7 @@ func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string log.Warningf("sqlite is not using WAL mode, LAPI might become unresponsive when inserting the community blocklist") } - alertID, inserted, deleted, err := a.dbClient.UpdateCommunityBlocklist(alert) + alertID, inserted, deleted, err := a.dbClient.UpdateCommunityBlocklist(ctx, alert) if err != nil { return fmt.Errorf("while saving alert from %s: %w", *alert.Source.Scope, err) } @@ -785,13 +771,13 @@ func (a *apic) SaveAlerts(alertsFromCapi []*models.Alert, addCounters map[string return nil } -func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bool, error) { +func (a *apic) ShouldForcePullBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink) (bool, error) { // we should force pull if the blocklist decisions are about to expire or there's no decision in the db alertQuery := a.dbClient.Ent.Alert.Query() alertQuery.Where(alert.SourceScopeEQ(fmt.Sprintf("%s:%s", types.ListOrigin, *blocklist.Name))) alertQuery.Order(ent.Desc(alert.FieldCreatedAt)) - alertInstance, err := alertQuery.First(context.Background()) + alertInstance, err := alertQuery.First(ctx) if err != nil { if ent.IsNotFound(err) { log.Debugf("no alert found for %s, force refresh", *blocklist.Name) @@ -804,7 +790,7 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo decisionQuery := a.dbClient.Ent.Decision.Query() decisionQuery.Where(decision.HasOwnerWith(alert.IDEQ(alertInstance.ID))) - firstDecision, err := decisionQuery.First(context.Background()) + firstDecision, err := decisionQuery.First(ctx) if err != nil { if ent.IsNotFound(err) { 
log.Debugf("no decision found for %s, force refresh", *blocklist.Name) @@ -822,7 +808,7 @@ func (a *apic) ShouldForcePullBlocklist(blocklist *modelscapi.BlocklistLink) (bo return false, nil } -func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscapi.BlocklistLink, addCounters map[string]map[string]int, forcePull bool) error { +func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, blocklist *modelscapi.BlocklistLink, addCounters map[string]map[string]int, forcePull bool) error { if blocklist.Scope == nil { log.Warningf("blocklist has no scope") return nil @@ -834,7 +820,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap } if !forcePull { - _forcePull, err := a.ShouldForcePullBlocklist(blocklist) + _forcePull, err := a.ShouldForcePullBlocklist(ctx, blocklist) if err != nil { return fmt.Errorf("while checking if we should force pull blocklist %s: %w", *blocklist.Name, err) } @@ -850,13 +836,13 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap ) if !forcePull { - lastPullTimestamp, err = a.dbClient.GetConfigItem(blocklistConfigItemName) + lastPullTimestamp, err = a.dbClient.GetConfigItem(ctx, blocklistConfigItemName) if err != nil { return fmt.Errorf("while getting last pull timestamp for blocklist %s: %w", *blocklist.Name, err) } } - decisions, hasChanged, err := client.Decisions.GetDecisionsFromBlocklist(context.Background(), blocklist, lastPullTimestamp) + decisions, hasChanged, err := client.Decisions.GetDecisionsFromBlocklist(ctx, blocklist, lastPullTimestamp) if err != nil { return fmt.Errorf("while getting decisions from blocklist %s: %w", *blocklist.Name, err) } @@ -871,7 +857,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap return nil } - err = a.dbClient.SetConfigItem(blocklistConfigItemName, time.Now().UTC().Format(http.TimeFormat)) + err = a.dbClient.SetConfigItem(ctx, blocklistConfigItemName, 
time.Now().UTC().Format(http.TimeFormat)) if err != nil { return fmt.Errorf("while setting last pull timestamp for blocklist %s: %w", *blocklist.Name, err) } @@ -886,7 +872,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap alertsFromCapi := []*models.Alert{alert} alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) - err = a.SaveAlerts(alertsFromCapi, addCounters, nil) + err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, nil) if err != nil { return fmt.Errorf("while saving alert from blocklist %s: %w", *blocklist.Name, err) } @@ -894,7 +880,7 @@ func (a *apic) updateBlocklist(client *apiclient.ApiClient, blocklist *modelscap return nil } -func (a *apic) UpdateBlocklists(links *modelscapi.GetDecisionsStreamResponseLinks, addCounters map[string]map[string]int, forcePull bool) error { +func (a *apic) UpdateBlocklists(ctx context.Context, links *modelscapi.GetDecisionsStreamResponseLinks, addCounters map[string]map[string]int, forcePull bool) error { if links == nil { return nil } @@ -910,7 +896,7 @@ func (a *apic) UpdateBlocklists(links *modelscapi.GetDecisionsStreamResponseLink } for _, blocklist := range links.Blocklists { - if err := a.updateBlocklist(defaultClient, blocklist, addCounters, forcePull); err != nil { + if err := a.updateBlocklist(ctx, defaultClient, blocklist, addCounters, forcePull); err != nil { return err } } @@ -933,13 +919,13 @@ func setAlertScenario(alert *models.Alert, addCounters map[string]map[string]int } } -func (a *apic) Pull() error { +func (a *apic) Pull(ctx context.Context) error { defer trace.CatchPanic("lapi/pullFromAPIC") toldOnce := false for { - scenario, err := a.FetchScenariosListFromDB() + scenario, err := a.FetchScenariosListFromDB(ctx) if err != nil { log.Errorf("unable to fetch scenarios from db: %s", err) } @@ -957,7 +943,7 @@ func (a *apic) Pull() error { time.Sleep(1 * time.Second) } - if err := a.PullTop(false); err != nil { + if err := a.PullTop(ctx, 
false); err != nil { log.Errorf("capi pull top: %s", err) } @@ -969,7 +955,7 @@ func (a *apic) Pull() error { case <-ticker.C: ticker.Reset(a.pullInterval) - if err := a.PullTop(false); err != nil { + if err := a.PullTop(ctx, false); err != nil { log.Errorf("capi pull top: %s", err) continue } diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index 176984f1ad6..fe0dfd55821 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -23,22 +23,22 @@ type dbPayload struct { Metrics []*models.DetailedMetrics `json:"metrics"` } -func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { +func (a *apic) GetUsageMetrics(ctx context.Context) (*models.AllMetrics, []int, error) { allMetrics := &models.AllMetrics{} metricsIds := make([]int, 0) - lps, err := a.dbClient.ListMachines() + lps, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, nil, err } - bouncers, err := a.dbClient.ListBouncers() + bouncers, err := a.dbClient.ListBouncers(ctx) if err != nil { return nil, nil, err } for _, bouncer := range bouncers { - dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(bouncer.Name) + dbMetrics, err := a.dbClient.GetBouncerUsageMetricsByName(ctx, bouncer.Name) if err != nil { log.Errorf("unable to get bouncer usage metrics: %s", err) continue @@ -70,7 +70,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) if err != nil { - log.Errorf("unable to unmarshal bouncer metric (%s)", err) + log.Errorf("unable to parse bouncer metric (%s)", err) continue } @@ -81,7 +81,7 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { } for _, lp := range lps { - dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(lp.MachineId) + dbMetrics, err := a.dbClient.GetLPUsageMetricsByMachineID(ctx, lp.MachineId) if err != nil { log.Errorf("unable to get LP usage metrics: %s", err) continue @@ -132,7 +132,7 @@ func (a 
*apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { err := json.Unmarshal([]byte(dbMetric.Payload), dbPayload) if err != nil { - log.Errorf("unable to unmarshal log processor metric (%s)", err) + log.Errorf("unable to parse log processor metric (%s)", err) continue } @@ -181,12 +181,12 @@ func (a *apic) GetUsageMetrics() (*models.AllMetrics, []int, error) { return allMetrics, metricsIds, nil } -func (a *apic) MarkUsageMetricsAsSent(ids []int) error { - return a.dbClient.MarkUsageMetricsAsSent(ids) +func (a *apic) MarkUsageMetricsAsSent(ctx context.Context, ids []int) error { + return a.dbClient.MarkUsageMetricsAsSent(ctx, ids) } -func (a *apic) GetMetrics() (*models.Metrics, error) { - machines, err := a.dbClient.ListMachines() +func (a *apic) GetMetrics(ctx context.Context) (*models.Metrics, error) { + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, err } @@ -202,7 +202,7 @@ func (a *apic) GetMetrics() (*models.Metrics, error) { } } - bouncers, err := a.dbClient.ListBouncers() + bouncers, err := a.dbClient.ListBouncers(ctx) if err != nil { return nil, err } @@ -230,8 +230,8 @@ func (a *apic) GetMetrics() (*models.Metrics, error) { }, nil } -func (a *apic) fetchMachineIDs() ([]string, error) { - machines, err := a.dbClient.ListMachines() +func (a *apic) fetchMachineIDs(ctx context.Context) ([]string, error) { + machines, err := a.dbClient.ListMachines(ctx) if err != nil { return nil, err } @@ -251,7 +251,7 @@ func (a *apic) fetchMachineIDs() ([]string, error) { // Metrics are sent at start, then at the randomized metricsIntervalFirst, // then at regular metricsInterval. If a change is detected in the list // of machines, the next metrics are sent immediately. 
-func (a *apic) SendMetrics(stop chan (bool)) { +func (a *apic) SendMetrics(ctx context.Context, stop chan (bool)) { defer trace.CatchPanic("lapi/metricsToAPIC") // verify the list of machines every interval @@ -275,7 +275,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { machineIDs := []string{} reloadMachineIDs := func() { - ids, err := a.fetchMachineIDs() + ids, err := a.fetchMachineIDs(ctx) if err != nil { log.Debugf("unable to get machines (%s), will retry", err) @@ -311,7 +311,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { case <-metTicker.C: metTicker.Stop() - metrics, err := a.GetMetrics() + metrics, err := a.GetMetrics(ctx) if err != nil { log.Errorf("unable to get metrics (%s)", err) } @@ -319,7 +319,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { if metrics != nil { log.Info("capi metrics: sending") - _, _, err = a.apiClient.Metrics.Add(context.Background(), metrics) + _, _, err = a.apiClient.Metrics.Add(ctx, metrics) if err != nil { log.Errorf("capi metrics: failed: %s", err) } @@ -337,7 +337,7 @@ func (a *apic) SendMetrics(stop chan (bool)) { } } -func (a *apic) SendUsageMetrics() { +func (a *apic) SendUsageMetrics(ctx context.Context) { defer trace.CatchPanic("lapi/usageMetricsToAPIC") firstRun := true @@ -358,16 +358,21 @@ func (a *apic) SendUsageMetrics() { ticker.Reset(a.usageMetricsInterval) } - metrics, metricsId, err := a.GetUsageMetrics() + metrics, metricsId, err := a.GetUsageMetrics(ctx) if err != nil { log.Errorf("unable to get usage metrics: %s", err) continue } - _, resp, err := a.apiClient.UsageMetrics.Add(context.Background(), metrics) + _, resp, err := a.apiClient.UsageMetrics.Add(ctx, metrics) if err != nil { log.Errorf("unable to send usage metrics: %s", err) + if resp == nil || resp.Response == nil { + // Most likely a transient network error, it will be retried later + continue + } + if resp.Response.StatusCode >= http.StatusBadRequest && resp.Response.StatusCode != http.StatusUnprocessableEntity { // In case of 422, mark 
the metrics as sent anyway, the API did not like what we sent, // and it's unlikely we'll be able to fix it @@ -375,7 +380,7 @@ func (a *apic) SendUsageMetrics() { } } - err = a.MarkUsageMetricsAsSent(metricsId) + err = a.MarkUsageMetricsAsSent(ctx, metricsId) if err != nil { log.Errorf("unable to mark usage metrics as sent: %s", err) continue diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index d1e48ac90a3..d81af03f710 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -11,10 +11,11 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) func TestAPICSendMetrics(t *testing.T) { + ctx := context.Background() + tests := []struct { name string duration time.Duration @@ -35,7 +36,7 @@ func TestAPICSendMetrics(t *testing.T) { metricsInterval: time.Millisecond * 20, expectedCalls: 5, setUp: func(api *apic) { - api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) + api.dbClient.Ent.Machine.Delete().ExecX(ctx) api.dbClient.Ent.Machine.Create(). SetMachineId("1234"). SetPassword(testPassword.String()). @@ -43,16 +44,16 @@ func TestAPICSendMetrics(t *testing.T) { SetScenarios("crowdsecurity/test"). SetLastPush(time.Time{}). SetUpdatedAt(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) - api.dbClient.Ent.Bouncer.Delete().ExecX(context.Background()) + api.dbClient.Ent.Bouncer.Delete().ExecX(ctx) api.dbClient.Ent.Bouncer.Create(). SetIPAddress("1.2.3.6"). SetName("someBouncer"). SetAPIKey("foobar"). SetRevoked(false). SetLastPull(time.Time{}). 
- ExecX(context.Background()) + ExecX(ctx) }, }, } @@ -70,12 +71,12 @@ func TestAPICSendMetrics(t *testing.T) { apiClient, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) - api := getAPIC(t) + api := getAPIC(t, ctx) api.pushInterval = time.Millisecond api.pushIntervalFirst = time.Millisecond api.apiClient = apiClient @@ -87,7 +88,7 @@ func TestAPICSendMetrics(t *testing.T) { httpmock.ZeroCallCounters() - go api.SendMetrics(stop) + go api.SendMetrics(ctx, stop) time.Sleep(tc.duration) stop <- true diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 546a236251f..a8fbb40c4fa 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -26,7 +26,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/machine" @@ -35,11 +34,9 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func getDBClient(t *testing.T) *database.Client { +func getDBClient(t *testing.T, ctx context.Context) *database.Client { t.Helper() - ctx := context.Background() - dbPath, err := os.CreateTemp("", "*sqlite") require.NoError(t, err) dbClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ @@ -52,9 +49,9 @@ func getDBClient(t *testing.T) *database.Client { return dbClient } -func getAPIC(t *testing.T) *apic { +func getAPIC(t *testing.T, ctx context.Context) *apic { t.Helper() - dbClient := getDBClient(t) + dbClient := getDBClient(t, ctx) return &apic{ AlertsAddChan: make(chan []*models.Alert), @@ -72,7 +69,10 @@ func getAPIC(t *testing.T) *apic { ShareCustomScenarios: ptr.Of(false), ShareContext: ptr.Of(false), }, - isPulling: make(chan bool, 1), + isPulling: make(chan bool, 1), + shareSignals: true, + pullBlocklists: 
true, + pullCommunity: true, } } @@ -85,8 +85,8 @@ func absDiff(a int, b int) int { return c } -func assertTotalDecisionCount(t *testing.T, dbClient *database.Client, count int) { - d := dbClient.Ent.Decision.Query().AllX(context.Background()) +func assertTotalDecisionCount(t *testing.T, ctx context.Context, dbClient *database.Client, count int) { + d := dbClient.Ent.Decision.Query().AllX(ctx) assert.Len(t, d, count) } @@ -112,9 +112,10 @@ func assertTotalAlertCount(t *testing.T, dbClient *database.Client, count int) { } func TestAPICCAPIPullIsOld(t *testing.T) { - api := getAPIC(t) + ctx := context.Background() + api := getAPIC(t, ctx) - isOld, err := api.CAPIPullIsOld() + isOld, err := api.CAPIPullIsOld(ctx) require.NoError(t, err) assert.True(t, isOld) @@ -125,7 +126,7 @@ func TestAPICCAPIPullIsOld(t *testing.T) { SetScope("Country"). SetValue("Blah"). SetOrigin(types.CAPIOrigin). - SaveX(context.Background()) + SaveX(ctx) api.dbClient.Ent.Alert.Create(). SetCreatedAt(time.Now()). @@ -133,15 +134,17 @@ func TestAPICCAPIPullIsOld(t *testing.T) { AddDecisions( decision, ). - SaveX(context.Background()) + SaveX(ctx) - isOld, err = api.CAPIPullIsOld() + isOld, err = api.CAPIPullIsOld(ctx) require.NoError(t, err) assert.False(t, isOld) } func TestAPICFetchScenariosListFromDB(t *testing.T) { + ctx := context.Background() + tests := []struct { name string machineIDsWithScenarios map[string]string @@ -166,21 +169,21 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - api := getAPIC(t) + api := getAPIC(t, ctx) for machineID, scenarios := range tc.machineIDsWithScenarios { api.dbClient.Ent.Machine.Create(). SetMachineId(machineID). SetPassword(testPassword.String()). SetIpAddress("1.2.3.4"). SetScenarios(scenarios). 
- ExecX(context.Background()) + ExecX(ctx) } - scenarios, err := api.FetchScenariosListFromDB() + scenarios, err := api.FetchScenariosListFromDB(ctx) require.NoError(t, err) for machineID := range tc.machineIDsWithScenarios { - api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(context.Background()) + api.dbClient.Ent.Machine.Delete().Where(machine.MachineIdEQ(machineID)).ExecX(ctx) } assert.ElementsMatch(t, tc.expectedScenarios, scenarios) @@ -189,6 +192,8 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { } func TestNewAPIC(t *testing.T) { + ctx := context.Background() + var testConfig *csconfig.OnlineApiClientCfg setConfig := func() { @@ -198,6 +203,11 @@ func TestNewAPIC(t *testing.T) { Login: "foo", Password: "bar", }, + Sharing: ptr.Of(true), + PullConfig: csconfig.CapiPullConfig{ + Community: ptr.Of(true), + Blocklists: ptr.Of(true), + }, } } @@ -216,7 +226,7 @@ func TestNewAPIC(t *testing.T) { name: "simple", action: func() {}, args: args{ - dbClient: getDBClient(t), + dbClient: getDBClient(t, ctx), consoleConfig: LoadTestConfig(t).API.Server.ConsoleConfig, }, }, @@ -224,7 +234,7 @@ func TestNewAPIC(t *testing.T) { name: "error in parsing URL", action: func() { testConfig.Credentials.URL = "foobar http://" }, args: args{ - dbClient: getDBClient(t), + dbClient: getDBClient(t, ctx), consoleConfig: LoadTestConfig(t).API.Server.ConsoleConfig, }, expectedErr: "first path segment in URL cannot contain colon", @@ -247,53 +257,18 @@ func TestNewAPIC(t *testing.T) { ), )) tc.action() - _, err := NewAPIC(testConfig, tc.args.dbClient, tc.args.consoleConfig, nil) + _, err := NewAPIC(ctx, testConfig, tc.args.dbClient, tc.args.consoleConfig, nil) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } } -func TestAPICHandleDeletedDecisions(t *testing.T) { - api := getAPIC(t) - _, deleteCounters := makeAddAndDeleteCounters() - - decision1 := api.dbClient.Ent.Decision.Create(). - SetUntil(time.Now().Add(time.Hour)). 
- SetScenario("crowdsec/test"). - SetType("ban"). - SetScope("IP"). - SetValue("1.2.3.4"). - SetOrigin(types.CAPIOrigin). - SaveX(context.Background()) - - api.dbClient.Ent.Decision.Create(). - SetUntil(time.Now().Add(time.Hour)). - SetScenario("crowdsec/test"). - SetType("ban"). - SetScope("IP"). - SetValue("1.2.3.4"). - SetOrigin(types.CAPIOrigin). - SaveX(context.Background()) - - assertTotalDecisionCount(t, api.dbClient, 2) - - nbDeleted, err := api.HandleDeletedDecisions([]*models.Decision{{ - Value: ptr.Of("1.2.3.4"), - Origin: ptr.Of(types.CAPIOrigin), - Type: &decision1.Type, - Scenario: ptr.Of("crowdsec/test"), - Scope: ptr.Of("IP"), - }}, deleteCounters) - - require.NoError(t, err) - assert.Equal(t, 2, nbDeleted) - assert.Equal(t, 2, deleteCounters[types.CAPIOrigin]["all"]) -} - func TestAPICGetMetrics(t *testing.T) { + ctx := context.Background() + cleanUp := func(api *apic) { - api.dbClient.Ent.Bouncer.Delete().ExecX(context.Background()) - api.dbClient.Ent.Machine.Delete().ExecX(context.Background()) + api.dbClient.Ent.Bouncer.Delete().ExecX(ctx) + api.dbClient.Ent.Machine.Delete().ExecX(ctx) } tests := []struct { name string @@ -352,7 +327,7 @@ func TestAPICGetMetrics(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - apiClient := getAPIC(t) + apiClient := getAPIC(t, ctx) cleanUp(apiClient) for i, machineID := range tc.machineIDs { @@ -363,7 +338,7 @@ func TestAPICGetMetrics(t *testing.T) { SetScenarios("crowdsecurity/test"). SetLastPush(time.Time{}). SetUpdatedAt(time.Time{}). - ExecX(context.Background()) + ExecX(ctx) } for i, bouncerName := range tc.bouncers { @@ -373,10 +348,10 @@ func TestAPICGetMetrics(t *testing.T) { SetAPIKey("foobar"). SetRevoked(false). SetLastPull(time.Time{}). 
- ExecX(context.Background()) + ExecX(ctx) } - foundMetrics, err := apiClient.GetMetrics() + foundMetrics, err := apiClient.GetMetrics(ctx) require.NoError(t, err) assert.Equal(t, tc.expectedMetric.Bouncers, foundMetrics.Bouncers) @@ -547,7 +522,8 @@ func TestFillAlertsWithDecisions(t *testing.T) { } func TestAPICWhitelists(t *testing.T) { - api := getAPIC(t) + ctx := context.Background() + api := getAPIC(t, ctx) // one whitelist on IP, one on CIDR api.whitelists = &csconfig.CapiWhitelist{} api.whitelists.Ips = append(api.whitelists.Ips, net.ParseIP("9.2.3.4"), net.ParseIP("7.2.3.4")) @@ -570,7 +546,7 @@ func TestAPICWhitelists(t *testing.T) { SetScenario("crowdsecurity/ssh-bf"). SetUntil(time.Now().Add(time.Hour)). ExecX(context.Background()) - assertTotalDecisionCount(t, api.dbClient, 1) + assertTotalDecisionCount(t, ctx, api.dbClient, 1) assertTotalValidDecisionCount(t, api.dbClient, 1) httpmock.Activate() @@ -676,16 +652,16 @@ func TestAPICWhitelists(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) - assertTotalDecisionCount(t, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing + assertTotalDecisionCount(t, ctx, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) @@ -733,7 +709,8 @@ func TestAPICWhitelists(t *testing.T) { } func TestAPICPullTop(t *testing.T) { - api := getAPIC(t) + ctx := context.Background() + api := getAPIC(t, ctx) api.dbClient.Ent.Decision.Create(). SetOrigin(types.CAPIOrigin). SetType("ban"). @@ -741,8 +718,8 @@ func TestAPICPullTop(t *testing.T) { SetScope("Ip"). SetScenario("crowdsecurity/ssh-bf"). SetUntil(time.Now().Add(time.Hour)). 
- ExecX(context.Background()) - assertTotalDecisionCount(t, api.dbClient, 1) + ExecX(ctx) + assertTotalDecisionCount(t, ctx, api.dbClient, 1) assertTotalValidDecisionCount(t, api.dbClient, 1) httpmock.Activate() @@ -817,23 +794,22 @@ func TestAPICPullTop(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) - assertTotalDecisionCount(t, api.dbClient, 5) + assertTotalDecisionCount(t, ctx, api.dbClient, 5) assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) validDecisions := api.dbClient.Ent.Decision.Query().Where( decision.UntilGT(time.Now())). - AllX(context.Background(), - ) + AllX(context.Background()) decisionScenarioFreq := make(map[string]int) alertScenario := make(map[string]int) @@ -858,8 +834,9 @@ func TestAPICPullTop(t *testing.T) { } func TestAPICPullTopBLCacheFirstCall(t *testing.T) { + ctx := context.Background() // no decision in db, no last modified parameter. 
- api := getAPIC(t) + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -905,17 +882,17 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) blocklistConfigItemName := "blocklist:blocklist1:last_pull" - lastPullTimestamp, err := api.dbClient.GetConfigItem(blocklistConfigItemName) + lastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) assert.NotEqual(t, "", *lastPullTimestamp) @@ -925,15 +902,16 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { return httpmock.NewStringResponse(304, ""), nil }) - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) - secondLastPullTimestamp, err := api.dbClient.GetConfigItem(blocklistConfigItemName) + secondLastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) assert.Equal(t, *lastPullTimestamp, *secondLastPullTimestamp) } func TestAPICPullTopBLCacheForceCall(t *testing.T) { - api := getAPIC(t) + ctx := context.Background() + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -997,18 +975,19 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) api.apiClient = apic - err = api.PullTop(false) + err = api.PullTop(ctx, false) require.NoError(t, err) } func TestAPICPullBlocklistCall(t *testing.T) { - api := getAPIC(t) + ctx := context.Background() + api := getAPIC(t, ctx) httpmock.Activate() defer httpmock.DeactivateAndReset() @@ -1024,13 +1003,13 @@ func TestAPICPullBlocklistCall(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) 
require.NoError(t, err) api.apiClient = apic - err = api.PullBlocklist(&modelscapi.BlocklistLink{ + err = api.PullBlocklist(ctx, &modelscapi.BlocklistLink{ URL: ptr.Of("http://api.crowdsec.net/blocklist1"), Name: ptr.Of("blocklist1"), Scope: ptr.Of("Ip"), @@ -1041,6 +1020,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { } func TestAPICPush(t *testing.T) { + ctx := context.Background() tests := []struct { name string alerts []*models.Alert @@ -1093,9 +1073,8 @@ func TestAPICPush(t *testing.T) { } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { - api := getAPIC(t) + api := getAPIC(t, ctx) api.pushInterval = time.Millisecond api.pushIntervalFirst = time.Millisecond url, err := url.ParseRequestURI("http://api.crowdsec.net/") @@ -1107,7 +1086,7 @@ func TestAPICPush(t *testing.T) { apic, err := apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -1116,14 +1095,16 @@ func TestAPICPush(t *testing.T) { httpmock.RegisterResponder("POST", "http://api.crowdsec.net/api/signals", httpmock.NewBytesResponder(200, []byte{})) + // capture the alerts to avoid datarace + alerts := tc.alerts go func() { - api.AlertsAddChan <- tc.alerts + api.AlertsAddChan <- alerts time.Sleep(time.Second) api.Shutdown() }() - err = api.Push() + err = api.Push(ctx) require.NoError(t, err) assert.Equal(t, tc.expectedCalls, httpmock.GetTotalCallCount()) }) @@ -1131,7 +1112,8 @@ func TestAPICPush(t *testing.T) { } func TestAPICPull(t *testing.T) { - api := getAPIC(t) + ctx := context.Background() + api := getAPIC(t, ctx) tests := []struct { name string setUp func() @@ -1159,7 +1141,7 @@ func TestAPICPull(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - api = getAPIC(t) + api = getAPIC(t, ctx) api.pullInterval = time.Millisecond api.pullIntervalFirst = time.Millisecond url, err := url.ParseRequestURI("http://api.crowdsec.net/") @@ -1171,7 +1153,7 @@ func TestAPICPull(t *testing.T) { apic, err 
:= apiclient.NewDefaultClient( url, "/api", - cwversion.UserAgent(), + "", nil, ) require.NoError(t, err) @@ -1201,7 +1183,7 @@ func TestAPICPull(t *testing.T) { go func() { logrus.SetOutput(&buf) - if err := api.Pull(); err != nil { + if err := api.Pull(ctx); err != nil { panic(err) } }() @@ -1210,7 +1192,7 @@ func TestAPICPull(t *testing.T) { time.Sleep(time.Millisecond * 500) logrus.SetOutput(os.Stderr) assert.Contains(t, buf.String(), tc.logContains) - assertTotalDecisionCount(t, api.dbClient, tc.expectedDecisionCount) + assertTotalDecisionCount(t, ctx, api.dbClient, tc.expectedDecisionCount) }) } } @@ -1219,6 +1201,7 @@ func TestShouldShareAlert(t *testing.T) { tests := []struct { name string consoleConfig *csconfig.ConsoleConfig + shareSignals bool alert *models.Alert expectedRet bool expectedTrust string @@ -1229,6 +1212,7 @@ func TestShouldShareAlert(t *testing.T) { ShareCustomScenarios: ptr.Of(true), }, alert: &models.Alert{Simulated: ptr.Of(false)}, + shareSignals: true, expectedRet: true, expectedTrust: "custom", }, @@ -1238,6 +1222,7 @@ func TestShouldShareAlert(t *testing.T) { ShareCustomScenarios: ptr.Of(false), }, alert: &models.Alert{Simulated: ptr.Of(false)}, + shareSignals: true, expectedRet: false, expectedTrust: "custom", }, @@ -1246,6 +1231,7 @@ func TestShouldShareAlert(t *testing.T) { consoleConfig: &csconfig.ConsoleConfig{ ShareManualDecisions: ptr.Of(true), }, + shareSignals: true, alert: &models.Alert{ Simulated: ptr.Of(false), Decisions: []*models.Decision{{Origin: ptr.Of(types.CscliOrigin)}}, @@ -1258,6 +1244,7 @@ func TestShouldShareAlert(t *testing.T) { consoleConfig: &csconfig.ConsoleConfig{ ShareManualDecisions: ptr.Of(false), }, + shareSignals: true, alert: &models.Alert{ Simulated: ptr.Of(false), Decisions: []*models.Decision{{Origin: ptr.Of(types.CscliOrigin)}}, @@ -1270,6 +1257,7 @@ func TestShouldShareAlert(t *testing.T) { consoleConfig: &csconfig.ConsoleConfig{ ShareTaintedScenarios: ptr.Of(true), }, + shareSignals: true, 
alert: &models.Alert{ Simulated: ptr.Of(false), ScenarioHash: ptr.Of("whateverHash"), @@ -1282,6 +1270,7 @@ func TestShouldShareAlert(t *testing.T) { consoleConfig: &csconfig.ConsoleConfig{ ShareTaintedScenarios: ptr.Of(false), }, + shareSignals: true, alert: &models.Alert{ Simulated: ptr.Of(false), ScenarioHash: ptr.Of("whateverHash"), @@ -1289,11 +1278,24 @@ func TestShouldShareAlert(t *testing.T) { expectedRet: false, expectedTrust: "tainted", }, + { + name: "manual alert should not be shared if global sharing is disabled", + consoleConfig: &csconfig.ConsoleConfig{ + ShareManualDecisions: ptr.Of(true), + }, + shareSignals: false, + alert: &models.Alert{ + Simulated: ptr.Of(false), + ScenarioHash: ptr.Of("whateverHash"), + }, + expectedRet: false, + expectedTrust: "manual", + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - ret := shouldShareAlert(tc.alert, tc.consoleConfig) + ret := shouldShareAlert(tc.alert, tc.consoleConfig, tc.shareSignals) assert.Equal(t, tc.expectedRet, ret) }) } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 31b31bcb82d..05f9150b037 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -46,20 +46,11 @@ type APIServer struct { consoleConfig *csconfig.ConsoleConfig } -func recoverFromPanic(c *gin.Context) { - err := recover() - if err == nil { - return - } - - // Check for a broken connection, as it is not really a - // condition that warrants a panic stack trace. 
- brokenPipe := false - +func isBrokenConnection(err any) bool { if ne, ok := err.(*net.OpError); ok { if se, ok := ne.Err.(*os.SyscallError); ok { if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { - brokenPipe = true + return true } } } @@ -79,11 +70,22 @@ func recoverFromPanic(c *gin.Context) { errors.Is(strErr, errClosedBody) || errors.Is(strErr, errHandlerComplete) || errors.Is(strErr, errStreamClosed) { - brokenPipe = true + return true } } - if brokenPipe { + return false +} + +func recoverFromPanic(c *gin.Context) { + err := recover() + if err == nil { + return + } + + // Check for a broken connection, as it is not really a + // condition that warrants a panic stack trace. + if isBrokenConnection(err) { log.Warningf("client %s disconnected: %s", c.ClientIP(), err) c.Abort() } else { @@ -159,18 +161,16 @@ func newGinLogger(config *csconfig.LocalApiServerCfg) (*log.Logger, string, erro // NewServer creates a LAPI server. // It sets up a gin router, a database client, and a controller. 
-func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { +func NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APIServer, error) { var flushScheduler *gocron.Scheduler - ctx := context.TODO() - dbClient, err := database.NewClient(ctx, config.DbConfig) if err != nil { return nil, fmt.Errorf("unable to init database client: %w", err) } if config.DbConfig.Flush != nil { - flushScheduler, err = dbClient.StartFlushScheduler(config.DbConfig.Flush) + flushScheduler, err = dbClient.StartFlushScheduler(ctx, config.DbConfig.Flush) if err != nil { return nil, err } @@ -229,7 +229,6 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { controller := &controllers.Controller{ DBClient: dbClient, - Ectx: ctx, Router: router, Profiles: config.Profiles, Log: clog, @@ -249,7 +248,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { if config.OnlineClient != nil && config.OnlineClient.Credentials != nil { log.Printf("Loading CAPI manager") - apiClient, err = NewAPIC(config.OnlineClient, dbClient, config.ConsoleConfig, config.CapiWhitelists) + apiClient, err = NewAPIC(ctx, config.OnlineClient, dbClient, config.ConsoleConfig, config.CapiWhitelists) if err != nil { return nil, err } @@ -258,7 +257,7 @@ func NewServer(config *csconfig.LocalApiServerCfg) (*APIServer, error) { controller.AlertsAddChan = apiClient.AlertsAddChan - if config.ConsoleConfig.IsPAPIEnabled() { + if config.ConsoleConfig.IsPAPIEnabled() && config.OnlineClient.Credentials.PapiURL != "" { if apiClient.apiClient.IsEnrolled() { log.Info("Machine is enrolled in the console, Loading PAPI Client") @@ -301,6 +300,72 @@ func (s *APIServer) Router() (*gin.Engine, error) { return s.router, nil } +func (s *APIServer) apicPush(ctx context.Context) error { + if err := s.apic.Push(ctx); err != nil { + log.Errorf("capi push: %s", err) + return err + } + + return nil +} + +func (s *APIServer) apicPull(ctx context.Context) error { + if err := 
s.apic.Pull(ctx); err != nil { + log.Errorf("capi pull: %s", err) + return err + } + + return nil +} + +func (s *APIServer) papiPull(ctx context.Context) error { + if err := s.papi.Pull(ctx); err != nil { + log.Errorf("papi pull: %s", err) + return err + } + + return nil +} + +func (s *APIServer) papiSync() error { + if err := s.papi.SyncDecisions(); err != nil { + log.Errorf("capi decisions sync: %s", err) + return err + } + + return nil +} + +func (s *APIServer) initAPIC(ctx context.Context) { + s.apic.pushTomb.Go(func() error { return s.apicPush(ctx) }) + s.apic.pullTomb.Go(func() error { return s.apicPull(ctx) }) + + // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios + if s.apic.apiClient.IsEnrolled() { + if s.consoleConfig.IsPAPIEnabled() && s.papi != nil { + if s.papi.URL != "" { + log.Info("Starting PAPI decision receiver") + s.papi.pullTomb.Go(func() error { return s.papiPull(ctx) }) + s.papi.syncTomb.Go(s.papiSync) + } else { + log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the console. 
Run cscli console enable console_management to add it.") + } + } else { + log.Warningf("Machine is not allowed to synchronize decisions, you can enable it with `cscli console enable console_management`") + } + } + + s.apic.metricsTomb.Go(func() error { + s.apic.SendMetrics(ctx, make(chan bool)) + return nil + }) + + s.apic.metricsTomb.Go(func() error { + s.apic.SendUsageMetrics(ctx) + return nil + }) +} + func (s *APIServer) Run(apiReady chan bool) error { defer trace.CatchPanic("lapi/runServer") @@ -315,64 +380,10 @@ func (s *APIServer) Run(apiReady chan bool) error { TLSConfig: tlsCfg, } - if s.apic != nil { - s.apic.pushTomb.Go(func() error { - if err := s.apic.Push(); err != nil { - log.Errorf("capi push: %s", err) - return err - } - - return nil - }) - - s.apic.pullTomb.Go(func() error { - if err := s.apic.Pull(); err != nil { - log.Errorf("capi pull: %s", err) - return err - } - - return nil - }) - - // csConfig.API.Server.ConsoleConfig.ShareCustomScenarios - if s.apic.apiClient.IsEnrolled() { - if s.consoleConfig.IsPAPIEnabled() { - if s.papi.URL != "" { - log.Info("Starting PAPI decision receiver") - s.papi.pullTomb.Go(func() error { - if err := s.papi.Pull(); err != nil { - log.Errorf("papi pull: %s", err) - return err - } - - return nil - }) - - s.papi.syncTomb.Go(func() error { - if err := s.papi.SyncDecisions(); err != nil { - log.Errorf("capi decisions sync: %s", err) - return err - } - - return nil - }) - } else { - log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the console. 
Run cscli console enable console_management to add it.") - } - } else { - log.Warningf("Machine is not allowed to synchronize decisions, you can enable it with `cscli console enable console_management`") - } - } - - s.apic.metricsTomb.Go(func() error { - s.apic.SendMetrics(make(chan bool)) - return nil - }) + ctx := context.TODO() - s.apic.metricsTomb.Go(func() error { - s.apic.SendUsageMetrics() - return nil - }) + if s.apic != nil { + s.initAPIC(ctx) } s.httpServerTomb.Go(func() error { diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index f48791ebcb8..cf4c91dedda 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -3,7 +3,6 @@ package apiserver import ( "context" "encoding/json" - "fmt" "net/http" "net/http/httptest" "os" @@ -25,6 +24,7 @@ import ( middlewares "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -41,7 +41,7 @@ var ( MachineID: &testMachineID, Password: &testPassword, } - UserAgent = fmt.Sprintf("crowdsec-test/%s", version.Version) + UserAgent = "crowdsec-test/" + version.Version emptyBody = strings.NewReader("") ) @@ -63,6 +63,7 @@ func LoadTestConfig(t *testing.T) csconfig.Config { } apiServerConfig := csconfig.LocalApiServerCfg{ ListenURI: "http://127.0.0.1:8080", + LogLevel: ptr.Of(log.DebugLevel), DbConfig: &dbconfig, ProfilesPath: "./tests/profiles.yaml", ConsoleConfig: &csconfig.ConsoleConfig{ @@ -135,12 +136,12 @@ func LoadTestConfigForwardedFor(t *testing.T) csconfig.Config { return config } -func NewAPIServer(t *testing.T) (*APIServer, csconfig.Config) { +func NewAPIServer(t *testing.T, ctx context.Context) (*APIServer, csconfig.Config) { config := LoadTestConfig(t) os.Remove("./ent") - apiServer, err := 
NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) require.NoError(t, err) log.Printf("Creating new API server") @@ -149,8 +150,8 @@ func NewAPIServer(t *testing.T) (*APIServer, csconfig.Config) { return apiServer, config } -func NewAPITest(t *testing.T) (*gin.Engine, csconfig.Config) { - apiServer, config := NewAPIServer(t) +func NewAPITest(t *testing.T, ctx context.Context) (*gin.Engine, csconfig.Config) { + apiServer, config := NewAPIServer(t, ctx) err := apiServer.InitController() require.NoError(t, err) @@ -161,12 +162,12 @@ func NewAPITest(t *testing.T) (*gin.Engine, csconfig.Config) { return router, config } -func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { +func NewAPITestForwardedFor(t *testing.T, ctx context.Context) (*gin.Engine, csconfig.Config) { config := LoadTestConfigForwardedFor(t) os.Remove("./ent") - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) require.NoError(t, err) err = apiServer.InitController() @@ -181,13 +182,11 @@ func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { return router, config } -func ValidateMachine(t *testing.T, machineID string, config *csconfig.DatabaseCfg) { - ctx := context.Background() - +func ValidateMachine(t *testing.T, ctx context.Context, machineID string, config *csconfig.DatabaseCfg) { dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) - err = dbClient.ValidateMachine(machineID) + err = dbClient.ValidateMachine(ctx, machineID) require.NoError(t, err) } @@ -197,7 +196,7 @@ func GetMachineIP(t *testing.T, machineID string, config *csconfig.DatabaseCfg) dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) - machines, err := dbClient.ListMachines() + machines, err := dbClient.ListMachines(ctx) require.NoError(t, err) for _, machine := range machines { @@ -209,6 +208,18 @@ func GetMachineIP(t *testing.T, machineID string, config 
*csconfig.DatabaseCfg) return "" } +func GetBouncers(t *testing.T, config *csconfig.DatabaseCfg) []*ent.Bouncer { + ctx := context.Background() + + dbClient, err := database.NewClient(ctx, config) + require.NoError(t, err) + + bouncers, err := dbClient.ListBouncers(ctx) + require.NoError(t, err) + + return bouncers +} + func GetAlertReaderFromFile(t *testing.T, path string) *strings.Reader { alertContentBytes, err := os.ReadFile(path) require.NoError(t, err) @@ -270,7 +281,7 @@ func readDecisionsStreamResp(t *testing.T, resp *httptest.ResponseRecorder) (map return response, resp.Code } -func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { +func CreateTestMachine(t *testing.T, ctx context.Context, router *gin.Engine, token string) string { regReq := MachineTest regReq.RegistrationToken = token b, err := json.Marshal(regReq) @@ -279,56 +290,57 @@ func CreateTestMachine(t *testing.T, router *gin.Engine, token string) string { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) return body } -func CreateTestBouncer(t *testing.T, config *csconfig.DatabaseCfg) string { - ctx := context.Background() - +func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.DatabaseCfg) string { dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) apiKey, err := middlewares.GenerateAPIKey(keyLength) require.NoError(t, err) - _, err = dbClient.CreateBouncer("test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType) + _, err = dbClient.CreateBouncer(ctx, "test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType, false) require.NoError(t, err) return apiKey } func TestWithWrongDBConfig(t *testing.T) { + ctx := context.Background() config := 
LoadTestConfig(t) config.API.Server.DbConfig.Type = "test" - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) cstest.RequireErrorContains(t, err, "unable to init database client: unknown database type 'test'") assert.Nil(t, apiServer) } func TestWithWrongFlushConfig(t *testing.T) { + ctx := context.Background() config := LoadTestConfig(t) maxItems := -1 config.API.Server.DbConfig.Flush.MaxItems = &maxItems - apiServer, err := NewServer(config.API.Server) + apiServer, err := NewServer(ctx, config.API.Server) cstest.RequireErrorContains(t, err, "max_items can't be zero or negative") assert.Nil(t, apiServer) } func TestUnknownPath(t *testing.T) { - router, _ := NewAPITest(t) + ctx := context.Background() + router, _ := NewAPITest(t, ctx) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test", nil) req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) - assert.Equal(t, 404, w.Code) + assert.Equal(t, http.StatusNotFound, w.Code) } /* @@ -347,6 +359,8 @@ ListenURI string `yaml:"listen_uri,omitempty"` //127.0 */ func TestLoggingDebugToFileConfig(t *testing.T) { + ctx := context.Background() + /*declare settings*/ maxAge := "1h" flushConfig := csconfig.FlushDBCfg{ @@ -368,7 +382,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { LogDir: tempDir, DbConfig: &dbconfig, } - expectedFile := fmt.Sprintf("%s/crowdsec_api.log", tempDir) + expectedFile := filepath.Join(tempDir, "crowdsec_api.log") expectedLines := []string{"/test42"} cfg.LogLevel = ptr.Of(log.DebugLevel) @@ -376,15 +390,15 @@ func TestLoggingDebugToFileConfig(t *testing.T) { err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) require.NoError(t, err) - api, err := NewServer(&cfg) + api, err := NewServer(ctx, &cfg) require.NoError(t, err) require.NotNil(t, 
api) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test42", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) - assert.Equal(t, 404, w.Code) + assert.Equal(t, http.StatusNotFound, w.Code) // wait for the request to happen time.Sleep(500 * time.Millisecond) @@ -398,6 +412,8 @@ func TestLoggingDebugToFileConfig(t *testing.T) { } func TestLoggingErrorToFileConfig(t *testing.T) { + ctx := context.Background() + /*declare settings*/ maxAge := "1h" flushConfig := csconfig.FlushDBCfg{ @@ -419,19 +435,19 @@ func TestLoggingErrorToFileConfig(t *testing.T) { LogDir: tempDir, DbConfig: &dbconfig, } - expectedFile := fmt.Sprintf("%s/crowdsec_api.log", tempDir) + expectedFile := filepath.Join(tempDir, "crowdsec_api.log") cfg.LogLevel = ptr.Of(log.ErrorLevel) // Configure logging err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) require.NoError(t, err) - api, err := NewServer(&cfg) + api, err := NewServer(ctx, &cfg) require.NoError(t, err) require.NotNil(t, api) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodGet, "/test42", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 29f02723b70..719bb231006 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -1,7 +1,6 @@ package controllers import ( - "context" "net" "net/http" "strings" @@ -18,7 +17,6 @@ import ( ) type Controller struct { - Ectx context.Context DBClient *database.Client Router *gin.Engine Profiles []*csconfig.ProfileCfg @@ -83,7 +81,6 @@ func (c *Controller) NewV1() error { v1Config := 
v1.ControllerV1Config{ DbClient: c.DBClient, - Ctx: c.Ectx, ProfilesCfg: c.Profiles, DecisionDeleteChan: c.DecisionDeleteChan, AlertsAddChan: c.AlertsAddChan, diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index 82dc51d6879..d1f93228512 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -6,7 +6,6 @@ import ( "net" "net/http" "strconv" - "strings" "time" "github.com/gin-gonic/gin" @@ -64,7 +63,7 @@ func FormatOneAlert(alert *ent.Alert) *models.Alert { var Metas models.Meta if err := json.Unmarshal([]byte(eventItem.Serialized), &Metas); err != nil { - log.Errorf("unable to unmarshall events meta '%s' : %s", eventItem.Serialized, err) + log.Errorf("unable to parse events meta '%s' : %s", eventItem.Serialized, err) } outputAlert.Events = append(outputAlert.Events, &models.Event{ @@ -124,25 +123,11 @@ func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uin } } -func normalizeScope(scope string) string { - switch strings.ToLower(scope) { - case "ip": - return types.Ip - case "range": - return types.Range - case "as": - return types.AS - case "country": - return types.Country - default: - return scope - } -} - // CreateAlert writes the alerts received in the body to the database func (c *Controller) CreateAlert(gctx *gin.Context) { var input models.AddAlertsRequest + ctx := gctx.Request.Context() machineID, _ := getMachineIDFromContext(gctx) if err := gctx.ShouldBindJSON(&input); err != nil { @@ -160,12 +145,12 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { for _, alert := range input { // normalize scope for alert.Source and decisions if alert.Source.Scope != nil { - *alert.Source.Scope = normalizeScope(*alert.Source.Scope) + *alert.Source.Scope = types.NormalizeScope(*alert.Source.Scope) } for _, decision := range alert.Decisions { if decision.Scope != nil { - *decision.Scope = normalizeScope(*decision.Scope) + *decision.Scope = 
types.NormalizeScope(*decision.Scope) } } @@ -255,7 +240,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { c.DBClient.CanFlush = false } - alerts, err := c.DBClient.CreateAlert(machineID, input) + alerts, err := c.DBClient.CreateAlert(ctx, machineID, input) c.DBClient.CanFlush = true if err != nil { @@ -277,7 +262,9 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { // FindAlerts: returns alerts from the database based on the specified filter func (c *Controller) FindAlerts(gctx *gin.Context) { - result, err := c.DBClient.QueryAlertWithFilter(gctx.Request.URL.Query()) + ctx := gctx.Request.Context() + + result, err := c.DBClient.QueryAlertWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) return @@ -295,15 +282,16 @@ func (c *Controller) FindAlerts(gctx *gin.Context) { // FindAlertByID returns the alert associated with the ID func (c *Controller) FindAlertByID(gctx *gin.Context) { + ctx := gctx.Request.Context() alertIDStr := gctx.Param("alert_id") - alertID, err := strconv.Atoi(alertIDStr) + alertID, err := strconv.Atoi(alertIDStr) if err != nil { gctx.JSON(http.StatusBadRequest, gin.H{"message": "alert_id must be valid integer"}) return } - result, err := c.DBClient.GetAlertByID(alertID) + result, err := c.DBClient.GetAlertByID(ctx, alertID) if err != nil { c.HandleDBErrors(gctx, err) return @@ -323,6 +311,8 @@ func (c *Controller) FindAlertByID(gctx *gin.Context) { func (c *Controller) DeleteAlertByID(gctx *gin.Context) { var err error + ctx := gctx.Request.Context() + incomingIP := gctx.ClientIP() if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) @@ -337,7 +327,7 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { return } - err = c.DBClient.DeleteAlertByID(decisionID) + err = c.DBClient.DeleteAlertByID(ctx, 
decisionID) if err != nil { c.HandleDBErrors(gctx, err) return @@ -350,13 +340,15 @@ func (c *Controller) DeleteAlertByID(gctx *gin.Context) { // DeleteAlerts deletes alerts from the database based on the specified filter func (c *Controller) DeleteAlerts(gctx *gin.Context) { + ctx := gctx.Request.Context() + incomingIP := gctx.ClientIP() if incomingIP != "127.0.0.1" && incomingIP != "::1" && !networksContainIP(c.TrustedIPs, incomingIP) && !isUnixSocket(gctx) { gctx.JSON(http.StatusForbidden, gin.H{"message": fmt.Sprintf("access forbidden from this IP (%s)", incomingIP)}) return } - nbDeleted, err := c.DBClient.DeleteAlertWithFilter(gctx.Request.URL.Query()) + nbDeleted, err := c.DBClient.DeleteAlertWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) return diff --git a/pkg/apiserver/controllers/v1/controller.go b/pkg/apiserver/controllers/v1/controller.go index 6de4abe3b3b..f8b6aa76ea5 100644 --- a/pkg/apiserver/controllers/v1/controller.go +++ b/pkg/apiserver/controllers/v1/controller.go @@ -1,7 +1,6 @@ package v1 import ( - "context" "fmt" "net" @@ -14,7 +13,6 @@ import ( ) type Controller struct { - Ectx context.Context DBClient *database.Client APIKeyHeader string Middlewares *middlewares.Middlewares @@ -31,7 +29,6 @@ type Controller struct { type ControllerV1Config struct { DbClient *database.Client - Ctx context.Context ProfilesCfg []*csconfig.ProfileCfg AlertsAddChan chan []*models.Alert @@ -52,7 +49,6 @@ func New(cfg *ControllerV1Config) (*Controller, error) { } v1 := &Controller{ - Ectx: cfg.Ctx, DBClient: cfg.DbClient, APIKeyHeader: middlewares.APIKeyHeader, Profiles: profiles, diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 3d8e0232224..ffefffc226b 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -1,8 +1,8 @@ package v1 import ( + "context" "encoding/json" - "fmt" "net/http" "strconv" "time" @@ -43,6 +43,8 
@@ func (c *Controller) GetDecision(gctx *gin.Context) { data []*ent.Decision ) + ctx := gctx.Request.Context() + bouncerInfo, err := getBouncerFromContext(gctx) if err != nil { gctx.JSON(http.StatusUnauthorized, gin.H{"message": "not allowed"}) @@ -50,7 +52,7 @@ func (c *Controller) GetDecision(gctx *gin.Context) { return } - data, err = c.DBClient.QueryDecisionWithFilter(gctx.Request.URL.Query()) + data, err = c.DBClient.QueryDecisionWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) @@ -73,7 +75,7 @@ func (c *Controller) GetDecision(gctx *gin.Context) { } if bouncerInfo.LastPull == nil || time.Now().UTC().Sub(*bouncerInfo.LastPull) >= time.Minute { - if err := c.DBClient.UpdateBouncerLastPull(time.Now().UTC(), bouncerInfo.ID); err != nil { + if err := c.DBClient.UpdateBouncerLastPull(ctx, time.Now().UTC(), bouncerInfo.ID); err != nil { log.Errorf("failed to update bouncer last pull: %v", err) } } @@ -91,7 +93,9 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { return } - nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionByID(decisionID) + ctx := gctx.Request.Context() + + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionByID(ctx, decisionID) if err != nil { c.HandleDBErrors(gctx, err) @@ -113,7 +117,9 @@ func (c *Controller) DeleteDecisionById(gctx *gin.Context) { } func (c *Controller) DeleteDecisions(gctx *gin.Context) { - nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionsWithFilter(gctx.Request.URL.Query()) + ctx := gctx.Request.Context() + + nbDeleted, deletedFromDB, err := c.DBClient.ExpireDecisionsWithFilter(ctx, gctx.Request.URL.Query()) if err != nil { c.HandleDBErrors(gctx, err) @@ -134,33 +140,38 @@ func (c *Controller) DeleteDecisions(gctx *gin.Context) { gctx.JSON(http.StatusOK, deleteDecisionResp) } -func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFunc func(map[string][]string) ([]*ent.Decision, error)) error { +func writeStartupDecisions(gctx 
*gin.Context, filters map[string][]string, dbFunc func(context.Context, map[string][]string) ([]*ent.Decision, error)) error { // respBuffer := bytes.NewBuffer([]byte{}) - limit := 30000 //FIXME : make it configurable + limit := 30000 // FIXME : make it configurable needComma := false lastId := 0 - limitStr := fmt.Sprintf("%d", limit) + ctx := gctx.Request.Context() + + limitStr := strconv.Itoa(limit) filters["limit"] = []string{limitStr} + for { if lastId > 0 { - lastIdStr := fmt.Sprintf("%d", lastId) + lastIdStr := strconv.Itoa(lastId) filters["id_gt"] = []string{lastIdStr} } - data, err := dbFunc(filters) + data, err := dbFunc(ctx, filters) if err != nil { return err } + if len(data) > 0 { lastId = data[len(data)-1].ID + results := FormatDecisions(data) for _, decision := range results { decisionJSON, _ := json.Marshal(decision) if needComma { - //respBuffer.Write([]byte(",")) - gctx.Writer.Write([]byte(",")) + // respBuffer.Write([]byte(",")) + gctx.Writer.WriteString(",") } else { needComma = true } @@ -172,10 +183,12 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun return err } - //respBuffer.Reset() + // respBuffer.Reset() } } + log.Debugf("startup: %d decisions returned (limit: %d, lastid: %d)", len(data), limit, lastId) + if len(data) < limit { gctx.Writer.Flush() @@ -186,33 +199,38 @@ func writeStartupDecisions(gctx *gin.Context, filters map[string][]string, dbFun return nil } -func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull *time.Time, dbFunc func(*time.Time, map[string][]string) ([]*ent.Decision, error)) error { - //respBuffer := bytes.NewBuffer([]byte{}) - limit := 30000 //FIXME : make it configurable +func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPull *time.Time, dbFunc func(context.Context, *time.Time, map[string][]string) ([]*ent.Decision, error)) error { + // respBuffer := bytes.NewBuffer([]byte{}) + limit := 30000 // FIXME : make it configurable 
needComma := false lastId := 0 - limitStr := fmt.Sprintf("%d", limit) + ctx := gctx.Request.Context() + + limitStr := strconv.Itoa(limit) filters["limit"] = []string{limitStr} + for { if lastId > 0 { - lastIdStr := fmt.Sprintf("%d", lastId) + lastIdStr := strconv.Itoa(lastId) filters["id_gt"] = []string{lastIdStr} } - data, err := dbFunc(lastPull, filters) + data, err := dbFunc(ctx, lastPull, filters) if err != nil { return err } + if len(data) > 0 { lastId = data[len(data)-1].ID + results := FormatDecisions(data) for _, decision := range results { decisionJSON, _ := json.Marshal(decision) if needComma { - //respBuffer.Write([]byte(",")) - gctx.Writer.Write([]byte(",")) + // respBuffer.Write([]byte(",")) + gctx.Writer.WriteString(",") } else { needComma = true } @@ -224,10 +242,12 @@ func writeDeltaDecisions(gctx *gin.Context, filters map[string][]string, lastPul return err } - //respBuffer.Reset() + // respBuffer.Reset() } } + log.Debugf("startup: %d decisions returned (limit: %d, lastid: %d)", len(data), limit, lastId) + if len(data) < limit { gctx.Writer.Flush() @@ -244,7 +264,7 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B gctx.Writer.Header().Set("Content-Type", "application/json") gctx.Writer.Header().Set("Transfer-Encoding", "chunked") gctx.Writer.WriteHeader(http.StatusOK) - gctx.Writer.Write([]byte(`{"new": [`)) //No need to check for errors, the doc says it always returns nil + gctx.Writer.WriteString(`{"new": [`) // No need to check for errors, the doc says it always returns nil // if the blocker just started, return all decisions if val, ok := gctx.Request.URL.Query()["startup"]; ok && val[0] == "true" { @@ -252,48 +272,47 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B err := writeStartupDecisions(gctx, filters, c.DBClient.QueryAllDecisionsWithFilters) if err != nil { log.Errorf("failed sending new decisions for startup: %v", err) - gctx.Writer.Write([]byte(`], "deleted": []}`)) 
+ gctx.Writer.WriteString(`], "deleted": []}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`], "deleted": [`)) - //Expired decisions + gctx.Writer.WriteString(`], "deleted": [`) + // Expired decisions err = writeStartupDecisions(gctx, filters, c.DBClient.QueryExpiredDecisionsWithFilters) if err != nil { log.Errorf("failed sending expired decisions for startup: %v", err) - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString(`]}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString(`]}`) gctx.Writer.Flush() } else { err = writeDeltaDecisions(gctx, filters, bouncerInfo.LastPull, c.DBClient.QueryNewDecisionsSinceWithFilters) if err != nil { log.Errorf("failed sending new decisions for delta: %v", err) - gctx.Writer.Write([]byte(`], "deleted": []}`)) + gctx.Writer.WriteString(`], "deleted": []}`) gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`], "deleted": [`)) + gctx.Writer.WriteString(`], "deleted": [`) err = writeDeltaDecisions(gctx, filters, bouncerInfo.LastPull, c.DBClient.QueryExpiredDecisionsSinceWithFilters) - if err != nil { log.Errorf("failed sending expired decisions for delta: %v", err) - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString("]}") gctx.Writer.Flush() return err } - gctx.Writer.Write([]byte(`]}`)) + gctx.Writer.WriteString("]}") gctx.Writer.Flush() } @@ -301,8 +320,12 @@ func (c *Controller) StreamDecisionChunked(gctx *gin.Context, bouncerInfo *ent.B } func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *ent.Bouncer, streamStartTime time.Time, filters map[string][]string) error { - var data []*ent.Decision - var err error + var ( + data []*ent.Decision + err error + ) + + ctx := gctx.Request.Context() ret := make(map[string][]*models.Decision, 0) ret["new"] = []*models.Decision{} @@ -310,18 +333,18 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en if val, ok := gctx.Request.URL.Query()["startup"]; ok { if 
val[0] == "true" { - data, err = c.DBClient.QueryAllDecisionsWithFilters(filters) + data, err = c.DBClient.QueryAllDecisionsWithFilters(ctx, filters) if err != nil { log.Errorf("failed querying decisions: %v", err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return err } - //data = KeepLongestDecision(data) + // data = KeepLongestDecision(data) ret["new"] = FormatDecisions(data) // getting expired decisions - data, err = c.DBClient.QueryExpiredDecisionsWithFilters(filters) + data, err = c.DBClient.QueryExpiredDecisionsWithFilters(ctx, filters) if err != nil { log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) @@ -338,14 +361,14 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en } // getting new decisions - data, err = c.DBClient.QueryNewDecisionsSinceWithFilters(bouncerInfo.LastPull, filters) + data, err = c.DBClient.QueryNewDecisionsSinceWithFilters(ctx, bouncerInfo.LastPull, filters) if err != nil { log.Errorf("unable to query new decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return err } - //data = KeepLongestDecision(data) + // data = KeepLongestDecision(data) ret["new"] = FormatDecisions(data) since := time.Time{} @@ -354,7 +377,7 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en } // getting expired decisions - data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(&since, filters) // do we want to give exactly lastPull time ? + data, err = c.DBClient.QueryExpiredDecisionsSinceWithFilters(ctx, &since, filters) // do we want to give exactly lastPull time ? 
if err != nil { log.Errorf("unable to query expired decision for '%s' : %v", bouncerInfo.Name, err) gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) @@ -371,6 +394,8 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en func (c *Controller) StreamDecision(gctx *gin.Context) { var err error + ctx := gctx.Request.Context() + streamStartTime := time.Now().UTC() bouncerInfo, err := getBouncerFromContext(gctx) @@ -381,8 +406,8 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { } if gctx.Request.Method == http.MethodHead { - //For HEAD, just return as the bouncer won't get a body anyway, so no need to query the db - //We also don't update the last pull time, as it would mess with the delta sent on the next request (if done without startup=true) + // For HEAD, just return as the bouncer won't get a body anyway, so no need to query the db + // We also don't update the last pull time, as it would mess with the delta sent on the next request (if done without startup=true) gctx.String(http.StatusOK, "") return @@ -400,8 +425,8 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { } if err == nil { - //Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions - if err := c.DBClient.UpdateBouncerLastPull(streamStartTime, bouncerInfo.ID); err != nil { + // Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions + if err := c.DBClient.UpdateBouncerLastPull(ctx, streamStartTime, bouncerInfo.ID); err != nil { log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err) } } diff --git a/pkg/apiserver/controllers/v1/heartbeat.go b/pkg/apiserver/controllers/v1/heartbeat.go index e1231eaa9ec..799b736ccfe 100644 --- a/pkg/apiserver/controllers/v1/heartbeat.go +++ b/pkg/apiserver/controllers/v1/heartbeat.go @@ -9,7 +9,9 @@ import ( func (c *Controller) HeartBeat(gctx *gin.Context) { machineID, _ := 
getMachineIDFromContext(gctx) - if err := c.DBClient.UpdateMachineLastHeartBeat(machineID); err != nil { + ctx := gctx.Request.Context() + + if err := c.DBClient.UpdateMachineLastHeartBeat(ctx, machineID); err != nil { c.HandleDBErrors(gctx, err) return } diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go index 0030f7d3b39..ff59e389cb1 100644 --- a/pkg/apiserver/controllers/v1/machines.go +++ b/pkg/apiserver/controllers/v1/machines.go @@ -46,6 +46,8 @@ func (c *Controller) shouldAutoRegister(token string, gctx *gin.Context) (bool, } func (c *Controller) CreateMachine(gctx *gin.Context) { + ctx := gctx.Request.Context() + var input models.WatcherRegistrationRequest if err := gctx.ShouldBindJSON(&input); err != nil { @@ -66,7 +68,7 @@ func (c *Controller) CreateMachine(gctx *gin.Context) { return } - if _, err := c.DBClient.CreateMachine(input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { + if _, err := c.DBClient.CreateMachine(ctx, input.MachineID, input.Password, gctx.ClientIP(), autoRegister, false, types.PasswordAuthType); err != nil { c.HandleDBErrors(gctx, err) return } diff --git a/pkg/apiserver/controllers/v1/metrics.go b/pkg/apiserver/controllers/v1/metrics.go index ddb38512a11..4f6ee0986eb 100644 --- a/pkg/apiserver/controllers/v1/metrics.go +++ b/pkg/apiserver/controllers/v1/metrics.go @@ -68,7 +68,8 @@ func PrometheusBouncersHasEmptyDecision(c *gin.Context) { bouncer, _ := getBouncerFromContext(c) if bouncer != nil { LapiNilDecisions.With(prometheus.Labels{ - "bouncer": bouncer.Name}).Inc() + "bouncer": bouncer.Name, + }).Inc() } } @@ -76,7 +77,8 @@ func PrometheusBouncersHasNonEmptyDecision(c *gin.Context) { bouncer, _ := getBouncerFromContext(c) if bouncer != nil { LapiNonNilDecisions.With(prometheus.Labels{ - "bouncer": bouncer.Name}).Inc() + "bouncer": bouncer.Name, + }).Inc() } } @@ -87,7 +89,8 @@ func PrometheusMachinesMiddleware() gin.HandlerFunc 
{ LapiMachineHits.With(prometheus.Labels{ "machine": machineID, "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": c.Request.Method, + }).Inc() } c.Next() @@ -101,7 +104,8 @@ func PrometheusBouncersMiddleware() gin.HandlerFunc { LapiBouncerHits.With(prometheus.Labels{ "bouncer": bouncer.Name, "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": c.Request.Method, + }).Inc() } c.Next() @@ -114,7 +118,8 @@ func PrometheusMiddleware() gin.HandlerFunc { LapiRouteHits.With(prometheus.Labels{ "route": c.Request.URL.Path, - "method": c.Request.Method}).Inc() + "method": c.Request.Method, + }).Inc() c.Next() elapsed := time.Since(startTime) diff --git a/pkg/apiserver/controllers/v1/usagemetrics.go b/pkg/apiserver/controllers/v1/usagemetrics.go index 74f27bb6cf4..5b2c3e3b1a9 100644 --- a/pkg/apiserver/controllers/v1/usagemetrics.go +++ b/pkg/apiserver/controllers/v1/usagemetrics.go @@ -1,6 +1,7 @@ package v1 import ( + "context" "encoding/json" "errors" "net/http" @@ -18,17 +19,15 @@ import ( ) // updateBaseMetrics updates the base metrics for a machine or bouncer -func (c *Controller) updateBaseMetrics(machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { +func (c *Controller) updateBaseMetrics(ctx context.Context, machineID string, bouncer *ent.Bouncer, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { switch { case machineID != "": - c.DBClient.MachineUpdateBaseMetrics(machineID, baseMetrics, hubItems, datasources) + return c.DBClient.MachineUpdateBaseMetrics(ctx, machineID, baseMetrics, hubItems, datasources) case bouncer != nil: - c.DBClient.BouncerUpdateBaseMetrics(bouncer.Name, bouncer.Type, baseMetrics) + return c.DBClient.BouncerUpdateBaseMetrics(ctx, bouncer.Name, bouncer.Type, baseMetrics) default: return errors.New("no machineID or bouncerName set") } - - return nil } // UsageMetrics 
receives metrics from log processors and remediation components @@ -172,7 +171,9 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { } } - err := c.updateBaseMetrics(machineID, bouncer, baseMetrics, hubItems, datasources) + ctx := gctx.Request.Context() + + err := c.updateBaseMetrics(ctx, machineID, bouncer, baseMetrics, hubItems, datasources) if err != nil { logger.Errorf("Failed to update base metrics: %s", err) c.HandleDBErrors(gctx, err) @@ -182,7 +183,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { jsonPayload, err := json.Marshal(payload) if err != nil { - logger.Errorf("Failed to marshal usage metrics: %s", err) + logger.Errorf("Failed to serialize usage metrics: %s", err) c.HandleDBErrors(gctx, err) return @@ -190,7 +191,7 @@ func (c *Controller) UsageMetrics(gctx *gin.Context) { receivedAt := time.Now().UTC() - if _, err := c.DBClient.CreateMetric(generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { + if _, err := c.DBClient.CreateMetric(ctx, generatedType, generatedBy, receivedAt, string(jsonPayload)); err != nil { logger.Error(err) c.HandleDBErrors(gctx, err) diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index e4c9dda47ce..a0af6956443 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -12,82 +13,86 @@ const ( ) func TestDeleteDecisionRange(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by ip wrong - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, 
`{"nbDeleted":"0"}`, w.Body.String()) // delete by range - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) // delete by range : ensure it was already deleted - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) } func TestDeleteDecisionFilter(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by ip wrong - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by ip good - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) // delete by scope/value - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteDecisionFilterByScenario(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := 
context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // delete by wrong scenario - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by scenario good - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) } func TestGetDecisionFilters(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_minibulk.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk.json") // Get Decision - w := lapi.RecordResponse(t, "GET", "/v1/decisions", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code := readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -101,7 +106,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : type filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?type=ban", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?type=ban", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -118,7 +123,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : scope/value - w = lapi.RecordResponse(t, "GET", "/v1/decisions?scopes=Ip&value=91.121.79.179", emptyBody, APIKEY) + w = 
lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?scopes=Ip&value=91.121.79.179", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -132,7 +137,7 @@ func TestGetDecisionFilters(t *testing.T) { // Get Decision : ip filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?ip=91.121.79.179", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?ip=91.121.79.179", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -145,7 +150,7 @@ func TestGetDecisionFilters(t *testing.T) { // assert.NotContains(t, w.Body.String(), `"id":2,"origin":"crowdsec","scenario":"crowdsecurity/ssh-bf","scope":"Ip","type":"ban","value":"91.121.79.178"`) // Get decision : by range - w = lapi.RecordResponse(t, "GET", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code = readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -155,13 +160,14 @@ func TestGetDecisionFilters(t *testing.T) { } func TestGetDecision(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Get Decision - w := lapi.RecordResponse(t, "GET", "/v1/decisions", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) decisions, code := readDecisionsGetResp(t, w) assert.Equal(t, 200, code) @@ -180,51 +186,52 @@ func TestGetDecision(t *testing.T) { assert.Equal(t, int64(3), decisions[2].ID) // Get Decision with invalid filter. 
It should ignore this filter - w = lapi.RecordResponse(t, "GET", "/v1/decisions?test=test", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions?test=test", emptyBody, APIKEY) assert.Equal(t, 200, w.Code) assert.Len(t, decisions, 3) } func TestDeleteDecisionByID(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") - //Have one alerts - w := lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + // Have one alert + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code := readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) assert.Len(t, decisions["new"], 1) // Delete alert with Invalid ID - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/test", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/test", emptyBody, PASSWORD) assert.Equal(t, 400, w.Code) errResp, _ := readDecisionsErrorResp(t, w) assert.Equal(t, "decision_id must be valid integer", errResp["message"]) // Delete alert with ID that not exist - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/100", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/100", emptyBody, PASSWORD) assert.Equal(t, 500, w.Code) errResp, _ = readDecisionsErrorResp(t, w) assert.Equal(t, "decision with id '100' doesn't exist: unable to delete", errResp["message"]) - //Have one alerts - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + // Have one alert + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) assert.Len(t, 
decisions["new"], 1) // Delete alert with valid ID - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) resp, _ := readDecisionsDeleteResp(t, w) assert.Equal(t, "1", resp.NbDeleted) - //Have one alert (because we delete an alert that has dup targets) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + // Have one alert (because we delete an alert that has dup targets) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -232,33 +239,35 @@ func TestDeleteDecisionByID(t *testing.T) { } func TestDeleteDecision(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) // Create Valid Alert - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Delete alert with Invalid filter - w := lapi.RecordResponse(t, "DELETE", "/v1/decisions?test=test", emptyBody, PASSWORD) + w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?test=test", emptyBody, PASSWORD) assert.Equal(t, 500, w.Code) errResp, _ := readDecisionsErrorResp(t, w) assert.Equal(t, "'test' doesn't exist: invalid filter", errResp["message"]) // Delete all alert - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) resp, _ := readDecisionsDeleteResp(t, w) assert.Equal(t, "3", resp.NbDeleted) } func TestStreamStartDecisionDedup(t *testing.T) { - //Ensure that at stream startup we only get the longest decision - lapi := SetupLAPITest(t) + ctx := context.Background() + // Ensure that at stream startup we only get the longest decision 
+ lapi := SetupLAPITest(t, ctx) // Create Valid Alert : 3 decisions for 127.0.0.1, longest has id=3 - lapi.InsertAlertFromFile(t, "./tests/alert_sample.json") + lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // Get Stream, we only get one decision (the longest one) - w := lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w := lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code := readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -268,11 +277,11 @@ func TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // id=3 decision is deleted, this won't affect `deleted`, because there are decisions on the same ip - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/3", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/3", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) // Get Stream, we only get one decision (the longest one, id=2) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -282,11 +291,11 @@ func TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // We delete another decision, yet don't receive it in stream, since there's another decision on same IP - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/2", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/2", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) // And get the remaining decision (1) - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + w = lapi.RecordResponse(t, ctx, "GET", 
"/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Empty(t, decisions["deleted"]) @@ -296,11 +305,11 @@ func TestStreamStartDecisionDedup(t *testing.T) { assert.Equal(t, "127.0.0.1", *decisions["new"][0].Value) // We delete the last decision, we receive the delete order - w = lapi.RecordResponse(t, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) + w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions/1", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - //and now we only get a deleted decision - w = lapi.RecordResponse(t, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) + // and now we only get a deleted decision + w = lapi.RecordResponse(t, ctx, "GET", "/v1/decisions/stream?startup=true", emptyBody, APIKEY) decisions, code = readDecisionsStreamResp(t, w) assert.Equal(t, 200, code) assert.Len(t, decisions["deleted"], 1) diff --git a/pkg/apiserver/heartbeat_test.go b/pkg/apiserver/heartbeat_test.go index fbf01c7fb8e..db051566f75 100644 --- a/pkg/apiserver/heartbeat_test.go +++ b/pkg/apiserver/heartbeat_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "net/http" "testing" @@ -8,11 +9,12 @@ import ( ) func TestHeartBeat(t *testing.T) { - lapi := SetupLAPITest(t) + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) - w := lapi.RecordResponse(t, http.MethodGet, "/v1/heartbeat", emptyBody, "password") + w := lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/heartbeat", emptyBody, "password") assert.Equal(t, 200, w.Code) - w = lapi.RecordResponse(t, "POST", "/v1/heartbeat", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, "POST", "/v1/heartbeat", emptyBody, "password") assert.Equal(t, 405, w.Code) } diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index aa6e84e416b..f6f51763975 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "net/http" 
"net/http/httptest" "strings" @@ -10,13 +11,14 @@ import ( ) func TestLogin(t *testing.T) { - router, config := NewAPITest(t) + ctx := context.Background() + router, config := NewAPITest(t, ctx) - body := CreateTestMachine(t, router, "") + body := CreateTestMachine(t, ctx, router, "") // Login with machine not validated yet w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -25,7 +27,7 @@ func TestLogin(t *testing.T) { // Login with machine not exist w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1", "password": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1", "password": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -34,7 +36,7 @@ func TestLogin(t *testing.T) { // Login with invalid body w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader("test")) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader("test")) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -43,19 +45,19 @@ func TestLogin(t *testing.T) { // Login with invalid format w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) assert.Equal(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, 
w.Body.String()) - //Validate machine - ValidateMachine(t, "test", config.API.Server.DbConfig) + // Validate machine + ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) // Login with invalid password w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test1"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test1"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -64,7 +66,7 @@ func TestLogin(t *testing.T) { // Login with valid machine w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -74,7 +76,7 @@ func TestLogin(t *testing.T) { // Login with valid machine + scenarios w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test", "scenarios": ["crowdsecurity/test", "crowdsecurity/test2"]}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers/login", strings.NewReader(`{"machine_id": "test", "password": "test", "scenarios": ["crowdsecurity/test", "crowdsecurity/test2"]}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 041a6bee528..969f75707d6 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "net/http" "net/http/httptest" @@ -14,11 +15,12 @@ import ( ) func TestCreateMachine(t *testing.T) { - router, _ := NewAPITest(t) + ctx := context.Background() + router, _ := NewAPITest(t, ctx) // Create machine 
with invalid format w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader("test")) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader("test")) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -27,7 +29,7 @@ func TestCreateMachine(t *testing.T) { // Create machine with invalid input w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(`{"test": "test"}`)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(`{"test": "test"}`)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -41,7 +43,7 @@ func TestCreateMachine(t *testing.T) { body := string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -50,8 +52,10 @@ func TestCreateMachine(t *testing.T) { } func TestCreateMachineWithForwardedFor(t *testing.T) { - router, config := NewAPITestForwardedFor(t) + ctx := context.Background() + router, config := NewAPITestForwardedFor(t, ctx) router.TrustedPlatform = "X-Real-IP" + // Create machine b, err := json.Marshal(MachineTest) require.NoError(t, err) @@ -59,7 +63,7 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Real-Ip", "1.1.1.1") router.ServeHTTP(w, req) @@ -73,7 +77,8 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { } func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { - router, config := NewAPITest(t) 
+ ctx := context.Background() + router, config := NewAPITest(t, ctx) // Create machine b, err := json.Marshal(MachineTest) @@ -82,7 +87,7 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.Header.Add("X-Real-IP", "1.1.1.1") router.ServeHTTP(w, req) @@ -92,13 +97,14 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) - //For some reason, the IP is empty when running tests - //if no forwarded-for headers are present + // For some reason, the IP is empty when running tests + // if no forwarded-for headers are present assert.Equal(t, "", ip) } func TestCreateMachineWithoutForwardedFor(t *testing.T) { - router, config := NewAPITestForwardedFor(t) + ctx := context.Background() + router, config := NewAPITestForwardedFor(t, ctx) // Create machine b, err := json.Marshal(MachineTest) @@ -107,7 +113,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -116,23 +122,24 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) - //For some reason, the IP is empty when running tests - //if no forwarded-for headers are present + // For some reason, the IP is empty when running tests + // if no forwarded-for headers are present assert.Equal(t, "", ip) } func TestCreateMachineAlreadyExist(t *testing.T) { - router, _ := 
NewAPITest(t) + ctx := context.Background() + router, _ := NewAPITest(t, ctx) - body := CreateTestMachine(t, router, "") + body := CreateTestMachine(t, ctx, router, "") w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -141,9 +148,10 @@ func TestCreateMachineAlreadyExist(t *testing.T) { } func TestAutoRegistration(t *testing.T) { - router, _ := NewAPITest(t) + ctx := context.Background() + router, _ := NewAPITest(t, ctx) - //Invalid registration token / valid source IP + // Invalid registration token / valid source IP regReq := MachineTest regReq.RegistrationToken = invalidRegistrationToken b, err := json.Marshal(regReq) @@ -152,14 +160,14 @@ func TestAutoRegistration(t *testing.T) { body := string(b) w := httptest.NewRecorder() - req, _ := http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnauthorized, w.Code) - //Invalid registration token / invalid source IP + // Invalid registration token / invalid source IP regReq = MachineTest regReq.RegistrationToken = invalidRegistrationToken b, err = json.Marshal(regReq) @@ -168,14 +176,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = 
http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "42.42.42.42:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnauthorized, w.Code) - //valid registration token / invalid source IP + // valid registration token / invalid source IP regReq = MachineTest regReq.RegistrationToken = validRegistrationToken b, err = json.Marshal(regReq) @@ -184,14 +192,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "42.42.42.42:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnauthorized, w.Code) - //Valid registration token / valid source IP + // Valid registration token / valid source IP regReq = MachineTest regReq.RegistrationToken = validRegistrationToken b, err = json.Marshal(regReq) @@ -200,14 +208,14 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) assert.Equal(t, http.StatusAccepted, w.Code) - //No token / valid source IP + // No token / valid source IP regReq = MachineTest regReq.MachineID = ptr.Of("test2") b, err = json.Marshal(regReq) @@ -216,7 +224,7 @@ func TestAutoRegistration(t *testing.T) { body = string(b) w = httptest.NewRecorder() - req, _ = http.NewRequest(http.MethodPost, "/v1/watchers", strings.NewReader(body)) + req, _ = http.NewRequestWithContext(ctx, http.MethodPost, "/v1/watchers", strings.NewReader(body)) req.Header.Add("User-Agent", UserAgent) 
req.RemoteAddr = "127.0.0.1:4242" router.ServeHTTP(w, req) diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index e822666db0f..3c154be4fab 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -64,6 +64,8 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } + ctx := c.Request.Context() + extractedCN, err := a.TlsAuth.ValidateCert(c) if err != nil { logger.Warn(err) @@ -73,7 +75,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger = logger.WithField("cn", extractedCN) bouncerName := fmt.Sprintf("%s@%s", extractedCN, c.ClientIP()) - bouncer, err := a.DbClient.SelectBouncerByName(bouncerName) + bouncer, err := a.DbClient.SelectBouncerByName(ctx, bouncerName) // This is likely not the proper way, but isNotFound does not seem to work if err != nil && strings.Contains(err.Error(), "bouncer not found") { @@ -87,7 +89,7 @@ func (a *APIKey) authTLS(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger.Infof("Creating bouncer %s", bouncerName) - bouncer, err = a.DbClient.CreateBouncer(bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType) + bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, c.ClientIP(), HashSHA512(apiKey), types.TlsAuthType, true) if err != nil { logger.Errorf("while creating bouncer db entry: %s", err) return nil @@ -112,16 +114,69 @@ func (a *APIKey) authPlain(c *gin.Context, logger *log.Entry) *ent.Bouncer { return nil } + clientIP := c.ClientIP() + + ctx := c.Request.Context() + hashStr := HashSHA512(val[0]) - bouncer, err := a.DbClient.SelectBouncer(hashStr) + // Appsec case, we only care if the key is valid + // No content is returned, no last_pull update or anything + if c.Request.Method == http.MethodHead { + bouncer, err := a.DbClient.SelectBouncers(ctx, hashStr, types.ApiKeyAuthType) + if err != nil { + logger.Errorf("while fetching bouncer info: %s", err) + 
return nil + } + return bouncer[0] + } + + // most common case, check if this specific bouncer exists + bouncer, err := a.DbClient.SelectBouncerWithIP(ctx, hashStr, clientIP) + if err != nil && !ent.IsNotFound(err) { + logger.Errorf("while fetching bouncer info: %s", err) + return nil + } + + // We found the bouncer with key and IP, we can use it + if bouncer != nil { + if bouncer.AuthType != types.ApiKeyAuthType { + logger.Errorf("bouncer isn't allowed to auth by API key") + return nil + } + return bouncer + } + + // We didn't find the bouncer with key and IP, let's try to find it with the key only + bouncers, err := a.DbClient.SelectBouncers(ctx, hashStr, types.ApiKeyAuthType) if err != nil { logger.Errorf("while fetching bouncer info: %s", err) return nil } - if bouncer.AuthType != types.ApiKeyAuthType { - logger.Errorf("bouncer %s attempted to login using an API key but it is configured to auth with %s", bouncer.Name, bouncer.AuthType) + if len(bouncers) == 0 { + logger.Debugf("no bouncer found with this key") + return nil + } + + logger.Debugf("found %d bouncers with this key", len(bouncers)) + + // We only have one bouncer with this key and no IP + // This is the first request made by this bouncer, keep this one + if len(bouncers) == 1 && bouncers[0].IPAddress == "" { + return bouncers[0] + } + + // Bouncers are ordered by ID, first one *should* be the manually created one + // Can probably get a bit weird if the user deletes the manually created one + bouncerName := fmt.Sprintf("%s@%s", bouncers[0].Name, clientIP) + + logger.Infof("Creating bouncer %s", bouncerName) + + bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, clientIP, hashStr, types.ApiKeyAuthType, true) + + if err != nil { + logger.Errorf("while creating bouncer db entry: %s", err) return nil } @@ -132,6 +187,8 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return func(c *gin.Context) { var bouncer *ent.Bouncer + ctx := c.Request.Context() + clientIP := c.ClientIP() logger := 
log.WithField("ip", clientIP) @@ -150,27 +207,20 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { return } - logger = logger.WithField("name", bouncer.Name) - - if bouncer.IPAddress == "" { - if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { - logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) - c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) - c.Abort() - - return - } + // Appsec request, return immediately if we found something + if c.Request.Method == http.MethodHead { + c.Set(BouncerContextKey, bouncer) + return } - // Don't update IP on HEAD request, as it's used by the appsec to check the validity of the API key provided - if bouncer.IPAddress != clientIP && bouncer.IPAddress != "" && c.Request.Method != http.MethodHead { - log.Warningf("new IP address detected for bouncer '%s': %s (old: %s)", bouncer.Name, clientIP, bouncer.IPAddress) + logger = logger.WithField("name", bouncer.Name) - if err := a.DbClient.UpdateBouncerIP(clientIP, bouncer.ID); err != nil { + // 1st time we see this bouncer, we update its IP + if bouncer.IPAddress == "" { + if err := a.DbClient.UpdateBouncerIP(ctx, clientIP, bouncer.ID); err != nil { logger.Errorf("Failed to update ip address for '%s': %s\n", bouncer.Name, err) c.JSON(http.StatusForbidden, gin.H{"message": "access forbidden"}) c.Abort() - return } } @@ -182,7 +232,7 @@ func (a *APIKey) MiddlewareFunc() gin.HandlerFunc { } if bouncer.Version != useragent[1] || bouncer.Type != useragent[0] { - if err := a.DbClient.UpdateBouncerTypeAndVersion(useragent[0], useragent[1], bouncer.ID); err != nil { + if err := a.DbClient.UpdateBouncerTypeAndVersion(ctx, useragent[0], useragent[1], bouncer.ID); err != nil { logger.Errorf("failed to update bouncer version and type: %s", err) c.JSON(http.StatusForbidden, gin.H{"message": "bad user agent"}) c.Abort() diff --git a/pkg/apiserver/middlewares/v1/cache.go b/pkg/apiserver/middlewares/v1/cache.go index 
a058ec40393..b0037bc4fa4 100644 --- a/pkg/apiserver/middlewares/v1/cache.go +++ b/pkg/apiserver/middlewares/v1/cache.go @@ -9,7 +9,7 @@ import ( ) type cacheEntry struct { - err error // if nil, the certificate is not revocated + err error // if nil, the certificate is not revocated timestamp time.Time } diff --git a/pkg/apiserver/middlewares/v1/crl.go b/pkg/apiserver/middlewares/v1/crl.go index f85a410998e..64d7d3f0d96 100644 --- a/pkg/apiserver/middlewares/v1/crl.go +++ b/pkg/apiserver/middlewares/v1/crl.go @@ -12,13 +12,13 @@ import ( ) type CRLChecker struct { - path string // path to the CRL file - fileInfo os.FileInfo // last stat of the CRL file - crls []*x509.RevocationList // parsed CRLs + path string // path to the CRL file + fileInfo os.FileInfo // last stat of the CRL file + crls []*x509.RevocationList // parsed CRLs logger *log.Entry mu sync.RWMutex - lastLoad time.Time // time when the CRL file was last read successfully - onLoad func() // called when the CRL file changes (and is read successfully) + lastLoad time.Time // time when the CRL file was last read successfully + onLoad func() // called when the CRL file changes (and is read successfully) } func NewCRLChecker(crlPath string, onLoad func(), logger *log.Entry) (*CRLChecker, error) { diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 64406deff3e..9171e9fce06 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -55,6 +55,7 @@ type authInput struct { } func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { + ctx := c.Request.Context() ret := authInput{} if j.TlsAuth == nil { @@ -76,7 +77,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { ret.clientMachine, err = j.DbClient.Ent.Machine.Query(). Where(machine.MachineId(ret.machineID)). 
- First(j.DbClient.CTX) + First(ctx) if ent.IsNotFound(err) { // Machine was not found, let's create it logger.Infof("machine %s not found, create it", ret.machineID) @@ -91,7 +92,7 @@ func (j *JWT) authTLS(c *gin.Context) (*authInput, error) { password := strfmt.Password(pwd) - ret.clientMachine, err = j.DbClient.CreateMachine(&ret.machineID, &password, "", true, true, types.TlsAuthType) + ret.clientMachine, err = j.DbClient.CreateMachine(ctx, &ret.machineID, &password, "", true, true, types.TlsAuthType) if err != nil { return nil, fmt.Errorf("while creating machine entry for %s: %w", ret.machineID, err) } @@ -127,6 +128,8 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) { err error ) + ctx := c.Request.Context() + ret := authInput{} if err = c.ShouldBindJSON(&loginInput); err != nil { @@ -143,7 +146,7 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) { ret.clientMachine, err = j.DbClient.Ent.Machine.Query(). Where(machine.MachineId(ret.machineID)). - First(j.DbClient.CTX) + First(ctx) if err != nil { log.Infof("Error machine login for %s : %+v ", ret.machineID, err) return nil, err @@ -175,6 +178,8 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { auth *authInput ) + ctx := c.Request.Context() + if c.Request.TLS != nil && len(c.Request.TLS.PeerCertificates) > 0 { auth, err = j.authTLS(c) if err != nil { @@ -198,7 +203,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { } } - err = j.DbClient.UpdateMachineScenarios(scenarios, auth.clientMachine.ID) + err = j.DbClient.UpdateMachineScenarios(ctx, scenarios, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update scenarios list for '%s': %s\n", auth.machineID, err) return nil, jwt.ErrFailedAuthentication @@ -208,7 +213,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { clientIP := c.ClientIP() if auth.clientMachine.IpAddress == "" { - err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) + err = 
j.DbClient.UpdateMachineIP(ctx, clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.machineID, err) return nil, jwt.ErrFailedAuthentication @@ -218,7 +223,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { if auth.clientMachine.IpAddress != clientIP && auth.clientMachine.IpAddress != "" { log.Warningf("new IP address detected for machine '%s': %s (old: %s)", auth.clientMachine.MachineId, clientIP, auth.clientMachine.IpAddress) - err = j.DbClient.UpdateMachineIP(clientIP, auth.clientMachine.ID) + err = j.DbClient.UpdateMachineIP(ctx, clientIP, auth.clientMachine.ID) if err != nil { log.Errorf("Failed to update ip address for '%s': %s\n", auth.clientMachine.MachineId, err) return nil, jwt.ErrFailedAuthentication @@ -231,7 +236,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { return nil, jwt.ErrFailedAuthentication } - if err := j.DbClient.UpdateMachineVersion(useragent[1], auth.clientMachine.ID); err != nil { + if err := j.DbClient.UpdateMachineVersion(ctx, useragent[1], auth.clientMachine.ID); err != nil { log.Errorf("unable to update machine '%s' version '%s': %s", auth.clientMachine.MachineId, useragent[1], err) log.Errorf("bad user agent from : %s", clientIP) diff --git a/pkg/apiserver/middlewares/v1/ocsp.go b/pkg/apiserver/middlewares/v1/ocsp.go index 24557bfda7b..0b6406ad0e7 100644 --- a/pkg/apiserver/middlewares/v1/ocsp.go +++ b/pkg/apiserver/middlewares/v1/ocsp.go @@ -70,7 +70,7 @@ func (oc *OCSPChecker) query(server string, cert *x509.Certificate, issuer *x509 // It returns a boolean indicating if the certificate is revoked and a boolean indicating // if the OCSP check was successful and could be cached. 
func (oc *OCSPChecker) isRevokedBy(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { - if cert.OCSPServer == nil || len(cert.OCSPServer) == 0 { + if len(cert.OCSPServer) == 0 { oc.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") return false, true } diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 0d0fd0ecd42..83ba13843b9 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -156,11 +156,11 @@ func (p *Papi) handleEvent(event longpollclient.Event, sync bool) error { return nil } -func (p *Papi) GetPermissions() (PapiPermCheckSuccess, error) { +func (p *Papi) GetPermissions(ctx context.Context) (PapiPermCheckSuccess, error) { httpClient := p.apiClient.GetClient() papiCheckUrl := fmt.Sprintf("%s%s%s", p.URL, types.PAPIVersion, types.PAPIPermissionsUrl) - req, err := http.NewRequest(http.MethodGet, papiCheckUrl, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, papiCheckUrl, nil) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request: %w", err) } @@ -205,8 +205,8 @@ func reverse(s []longpollclient.Event) []longpollclient.Event { return a } -func (p *Papi) PullOnce(since time.Time, sync bool) error { - events, err := p.Client.PullOnce(since) +func (p *Papi) PullOnce(ctx context.Context, since time.Time, sync bool) error { + events, err := p.Client.PullOnce(ctx, since) if err != nil { return err } @@ -230,13 +230,13 @@ func (p *Papi) PullOnce(since time.Time, sync bool) error { } // PullPAPI is the long polling client for real-time decisions from PAPI -func (p *Papi) Pull() error { +func (p *Papi) Pull(ctx context.Context) error { defer trace.CatchPanic("lapi/PullPAPI") p.Logger.Infof("Starting Polling API Pull") lastTimestamp := time.Time{} - lastTimestampStr, err := p.DBClient.GetConfigItem(PapiPullKey) + lastTimestampStr, err := p.DBClient.GetConfigItem(ctx, PapiPullKey) if err != nil { p.Logger.Warningf("failed to get last 
timestamp for papi pull: %s", err) } @@ -245,30 +245,30 @@ func (p *Papi) Pull() error { if lastTimestampStr == nil { binTime, err := lastTimestamp.MarshalText() if err != nil { - return fmt.Errorf("failed to marshal last timestamp: %w", err) + return fmt.Errorf("failed to serialize last timestamp: %w", err) } - if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil { + if err := p.DBClient.SetConfigItem(ctx, PapiPullKey, string(binTime)); err != nil { p.Logger.Errorf("error setting papi pull last key: %s", err) } else { p.Logger.Debugf("config item '%s' set in database with value '%s'", PapiPullKey, string(binTime)) } } else { if err := lastTimestamp.UnmarshalText([]byte(*lastTimestampStr)); err != nil { - return fmt.Errorf("failed to unmarshal last timestamp: %w", err) + return fmt.Errorf("failed to parse last timestamp: %w", err) } } p.Logger.Infof("Starting PAPI pull (since:%s)", lastTimestamp) - for event := range p.Client.Start(lastTimestamp) { + for event := range p.Client.Start(ctx, lastTimestamp) { logger := p.Logger.WithField("request-id", event.RequestId) // update last timestamp in database newTime := time.Now().UTC() binTime, err := newTime.MarshalText() if err != nil { - return fmt.Errorf("failed to marshal last timestamp: %w", err) + return fmt.Errorf("failed to serialize last timestamp: %w", err) } err = p.handleEvent(event, false) @@ -277,7 +277,7 @@ func (p *Papi) Pull() error { continue } - if err := p.DBClient.SetConfigItem(PapiPullKey, string(binTime)); err != nil { + if err := p.DBClient.SetConfigItem(ctx, PapiPullKey, string(binTime)); err != nil { return fmt.Errorf("failed to update last timestamp: %w", err) } diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index a1137161698..78f5dc9b0fe 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -1,6 +1,7 @@ package apiserver import ( + "context" "encoding/json" "fmt" "time" @@ -42,6 +43,8 @@ type listUnsubscribe struct { } func 
DecisionCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + switch message.Header.OperationCmd { case "delete": data, err := json.Marshal(message.Data) @@ -64,7 +67,7 @@ func DecisionCmd(message *Message, p *Papi, sync bool) error { filter := make(map[string][]string) filter["uuid"] = UUIDs - _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(filter) + _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return fmt.Errorf("unable to expire decisions %+v: %w", UUIDs, err) } @@ -94,6 +97,8 @@ func DecisionCmd(message *Message, p *Papi, sync bool) error { } func AlertCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + switch message.Header.OperationCmd { case "add": data, err := json.Marshal(message.Data) @@ -152,7 +157,7 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { } // use a different method: alert and/or decision might already be partially present in the database - _, err = p.DBClient.CreateOrUpdateAlert("", alert) + _, err = p.DBClient.CreateOrUpdateAlert(ctx, "", alert) if err != nil { log.Errorf("Failed to create alerts in DB: %s", err) } else { @@ -167,6 +172,8 @@ func AlertCmd(message *Message, p *Papi, sync bool) error { } func ManagementCmd(message *Message, p *Papi, sync bool) error { + ctx := context.TODO() + if sync { p.Logger.Infof("Ignoring management command from PAPI in sync mode") return nil @@ -194,7 +201,7 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { filter["origin"] = []string{types.ListOrigin} filter["scenario"] = []string{unsubscribeMsg.Name} - _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(filter) + _, deletedDecisions, err := p.DBClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return fmt.Errorf("unable to expire decisions for list %s : %w", unsubscribeMsg.Name, err) } @@ -215,17 +222,19 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { return 
fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) } + ctx := context.TODO() + if forcePullMsg.Blocklist == nil { p.Logger.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") - err = p.apic.PullTop(true) + err = p.apic.PullTop(ctx, true) if err != nil { return fmt.Errorf("failed to force pull operation: %w", err) } } else { p.Logger.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) - err = p.apic.PullBlocklist(&modelscapi.BlocklistLink{ + err = p.apic.PullBlocklist(ctx, &modelscapi.BlocklistLink{ Name: &forcePullMsg.Blocklist.Name, URL: &forcePullMsg.Blocklist.Url, Remediation: &forcePullMsg.Blocklist.Remediation, diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 41dd0ccdc2c..32aeb7d9a5a 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -13,6 +13,8 @@ import ( ) func TestLPMetrics(t *testing.T) { + ctx := context.Background() + tests := []struct { name string body string @@ -28,7 +30,7 @@ func TestLPMetrics(t *testing.T) { name: "empty metrics for LP", body: `{ }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing log processor data", authType: PASSWORD, }, @@ -48,7 +50,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -72,7 +74,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -96,7 +98,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing remediation component data", authType: APIKEY, }, @@ -115,7 +117,7 @@ func TestLPMetrics(t 
*testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedResponse: "", expectedMetricsCount: 1, expectedFeatureFlags: "a,b,c", @@ -136,7 +138,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "log_processors.0.datasources in body is required", authType: PASSWORD, }, @@ -155,7 +157,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedOSName: "foo", expectedOSVersion: "42", @@ -177,7 +179,7 @@ func TestLPMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "log_processors.0.os.name in body is required", authType: PASSWORD, }, @@ -185,20 +187,20 @@ func TestLPMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lapi := SetupLAPITest(t) + lapi := SetupLAPITest(t, ctx) - dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + dbClient, err := database.NewClient(ctx, lapi.DBConfig) if err != nil { t.Fatalf("unable to create database client: %s", err) } - w := lapi.RecordResponse(t, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) - machine, _ := dbClient.QueryMachineByID("test") - metrics, _ := dbClient.GetLPUsageMetricsByMachineID("test") + machine, _ := dbClient.QueryMachineByID(ctx, "test") + metrics, _ := dbClient.GetLPUsageMetricsByMachineID(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) assert.Equal(t, tt.expectedOSName, machine.Osname) @@ -214,6 +216,8 @@ func TestLPMetrics(t *testing.T) { } func TestRCMetrics(t *testing.T) { + ctx := 
context.Background() + tests := []struct { name string body string @@ -229,7 +233,7 @@ func TestRCMetrics(t *testing.T) { name: "empty metrics for RC", body: `{ }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing remediation component data", authType: APIKEY, }, @@ -247,7 +251,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -269,7 +273,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedResponse: "", expectedOSName: "foo", @@ -291,7 +295,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 400, + expectedStatusCode: http.StatusBadRequest, expectedResponse: "Missing log processor data", authType: PASSWORD, }, @@ -308,7 +312,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedResponse: "", expectedMetricsCount: 1, expectedFeatureFlags: "a,b,c", @@ -327,7 +331,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 201, + expectedStatusCode: http.StatusCreated, expectedMetricsCount: 1, expectedOSName: "foo", expectedOSVersion: "42", @@ -347,7 +351,7 @@ func TestRCMetrics(t *testing.T) { } ] }`, - expectedStatusCode: 422, + expectedStatusCode: http.StatusUnprocessableEntity, expectedResponse: "remediation_components.0.os.name in body is required", authType: APIKEY, }, @@ -355,20 +359,20 @@ func TestRCMetrics(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lapi := SetupLAPITest(t) + lapi := SetupLAPITest(t, ctx) - dbClient, err := database.NewClient(context.Background(), lapi.DBConfig) + dbClient, err := database.NewClient(ctx, lapi.DBConfig) if err != nil { t.Fatalf("unable to create database client: %s", err) } - w := lapi.RecordResponse(t, 
http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/usage-metrics", strings.NewReader(tt.body), tt.authType) assert.Equal(t, tt.expectedStatusCode, w.Code) assert.Contains(t, w.Body.String(), tt.expectedResponse) - bouncer, _ := dbClient.SelectBouncerByName("test") - metrics, _ := dbClient.GetBouncerUsageMetricsByName("test") + bouncer, _ := dbClient.SelectBouncerByName(ctx, "test") + metrics, _ := dbClient.GetBouncerUsageMetricsByName(ctx, "test") assert.Len(t, metrics, tt.expectedMetricsCount) assert.Equal(t, tt.expectedOSName, bouncer.Osname) diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 96f977b4738..553db205b5d 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -1,7 +1,6 @@ package appsec import ( - "errors" "fmt" "net/http" "os" @@ -40,7 +39,6 @@ const ( ) func (h *Hook) Build(hookStage int) error { - ctx := map[string]interface{}{} switch hookStage { case hookOnLoad: @@ -54,7 +52,7 @@ func (h *Hook) Build(hookStage int) error { } opts := exprhelpers.GetExprOptions(ctx) if h.Filter != "" { - program, err := expr.Compile(h.Filter, opts...) //FIXME: opts + program, err := expr.Compile(h.Filter, opts...) 
// FIXME: opts if err != nil { return fmt.Errorf("unable to compile filter %s : %w", h.Filter, err) } @@ -73,11 +71,11 @@ func (h *Hook) Build(hookStage int) error { type AppsecTempResponse struct { InBandInterrupt bool OutOfBandInterrupt bool - Action string //allow, deny, captcha, log - UserHTTPResponseCode int //The response code to send to the user - BouncerHTTPResponseCode int //The response code to send to the remediation component - SendEvent bool //do we send an internal event on rule match - SendAlert bool //do we send an alert on rule match + Action string // allow, deny, captcha, log + UserHTTPResponseCode int // The response code to send to the user + BouncerHTTPResponseCode int // The response code to send to the remediation component + SendEvent bool // do we send an internal event on rule match + SendAlert bool // do we send an alert on rule match } type AppsecSubEngineOpts struct { @@ -93,7 +91,7 @@ type AppsecRuntimeConfig struct { InBandRules []AppsecCollection DefaultRemediation string - RemediationByTag map[string]string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + RemediationByTag map[string]string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME RemediationById map[int]string CompiledOnLoad []Hook CompiledPreEval []Hook @@ -101,22 +99,22 @@ type AppsecRuntimeConfig struct { CompiledOnMatch []Hook CompiledVariablesTracking []*regexp.Regexp Config *AppsecConfig - //CorazaLogger debuglog.Logger + // CorazaLogger debuglog.Logger - //those are ephemeral, created/destroyed with every req - OutOfBandTx ExtendedTransaction //is it a good idea ? - InBandTx ExtendedTransaction //is it a good idea ? + // those are ephemeral, created/destroyed with every req + OutOfBandTx ExtendedTransaction // is it a good idea ? + InBandTx ExtendedTransaction // is it a good idea ? Response AppsecTempResponse - //should we store matched rules here ? + // should we store matched rules here ? 
Logger *log.Entry - //Set by on_load to ignore some rules on loading + // Set by on_load to ignore some rules on loading DisabledInBandRuleIds []int - DisabledInBandRulesTags []string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + DisabledInBandRulesTags []string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME DisabledOutOfBandRuleIds []int - DisabledOutOfBandRulesTags []string //Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME + DisabledOutOfBandRulesTags []string // Also used for ByName, as the name (for modsec rules) is a tag crowdsec-NAME } type AppsecConfig struct { @@ -125,10 +123,10 @@ type AppsecConfig struct { InBandRules []string `yaml:"inband_rules"` DefaultRemediation string `yaml:"default_remediation"` DefaultPassAction string `yaml:"default_pass_action"` - BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` //returned to the bouncer - BouncerPassedHTTPCode int `yaml:"passed_http_code"` //returned to the bouncer - UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` //returned to the user - UserPassedHTTPCode int `yaml:"user_passed_http_code"` //returned to the user + BouncerBlockedHTTPCode int `yaml:"blocked_http_code"` // returned to the bouncer + BouncerPassedHTTPCode int `yaml:"passed_http_code"` // returned to the bouncer + UserBlockedHTTPCode int `yaml:"user_blocked_http_code"` // returned to the user + UserPassedHTTPCode int `yaml:"user_passed_http_code"` // returned to the user OnLoad []Hook `yaml:"on_load"` PreEval []Hook `yaml:"pre_eval"` @@ -151,45 +149,95 @@ func (w *AppsecRuntimeConfig) ClearResponse() { w.Response.SendAlert = true } -func (wc *AppsecConfig) LoadByPath(file string) error { +func (wc *AppsecConfig) SetUpLogger() { + if wc.LogLevel == nil { + lvl := wc.Logger.Logger.GetLevel() + wc.LogLevel = &lvl + } + /* wc.Name is actually the datasource name.*/ + wc.Logger = wc.Logger.Dup().WithField("name", wc.Name) + 
wc.Logger.Logger.SetLevel(*wc.LogLevel) + +} + +func (wc *AppsecConfig) LoadByPath(file string) error { wc.Logger.Debugf("loading config %s", file) yamlFile, err := os.ReadFile(file) if err != nil { return fmt.Errorf("unable to read file %s : %s", file, err) } - err = yaml.UnmarshalStrict(yamlFile, wc) + + //as LoadByPath can be called several time, we append rules/hooks, but override other options + var tmp AppsecConfig + + err = yaml.UnmarshalStrict(yamlFile, &tmp) if err != nil { return fmt.Errorf("unable to parse yaml file %s : %s", file, err) } - if wc.Name == "" { - return errors.New("name cannot be empty") + if wc.Name == "" && tmp.Name != "" { + wc.Name = tmp.Name } - if wc.LogLevel == nil { - lvl := wc.Logger.Logger.GetLevel() - wc.LogLevel = &lvl + + //We can append rules/hooks + if tmp.OutOfBandRules != nil { + wc.OutOfBandRules = append(wc.OutOfBandRules, tmp.OutOfBandRules...) } - wc.Logger = wc.Logger.Dup().WithField("name", wc.Name) - wc.Logger.Logger.SetLevel(*wc.LogLevel) + if tmp.InBandRules != nil { + wc.InBandRules = append(wc.InBandRules, tmp.InBandRules...) + } + if tmp.OnLoad != nil { + wc.OnLoad = append(wc.OnLoad, tmp.OnLoad...) + } + if tmp.PreEval != nil { + wc.PreEval = append(wc.PreEval, tmp.PreEval...) + } + if tmp.PostEval != nil { + wc.PostEval = append(wc.PostEval, tmp.PostEval...) + } + if tmp.OnMatch != nil { + wc.OnMatch = append(wc.OnMatch, tmp.OnMatch...) + } + if tmp.VariablesTracking != nil { + wc.VariablesTracking = append(wc.VariablesTracking, tmp.VariablesTracking...) 
+ } + + //override other options + wc.LogLevel = tmp.LogLevel + + wc.DefaultRemediation = tmp.DefaultRemediation + wc.DefaultPassAction = tmp.DefaultPassAction + wc.BouncerBlockedHTTPCode = tmp.BouncerBlockedHTTPCode + wc.BouncerPassedHTTPCode = tmp.BouncerPassedHTTPCode + wc.UserBlockedHTTPCode = tmp.UserBlockedHTTPCode + wc.UserPassedHTTPCode = tmp.UserPassedHTTPCode + + if tmp.InbandOptions.DisableBodyInspection { + wc.InbandOptions.DisableBodyInspection = true + } + if tmp.InbandOptions.RequestBodyInMemoryLimit != nil { + wc.InbandOptions.RequestBodyInMemoryLimit = tmp.InbandOptions.RequestBodyInMemoryLimit + } + if tmp.OutOfBandOptions.DisableBodyInspection { + wc.OutOfBandOptions.DisableBodyInspection = true + } + if tmp.OutOfBandOptions.RequestBodyInMemoryLimit != nil { + wc.OutOfBandOptions.RequestBodyInMemoryLimit = tmp.OutOfBandOptions.RequestBodyInMemoryLimit + } + return nil } func (wc *AppsecConfig) Load(configName string) error { - appsecConfigs := hub.GetItemMap(cwhub.APPSEC_CONFIGS) + item := hub.GetItem(cwhub.APPSEC_CONFIGS, configName) - for _, hubAppsecConfigItem := range appsecConfigs { - if !hubAppsecConfigItem.State.Installed { - continue - } - if hubAppsecConfigItem.Name != configName { - continue - } - wc.Logger.Infof("loading %s", hubAppsecConfigItem.State.LocalPath) - err := wc.LoadByPath(hubAppsecConfigItem.State.LocalPath) + if item != nil && item.State.Installed { + wc.Logger.Infof("loading %s", item.State.LocalPath) + err := wc.LoadByPath(item.State.LocalPath) if err != nil { - return fmt.Errorf("unable to load appsec-config %s : %s", hubAppsecConfigItem.State.LocalPath, err) + return fmt.Errorf("unable to load appsec-config %s : %s", item.State.LocalPath, err) } return nil } @@ -224,10 +272,10 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { wc.DefaultRemediation = BanRemediation } - //set the defaults + // set the defaults switch wc.DefaultRemediation { case BanRemediation, CaptchaRemediation, AllowRemediation: - 
//those are the officially supported remediation(s) + // those are the officially supported remediation(s) default: wc.Logger.Warningf("default '%s' remediation of %s is none of [%s,%s,%s] ensure bouncer compatbility!", wc.DefaultRemediation, wc.Name, BanRemediation, CaptchaRemediation, AllowRemediation) } @@ -237,7 +285,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { ret.DefaultRemediation = wc.DefaultRemediation wc.Logger.Tracef("Loading config %+v", wc) - //load rules + // load rules for _, rule := range wc.OutOfBandRules { wc.Logger.Infof("loading outofband rule %s", rule) collections, err := LoadCollection(rule, wc.Logger.WithField("component", "appsec_collection_loader")) @@ -259,7 +307,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { wc.Logger.Infof("Loaded %d inband rules", len(ret.InBandRules)) - //load hooks + // load hooks for _, hook := range wc.OnLoad { if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { return nil, fmt.Errorf("invalid 'on_success' for on_load hook : %s", hook.OnSuccess) @@ -304,7 +352,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { ret.CompiledOnMatch = append(ret.CompiledOnMatch, hook) } - //variable tracking + // variable tracking for _, variable := range wc.VariablesTracking { compiledVariableRule, err := regexp.Compile(variable) if err != nil { @@ -460,7 +508,6 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error // here means there is no filter or the filter matched for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetPostEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) - if err != nil { w.Logger.Errorf("unable to apply appsec post_eval expr: %s", err) continue @@ -604,7 +651,7 @@ func (w *AppsecRuntimeConfig) SetActionByName(name string, action string) error } func (w *AppsecRuntimeConfig) SetAction(action string) error { - //log.Infof("setting to %s", 
action) + // log.Infof("setting to %s", action) w.Logger.Debugf("setting action to %s", action) w.Response.Action = action return nil @@ -628,7 +675,7 @@ func (w *AppsecRuntimeConfig) GenerateResponse(response AppsecTempResponse, logg if response.Action == AllowRemediation { resp.HTTPStatus = w.Config.UserPassedHTTPCode bouncerStatusCode = w.Config.BouncerPassedHTTPCode - } else { //ban, captcha and anything else + } else { // ban, captcha and anything else resp.HTTPStatus = response.UserHTTPResponseCode if resp.HTTPStatus == 0 { resp.HTTPStatus = w.Config.UserBlockedHTTPCode diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index 09c1670de70..d283f95cb19 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -29,11 +29,11 @@ type AppsecCollectionConfig struct { SecLangRules []string `yaml:"seclang_rules"` Rules []appsec_rule.CustomRule `yaml:"rules"` - Labels map[string]interface{} `yaml:"labels"` //Labels is K:V list aiming at providing context the overflow + Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow - Data interface{} `yaml:"data"` //Ignore it - hash string `yaml:"-"` - version string `yaml:"-"` + Data interface{} `yaml:"data"` // Ignore it + hash string + version string } type RulesDetails struct { @@ -108,7 +108,7 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro logger.Debugf("Adding rule %s", strRule) appsecCol.Rules = append(appsecCol.Rules, strRule) - //We only take the first id, as it's the one of the "main" rule + // We only take the first id, as it's the one of the "main" rule if _, ok := AppsecRulesDetails[int(rulesId[0])]; !ok { AppsecRulesDetails[int(rulesId[0])] = RulesDetails{ LogLevel: log.InfoLevel, diff --git a/pkg/appsec/loader.go b/pkg/appsec/loader.go index 56ec23e3671..c724010cec2 100644 --- a/pkg/appsec/loader.go +++ b/pkg/appsec/loader.go @@ -9,19 +9,15 @@ 
import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -var appsecRules = make(map[string]AppsecCollectionConfig) //FIXME: would probably be better to have a struct for this +var appsecRules = make(map[string]AppsecCollectionConfig) // FIXME: would probably be better to have a struct for this -var hub *cwhub.Hub //FIXME: this is a temporary hack to make the hub available in the package +var hub *cwhub.Hub // FIXME: this is a temporary hack to make the hub available in the package func LoadAppsecRules(hubInstance *cwhub.Hub) error { hub = hubInstance appsecRules = make(map[string]AppsecCollectionConfig) - for _, hubAppsecRuleItem := range hub.GetItemMap(cwhub.APPSEC_RULES) { - if !hubAppsecRuleItem.State.Installed { - continue - } - + for _, hubAppsecRuleItem := range hub.GetInstalledByType(cwhub.APPSEC_RULES, false) { content, err := os.ReadFile(hubAppsecRuleItem.State.LocalPath) if err != nil { log.Warnf("unable to read file %s : %s", hubAppsecRuleItem.State.LocalPath, err) @@ -32,7 +28,7 @@ func LoadAppsecRules(hubInstance *cwhub.Hub) error { err = yaml.UnmarshalStrict(content, &rule) if err != nil { - log.Warnf("unable to unmarshal file %s : %s", hubAppsecRuleItem.State.LocalPath, err) + log.Warnf("unable to parse file %s : %s", hubAppsecRuleItem.State.LocalPath, err) continue } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 4a28b590e80..5f2f8f9248b 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -38,10 +38,17 @@ type ApiCredentialsCfg struct { CertPath string `yaml:"cert_path,omitempty"` } -/*global api config (for lapi->oapi)*/ +type CapiPullConfig struct { + Community *bool `yaml:"community,omitempty"` + Blocklists *bool `yaml:"blocklists,omitempty"` +} + +/*global api config (for lapi->capi)*/ type OnlineApiClientCfg struct { CredentialsFilePath string `yaml:"credentials_path,omitempty"` // credz will be edited by software, store in diff file Credentials *ApiCredentialsCfg `yaml:"-"` + PullConfig CapiPullConfig 
`yaml:"pull,omitempty"` + Sharing *bool `yaml:"sharing,omitempty"` } /*local api config (for crowdsec/cscli->lapi)*/ @@ -99,7 +106,7 @@ func (o *OnlineApiClientCfg) Load() error { err = dec.Decode(o.Credentials) if err != nil { if !errors.Is(err, io.EOF) { - return fmt.Errorf("failed unmarshaling api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) + return fmt.Errorf("failed to parse api server credentials configuration file '%s': %w", o.CredentialsFilePath, err) } } @@ -134,7 +141,7 @@ func (l *LocalApiClientCfg) Load() error { err = dec.Decode(&l.Credentials) if err != nil { if !errors.Is(err, io.EOF) { - return fmt.Errorf("failed unmarshaling api client credential configuration file '%s': %w", l.CredentialsFilePath, err) + return fmt.Errorf("failed to parse api client credential configuration file '%s': %w", l.CredentialsFilePath, err) } } @@ -344,6 +351,21 @@ func (c *Config) LoadAPIServer(inCli bool) error { log.Printf("push and pull to Central API disabled") } + //Set default values for CAPI push/pull + if c.API.Server.OnlineClient != nil { + if c.API.Server.OnlineClient.PullConfig.Community == nil { + c.API.Server.OnlineClient.PullConfig.Community = ptr.Of(true) + } + + if c.API.Server.OnlineClient.PullConfig.Blocklists == nil { + c.API.Server.OnlineClient.PullConfig.Blocklists = ptr.Of(true) + } + + if c.API.Server.OnlineClient.Sharing == nil { + c.API.Server.OnlineClient.Sharing = ptr.Of(true) + } + } + if err := c.LoadDBConfig(inCli); err != nil { return err } diff --git a/pkg/csconfig/api_test.go b/pkg/csconfig/api_test.go index 96945202aa8..17802ba31dd 100644 --- a/pkg/csconfig/api_test.go +++ b/pkg/csconfig/api_test.go @@ -101,7 +101,7 @@ func TestLoadOnlineApiClientCfg(t *testing.T) { CredentialsFilePath: "./testdata/bad_lapi-secrets.yaml", }, expected: &ApiCredentialsCfg{}, - expectedErr: "failed unmarshaling api server credentials", + expectedErr: "failed to parse api server credentials", }, { name: "missing field 
configuration", @@ -212,6 +212,11 @@ func TestLoadAPIServer(t *testing.T) { Login: "test", Password: "testpassword", }, + Sharing: ptr.Of(true), + PullConfig: CapiPullConfig{ + Community: ptr.Of(true), + Blocklists: ptr.Of(true), + }, }, Profiles: tmpLAPI.Profiles, ProfilesPath: "./testdata/profiles.yaml", diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go index 7675b90d7dd..a8d39a664f3 100644 --- a/pkg/csconfig/config_paths.go +++ b/pkg/csconfig/config_paths.go @@ -10,7 +10,7 @@ type ConfigurationPaths struct { ConfigDir string `yaml:"config_dir"` DataDir string `yaml:"data_dir,omitempty"` SimulationFilePath string `yaml:"simulation_path,omitempty"` - HubIndexFile string `yaml:"index_path,omitempty"` //path of the .index.json + HubIndexFile string `yaml:"index_path,omitempty"` // path of the .index.json HubDir string `yaml:"hub_dir,omitempty"` PluginDir string `yaml:"plugin_dir,omitempty"` NotificationDir string `yaml:"notification_dir,omitempty"` @@ -28,18 +28,18 @@ func (c *Config) loadConfigurationPaths() error { } if c.ConfigPaths.HubDir == "" { - c.ConfigPaths.HubDir = filepath.Clean(c.ConfigPaths.ConfigDir + "/hub") + c.ConfigPaths.HubDir = filepath.Join(c.ConfigPaths.ConfigDir, "hub") } if c.ConfigPaths.HubIndexFile == "" { - c.ConfigPaths.HubIndexFile = filepath.Clean(c.ConfigPaths.HubDir + "/.index.json") + c.ConfigPaths.HubIndexFile = filepath.Join(c.ConfigPaths.HubDir, ".index.json") } if c.ConfigPaths.PatternDir == "" { - c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns/") + c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns") } - var configPathsCleanup = []*string{ + configPathsCleanup := []*string{ &c.ConfigPaths.HubDir, &c.ConfigPaths.HubIndexFile, &c.ConfigPaths.ConfigDir, diff --git a/pkg/csconfig/config_test.go b/pkg/csconfig/config_test.go index 11f1f0cf68d..b69954de178 100644 --- a/pkg/csconfig/config_test.go +++ b/pkg/csconfig/config_test.go @@ -42,5 +42,5 @@ func 
TestNewCrowdSecConfig(t *testing.T) { func TestDefaultConfig(t *testing.T) { x := NewDefaultConfig() _, err := yaml.Marshal(x) - require.NoError(t, err, "failed marshaling config: %s", err) + require.NoError(t, err, "failed to serialize config: %s", err) } diff --git a/pkg/csconfig/console.go b/pkg/csconfig/console.go index 4c14f5f7d49..21ecbf3d736 100644 --- a/pkg/csconfig/console.go +++ b/pkg/csconfig/console.go @@ -95,7 +95,7 @@ func (c *LocalApiServerCfg) LoadConsoleConfig() error { err = yaml.Unmarshal(yamlFile, c.ConsoleConfig) if err != nil { - return fmt.Errorf("unmarshaling console config file '%s': %w", c.ConsoleConfigPath, err) + return fmt.Errorf("parsing console config file '%s': %w", c.ConsoleConfigPath, err) } if c.ConsoleConfig.ShareCustomScenarios == nil { diff --git a/pkg/csconfig/crowdsec_service.go b/pkg/csconfig/crowdsec_service.go index 7820595b46f..cf796805dee 100644 --- a/pkg/csconfig/crowdsec_service.go +++ b/pkg/csconfig/crowdsec_service.go @@ -143,14 +143,14 @@ func (c *CrowdsecServiceCfg) DumpContextConfigFile() error { // XXX: MakeDirs out, err := yaml.Marshal(c.ContextToSend) if err != nil { - return fmt.Errorf("while marshaling ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) + return fmt.Errorf("while serializing ConsoleConfig (for %s): %w", c.ConsoleContextPath, err) } - if err = os.MkdirAll(filepath.Dir(c.ConsoleContextPath), 0700); err != nil { + if err = os.MkdirAll(filepath.Dir(c.ConsoleContextPath), 0o700); err != nil { return fmt.Errorf("while creating directories for %s: %w", c.ConsoleContextPath, err) } - if err := os.WriteFile(c.ConsoleContextPath, out, 0600); err != nil { + if err := os.WriteFile(c.ConsoleContextPath, out, 0o600); err != nil { return fmt.Errorf("while dumping console config to %s: %w", c.ConsoleContextPath, err) } diff --git a/pkg/csconfig/simulation.go b/pkg/csconfig/simulation.go index 947b47e3c1e..c9041df464a 100644 --- a/pkg/csconfig/simulation.go +++ b/pkg/csconfig/simulation.go @@ -37,7 +37,7 
@@ func (c *Config) LoadSimulation() error { simCfg := SimulationConfig{} if c.ConfigPaths.SimulationFilePath == "" { - c.ConfigPaths.SimulationFilePath = filepath.Clean(c.ConfigPaths.ConfigDir + "/simulation.yaml") + c.ConfigPaths.SimulationFilePath = filepath.Join(c.ConfigPaths.ConfigDir, "simulation.yaml") } patcher := yamlpatch.NewPatcher(c.ConfigPaths.SimulationFilePath, ".local") @@ -52,7 +52,7 @@ func (c *Config) LoadSimulation() error { if err := dec.Decode(&simCfg); err != nil { if !errors.Is(err, io.EOF) { - return fmt.Errorf("while unmarshaling simulation file '%s': %w", c.ConfigPaths.SimulationFilePath, err) + return fmt.Errorf("while parsing simulation file '%s': %w", c.ConfigPaths.SimulationFilePath, err) } } diff --git a/pkg/csconfig/simulation_test.go b/pkg/csconfig/simulation_test.go index a678d7edd49..a1e5f0a5b02 100644 --- a/pkg/csconfig/simulation_test.go +++ b/pkg/csconfig/simulation_test.go @@ -60,7 +60,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", + expectedErr: "while parsing simulation file './testdata/config.yaml': yaml: unmarshal errors", }, { name: "basic bad file content", @@ -71,7 +71,7 @@ func TestSimulationLoading(t *testing.T) { }, Crowdsec: &CrowdsecServiceCfg{}, }, - expectedErr: "while unmarshaling simulation file './testdata/config.yaml': yaml: unmarshal errors", + expectedErr: "while parsing simulation file './testdata/config.yaml': yaml: unmarshal errors", }, } diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index f6629b2609e..e996fa9b68c 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -45,7 +45,7 @@ type PluginBroker struct { pluginConfigByName map[string]PluginConfig pluginMap map[string]plugin.Plugin notificationConfigsByPluginType map[string][][]byte // "slack" -> []{config1, config2} - notificationPluginByName map[string]Notifier + 
notificationPluginByName map[string]protobufs.NotifierServer watcher PluginWatcher pluginKillMethods []func() pluginProcConfig *csconfig.PluginCfg @@ -72,10 +72,10 @@ type ProfileAlert struct { Alert *models.Alert } -func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { +func (pb *PluginBroker) Init(ctx context.Context, pluginCfg *csconfig.PluginCfg, profileConfigs []*csconfig.ProfileCfg, configPaths *csconfig.ConfigurationPaths) error { pb.PluginChannel = make(chan ProfileAlert) pb.notificationConfigsByPluginType = make(map[string][][]byte) - pb.notificationPluginByName = make(map[string]Notifier) + pb.notificationPluginByName = make(map[string]protobufs.NotifierServer) pb.pluginMap = make(map[string]plugin.Plugin) pb.pluginConfigByName = make(map[string]PluginConfig) pb.alertsByPluginName = make(map[string][]*models.Alert) @@ -85,7 +85,7 @@ func (pb *PluginBroker) Init(pluginCfg *csconfig.PluginCfg, profileConfigs []*cs if err := pb.loadConfig(configPaths.NotificationDir); err != nil { return fmt.Errorf("while loading plugin config: %w", err) } - if err := pb.loadPlugins(configPaths.PluginDir); err != nil { + if err := pb.loadPlugins(ctx, configPaths.PluginDir); err != nil { return fmt.Errorf("while loading plugin: %w", err) } pb.watcher = PluginWatcher{} @@ -230,7 +230,7 @@ func (pb *PluginBroker) verifyPluginBinaryWithProfile() error { return nil } -func (pb *PluginBroker) loadPlugins(path string) error { +func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { binaryPaths, err := listFilesAtPath(path) if err != nil { return err @@ -265,7 +265,7 @@ func (pb *PluginBroker) loadPlugins(path string) error { return err } data = []byte(csstring.StrictExpand(string(data), os.LookupEnv)) - _, err = pluginClient.Configure(context.Background(), &protobufs.Config{Config: data}) + _, err = pluginClient.Configure(ctx, &protobufs.Config{Config: data}) if err 
!= nil { return fmt.Errorf("while configuring %s: %w", pc.Name, err) } @@ -276,7 +276,7 @@ func (pb *PluginBroker) loadPlugins(path string) error { return pb.verifyPluginBinaryWithProfile() } -func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (Notifier, error) { +func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (protobufs.NotifierServer, error) { handshake, err := getHandshake() if err != nil { @@ -313,7 +313,7 @@ func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) ( return nil, err } pb.pluginKillMethods = append(pb.pluginKillMethods, c.Kill) - return raw.(Notifier), nil + return raw.(protobufs.NotifierServer), nil } func (pb *PluginBroker) pushNotificationsToPlugin(pluginName string, alerts []*models.Alert) error { diff --git a/pkg/csplugin/broker_suite_test.go b/pkg/csplugin/broker_suite_test.go index 778bb2dfe2e..1210c67058a 100644 --- a/pkg/csplugin/broker_suite_test.go +++ b/pkg/csplugin/broker_suite_test.go @@ -1,6 +1,7 @@ package csplugin import ( + "context" "io" "os" "os/exec" @@ -96,6 +97,7 @@ func (s *PluginSuite) TearDownTest() { func (s *PluginSuite) SetupSubTest() { var err error + t := s.T() s.runDir, err = os.MkdirTemp("", "cs_plugin_test") @@ -127,6 +129,7 @@ func (s *PluginSuite) SetupSubTest() { func (s *PluginSuite) TearDownSubTest() { t := s.T() + if s.pluginBroker != nil { s.pluginBroker.Kill() s.pluginBroker = nil @@ -140,19 +143,24 @@ func (s *PluginSuite) TearDownSubTest() { os.Remove("./out") } -func (s *PluginSuite) InitBroker(procCfg *csconfig.PluginCfg) (*PluginBroker, error) { +func (s *PluginSuite) InitBroker(ctx context.Context, procCfg *csconfig.PluginCfg) (*PluginBroker, error) { pb := PluginBroker{} + if procCfg == nil { procCfg = &csconfig.PluginCfg{} } + profiles := csconfig.NewDefaultConfig().API.Server.Profiles profiles = append(profiles, &csconfig.ProfileCfg{ Notifications: []string{"dummy_default"}, }) - err := pb.Init(procCfg, profiles, 
&csconfig.ConfigurationPaths{ + + err := pb.Init(ctx, procCfg, profiles, &csconfig.ConfigurationPaths{ PluginDir: s.pluginDir, NotificationDir: s.notifDir, }) + s.pluginBroker = &pb + return s.pluginBroker, err } diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index f2179acb2c1..ae5a615b489 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -4,6 +4,7 @@ package csplugin import ( "bytes" + "context" "encoding/json" "io" "os" @@ -38,7 +39,7 @@ func (s *PluginSuite) readconfig() PluginConfig { require.NoError(t, err, "unable to read config file %s", s.pluginConfig) err = yaml.Unmarshal(orig, &config) - require.NoError(t, err, "unable to unmarshal config file") + require.NoError(t, err, "unable to parse config file") return config } @@ -46,13 +47,14 @@ func (s *PluginSuite) readconfig() PluginConfig { func (s *PluginSuite) writeconfig(config PluginConfig) { t := s.T() data, err := yaml.Marshal(&config) - require.NoError(t, err, "unable to marshal config file") + require.NoError(t, err, "unable to serialize config file") err = os.WriteFile(s.pluginConfig, data, 0o644) require.NoError(t, err, "unable to write config file %s", s.pluginConfig) } func (s *PluginSuite) TestBrokerInit() { + ctx := context.Background() tests := []struct { name string action func(*testing.T) @@ -135,20 +137,22 @@ func (s *PluginSuite) TestBrokerInit() { tc.action(t) } - _, err := s.InitBroker(&tc.procCfg) + _, err := s.InitBroker(ctx, &tc.procCfg) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } } func (s *PluginSuite) TestBrokerNoThreshold() { + ctx := context.Background() + var alerts []models.Alert DefaultEmptyTicker = 50 * time.Millisecond t := s.T() - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -187,6 +191,8 @@ func (s *PluginSuite) TestBrokerNoThreshold() { } func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { + ctx := context.Background() + // 
test grouping by "time" DefaultEmptyTicker = 50 * time.Millisecond @@ -198,7 +204,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { cfg.GroupWait = 1 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -224,6 +230,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { } func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() @@ -234,7 +241,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { cfg.GroupWait = 4 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -264,6 +271,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { } func (s *PluginSuite) TestBrokerRunGroupThreshold() { + ctx := context.Background() // test grouping by "size" DefaultEmptyTicker = 50 * time.Millisecond @@ -274,7 +282,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { cfg.GroupThreshold = 4 s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -318,6 +326,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { } func (s *PluginSuite) TestBrokerRunTimeThreshold() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() @@ -327,7 +336,7 @@ func (s *PluginSuite) TestBrokerRunTimeThreshold() { cfg.GroupWait = 1 * time.Second s.writeconfig(cfg) - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} @@ -353,11 +362,12 @@ func (s *PluginSuite) TestBrokerRunTimeThreshold() { } func (s *PluginSuite) TestBrokerRunSimple() { + ctx := context.Background() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() - pb, err := s.InitBroker(nil) + pb, err := 
s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} diff --git a/pkg/csplugin/broker_win_test.go b/pkg/csplugin/broker_win_test.go index 97a3ad33deb..570f23e5015 100644 --- a/pkg/csplugin/broker_win_test.go +++ b/pkg/csplugin/broker_win_test.go @@ -4,6 +4,7 @@ package csplugin import ( "bytes" + "context" "encoding/json" "io" "os" @@ -26,6 +27,7 @@ not if it will actually reject plugins with invalid permissions */ func (s *PluginSuite) TestBrokerInit() { + ctx := context.Background() tests := []struct { name string action func(*testing.T) @@ -54,22 +56,22 @@ func (s *PluginSuite) TestBrokerInit() { } for _, tc := range tests { - tc := tc s.Run(tc.name, func() { t := s.T() if tc.action != nil { tc.action(t) } - _, err := s.InitBroker(&tc.procCfg) + _, err := s.InitBroker(ctx, &tc.procCfg) cstest.RequireErrorContains(t, err, tc.expectedErr) }) } } func (s *PluginSuite) TestBrokerRun() { + ctx := context.Background() t := s.T() - pb, err := s.InitBroker(nil) + pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) tomb := tomb.Tomb{} diff --git a/pkg/csplugin/listfiles_test.go b/pkg/csplugin/listfiles_test.go index a4188804149..c476d7a4e4a 100644 --- a/pkg/csplugin/listfiles_test.go +++ b/pkg/csplugin/listfiles_test.go @@ -21,7 +21,7 @@ func TestListFilesAtPath(t *testing.T) { require.NoError(t, err) _, err = os.Create(filepath.Join(dir, "slack")) require.NoError(t, err) - err = os.Mkdir(filepath.Join(dir, "somedir"), 0755) + err = os.Mkdir(filepath.Join(dir, "somedir"), 0o755) require.NoError(t, err) _, err = os.Create(filepath.Join(dir, "somedir", "inner")) require.NoError(t, err) diff --git a/pkg/csplugin/notifier.go b/pkg/csplugin/notifier.go index 2b5d57fbcff..615322ac0c3 100644 --- a/pkg/csplugin/notifier.go +++ b/pkg/csplugin/notifier.go @@ -10,17 +10,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/protobufs" ) -type Notifier interface { - Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) - 
Configure(ctx context.Context, cfg *protobufs.Config) (*protobufs.Empty, error) -} - type NotifierPlugin struct { plugin.Plugin - Impl Notifier + Impl protobufs.NotifierServer } -type GRPCClient struct{ client protobufs.NotifierClient } +type GRPCClient struct{ + protobufs.UnimplementedNotifierServer + client protobufs.NotifierClient +} func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notification) (*protobufs.Empty, error) { done := make(chan error) @@ -40,14 +38,12 @@ func (m *GRPCClient) Notify(ctx context.Context, notification *protobufs.Notific } func (m *GRPCClient) Configure(ctx context.Context, config *protobufs.Config) (*protobufs.Empty, error) { - _, err := m.client.Configure( - context.Background(), config, - ) + _, err := m.client.Configure(ctx, config) return &protobufs.Empty{}, err } type GRPCServer struct { - Impl Notifier + Impl protobufs.NotifierServer } func (p *NotifierPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { diff --git a/pkg/csplugin/utils.go b/pkg/csplugin/utils.go index 2e7f0c80528..571d78add56 100644 --- a/pkg/csplugin/utils.go +++ b/pkg/csplugin/utils.go @@ -123,10 +123,10 @@ func pluginIsValid(path string) error { mode := details.Mode() perm := uint32(mode) - if (perm & 00002) != 0 { + if (perm & 0o0002) != 0 { return fmt.Errorf("plugin at %s is world writable, world writable plugins are invalid", path) } - if (perm & 00020) != 0 { + if (perm & 0o0020) != 0 { return fmt.Errorf("plugin at %s is group writable, group writable plugins are invalid", path) } if (mode & os.ModeSetgid) != 0 { diff --git a/pkg/csplugin/utils_windows.go b/pkg/csplugin/utils_windows.go index 8d4956ceeeb..91002079398 100644 --- a/pkg/csplugin/utils_windows.go +++ b/pkg/csplugin/utils_windows.go @@ -116,7 +116,7 @@ func CheckPerms(path string) error { */ aceCount := rs.Field(3).Uint() - for i := uint64(0); i < aceCount; i++ { + for i := range aceCount { ace := &AccessAllowedAce{} ret, _, _ := 
procGetAce.Call(uintptr(unsafe.Pointer(dacl)), uintptr(i), uintptr(unsafe.Pointer(&ace))) if ret == 0 { diff --git a/pkg/csplugin/utils_windows_test.go b/pkg/csplugin/utils_windows_test.go index 6a76e1215e5..1eb4dfb9033 100644 --- a/pkg/csplugin/utils_windows_test.go +++ b/pkg/csplugin/utils_windows_test.go @@ -37,7 +37,6 @@ func TestGetPluginNameAndTypeFromPath(t *testing.T) { }, } for _, tc := range tests { - tc := tc t.Run(tc.name, func(t *testing.T) { got, got1, err := getPluginTypeAndSubtypeFromPath(tc.path) cstest.RequireErrorContains(t, err, tc.expectedErr) diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index b76c3c4eadd..84e63ec6493 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -15,11 +15,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -var ctx = context.Background() - func resetTestTomb(testTomb *tomb.Tomb, pw *PluginWatcher) { testTomb.Kill(nil) <-pw.PluginEvents + if err := testTomb.Wait(); err != nil { log.Fatal(err) } @@ -46,13 +45,17 @@ func listenChannelWithTimeout(ctx context.Context, channel chan string) error { case <-ctx.Done(): return ctx.Err() } + return nil } func TestPluginWatcherInterval(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows because timing is not reliable") } + pw := PluginWatcher{} alertsByPluginName := make(map[string][]*models.Alert) testTomb := tomb.Tomb{} @@ -66,6 +69,7 @@ func TestPluginWatcherInterval(t *testing.T) { ct, cancel := context.WithTimeout(ctx, time.Microsecond) defer cancel() + err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") resetTestTomb(&testTomb, &pw) @@ -74,6 +78,7 @@ func TestPluginWatcherInterval(t *testing.T) { ct, cancel = context.WithTimeout(ctx, time.Millisecond*5) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) resetTestTomb(&testTomb, &pw) @@ -81,9 
+86,12 @@ func TestPluginWatcherInterval(t *testing.T) { } func TestPluginAlertCountWatcher(t *testing.T) { + ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows because timing is not reliable") } + pw := PluginWatcher{} alertsByPluginName := make(map[string][]*models.Alert) configs := map[string]PluginConfig{ @@ -92,28 +100,34 @@ func TestPluginAlertCountWatcher(t *testing.T) { }, } testTomb := tomb.Tomb{} + pw.Init(configs, alertsByPluginName) pw.Start(&testTomb) // Channel won't contain any events since threshold is not crossed. ct, cancel := context.WithTimeout(ctx, time.Second) defer cancel() + err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") // Channel won't contain any events since threshold is not crossed. resetWatcherAlertCounter(&pw) insertNAlertsToPlugin(&pw, 4, "testPlugin") + ct, cancel = context.WithTimeout(ctx, time.Second) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") // Channel will contain an event since threshold is crossed. 
resetWatcherAlertCounter(&pw) insertNAlertsToPlugin(&pw, 5, "testPlugin") + ct, cancel = context.WithTimeout(ctx, time.Second) defer cancel() + err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) resetTestTomb(&testTomb, &pw) diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index b817121e222..90112d80abf 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -8,8 +8,9 @@ import ( "net/http" "strings" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) const ( @@ -46,7 +47,7 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map } req.Header.Set("X-Api-Key", c.apiKey) - req.Header.Set("User-Agent", cwversion.UserAgent()) + req.Header.Set("User-Agent", useragent.Default()) resp, err := c.httpClient.Do(req) if err != nil { diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 0a9cc443ce0..683f1853b43 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -4,11 +4,10 @@ import ( "fmt" "net/http" "path/filepath" - "sort" "strings" "time" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) // hubTransport wraps a Transport to set a custom User-Agent. @@ -17,7 +16,7 @@ type hubTransport struct { } func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { - req.Header.Set("User-Agent", cwversion.UserAgent()) + req.Header.Set("User-Agent", useragent.Default()) return t.RoundTripper.RoundTrip(req) } @@ -45,10 +44,3 @@ func safePath(dir, filePath string) (string, error) { return absFilePath, nil } - -// SortItemSlice sorts a slice of items by name, case insensitive. 
-func SortItemSlice(items []*Item) { - sort.Slice(items, func(i, j int) bool { - return strings.ToLower(items[i].Name) < strings.ToLower(items[j].Name) - }) -} diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index a4641483622..17e7a0dc723 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -146,7 +146,7 @@ func setResponseByPath() { "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_parser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), "/crowdsecurity/master/parsers/s01-parse/crowdsecurity/foobar_subparser.yaml": fileToStringX("./testdata/foobar_parser.yaml"), "/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml": fileToStringX("./testdata/collection_v1.yaml"), - "/crowdsecurity/master/.index.json": fileToStringX("./testdata/index1.json"), + "/crowdsecurity/master/.index.json": fileToStringX("./testdata/index1.json"), "/crowdsecurity/master/scenarios/crowdsecurity/foobar_scenario.yaml": `filter: true name: crowdsecurity/foobar_scenario`, "/crowdsecurity/master/scenarios/crowdsecurity/barfoo_scenario.yaml": `filter: true diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index 89d8de0fa8b..f86b95c6454 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -74,7 +74,7 @@ // Now you can use the hub object to access the existing items: // // // list all the parsers -// for _, parser := range hub.GetItemMap(cwhub.PARSERS) { +// for _, parser := range hub.GetItemsByType(cwhub.PARSERS, false) { // fmt.Printf("parser: %s\n", parser.Name) // } // diff --git a/pkg/cwhub/errors.go b/pkg/cwhub/errors.go index f1e779b5476..b0be444fcba 100644 --- a/pkg/cwhub/errors.go +++ b/pkg/cwhub/errors.go @@ -5,10 +5,8 @@ import ( "fmt" ) -var ( - // ErrNilRemoteHub is returned when trying to download with a local-only configuration. - ErrNilRemoteHub = errors.New("remote hub configuration is not provided. 
Please report this issue to the developers") -) +// ErrNilRemoteHub is returned when trying to download with a local-only configuration. +var ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") // IndexNotFoundError is returned when the remote hub index is not found. type IndexNotFoundError struct { diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 1293d6fa235..f74a794a512 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -8,11 +8,12 @@ import ( "io" "os" "path" - "slices" "strings" "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) @@ -78,7 +79,7 @@ func (h *Hub) parseIndex() error { } if err := json.Unmarshal(bidx, &h.items); err != nil { - return fmt.Errorf("failed to unmarshal index: %w", err) + return fmt.Errorf("failed to parse index: %w", err) } h.logger.Debugf("%d item types in hub index", len(ItemTypes)) @@ -117,13 +118,14 @@ func (h *Hub) ItemStats() []string { tainted := 0 for _, itemType := range ItemTypes { - if len(h.GetItemMap(itemType)) == 0 { + items := h.GetItemsByType(itemType, false) + if len(items) == 0 { continue } - loaded += fmt.Sprintf("%d %s, ", len(h.GetItemMap(itemType)), itemType) + loaded += fmt.Sprintf("%d %s, ", len(items), itemType) - for _, item := range h.GetItemMap(itemType) { + for _, item := range items { if item.State.IsLocal() { local++ } @@ -153,7 +155,7 @@ func (h *Hub) ItemStats() []string { // Update downloads the latest version of the index and writes it to disk if it changed. It cannot be called after Load() // unless the hub is completely empty. func (h *Hub) Update(ctx context.Context) error { - if h.pathIndex != nil && len(h.pathIndex) > 0 { + if len(h.pathIndex) > 0 { // if this happens, it's a bug. 
return errors.New("cannot update hub after items have been loaded") } @@ -218,73 +220,62 @@ func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) { return i, nil } -// GetNamesByType returns a slice of (full) item names for a given type -// (eg. for collections: crowdsecurity/apache2 crowdsecurity/nginx). -func (h *Hub) GetNamesByType(itemType string) []string { - m := h.GetItemMap(itemType) - if m == nil { - return nil - } +// GetItemsByType returns a slice of all the items of a given type, installed or not, optionally sorted by case-insensitive name. +// A non-existent type will silently return an empty slice. +func (h *Hub) GetItemsByType(itemType string, sorted bool) []*Item { + items := h.items[itemType] - names := make([]string, 0, len(m)) - for k := range m { - names = append(names, k) - } + ret := make([]*Item, len(items)) - return names -} + if sorted { + for idx, name := range maptools.SortedKeysNoCase(items) { + ret[idx] = items[name] + } -// GetItemsByType returns a slice of all the items of a given type, installed or not. -func (h *Hub) GetItemsByType(itemType string) ([]*Item, error) { - if !slices.Contains(ItemTypes, itemType) { - return nil, fmt.Errorf("invalid item type %s", itemType) + return ret } - items := h.items[itemType] - - ret := make([]*Item, len(items)) - idx := 0 - for _, item := range items { ret[idx] = item - idx++ + idx += 1 } - return ret, nil + return ret } -// GetInstalledItemsByType returns a slice of the installed items of a given type. -func (h *Hub) GetInstalledItemsByType(itemType string) ([]*Item, error) { - if !slices.Contains(ItemTypes, itemType) { - return nil, fmt.Errorf("invalid item type %s", itemType) - } - - items := h.items[itemType] +// GetInstalledByType returns a slice of all the installed items of a given type, optionally sorted by case-insensitive name. +// A non-existent type will silently return an empty slice. 
+func (h *Hub) GetInstalledByType(itemType string, sorted bool) []*Item { + ret := make([]*Item, 0) - retItems := make([]*Item, 0) - - for _, item := range items { + for _, item := range h.GetItemsByType(itemType, sorted) { if item.State.Installed { - retItems = append(retItems, item) + ret = append(ret, item) } } - return retItems, nil + return ret } -// GetInstalledNamesByType returns the names of the installed items of a given type. -func (h *Hub) GetInstalledNamesByType(itemType string) ([]string, error) { - items, err := h.GetInstalledItemsByType(itemType) - if err != nil { - return nil, err - } +// GetInstalledListForAPI returns a slice of names of all the installed scenarios and appsec-rules. +// The returned list is sorted by type (scenarios first) and case-insensitive name. +func (h *Hub) GetInstalledListForAPI() []string { + scenarios := h.GetInstalledByType(SCENARIOS, true) + appsecRules := h.GetInstalledByType(APPSEC_RULES, true) + + ret := make([]string, len(scenarios)+len(appsecRules)) - retStr := make([]string, len(items)) + idx := 0 + for _, item := range scenarios { + ret[idx] = item.Name + idx += 1 + } - for idx, it := range items { - retStr[idx] = it.Name + for _, item := range appsecRules { + ret[idx] = item.Name + idx += 1 } - return retStr, nil + return ret } diff --git a/pkg/cwhub/relativepath.go b/pkg/cwhub/relativepath.go new file mode 100644 index 00000000000..bcd4c576840 --- /dev/null +++ b/pkg/cwhub/relativepath.go @@ -0,0 +1,28 @@ +package cwhub + +import ( + "path/filepath" + "strings" +) + +// relativePathComponents returns the list of path components after baseDir. +// If path is not inside baseDir, it returns an empty slice. +func relativePathComponents(path string, baseDir string) []string { + absPath, err := filepath.Abs(path) + if err != nil { + return []string{} + } + + absBaseDir, err := filepath.Abs(baseDir) + if err != nil { + return []string{} + } + + // is path inside baseDir? 
+ relPath, err := filepath.Rel(absBaseDir, absPath) + if err != nil || strings.HasPrefix(relPath, "..") || relPath == "." { + return []string{} + } + + return strings.Split(relPath, string(filepath.Separator)) +} diff --git a/pkg/cwhub/relativepath_test.go b/pkg/cwhub/relativepath_test.go new file mode 100644 index 00000000000..11eba566064 --- /dev/null +++ b/pkg/cwhub/relativepath_test.go @@ -0,0 +1,72 @@ +package cwhub + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRelativePathComponents(t *testing.T) { + tests := []struct { + name string + path string + baseDir string + expected []string + }{ + { + name: "Path within baseDir", + path: "/home/user/project/src/file.go", + baseDir: "/home/user/project", + expected: []string{"src", "file.go"}, + }, + { + name: "Path is baseDir", + path: "/home/user/project", + baseDir: "/home/user/project", + expected: []string{}, + }, + { + name: "Path outside baseDir", + path: "/home/user/otherproject/src/file.go", + baseDir: "/home/user/project", + expected: []string{}, + }, + { + name: "Path is subdirectory of baseDir", + path: "/home/user/project/src/", + baseDir: "/home/user/project", + expected: []string{"src"}, + }, + { + name: "Relative paths", + path: "project/src/file.go", + baseDir: "project", + expected: []string{"src", "file.go"}, + }, + { + name: "BaseDir with trailing slash", + path: "/home/user/project/src/file.go", + baseDir: "/home/user/project/", + expected: []string{"src", "file.go"}, + }, + { + name: "Empty baseDir", + path: "/home/user/project/src/file.go", + baseDir: "", + expected: []string{}, + }, + { + name: "Empty path", + path: "", + baseDir: "/home/user/project", + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := relativePathComponents(tt.path, tt.baseDir) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 38bb376ae3b..c82822e64ef 100644 --- 
a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -20,22 +20,49 @@ func isYAMLFileName(path string) bool { return strings.HasSuffix(path, ".yaml") || strings.HasSuffix(path, ".yml") } -// linkTarget returns the target of a symlink, or empty string if it's dangling. -func linkTarget(path string, logger *logrus.Logger) (string, error) { - hubpath, err := os.Readlink(path) - if err != nil { - return "", fmt.Errorf("unable to read symlink: %s", path) +// resolveSymlink returns the ultimate target path of a symlink +// returns error if the symlink is dangling or too many symlinks are followed +func resolveSymlink(path string) (string, error) { + const maxSymlinks = 10 // Prevent infinite loops + for range maxSymlinks { + fi, err := os.Lstat(path) + if err != nil { + return "", err // dangling link + } + + if fi.Mode()&os.ModeSymlink == 0 { + // found the target + return path, nil + } + + path, err = os.Readlink(path) + if err != nil { + return "", err + } + + // relative to the link's directory? + if !filepath.IsAbs(path) { + path = filepath.Join(filepath.Dir(path), path) + } } - logger.Tracef("symlink %s -> %s", path, hubpath) + return "", errors.New("too many levels of symbolic links") +} - _, err = os.Lstat(hubpath) - if os.IsNotExist(err) { - logger.Warningf("link target does not exist: %s -> %s", path, hubpath) - return "", nil +// isPathInside checks if a path is inside the given directory +// it can return false negatives if the filesystem is case insensitive +func isPathInside(path, dir string) (bool, error) { + absFilePath, err := filepath.Abs(path) + if err != nil { + return false, err + } + + absDir, err := filepath.Abs(dir) + if err != nil { + return false, err } - return hubpath, nil + return strings.HasPrefix(absFilePath, absDir), nil } // information used to create a new Item, from a file path. 
@@ -53,58 +80,76 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo hubDir := h.local.HubDir installDir := h.local.InstallDir - subs := strings.Split(path, string(os.PathSeparator)) + subsHub := relativePathComponents(path, hubDir) + subsInstall := relativePathComponents(path, installDir) - logger.Tracef("path:%s, hubdir:%s, installdir:%s", path, hubDir, installDir) - logger.Tracef("subs:%v", subs) - // we're in hub (~/.hub/hub/) - if strings.HasPrefix(path, hubDir) { + switch { + case len(subsHub) > 0: logger.Tracef("in hub dir") - // .../hub/parsers/s00-raw/crowdsec/skip-pretag.yaml - // .../hub/scenarios/crowdsec/ssh_bf.yaml - // .../hub/profiles/crowdsec/linux.yaml - if len(subs) < 4 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml + // .../hub/scenarios/crowdsecurity/ssh_bf.yaml + // .../hub/profiles/crowdsecurity/linux.yaml + if len(subsHub) < 3 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) + } + + ftype := subsHub[0] + if !slices.Contains(ItemTypes, ftype) { + // this doesn't really happen anymore, because we only scan the {hubtype} directories + return nil, fmt.Errorf("unknown configuration type '%s'", ftype) + } + + stage := "" + fauthor := subsHub[1] + fname := subsHub[2] + + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subsHub[1] + fauthor = subsHub[2] + fname = subsHub[3] } ret = &itemFileInfo{ inhub: true, - fname: subs[len(subs)-1], - fauthor: subs[len(subs)-2], - stage: subs[len(subs)-3], - ftype: subs[len(subs)-4], + ftype: ftype, + stage: stage, + fauthor: fauthor, + fname: fname, } - } else if strings.HasPrefix(path, installDir) { // we're in install /etc/crowdsec//... 
+ + case len(subsInstall) > 0: logger.Tracef("in install dir") - if len(subs) < 3 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) - } // .../config/parser/stage/file.yaml // .../config/postoverflow/stage/file.yaml // .../config/scenarios/scenar.yaml // .../config/collections/linux.yaml //file is empty - ret = &itemFileInfo{ - inhub: false, - fname: subs[len(subs)-1], - stage: subs[len(subs)-2], - ftype: subs[len(subs)-3], - fauthor: "", + + if len(subsInstall) < 2 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsInstall)) } - } else { - return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) - } - logger.Tracef("stage:%s ftype:%s", ret.stage, ret.ftype) + // this can be in any number of subdirs, we join them to compose the item name + + ftype := subsInstall[0] + stage := "" + fname := strings.Join(subsInstall[1:], "/") - if ret.ftype != PARSERS && ret.ftype != POSTOVERFLOWS { - if !slices.Contains(ItemTypes, ret.stage) { - return nil, errors.New("unknown configuration type") + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subsInstall[1] + fname = strings.Join(subsInstall[2:], "/") } - ret.ftype = ret.stage - ret.stage = "" + ret = &itemFileInfo{ + inhub: false, + ftype: ftype, + stage: stage, + fauthor: "", + fname: fname, + } + default: + return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) } logger.Tracef("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", ret.fname, ret.fauthor, ret.stage, ret.ftype) @@ -165,7 +210,7 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { err = yaml.Unmarshal(itemContent, &itemName) if err != nil { - return nil, fmt.Errorf("failed to unmarshal %s: %w", path, err) + return nil, fmt.Errorf("failed to parse %s: %w", path, err) } if itemName.Name != "" { @@ -176,8 +221,6 @@ func newLocalItem(h *Hub, path string, 
info *itemFileInfo) (*Item, error) { } func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { - hubpath := "" - if err != nil { h.logger.Debugf("while syncing hub dir: %s", err) // there is a path error, we ignore the file @@ -190,8 +233,26 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { return err } + // permission errors, files removed while reading, etc. + if f == nil { + return nil + } + + if f.IsDir() { + // if a directory starts with a dot, we don't traverse it + // - single dot prefix is hidden by unix convention + // - double dot prefix is used by k8s to mount config maps + if strings.HasPrefix(f.Name(), ".") { + h.logger.Tracef("skipping hidden directory %s", path) + return filepath.SkipDir + } + + // keep traversing + return nil + } + // we only care about YAML files - if f == nil || f.IsDir() || !isYAMLFileName(f.Name()) { + if !isYAMLFileName(f.Name()) { return nil } @@ -201,35 +262,38 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { return nil } - // non symlinks are local user files or hub files - if f.Type()&os.ModeSymlink == 0 { - h.logger.Tracef("%s is not a symlink", path) - - if !info.inhub { - h.logger.Tracef("%s is a local file, skip", path) + // follow the link to see if it falls in the hub directory + // if it's not a link, target == path + target, err := resolveSymlink(path) + if err != nil { + // target does not exist, the user might have removed the file + // or switched to a hub branch without it; or symlink loop + h.logger.Warningf("Ignoring file %s: %s", path, err) + return nil + } - item, err := newLocalItem(h, path, info) - if err != nil { - return err - } + targetInHub, err := isPathInside(target, h.local.HubDir) + if err != nil { + h.logger.Warningf("Ignoring file %s: %s", path, err) + return nil + } - h.addItem(item) + // local (custom) item if the file or link target is not inside the hub dir + if !targetInHub { + h.logger.Tracef("%s is a local file, skip", path) - 
return nil - } - } else { - hubpath, err = linkTarget(path, h.logger) + item, err := newLocalItem(h, path, info) if err != nil { return err } - if hubpath == "" { - // target does not exist, the user might have removed the file - // or switched to a hub branch without it - return nil - } + h.addItem(item) + + return nil } + hubpath := target + // try to find which configuration item it is h.logger.Tracef("check [%s] of %s", info.fname, info.ftype) diff --git a/pkg/cwversion/component/component.go b/pkg/cwversion/component/component.go new file mode 100644 index 00000000000..7ed596525e0 --- /dev/null +++ b/pkg/cwversion/component/component.go @@ -0,0 +1,35 @@ +package component + +// Package component provides functionality for managing the registration of +// optional, compile-time components in the system. This is meant as a space +// saving measure, separate from feature flags (package pkg/fflag) which are +// only enabled/disabled at runtime. + +// Built is a map of all the known components, and whether they are built-in or not. 
+// This is populated as soon as possible by the respective init() functions +var Built = map[string]bool{ + "datasource_appsec": false, + "datasource_cloudwatch": false, + "datasource_docker": false, + "datasource_file": false, + "datasource_journalctl": false, + "datasource_k8s-audit": false, + "datasource_kafka": false, + "datasource_kinesis": false, + "datasource_loki": false, + "datasource_s3": false, + "datasource_syslog": false, + "datasource_wineventlog": false, + "datasource_http": false, + "cscli_setup": false, +} + +func Register(name string) { + if _, ok := Built[name]; !ok { + // having a list of the disabled components is essential + // to debug users' issues + panic("cannot register unknown compile-time component: " + name) + } + + Built[name] = true +} diff --git a/pkg/cwversion/constraint/constraint.go b/pkg/cwversion/constraint/constraint.go new file mode 100644 index 00000000000..67593f9ebbc --- /dev/null +++ b/pkg/cwversion/constraint/constraint.go @@ -0,0 +1,32 @@ +package constraint + +import ( + "fmt" + + goversion "github.com/hashicorp/go-version" +) + +const ( + Parser = ">= 1.0, <= 3.0" + Scenario = ">= 1.0, <= 3.0" + API = "v1" + Acquis = ">= 1.0, < 2.0" +) + +func Satisfies(strvers string, constraint string) (bool, error) { + vers, err := goversion.NewVersion(strvers) + if err != nil { + return false, fmt.Errorf("failed to parse '%s': %w", strvers, err) + } + + constraints, err := goversion.NewConstraint(constraint) + if err != nil { + return false, fmt.Errorf("failed to parse constraint '%s'", constraint) + } + + if !constraints.Check(vers) { + return false, nil + } + + return true, nil +} diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 28d5c2a621c..2cb7de13e18 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -4,9 +4,12 @@ import ( "fmt" "strings" - goversion "github.com/hashicorp/go-version" - + "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/go-cs-lib/version" + + 
"github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/component" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" ) var ( @@ -14,31 +17,44 @@ var ( Libre2 = "WebAssembly" ) -const ( - Constraint_parser = ">= 1.0, <= 3.0" - Constraint_scenario = ">= 1.0, <= 3.0" - Constraint_api = "v1" - Constraint_acquis = ">= 1.0, < 2.0" -) - func FullString() string { + dsBuilt := map[string]struct{}{} + dsExcluded := map[string]struct{}{} + + for ds, built := range component.Built { + if built { + dsBuilt[ds] = struct{}{} + continue + } + + dsExcluded[ds] = struct{}{} + } + ret := fmt.Sprintf("version: %s\n", version.String()) ret += fmt.Sprintf("Codename: %s\n", Codename) ret += fmt.Sprintf("BuildDate: %s\n", version.BuildDate) ret += fmt.Sprintf("GoVersion: %s\n", version.GoVersion) ret += fmt.Sprintf("Platform: %s\n", version.System) ret += fmt.Sprintf("libre2: %s\n", Libre2) - ret += fmt.Sprintf("User-Agent: %s\n", UserAgent()) - ret += fmt.Sprintf("Constraint_parser: %s\n", Constraint_parser) - ret += fmt.Sprintf("Constraint_scenario: %s\n", Constraint_scenario) - ret += fmt.Sprintf("Constraint_api: %s\n", Constraint_api) - ret += fmt.Sprintf("Constraint_acquis: %s\n", Constraint_acquis) + ret += fmt.Sprintf("User-Agent: %s\n", useragent.Default()) + ret += fmt.Sprintf("Constraint_parser: %s\n", constraint.Parser) + ret += fmt.Sprintf("Constraint_scenario: %s\n", constraint.Scenario) + ret += fmt.Sprintf("Constraint_api: %s\n", constraint.API) + ret += fmt.Sprintf("Constraint_acquis: %s\n", constraint.Acquis) - return ret -} + built := "(none)" + + if len(dsBuilt) > 0 { + built = strings.Join(maptools.SortedKeys(dsBuilt), ", ") + } -func UserAgent() string { - return "crowdsec/" + version.String() + "-" + version.System + ret += fmt.Sprintf("Built-in optional components: %s\n", built) + + if len(dsExcluded) > 0 { + ret += fmt.Sprintf("Excluded components: %s\n", 
strings.Join(maptools.SortedKeys(dsExcluded), ", ")) + } + + return ret } // VersionStrip remove the tag from the version string, used to match with a hub branch @@ -48,21 +64,3 @@ func VersionStrip() string { return ret[0] } - -func Satisfies(strvers string, constraint string) (bool, error) { - vers, err := goversion.NewVersion(strvers) - if err != nil { - return false, fmt.Errorf("failed to parse '%s': %w", strvers, err) - } - - constraints, err := goversion.NewConstraint(constraint) - if err != nil { - return false, fmt.Errorf("failed to parse constraint '%s'", constraint) - } - - if !constraints.Check(vers) { - return false, nil - } - - return true, nil -} diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 0f6d87fb1b6..ede9c89fe9a 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -35,12 +35,12 @@ const ( // CreateOrUpdateAlert is specific to PAPI : It checks if alert already exists, otherwise inserts it // if alert already exists, it checks it associated decisions already exists // if some associated decisions are missing (ie. 
previous insert ended up in error) it inserts them -func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) (string, error) { +func (c *Client) CreateOrUpdateAlert(ctx context.Context, machineID string, alertItem *models.Alert) (string, error) { if alertItem.UUID == "" { return "", errors.New("alert UUID is empty") } - alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(c.CTX) + alerts, err := c.Ent.Alert.Query().Where(alert.UUID(alertItem.UUID)).WithDecisions().All(ctx) if err != nil && !ent.IsNotFound(err) { return "", fmt.Errorf("unable to query alerts for uuid %s: %w", alertItem.UUID, err) @@ -48,7 +48,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) // alert wasn't found, insert it (expected hotpath) if ent.IsNotFound(err) || len(alerts) == 0 { - alertIDs, err := c.CreateAlert(machineID, []*models.Alert{alertItem}) + alertIDs, err := c.CreateAlert(ctx, machineID, []*models.Alert{alertItem}) if err != nil { return "", fmt.Errorf("unable to create alert: %w", err) } @@ -165,7 +165,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) builderChunks := slicetools.Chunks(decisionBuilders, c.decisionBulkSize) for _, builderChunk := range builderChunks { - decisionsCreateRet, err := c.Ent.Decision.CreateBulk(builderChunk...).Save(c.CTX) + decisionsCreateRet, err := c.Ent.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { return "", fmt.Errorf("creating alert decisions: %w", err) } @@ -178,7 +178,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) decisionChunks := slicetools.Chunks(decisions, c.decisionBulkSize) for _, decisionChunk := range decisionChunks { - err = c.Ent.Alert.Update().Where(alert.UUID(alertItem.UUID)).AddDecisions(decisionChunk...).Exec(c.CTX) + err = c.Ent.Alert.Update().Where(alert.UUID(alertItem.UUID)).AddDecisions(decisionChunk...).Exec(ctx) if err != nil { return "", 
fmt.Errorf("updating alert %s: %w", alertItem.UUID, err) } @@ -191,7 +191,7 @@ func (c *Client) CreateOrUpdateAlert(machineID string, alertItem *models.Alert) // it takes care of creating the new alert with the associated decisions, and it will as well deleted the "older" overlapping decisions: // 1st pull, you get decisions [1,2,3]. it inserts [1,2,3] // 2nd pull, you get decisions [1,2,3,4]. it inserts [1,2,3,4] and will try to delete [1,2,3,4] with a different alert ID and same origin -func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, int, error) { +func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models.Alert) (int, int, int, error) { if alertItem == nil { return 0, 0, 0, errors.New("nil alert") } @@ -244,7 +244,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in SetScenarioHash(*alertItem.ScenarioHash). SetRemediation(true) // it's from CAPI, we always have decisions - alertRef, err := alertB.Save(c.CTX) + alertRef, err := alertB.Save(ctx) if err != nil { return 0, 0, 0, errors.Wrapf(BulkError, "error creating alert : %s", err) } @@ -253,7 +253,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in return alertRef.ID, 0, 0, nil } - txClient, err := c.Ent.Tx(c.CTX) + txClient, err := c.Ent.Tx(ctx) if err != nil { return 0, 0, 0, errors.Wrapf(BulkError, "error creating transaction : %s", err) } @@ -347,7 +347,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in decision.OriginEQ(DecOrigin), decision.Not(decision.HasOwnerWith(alert.IDEQ(alertRef.ID))), decision.ValueIn(deleteChunk...), - )).Exec(c.CTX) + )).Exec(ctx) if err != nil { rollbackErr := txClient.Rollback() if rollbackErr != nil { @@ -363,7 +363,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in builderChunks := slicetools.Chunks(decisionBuilders, c.decisionBulkSize) for _, builderChunk := range builderChunks { - 
insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(c.CTX) + insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { rollbackErr := txClient.Rollback() if rollbackErr != nil { @@ -391,7 +391,7 @@ func (c *Client) UpdateCommunityBlocklist(alertItem *models.Alert) (int, int, in return alertRef.ID, inserted, deleted, nil } -func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decisions []*models.Decision) ([]*ent.Decision, error) { +func (c *Client) createDecisionChunk(ctx context.Context, simulated bool, stopAtTime time.Time, decisions []*models.Decision) ([]*ent.Decision, error) { decisionCreate := []*ent.DecisionCreate{} for _, decisionItem := range decisions { @@ -436,7 +436,7 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis return nil, nil } - ret, err := c.Ent.Decision.CreateBulk(decisionCreate...).Save(c.CTX) + ret, err := c.Ent.Decision.CreateBulk(decisionCreate...).Save(ctx) if err != nil { return nil, err } @@ -444,7 +444,7 @@ func (c *Client) createDecisionChunk(simulated bool, stopAtTime time.Time, decis return ret, nil } -func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) { +func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) { alertBuilders := []*ent.AlertCreate{} alertDecisions := [][]*ent.Decision{} @@ -456,14 +456,14 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err) + c.Log.Errorf("creating alert: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err) startAtTime = time.Now().UTC() } stopAtTime, err := time.Parse(time.RFC3339, 
*alertItem.StopAt) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse stopAtTime '%s', defaulting to now: %s", *alertItem.StopAt, err) + c.Log.Errorf("creating alert: Failed to parse stopAtTime '%s', defaulting to now: %s", *alertItem.StopAt, err) stopAtTime = time.Now().UTC() } @@ -483,7 +483,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ for i, eventItem := range alertItem.Events { ts, err := time.Parse(time.RFC3339, *eventItem.Timestamp) if err != nil { - c.Log.Errorf("CreateAlertBulk: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) + c.Log.Errorf("creating alert: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) ts = time.Now().UTC() } @@ -540,7 +540,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ c.Log.Warningf("dropped 'serialized' field (machine %s / scenario %s)", machineID, *alertItem.Scenario) } - events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(c.CTX) + events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(ctx) if err != nil { return nil, errors.Wrapf(BulkError, "creating alert events: %s", err) } @@ -554,12 +554,14 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ value := metaItem.Value if len(metaItem.Value) > 4095 { - c.Log.Warningf("truncated meta %s : value too long", metaItem.Key) + c.Log.Warningf("truncated meta %s: value too long", metaItem.Key) + value = value[:4095] } if len(metaItem.Key) > 255 { - c.Log.Warningf("truncated meta %s : key too long", metaItem.Key) + c.Log.Warningf("truncated meta %s: key too long", metaItem.Key) + key = key[:255] } @@ -568,7 +570,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ SetValue(value) } - metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(c.CTX) + metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(ctx) if err != nil { c.Log.Warningf("error creating 
alert meta: %s", err) } @@ -578,7 +580,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ decisionChunks := slicetools.Chunks(alertItem.Decisions, c.decisionBulkSize) for _, decisionChunk := range decisionChunks { - decisionRet, err := c.createDecisionChunk(*alertItem.Simulated, stopAtTime, decisionChunk) + decisionRet, err := c.createDecisionChunk(ctx, *alertItem.Simulated, stopAtTime, decisionChunk) if err != nil { return nil, fmt.Errorf("creating alert decisions: %w", err) } @@ -636,7 +638,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ return nil, nil } - alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(c.CTX) + alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(ctx) if err != nil { return nil, errors.Wrapf(BulkError, "bulk creating alert : %s", err) } @@ -653,7 +655,7 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ for retry < maxLockRetries { // so much for the happy path... 
but sqlite3 errors work differently - _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(c.CTX) + _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(ctx) if err == nil { break } @@ -678,23 +680,24 @@ func (c *Client) createAlertChunk(machineID string, owner *ent.Machine, alerts [ } } } + return ret, nil } -func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]string, error) { +func (c *Client) CreateAlert(ctx context.Context, machineID string, alertList []*models.Alert) ([]string, error) { var ( owner *ent.Machine err error ) if machineID != "" { - owner, err = c.QueryMachineByID(machineID) + owner, err = c.QueryMachineByID(ctx, machineID) if err != nil { if !errors.Is(err, UserNotExists) { return nil, fmt.Errorf("machine '%s': %w", machineID, err) } - c.Log.Debugf("CreateAlertBulk: Machine Id %s doesn't exist", machineID) + c.Log.Debugf("creating alert: machine %s doesn't exist", machineID) owner = nil } @@ -706,7 +709,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str alertIDs := []string{} for _, alertChunk := range alertChunks { - ids, err := c.createAlertChunk(machineID, owner, alertChunk) + ids, err := c.createAlertChunk(ctx, machineID, owner, alertChunk) if err != nil { return nil, fmt.Errorf("machine '%s': %w", machineID, err) } @@ -715,7 +718,7 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str } if owner != nil { - err = owner.Update().SetLastPush(time.Now().UTC()).Exec(c.CTX) + err = owner.Update().SetLastPush(time.Now().UTC()).Exec(ctx) if err != nil { return nil, fmt.Errorf("machine '%s': %w", machineID, err) } @@ -724,6 +727,160 @@ func (c *Client) CreateAlert(machineID string, alertList []*models.Alert) ([]str return alertIDs, nil } +func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { + /* the simulated filter is a bit different : if it's not present *or* set to false, 
specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok && v[0] == "false" { + *predicates = append(*predicates, alert.SimulatedEQ(false)) + } +} + +func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { + if _, ok := filter["origin"]; ok { + filter["include_capi"] = []string{"true"} + } +} + +func handleScopeFilter(scope string, predicates *[]predicate.Alert) { + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + + *predicates = append(*predicates, alert.SourceScopeEQ(scope)) +} + +func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { + duration, err := ParseDuration(value) + if err != nil { + return fmt.Errorf("while parsing duration: %w", err) + } + + timePoint := time.Now().UTC().Add(-duration) + if timePoint.IsZero() { + return fmt.Errorf("empty time now() - %s", timePoint.String()) + } + + switch param { + case "since": + *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) + case "created_before": + *predicates = append(*predicates, alert.CreatedAtLTE(timePoint)) + case "until": + *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) + } + + return nil +} + +func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } +} + +func 
handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip < query.start_ip + alert.HasDecisionsWith(decision.StartIPLT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix <= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip > query.end_ip + alert.HasDecisionsWith(decision.EndIPGT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix >= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), + ), + ), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip > query.start_ip + alert.HasDecisionsWith(decision.StartIPGT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix >= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip < query.end_ip + alert.HasDecisionsWith(decision.EndIPLT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix <= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), + ), + ), + )) + } +} + +func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { + if ip_sz == 4 { + 
handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz == 16 { + handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz != 0 { + return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + + return nil +} + +func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error { + if value == "false" { + *predicates = append(*predicates, alert.And( + // do not show alerts with active decisions having origin CAPI or lists + alert.And( + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), + ), + alert.Not( + alert.And( + // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI + alert.Not(alert.HasDecisions()), + alert.Or( + alert.SourceScopeHasPrefix(types.ListOrigin+":"), + alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), + ), + ), + ), + )) + } else if value != "true" { + log.Errorf("invalid bool '%s' for include_capi", value) + } + + return nil +} + func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { predicates := make([]predicate.Alert, 0) @@ -739,16 +896,8 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ - /*the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ - if v, ok := filter["simulated"]; ok { - if v[0] == "false" { - predicates = append(predicates, alert.SimulatedEQ(false)) - } - } - - if _, ok := filter["origin"]; ok { - filter["include_capi"] = []string{"true"} - } + handleSimulatedFilter(filter, &predicates) + handleOriginFilter(filter, &predicates) for param, value := range filter 
{ switch param { @@ -758,14 +907,7 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } case "scope": - scope := value[0] - if strings.ToLower(scope) == "ip" { - scope = types.Ip - } else if strings.ToLower(scope) == "range" { - scope = types.Range - } - - predicates = append(predicates, alert.SourceScopeEQ(scope)) + handleScopeFilter(value[0], &predicates) case "value": predicates = append(predicates, alert.SourceValueEQ(value[0])) case "scenario": @@ -775,68 +917,17 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e if err != nil { return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) } - case "since": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) + case "since", "created_before", "until": + if err := handleTimeFilters(param, value[0], &predicates); err != nil { + return nil, err } - - since := time.Now().UTC().Add(-duration) - if since.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", since.String()) - } - - predicates = append(predicates, alert.StartedAtGTE(since)) - case "created_before": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) - } - - since := time.Now().UTC().Add(-duration) - if since.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", since.String()) - } - - predicates = append(predicates, alert.CreatedAtLTE(since)) - case "until": - duration, err := ParseDuration(value[0]) - if err != nil { - return nil, fmt.Errorf("while parsing duration: %w", err) - } - - until := time.Now().UTC().Add(-duration) - if until.IsZero() { - return nil, fmt.Errorf("empty time now() - %s", until.String()) - } - - predicates = append(predicates, alert.StartedAtLTE(until)) case "decision_type": predicates = append(predicates, 
alert.HasDecisionsWith(decision.TypeEQ(value[0]))) case "origin": predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) case "include_capi": // allows to exclude one or more specific origins - if value[0] == "false" { - predicates = append(predicates, alert.And( - // do not show alerts with active decisions having origin CAPI or lists - alert.And( - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), - ), - alert.Not( - alert.And( - // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI - alert.Not(alert.HasDecisions()), - alert.Or( - alert.SourceScopeHasPrefix(types.ListOrigin+":"), - alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), - ), - ), - ), - ), - ) - } else if value[0] != "true" { - log.Errorf("Invalid bool '%s' for include_capi", value[0]) + if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { + return nil, err } case "has_active_decision": if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { @@ -861,72 +952,8 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e } } - if ip_sz == 4 { - if contains { /*decision contains {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } else { /*decision is contained within {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } - } else if ip_sz == 16 { - if contains { /*decision contains {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - // matching addr size - 
alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip < query.start_ip - alert.HasDecisionsWith(decision.StartIPLT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix <= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), - )), - alert.Or( - // decision.end_ip > query.end_ip - alert.HasDecisionsWith(decision.EndIPGT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix >= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), - ), - ), - )) - } else { /*decision is contained within {start_ip,end_ip}*/ - predicates = append(predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip > query.start_ip - alert.HasDecisionsWith(decision.StartIPGT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix >= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), - )), - alert.Or( - // decision.end_ip < query.end_ip - alert.HasDecisionsWith(decision.EndIPLT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix <= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), - ), - ), - )) - } - } else if ip_sz != 0 { - return nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { + return nil, err } return predicates, nil @@ -941,14 +968,12 @@ func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]str return alerts.Where(preds...), nil } -func (c *Client) 
AlertsCountPerScenario(filters map[string][]string) (map[string]int, error) { +func (c *Client) AlertsCountPerScenario(ctx context.Context, filters map[string][]string) (map[string]int, error) { var res []struct { Scenario string Count int } - ctx := context.TODO() - query := c.Ent.Alert.Query() query, err := BuildAlertRequestFromFilter(query, filters) @@ -970,11 +995,11 @@ func (c *Client) AlertsCountPerScenario(filters map[string][]string) (map[string return counts, nil } -func (c *Client) TotalAlerts() (int, error) { - return c.Ent.Alert.Query().Count(c.CTX) +func (c *Client) TotalAlerts(ctx context.Context) (int, error) { + return c.Ent.Alert.Query().Count(ctx) } -func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, error) { +func (c *Client) QueryAlertWithFilter(ctx context.Context, filter map[string][]string) ([]*ent.Alert, error) { sort := "DESC" // we sort by desc by default if val, ok := filter["sort"]; ok { @@ -1021,7 +1046,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, WithOwner() if limit == 0 { - limit, err = alerts.Count(c.CTX) + limit, err = alerts.Count(ctx) if err != nil { return nil, fmt.Errorf("unable to count nb alerts: %w", err) } @@ -1033,7 +1058,7 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, alerts = alerts.Order(ent.Desc(alert.FieldCreatedAt), ent.Desc(alert.FieldID)) } - result, err := alerts.Limit(paginationSize).Offset(offset).All(c.CTX) + result, err := alerts.Limit(paginationSize).Offset(offset).All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "pagination size: %d, offset: %d: %s", paginationSize, offset, err) } @@ -1062,35 +1087,35 @@ func (c *Client) QueryAlertWithFilter(filter map[string][]string) ([]*ent.Alert, return ret, nil } -func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) { +func (c *Client) DeleteAlertGraphBatch(ctx context.Context, alertItems []*ent.Alert) (int, error) { idList := 
make([]int, 0) for _, alert := range alertItems { idList = append(idList, alert.ID) } _, err := c.Ent.Event.Delete(). - Where(event.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(event.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch events") } _, err = c.Ent.Meta.Delete(). - Where(meta.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(meta.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch meta") } _, err = c.Ent.Decision.Delete(). - Where(decision.HasOwnerWith(alert.IDIn(idList...))).Exec(c.CTX) + Where(decision.HasOwnerWith(alert.IDIn(idList...))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return 0, errors.Wrapf(DeleteFail, "alert graph delete batch decisions") } deleted, err := c.Ent.Alert.Delete(). - Where(alert.IDIn(idList...)).Exec(c.CTX) + Where(alert.IDIn(idList...)).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraphBatch : %s", err) return deleted, errors.Wrapf(DeleteFail, "alert graph delete batch") @@ -1101,10 +1126,10 @@ func (c *Client) DeleteAlertGraphBatch(alertItems []*ent.Alert) (int, error) { return deleted, nil } -func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { +func (c *Client) DeleteAlertGraph(ctx context.Context, alertItem *ent.Alert) error { // delete the associated events _, err := c.Ent.Event.Delete(). - Where(event.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(event.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "event with alert ID '%d'", alertItem.ID) @@ -1112,7 +1137,7 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { // delete the associated meta _, err = c.Ent.Meta.Delete(). 
- Where(meta.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(meta.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "meta with alert ID '%d'", alertItem.ID) @@ -1120,14 +1145,14 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { // delete the associated decisions _, err = c.Ent.Decision.Delete(). - Where(decision.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(c.CTX) + Where(decision.HasOwnerWith(alert.IDEQ(alertItem.ID))).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "decision with alert ID '%d'", alertItem.ID) } // delete the alert - err = c.Ent.Alert.DeleteOne(alertItem).Exec(c.CTX) + err = c.Ent.Alert.DeleteOne(alertItem).Exec(ctx) if err != nil { c.Log.Warningf("DeleteAlertGraph : %s", err) return errors.Wrapf(DeleteFail, "alert with ID '%d'", alertItem.ID) @@ -1136,26 +1161,26 @@ func (c *Client) DeleteAlertGraph(alertItem *ent.Alert) error { return nil } -func (c *Client) DeleteAlertByID(id int) error { - alertItem, err := c.Ent.Alert.Query().Where(alert.IDEQ(id)).Only(c.CTX) +func (c *Client) DeleteAlertByID(ctx context.Context, id int) error { + alertItem, err := c.Ent.Alert.Query().Where(alert.IDEQ(id)).Only(ctx) if err != nil { return err } - return c.DeleteAlertGraph(alertItem) + return c.DeleteAlertGraph(ctx, alertItem) } -func (c *Client) DeleteAlertWithFilter(filter map[string][]string) (int, error) { +func (c *Client) DeleteAlertWithFilter(ctx context.Context, filter map[string][]string) (int, error) { preds, err := AlertPredicatesFromFilter(filter) if err != nil { return 0, err } - return c.Ent.Alert.Delete().Where(preds...).Exec(c.CTX) + return c.Ent.Alert.Delete().Where(preds...).Exec(ctx) } -func (c *Client) GetAlertByID(alertID int) (*ent.Alert, error) { - alert, err := 
c.Ent.Alert.Query().Where(alert.IDEQ(alertID)).WithDecisions().WithEvents().WithMetas().WithOwner().First(c.CTX) +func (c *Client) GetAlertByID(ctx context.Context, alertID int) (*ent.Alert, error) { + alert, err := c.Ent.Alert.Query().Where(alert.IDEQ(alertID)).WithDecisions().WithEvents().WithMetas().WithOwner().First(ctx) if err != nil { /*record not found, 404*/ if ent.IsNotFound(err) { diff --git a/pkg/database/bouncers.go b/pkg/database/bouncers.go index f79e9580afe..f9e62bc6522 100644 --- a/pkg/database/bouncers.go +++ b/pkg/database/bouncers.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strings" "time" @@ -20,7 +21,7 @@ func (e *BouncerNotFoundError) Error() string { return fmt.Sprintf("'%s' does not exist", e.BouncerName) } -func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { +func (c *Client) BouncerUpdateBaseMetrics(ctx context.Context, bouncerName string, bouncerType string, baseMetrics models.BaseMetrics) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") @@ -32,7 +33,7 @@ func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string SetOsversion(*os.Version). SetFeatureflags(features). SetType(bouncerType). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update base bouncer metrics in database: %w", err) } @@ -40,8 +41,10 @@ func (c *Client) BouncerUpdateBaseMetrics(bouncerName string, bouncerType string return nil } -func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash)).First(c.CTX) +func (c *Client) SelectBouncers(ctx context.Context, apiKeyHash string, authType string) ([]*ent.Bouncer, error) { + //Order by ID so manually created bouncer will be first in the list to use as the base name + //when automatically creating a new entry if API keys are shared + result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash), bouncer.AuthTypeEQ(authType)).Order(ent.Asc(bouncer.FieldID)).All(ctx) if err != nil { return nil, err } @@ -49,8 +52,8 @@ func (c *Client) SelectBouncer(apiKeyHash string) (*ent.Bouncer, error) { return result, nil } -func (c *Client) SelectBouncerByName(bouncerName string) (*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().Where(bouncer.NameEQ(bouncerName)).First(c.CTX) +func (c *Client) SelectBouncerWithIP(ctx context.Context, apiKeyHash string, clientIP string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.APIKeyEQ(apiKeyHash), bouncer.IPAddressEQ(clientIP)).First(ctx) if err != nil { return nil, err } @@ -58,8 +61,17 @@ func (c *Client) SelectBouncerByName(bouncerName string) (*ent.Bouncer, error) { return result, nil } -func (c *Client) ListBouncers() ([]*ent.Bouncer, error) { - result, err := c.Ent.Bouncer.Query().All(c.CTX) +func (c *Client) SelectBouncerByName(ctx context.Context, bouncerName string) (*ent.Bouncer, error) { + result, err := c.Ent.Bouncer.Query().Where(bouncer.NameEQ(bouncerName)).First(ctx) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *Client) ListBouncers(ctx context.Context) ([]*ent.Bouncer, error) { + result, err := 
c.Ent.Bouncer.Query().All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "listing bouncers: %s", err) } @@ -67,14 +79,16 @@ func (c *Client) ListBouncers() ([]*ent.Bouncer, error) { return result, nil } -func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authType string) (*ent.Bouncer, error) { +func (c *Client) CreateBouncer(ctx context.Context, name string, ipAddr string, apiKey string, authType string, autoCreated bool) (*ent.Bouncer, error) { bouncer, err := c.Ent.Bouncer. Create(). SetName(name). SetAPIKey(apiKey). SetRevoked(false). SetAuthType(authType). - Save(c.CTX) + SetIPAddress(ipAddr). + SetAutoCreated(autoCreated). + Save(ctx) if err != nil { if ent.IsConstraintError(err) { return nil, fmt.Errorf("bouncer %s already exists", name) @@ -86,11 +100,11 @@ func (c *Client) CreateBouncer(name string, ipAddr string, apiKey string, authTy return bouncer, nil } -func (c *Client) DeleteBouncer(name string) error { +func (c *Client) DeleteBouncer(ctx context.Context, name string) error { nbDeleted, err := c.Ent.Bouncer. Delete(). Where(bouncer.NameEQ(name)). 
- Exec(c.CTX) + Exec(ctx) if err != nil { return err } @@ -102,13 +116,13 @@ func (c *Client) DeleteBouncer(name string) error { return nil } -func (c *Client) BulkDeleteBouncers(bouncers []*ent.Bouncer) (int, error) { +func (c *Client) BulkDeleteBouncers(ctx context.Context, bouncers []*ent.Bouncer) (int, error) { ids := make([]int, len(bouncers)) for i, b := range bouncers { ids[i] = b.ID } - nbDeleted, err := c.Ent.Bouncer.Delete().Where(bouncer.IDIn(ids...)).Exec(c.CTX) + nbDeleted, err := c.Ent.Bouncer.Delete().Where(bouncer.IDIn(ids...)).Exec(ctx) if err != nil { return nbDeleted, fmt.Errorf("unable to delete bouncers: %w", err) } @@ -116,10 +130,10 @@ func (c *Client) BulkDeleteBouncers(bouncers []*ent.Bouncer) (int, error) { return nbDeleted, nil } -func (c *Client) UpdateBouncerLastPull(lastPull time.Time, id int) error { +func (c *Client) UpdateBouncerLastPull(ctx context.Context, lastPull time.Time, id int) error { _, err := c.Ent.Bouncer.UpdateOneID(id). SetLastPull(lastPull). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine last pull in database: %w", err) } @@ -127,8 +141,8 @@ func (c *Client) UpdateBouncerLastPull(lastPull time.Time, id int) error { return nil } -func (c *Client) UpdateBouncerIP(ipAddr string, id int) error { - _, err := c.Ent.Bouncer.UpdateOneID(id).SetIPAddress(ipAddr).Save(c.CTX) +func (c *Client) UpdateBouncerIP(ctx context.Context, ipAddr string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetIPAddress(ipAddr).Save(ctx) if err != nil { return fmt.Errorf("unable to update bouncer ip address in database: %w", err) } @@ -136,8 +150,8 @@ func (c *Client) UpdateBouncerIP(ipAddr string, id int) error { return nil } -func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id int) error { - _, err := c.Ent.Bouncer.UpdateOneID(id).SetVersion(version).SetType(bType).Save(c.CTX) +func (c *Client) UpdateBouncerTypeAndVersion(ctx context.Context, bType string, version string, id int) error { + _, err := c.Ent.Bouncer.UpdateOneID(id).SetVersion(version).SetType(bType).Save(ctx) if err != nil { return fmt.Errorf("unable to update bouncer type and version in database: %w", err) } @@ -145,7 +159,7 @@ func (c *Client) UpdateBouncerTypeAndVersion(bType string, version string, id in return nil } -func (c *Client) QueryBouncersInactiveSince(t time.Time) ([]*ent.Bouncer, error) { +func (c *Client) QueryBouncersInactiveSince(ctx context.Context, t time.Time) ([]*ent.Bouncer, error) { return c.Ent.Bouncer.Query().Where( // poor man's coalesce bouncer.Or( @@ -155,5 +169,5 @@ func (c *Client) QueryBouncersInactiveSince(t time.Time) ([]*ent.Bouncer, error) bouncer.CreatedAtLT(t), ), ), - ).All(c.CTX) + ).All(ctx) } diff --git a/pkg/database/config.go b/pkg/database/config.go index 8c3578ad596..89ccb1e1b28 100644 --- a/pkg/database/config.go +++ b/pkg/database/config.go @@ -1,17 +1,20 @@ package database import ( + "context" + "github.com/pkg/errors" 
"github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" ) -func (c *Client) GetConfigItem(key string) (*string, error) { - result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(c.CTX) +func (c *Client) GetConfigItem(ctx context.Context, key string) (*string, error) { + result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(ctx) if err != nil && ent.IsNotFound(err) { return nil, nil } + if err != nil { return nil, errors.Wrapf(QueryFail, "select config item: %s", err) } @@ -19,16 +22,16 @@ func (c *Client) GetConfigItem(key string) (*string, error) { return &result.Value, nil } -func (c *Client) SetConfigItem(key string, value string) error { - - nbUpdated, err := c.Ent.ConfigItem.Update().SetValue(value).Where(configitem.NameEQ(key)).Save(c.CTX) - if (err != nil && ent.IsNotFound(err)) || nbUpdated == 0 { //not found, create - err := c.Ent.ConfigItem.Create().SetName(key).SetValue(value).Exec(c.CTX) +func (c *Client) SetConfigItem(ctx context.Context, key string, value string) error { + nbUpdated, err := c.Ent.ConfigItem.Update().SetValue(value).Where(configitem.NameEQ(key)).Save(ctx) + if (err != nil && ent.IsNotFound(err)) || nbUpdated == 0 { // not found, create + err := c.Ent.ConfigItem.Create().SetName(key).SetValue(value).Exec(ctx) if err != nil { return errors.Wrapf(QueryFail, "insert config item: %s", err) } } else if err != nil { return errors.Wrapf(QueryFail, "update config item: %s", err) } + return nil } diff --git a/pkg/database/database.go b/pkg/database/database.go index e513459199f..bb41dd3b645 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -21,7 +21,6 @@ import ( type Client struct { Ent *ent.Client - CTX context.Context Log *log.Logger CanFlush bool Type string @@ -106,7 +105,6 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro return &Client{ Ent: client, - CTX: ctx, Log: clog, CanFlush: 
true, Type: config.Type, diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index fc582247e59..7522a272799 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strconv" "strings" @@ -30,7 +31,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ @@ -120,7 +121,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] return query, nil } -func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryAllDecisionsWithFilters(ctx context.Context, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -137,7 +138,7 @@ func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*e query = query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryAllDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "get all decisions with filters") @@ -146,7 +147,7 @@ func (c *Client) QueryAllDecisionsWithFilters(filters map[string][]string) ([]*e return data, nil } -func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryExpiredDecisionsWithFilters(ctx context.Context, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), ) @@ -164,7 +165,7 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( return 
[]*ent.Decision{}, errors.Wrap(QueryFail, "get expired decisions with filters") } - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryExpiredDecisionsWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions") @@ -173,7 +174,7 @@ func (c *Client) QueryExpiredDecisionsWithFilters(filters map[string][]string) ( return data, nil } -func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) { +func (c *Client) QueryDecisionCountByScenario(ctx context.Context) ([]*DecisionsByScenario, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -186,7 +187,7 @@ func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) var r []*DecisionsByScenario - err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(c.CTX, &r) + err = query.GroupBy(decision.FieldScenario, decision.FieldOrigin, decision.FieldType).Aggregate(ent.Count()).Scan(ctx, &r) if err != nil { c.Log.Warningf("QueryDecisionCountByScenario : %s", err) return nil, errors.Wrap(QueryFail, "count all decisions with filters") @@ -195,7 +196,7 @@ func (c *Client) QueryDecisionCountByScenario() ([]*DecisionsByScenario, error) return r, nil } -func (c *Client) QueryDecisionWithFilter(filter map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryDecisionWithFilter(ctx context.Context, filter map[string][]string) ([]*ent.Decision, error) { var data []*ent.Decision var err error @@ -217,7 +218,7 @@ func (c *Client) QueryDecisionWithFilter(filter map[string][]string) ([]*ent.Dec decision.FieldValue, decision.FieldScope, decision.FieldOrigin, - ).Scan(c.CTX, &data) + ).Scan(ctx, &data) if err != nil { c.Log.Warningf("QueryDecisionWithFilter : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "query decision failed") @@ -254,7 +255,7 @@ func longestDecisionForScopeTypeValue(s *sql.Selector) { ) } 
-func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryExpiredDecisionsSinceWithFilters(ctx context.Context, since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilLT(time.Now().UTC()), ) @@ -276,7 +277,7 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters query = query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryExpiredDecisionsSinceWithFilters : %s", err) return []*ent.Decision{}, errors.Wrap(QueryFail, "expired decisions with filters") @@ -285,7 +286,7 @@ func (c *Client) QueryExpiredDecisionsSinceWithFilters(since *time.Time, filters return data, nil } -func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { +func (c *Client) QueryNewDecisionsSinceWithFilters(ctx context.Context, since *time.Time, filters map[string][]string) ([]*ent.Decision, error) { query := c.Ent.Decision.Query().Where( decision.UntilGT(time.Now().UTC()), ) @@ -307,7 +308,7 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map query = query.Order(ent.Asc(decision.FieldID)) - data, err := query.All(c.CTX) + data, err := query.All(ctx) if err != nil { c.Log.Warningf("QueryNewDecisionsSinceWithFilters : %s", err) return []*ent.Decision{}, errors.Wrapf(QueryFail, "new decisions since '%s'", since.String()) @@ -316,24 +317,11 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(since *time.Time, filters map return data, nil } -func (c *Client) DeleteDecisionById(decisionID int) ([]*ent.Decision, error) { - toDelete, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) - if err != nil { - c.Log.Warningf("DeleteDecisionById : %s", err) - return nil, errors.Wrapf(DeleteFail, "decision with id 
'%d' doesn't exist", decisionID) - } - - count, err := c.DeleteDecisions(toDelete) - c.Log.Debugf("deleted %d decisions", count) - - return toDelete, err -} - -func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { +func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer) */ @@ -432,13 +420,13 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } - toDelete, err := decisions.All(c.CTX) + toDelete, err := decisions.All(ctx) if err != nil { c.Log.Warningf("DeleteDecisionsWithFilter : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") } - count, err := c.DeleteDecisions(toDelete) + count, err := c.DeleteDecisions(ctx, toDelete) if err != nil { c.Log.Warningf("While deleting decisions : %s", err) return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") @@ -448,11 +436,11 @@ func (c *Client) DeleteDecisionsWithFilter(filter map[string][]string) (string, } // ExpireDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items -func (c *Client) ExpireDecisionsWithFilter(filter map[string][]string) (string, []*ent.Decision, error) { +func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int - var contains = true + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) 
else, return bans that are *contained* by the given value (value is the outer)*/ decisions := c.Ent.Decision.Query().Where(decision.UntilGT(time.Now().UTC())) @@ -557,13 +545,13 @@ func (c *Client) ExpireDecisionsWithFilter(filter map[string][]string) (string, return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } - DecisionsToDelete, err := decisions.All(c.CTX) + DecisionsToDelete, err := decisions.All(ctx) if err != nil { c.Log.Warningf("ExpireDecisionsWithFilter : %s", err) return "0", nil, errors.Wrap(DeleteFail, "expire decisions with provided filter") } - count, err := c.ExpireDecisions(DecisionsToDelete) + count, err := c.ExpireDecisions(ctx, DecisionsToDelete) if err != nil { return "0", nil, errors.Wrapf(DeleteFail, "expire decisions with provided filter : %s", err) } @@ -582,13 +570,13 @@ func decisionIDs(decisions []*ent.Decision) []int { // ExpireDecisions sets the expiration of a list of decisions to now() // It returns the number of impacted decisions for the CAPI/PAPI -func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { +func (c *Client) ExpireDecisions(ctx context.Context, decisions []*ent.Decision) (int, error) { if len(decisions) <= decisionDeleteBulkSize { ids := decisionIDs(decisions) rows, err := c.Ent.Decision.Update().Where( decision.IDIn(ids...), - ).SetUntil(time.Now().UTC()).Save(c.CTX) + ).SetUntil(time.Now().UTC()).Save(ctx) if err != nil { return 0, fmt.Errorf("expire decisions with provided filter: %w", err) } @@ -601,7 +589,7 @@ func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { total := 0 for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { - rows, err := c.ExpireDecisions(chunk) + rows, err := c.ExpireDecisions(ctx, chunk) if err != nil { return total, err } @@ -614,13 +602,13 @@ func (c *Client) ExpireDecisions(decisions []*ent.Decision) (int, error) { // DeleteDecisions removes a list of decisions from the database // It returns the number 
of impacted decisions for the CAPI/PAPI -func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { +func (c *Client) DeleteDecisions(ctx context.Context, decisions []*ent.Decision) (int, error) { if len(decisions) < decisionDeleteBulkSize { ids := decisionIDs(decisions) rows, err := c.Ent.Decision.Delete().Where( decision.IDIn(ids...), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { return 0, fmt.Errorf("hard delete decisions with provided filter: %w", err) } @@ -633,7 +621,7 @@ func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { tot := 0 for _, chunk := range slicetools.Chunks(decisions, decisionDeleteBulkSize) { - rows, err := c.DeleteDecisions(chunk) + rows, err := c.DeleteDecisions(ctx, chunk) if err != nil { return tot, err } @@ -645,8 +633,8 @@ func (c *Client) DeleteDecisions(decisions []*ent.Decision) (int, error) { } // ExpireDecision set the expiration of a decision to now() -func (c *Client) ExpireDecisionByID(decisionID int) (int, []*ent.Decision, error) { - toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(c.CTX) +func (c *Client) ExpireDecisionByID(ctx context.Context, decisionID int) (int, []*ent.Decision, error) { + toUpdate, err := c.Ent.Decision.Query().Where(decision.IDEQ(decisionID)).All(ctx) // XXX: do we want 500 or 404 here? 
if err != nil || len(toUpdate) == 0 { @@ -658,12 +646,12 @@ func (c *Client) ExpireDecisionByID(decisionID int) (int, []*ent.Decision, error return 0, nil, ItemNotFound } - count, err := c.ExpireDecisions(toUpdate) + count, err := c.ExpireDecisions(ctx, toUpdate) return count, toUpdate, err } -func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { +func (c *Client) CountDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int @@ -681,7 +669,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") } - count, err = decisions.Count(c.CTX) + count, err = decisions.Count(ctx) if err != nil { return 0, errors.Wrapf(err, "fail to count decisions") } @@ -689,7 +677,7 @@ func (c *Client) CountDecisionsByValue(decisionValue string) (int, error) { return count, nil } -func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) { +func (c *Client) CountActiveDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz, count int @@ -709,7 +697,7 @@ func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) decisions = decisions.Where(decision.UntilGT(time.Now().UTC())) - count, err = decisions.Count(c.CTX) + count, err = decisions.Count(ctx) if err != nil { return 0, fmt.Errorf("fail to count decisions: %w", err) } @@ -717,7 +705,7 @@ func (c *Client) CountActiveDecisionsByValue(decisionValue string) (int, error) return count, nil } -func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.Duration, error) { +func (c *Client) GetActiveDecisionsTimeLeftByValue(ctx context.Context, decisionValue string) (time.Duration, error) { var err error var start_ip, start_sfx, end_ip, end_sfx int64 var ip_sz int @@ -739,7 +727,7 @@ func (c 
*Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D decisions = decisions.Order(ent.Desc(decision.FieldUntil)) - decision, err := decisions.First(c.CTX) + decision, err := decisions.First(ctx) if err != nil && !ent.IsNotFound(err) { return 0, fmt.Errorf("fail to get decision: %w", err) } @@ -751,7 +739,7 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(decisionValue string) (time.D return decision.Until.Sub(time.Now().UTC()), nil } -func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Time) (int, error) { +func (c *Client) CountDecisionsSinceByValue(ctx context.Context, decisionValue string, since time.Time) (int, error) { ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) if err != nil { return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) @@ -767,7 +755,7 @@ func (c *Client) CountDecisionsSinceByValue(decisionValue string, since time.Tim return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") } - count, err := decisions.Count(c.CTX) + count, err := decisions.Count(ctx) if err != nil { return 0, errors.Wrapf(err, "fail to count decisions") } diff --git a/pkg/database/ent/bouncer.go b/pkg/database/ent/bouncer.go index 3b4d619e384..197f61cde19 100644 --- a/pkg/database/ent/bouncer.go +++ b/pkg/database/ent/bouncer.go @@ -43,6 +43,8 @@ type Bouncer struct { Osversion string `json:"osversion,omitempty"` // Featureflags holds the value of the "featureflags" field. Featureflags string `json:"featureflags,omitempty"` + // AutoCreated holds the value of the "auto_created" field. 
+ AutoCreated bool `json:"auto_created"` selectValues sql.SelectValues } @@ -51,7 +53,7 @@ func (*Bouncer) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case bouncer.FieldRevoked: + case bouncer.FieldRevoked, bouncer.FieldAutoCreated: values[i] = new(sql.NullBool) case bouncer.FieldID: values[i] = new(sql.NullInt64) @@ -159,6 +161,12 @@ func (b *Bouncer) assignValues(columns []string, values []any) error { } else if value.Valid { b.Featureflags = value.String } + case bouncer.FieldAutoCreated: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field auto_created", values[i]) + } else if value.Valid { + b.AutoCreated = value.Bool + } default: b.selectValues.Set(columns[i], values[i]) } @@ -234,6 +242,9 @@ func (b *Bouncer) String() string { builder.WriteString(", ") builder.WriteString("featureflags=") builder.WriteString(b.Featureflags) + builder.WriteString(", ") + builder.WriteString("auto_created=") + builder.WriteString(fmt.Sprintf("%v", b.AutoCreated)) builder.WriteByte(')') return builder.String() } diff --git a/pkg/database/ent/bouncer/bouncer.go b/pkg/database/ent/bouncer/bouncer.go index a6f62aeadd5..f25b5a5815a 100644 --- a/pkg/database/ent/bouncer/bouncer.go +++ b/pkg/database/ent/bouncer/bouncer.go @@ -39,6 +39,8 @@ const ( FieldOsversion = "osversion" // FieldFeatureflags holds the string denoting the featureflags field in the database. FieldFeatureflags = "featureflags" + // FieldAutoCreated holds the string denoting the auto_created field in the database. + FieldAutoCreated = "auto_created" // Table holds the table name of the bouncer in the database. Table = "bouncers" ) @@ -59,6 +61,7 @@ var Columns = []string{ FieldOsname, FieldOsversion, FieldFeatureflags, + FieldAutoCreated, } // ValidColumn reports if the column name is valid (part of the table columns). 
@@ -82,6 +85,8 @@ var ( DefaultIPAddress string // DefaultAuthType holds the default value on creation for the "auth_type" field. DefaultAuthType string + // DefaultAutoCreated holds the default value on creation for the "auto_created" field. + DefaultAutoCreated bool ) // OrderOption defines the ordering options for the Bouncer queries. @@ -156,3 +161,8 @@ func ByOsversion(opts ...sql.OrderTermOption) OrderOption { func ByFeatureflags(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldFeatureflags, opts...).ToFunc() } + +// ByAutoCreated orders the results by the auto_created field. +func ByAutoCreated(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAutoCreated, opts...).ToFunc() +} diff --git a/pkg/database/ent/bouncer/where.go b/pkg/database/ent/bouncer/where.go index e02199bc0a9..79b8999354f 100644 --- a/pkg/database/ent/bouncer/where.go +++ b/pkg/database/ent/bouncer/where.go @@ -119,6 +119,11 @@ func Featureflags(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldFeatureflags, v)) } +// AutoCreated applies equality check predicate on the "auto_created" field. It's identical to AutoCreatedEQ. +func AutoCreated(v bool) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldAutoCreated, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Bouncer { return predicate.Bouncer(sql.FieldEQ(FieldCreatedAt, v)) @@ -904,6 +909,16 @@ func FeatureflagsContainsFold(v string) predicate.Bouncer { return predicate.Bouncer(sql.FieldContainsFold(FieldFeatureflags, v)) } +// AutoCreatedEQ applies the EQ predicate on the "auto_created" field. +func AutoCreatedEQ(v bool) predicate.Bouncer { + return predicate.Bouncer(sql.FieldEQ(FieldAutoCreated, v)) +} + +// AutoCreatedNEQ applies the NEQ predicate on the "auto_created" field. 
+func AutoCreatedNEQ(v bool) predicate.Bouncer { + return predicate.Bouncer(sql.FieldNEQ(FieldAutoCreated, v)) +} + // And groups predicates with the AND operator between them. func And(predicates ...predicate.Bouncer) predicate.Bouncer { return predicate.Bouncer(sql.AndPredicates(predicates...)) diff --git a/pkg/database/ent/bouncer_create.go b/pkg/database/ent/bouncer_create.go index 29b23f87cf1..9ff4c0e0820 100644 --- a/pkg/database/ent/bouncer_create.go +++ b/pkg/database/ent/bouncer_create.go @@ -178,6 +178,20 @@ func (bc *BouncerCreate) SetNillableFeatureflags(s *string) *BouncerCreate { return bc } +// SetAutoCreated sets the "auto_created" field. +func (bc *BouncerCreate) SetAutoCreated(b bool) *BouncerCreate { + bc.mutation.SetAutoCreated(b) + return bc +} + +// SetNillableAutoCreated sets the "auto_created" field if the given value is not nil. +func (bc *BouncerCreate) SetNillableAutoCreated(b *bool) *BouncerCreate { + if b != nil { + bc.SetAutoCreated(*b) + } + return bc +} + // Mutation returns the BouncerMutation object of the builder. func (bc *BouncerCreate) Mutation() *BouncerMutation { return bc.mutation @@ -229,6 +243,10 @@ func (bc *BouncerCreate) defaults() { v := bouncer.DefaultAuthType bc.mutation.SetAuthType(v) } + if _, ok := bc.mutation.AutoCreated(); !ok { + v := bouncer.DefaultAutoCreated + bc.mutation.SetAutoCreated(v) + } } // check runs all checks and user-defined validators on the builder. 
@@ -251,6 +269,9 @@ func (bc *BouncerCreate) check() error { if _, ok := bc.mutation.AuthType(); !ok { return &ValidationError{Name: "auth_type", err: errors.New(`ent: missing required field "Bouncer.auth_type"`)} } + if _, ok := bc.mutation.AutoCreated(); !ok { + return &ValidationError{Name: "auto_created", err: errors.New(`ent: missing required field "Bouncer.auto_created"`)} + } return nil } @@ -329,6 +350,10 @@ func (bc *BouncerCreate) createSpec() (*Bouncer, *sqlgraph.CreateSpec) { _spec.SetField(bouncer.FieldFeatureflags, field.TypeString, value) _node.Featureflags = value } + if value, ok := bc.mutation.AutoCreated(); ok { + _spec.SetField(bouncer.FieldAutoCreated, field.TypeBool, value) + _node.AutoCreated = value + } return _node, _spec } diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 986f5bc8c67..dae248c7f38 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -74,6 +74,7 @@ var ( {Name: "osname", Type: field.TypeString, Nullable: true}, {Name: "osversion", Type: field.TypeString, Nullable: true}, {Name: "featureflags", Type: field.TypeString, Nullable: true}, + {Name: "auto_created", Type: field.TypeBool, Default: false}, } // BouncersTable holds the schema information for the "bouncers" table. BouncersTable = &schema.Table{ diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index 5c6596f3db4..fa1ccb3da58 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -2471,6 +2471,7 @@ type BouncerMutation struct { osname *string osversion *string featureflags *string + auto_created *bool clearedFields map[string]struct{} done bool oldValue func(context.Context) (*Bouncer, error) @@ -3134,6 +3135,42 @@ func (m *BouncerMutation) ResetFeatureflags() { delete(m.clearedFields, bouncer.FieldFeatureflags) } +// SetAutoCreated sets the "auto_created" field. 
+func (m *BouncerMutation) SetAutoCreated(b bool) { + m.auto_created = &b +} + +// AutoCreated returns the value of the "auto_created" field in the mutation. +func (m *BouncerMutation) AutoCreated() (r bool, exists bool) { + v := m.auto_created + if v == nil { + return + } + return *v, true +} + +// OldAutoCreated returns the old "auto_created" field's value of the Bouncer entity. +// If the Bouncer object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BouncerMutation) OldAutoCreated(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAutoCreated is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAutoCreated requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAutoCreated: %w", err) + } + return oldValue.AutoCreated, nil +} + +// ResetAutoCreated resets all changes to the "auto_created" field. +func (m *BouncerMutation) ResetAutoCreated() { + m.auto_created = nil +} + // Where appends a list predicates to the BouncerMutation builder. func (m *BouncerMutation) Where(ps ...predicate.Bouncer) { m.predicates = append(m.predicates, ps...) @@ -3168,7 +3205,7 @@ func (m *BouncerMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *BouncerMutation) Fields() []string { - fields := make([]string, 0, 13) + fields := make([]string, 0, 14) if m.created_at != nil { fields = append(fields, bouncer.FieldCreatedAt) } @@ -3208,6 +3245,9 @@ func (m *BouncerMutation) Fields() []string { if m.featureflags != nil { fields = append(fields, bouncer.FieldFeatureflags) } + if m.auto_created != nil { + fields = append(fields, bouncer.FieldAutoCreated) + } return fields } @@ -3242,6 +3282,8 @@ func (m *BouncerMutation) Field(name string) (ent.Value, bool) { return m.Osversion() case bouncer.FieldFeatureflags: return m.Featureflags() + case bouncer.FieldAutoCreated: + return m.AutoCreated() } return nil, false } @@ -3277,6 +3319,8 @@ func (m *BouncerMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldOsversion(ctx) case bouncer.FieldFeatureflags: return m.OldFeatureflags(ctx) + case bouncer.FieldAutoCreated: + return m.OldAutoCreated(ctx) } return nil, fmt.Errorf("unknown Bouncer field %s", name) } @@ -3377,6 +3421,13 @@ func (m *BouncerMutation) SetField(name string, value ent.Value) error { } m.SetFeatureflags(v) return nil + case bouncer.FieldAutoCreated: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAutoCreated(v) + return nil } return fmt.Errorf("unknown Bouncer field %s", name) } @@ -3510,6 +3561,9 @@ func (m *BouncerMutation) ResetField(name string) error { case bouncer.FieldFeatureflags: m.ResetFeatureflags() return nil + case bouncer.FieldAutoCreated: + m.ResetAutoCreated() + return nil } return fmt.Errorf("unknown Bouncer field %s", name) } diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 15413490633..49921a17b03 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -76,6 +76,10 @@ func init() { bouncerDescAuthType := bouncerFields[9].Descriptor() // bouncer.DefaultAuthType holds the default value on creation for the auth_type field. 
bouncer.DefaultAuthType = bouncerDescAuthType.Default.(string) + // bouncerDescAutoCreated is the schema descriptor for auto_created field. + bouncerDescAutoCreated := bouncerFields[13].Descriptor() + // bouncer.DefaultAutoCreated holds the default value on creation for the auto_created field. + bouncer.DefaultAutoCreated = bouncerDescAutoCreated.Default.(bool) configitemFields := schema.ConfigItem{}.Fields() _ = configitemFields // configitemDescCreatedAt is the schema descriptor for created_at field. diff --git a/pkg/database/ent/schema/bouncer.go b/pkg/database/ent/schema/bouncer.go index 599c4c404fc..c176bf0f766 100644 --- a/pkg/database/ent/schema/bouncer.go +++ b/pkg/database/ent/schema/bouncer.go @@ -33,6 +33,8 @@ func (Bouncer) Fields() []ent.Field { field.String("osname").Optional(), field.String("osversion").Optional(), field.String("featureflags").Optional(), + // Old auto-created TLS bouncers will have a wrong value for this field + field.Bool("auto_created").StructTag(`json:"auto_created"`).Default(false).Immutable(), } } diff --git a/pkg/database/errors.go b/pkg/database/errors.go index 8e96f52d7ce..77f92707e51 100644 --- a/pkg/database/errors.go +++ b/pkg/database/errors.go @@ -13,8 +13,8 @@ var ( ItemNotFound = errors.New("object not found") ParseTimeFail = errors.New("unable to parse time") ParseDurationFail = errors.New("unable to parse duration") - MarshalFail = errors.New("unable to marshal") - UnmarshalFail = errors.New("unable to unmarshal") + MarshalFail = errors.New("unable to serialize") + UnmarshalFail = errors.New("unable to parse") BulkError = errors.New("unable to insert bulk") ParseType = errors.New("unable to parse type") InvalidIPOrRange = errors.New("invalid ip address / range") diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 5d53d10c942..8f646ddc961 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -1,6 +1,7 @@ package database import ( + "context" "errors" "fmt" "time" @@ -26,7 +27,7 @@ const ( 
flushInterval = 1 * time.Minute ) -func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { +func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { maxItems := 0 maxAge := "" @@ -45,7 +46,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched // Init & Start cronjob every minute for alerts scheduler := gocron.NewScheduler(time.UTC) - job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, maxAge, maxItems) + job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, ctx, maxAge, maxItems) if err != nil { return nil, fmt.Errorf("while starting FlushAlerts scheduler: %w", err) } @@ -100,14 +101,14 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched } } - baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, config.AgentsGC, config.BouncersGC) + baJob, err := scheduler.Every(flushInterval).Do(c.FlushAgentsAndBouncers, ctx, config.AgentsGC, config.BouncersGC) if err != nil { return nil, fmt.Errorf("while starting FlushAgentsAndBouncers scheduler: %w", err) } baJob.SingletonMode() - metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, config.MetricsMaxAge) + metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, ctx, config.MetricsMaxAge) if err != nil { return nil, fmt.Errorf("while starting flushMetrics scheduler: %w", err) } @@ -120,7 +121,7 @@ func (c *Client) StartFlushScheduler(config *csconfig.FlushDBCfg) (*gocron.Sched } // flushMetrics deletes metrics older than maxAge, regardless if they have been pushed to CAPI or not -func (c *Client) flushMetrics(maxAge *time.Duration) { +func (c *Client) flushMetrics(ctx context.Context, maxAge *time.Duration) { if maxAge == nil { maxAge = ptr.Of(defaultMetricsMaxAge) } @@ -129,7 +130,7 @@ func (c *Client) flushMetrics(maxAge *time.Duration) { deleted, err := c.Ent.Metric.Delete().Where( 
metric.ReceivedAtLTE(time.Now().UTC().Add(-*maxAge)), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while flushing metrics: %s", err) return @@ -140,10 +141,10 @@ func (c *Client) flushMetrics(maxAge *time.Duration) { } } -func (c *Client) FlushOrphans() { +func (c *Client) FlushOrphans(ctx context.Context) { /* While it has only been linked to some very corner-case bug : https://github.com/crowdsecurity/crowdsec/issues/778 */ /* We want to take care of orphaned events for which the parent alert/decision has been deleted */ - eventsCount, err := c.Ent.Event.Delete().Where(event.Not(event.HasOwner())).Exec(c.CTX) + eventsCount, err := c.Ent.Event.Delete().Where(event.Not(event.HasOwner())).Exec(ctx) if err != nil { c.Log.Warningf("error while deleting orphan events: %s", err) return @@ -154,7 +155,7 @@ func (c *Client) FlushOrphans() { } eventsCount, err = c.Ent.Decision.Delete().Where( - decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(c.CTX) + decision.Not(decision.HasOwner())).Where(decision.UntilLTE(time.Now().UTC())).Exec(ctx) if err != nil { c.Log.Warningf("error while deleting orphan decisions: %s", err) return @@ -165,7 +166,7 @@ func (c *Client) FlushOrphans() { } } -func (c *Client) flushBouncers(authType string, duration *time.Duration) { +func (c *Client) flushBouncers(ctx context.Context, authType string, duration *time.Duration) { if duration == nil { return } @@ -174,7 +175,7 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { bouncer.LastPullLTE(time.Now().UTC().Add(-*duration)), ).Where( bouncer.AuthTypeEQ(authType), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while auto-deleting expired bouncers (%s): %s", authType, err) return @@ -185,7 +186,7 @@ func (c *Client) flushBouncers(authType string, duration *time.Duration) { } } -func (c *Client) flushAgents(authType string, duration *time.Duration) { +func (c *Client) flushAgents(ctx context.Context, authType 
string, duration *time.Duration) { if duration == nil { return } @@ -194,7 +195,7 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { machine.LastHeartbeatLTE(time.Now().UTC().Add(-*duration)), machine.Not(machine.HasAlerts()), machine.AuthTypeEQ(authType), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { c.Log.Errorf("while auto-deleting expired machines (%s): %s", authType, err) return @@ -205,23 +206,23 @@ func (c *Client) flushAgents(authType string, duration *time.Duration) { } } -func (c *Client) FlushAgentsAndBouncers(agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error { +func (c *Client) FlushAgentsAndBouncers(ctx context.Context, agentsCfg *csconfig.AuthGCCfg, bouncersCfg *csconfig.AuthGCCfg) error { log.Debug("starting FlushAgentsAndBouncers") if agentsCfg != nil { - c.flushAgents(types.TlsAuthType, agentsCfg.CertDuration) - c.flushAgents(types.PasswordAuthType, agentsCfg.LoginPasswordDuration) + c.flushAgents(ctx, types.TlsAuthType, agentsCfg.CertDuration) + c.flushAgents(ctx, types.PasswordAuthType, agentsCfg.LoginPasswordDuration) } if bouncersCfg != nil { - c.flushBouncers(types.TlsAuthType, bouncersCfg.CertDuration) - c.flushBouncers(types.ApiKeyAuthType, bouncersCfg.ApiDuration) + c.flushBouncers(ctx, types.TlsAuthType, bouncersCfg.CertDuration) + c.flushBouncers(ctx, types.ApiKeyAuthType, bouncersCfg.ApiDuration) } return nil } -func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { +func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) error { var ( deletedByAge int deletedByNbItem int @@ -235,10 +236,10 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { } c.Log.Debug("Flushing orphan alerts") - c.FlushOrphans() + c.FlushOrphans(ctx) c.Log.Debug("Done flushing orphan alerts") - totalAlerts, err = c.TotalAlerts() + totalAlerts, err = c.TotalAlerts(ctx) if err != nil { c.Log.Warningf("FlushAlerts (max items count): %s", err) return fmt.Errorf("unable to get 
alerts count: %w", err) @@ -251,7 +252,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { "created_before": {MaxAge}, } - nbDeleted, err := c.DeleteAlertWithFilter(filter) + nbDeleted, err := c.DeleteAlertWithFilter(ctx, filter) if err != nil { c.Log.Warningf("FlushAlerts (max age): %s", err) return fmt.Errorf("unable to flush alerts with filter until=%s: %w", MaxAge, err) @@ -267,7 +268,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { // This gives us the oldest alert that we want to keep // We then delete all the alerts with an id lower than this one // We can do this because the id is auto-increment, and the database won't reuse the same id twice - lastAlert, err := c.QueryAlertWithFilter(map[string][]string{ + lastAlert, err := c.QueryAlertWithFilter(ctx, map[string][]string{ "sort": {"DESC"}, "limit": {"1"}, // we do not care about fetching the edges, we just want the id @@ -287,7 +288,7 @@ func (c *Client) FlushAlerts(MaxAge string, MaxItems int) error { if maxid > 0 { // This may lead to orphan alerts (at least on MySQL), but the next time the flush job will run, they will be deleted - deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(c.CTX) + deletedByNbItem, err = c.Ent.Alert.Delete().Where(alert.IDLT(maxid)).Exec(ctx) if err != nil { c.Log.Errorf("FlushAlerts: Could not delete alerts: %s", err) return fmt.Errorf("could not delete alerts: %w", err) diff --git a/pkg/database/lock.go b/pkg/database/lock.go index d25b71870f0..474228a069c 100644 --- a/pkg/database/lock.go +++ b/pkg/database/lock.go @@ -1,6 +1,7 @@ package database import ( + "context" "time" "github.com/pkg/errors" @@ -16,40 +17,45 @@ const ( CapiPullLockName = "pullCAPI" ) -func (c *Client) AcquireLock(name string) error { +func (c *Client) AcquireLock(ctx context.Context, name string) error { log.Debugf("acquiring lock %s", name) _, err := c.Ent.Lock.Create(). SetName(name). SetCreatedAt(types.UtcNow()). 
- Save(c.CTX) + Save(ctx) + if ent.IsConstraintError(err) { return err } + if err != nil { return errors.Wrapf(InsertFail, "insert lock: %s", err) } + return nil } -func (c *Client) ReleaseLock(name string) error { +func (c *Client) ReleaseLock(ctx context.Context, name string) error { log.Debugf("releasing lock %s", name) - _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(c.CTX) + _, err := c.Ent.Lock.Delete().Where(lock.NameEQ(name)).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } + return nil } -func (c *Client) ReleaseLockWithTimeout(name string, timeout int) error { +func (c *Client) ReleaseLockWithTimeout(ctx context.Context, name string, timeout int) error { log.Debugf("releasing lock %s with timeout of %d minutes", name, timeout) + _, err := c.Ent.Lock.Delete().Where( lock.NameEQ(name), lock.CreatedAtLT(time.Now().UTC().Add(-time.Duration(timeout)*time.Minute)), - ).Exec(c.CTX) - + ).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", err) } + return nil } @@ -57,23 +63,25 @@ func (c *Client) IsLocked(err error) bool { return ent.IsConstraintError(err) } -func (c *Client) AcquirePullCAPILock() error { - - /*delete orphan "old" lock if present*/ - err := c.ReleaseLockWithTimeout(CapiPullLockName, CAPIPullLockTimeout) +func (c *Client) AcquirePullCAPILock(ctx context.Context) error { + // delete orphan "old" lock if present + err := c.ReleaseLockWithTimeout(ctx, CapiPullLockName, CAPIPullLockTimeout) if err != nil { log.Errorf("unable to release pullCAPI lock: %s", err) } - return c.AcquireLock(CapiPullLockName) + + return c.AcquireLock(ctx, CapiPullLockName) } -func (c *Client) ReleasePullCAPILock() error { +func (c *Client) ReleasePullCAPILock(ctx context.Context) error { log.Debugf("deleting lock %s", CapiPullLockName) + _, err := c.Ent.Lock.Delete().Where( lock.NameEQ(CapiPullLockName), - ).Exec(c.CTX) + ).Exec(ctx) if err != nil { return errors.Wrapf(DeleteFail, "delete lock: %s", 
err) } + return nil } diff --git a/pkg/database/machines.go b/pkg/database/machines.go index 75b0ee5fdaa..d8c02825312 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "strings" "time" @@ -29,13 +30,13 @@ func (e *MachineNotFoundError) Error() string { return fmt.Sprintf("'%s' does not exist", e.MachineID) } -func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { +func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, baseMetrics models.BaseMetrics, hubItems models.HubItems, datasources map[string]int64) error { os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") var heartbeat time.Time - if baseMetrics.Metrics == nil || len(baseMetrics.Metrics) == 0 { + if len(baseMetrics.Metrics) == 0 { heartbeat = time.Now().UTC() } else { heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) @@ -63,7 +64,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B SetLastHeartbeat(heartbeat). SetHubstate(hubState). SetDatasources(datasources). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update base machine metrics in database: %w", err) } @@ -71,7 +72,7 @@ func (c *Client) MachineUpdateBaseMetrics(machineID string, baseMetrics models.B return nil } -func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { +func (c *Client) CreateMachine(ctx context.Context, machineID *string, password *strfmt.Password, ipAddress string, isValidated bool, force bool, authType string) (*ent.Machine, error) { hashPassword, err := bcrypt.GenerateFromPassword([]byte(*password), bcrypt.DefaultCost) if err != nil { c.Log.Warningf("CreateMachine: %s", err) @@ -81,20 +82,20 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA machineExist, err := c.Ent.Machine. Query(). Where(machine.MachineIdEQ(*machineID)). - Select(machine.FieldMachineId).Strings(c.CTX) + Select(machine.FieldMachineId).Strings(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } if len(machineExist) > 0 { if force { - _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(c.CTX) + _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(*machineID)).SetPassword(string(hashPassword)).Save(ctx) if err != nil { c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(UpdateFail, "machine '%s'", *machineID) } - machine, err := c.QueryMachineByID(*machineID) + machine, err := c.QueryMachineByID(ctx, *machineID) if err != nil { return nil, errors.Wrapf(QueryFail, "machine '%s': %s", *machineID, err) } @@ -112,7 +113,7 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA SetIpAddress(ipAddress). SetIsValidated(isValidated). SetAuthType(authType). 
- Save(c.CTX) + Save(ctx) if err != nil { c.Log.Warningf("CreateMachine : %s", err) return nil, errors.Wrapf(InsertFail, "creating machine '%s'", *machineID) @@ -121,11 +122,11 @@ func (c *Client) CreateMachine(machineID *string, password *strfmt.Password, ipA return machine, nil } -func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) { +func (c *Client) QueryMachineByID(ctx context.Context, machineID string) (*ent.Machine, error) { machine, err := c.Ent.Machine. Query(). Where(machine.MachineIdEQ(machineID)). - Only(c.CTX) + Only(ctx) if err != nil { c.Log.Warningf("QueryMachineByID : %s", err) return &ent.Machine{}, errors.Wrapf(UserNotExists, "user '%s'", machineID) @@ -134,8 +135,8 @@ func (c *Client) QueryMachineByID(machineID string) (*ent.Machine, error) { return machine, nil } -func (c *Client) ListMachines() ([]*ent.Machine, error) { - machines, err := c.Ent.Machine.Query().All(c.CTX) +func (c *Client) ListMachines(ctx context.Context) ([]*ent.Machine, error) { + machines, err := c.Ent.Machine.Query().All(ctx) if err != nil { return nil, errors.Wrapf(QueryFail, "listing machines: %s", err) } @@ -143,8 +144,8 @@ func (c *Client) ListMachines() ([]*ent.Machine, error) { return machines, nil } -func (c *Client) ValidateMachine(machineID string) error { - rets, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetIsValidated(true).Save(c.CTX) +func (c *Client) ValidateMachine(ctx context.Context, machineID string) error { + rets, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetIsValidated(true).Save(ctx) if err != nil { return errors.Wrapf(UpdateFail, "validating machine: %s", err) } @@ -156,8 +157,8 @@ func (c *Client) ValidateMachine(machineID string) error { return nil } -func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { - machines, err := c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(c.CTX) +func (c *Client) QueryPendingMachine(ctx context.Context) ([]*ent.Machine, 
error) { + machines, err := c.Ent.Machine.Query().Where(machine.IsValidatedEQ(false)).All(ctx) if err != nil { c.Log.Warningf("QueryPendingMachine : %s", err) return nil, errors.Wrapf(QueryFail, "querying pending machines: %s", err) @@ -166,11 +167,11 @@ func (c *Client) QueryPendingMachine() ([]*ent.Machine, error) { return machines, nil } -func (c *Client) DeleteWatcher(name string) error { +func (c *Client) DeleteWatcher(ctx context.Context, name string) error { nbDeleted, err := c.Ent.Machine. Delete(). Where(machine.MachineIdEQ(name)). - Exec(c.CTX) + Exec(ctx) if err != nil { return err } @@ -182,13 +183,13 @@ func (c *Client) DeleteWatcher(name string) error { return nil } -func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { +func (c *Client) BulkDeleteWatchers(ctx context.Context, machines []*ent.Machine) (int, error) { ids := make([]int, len(machines)) for i, b := range machines { ids[i] = b.ID } - nbDeleted, err := c.Ent.Machine.Delete().Where(machine.IDIn(ids...)).Exec(c.CTX) + nbDeleted, err := c.Ent.Machine.Delete().Where(machine.IDIn(ids...)).Exec(ctx) if err != nil { return nbDeleted, err } @@ -196,8 +197,8 @@ func (c *Client) BulkDeleteWatchers(machines []*ent.Machine) (int, error) { return nbDeleted, nil } -func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { - _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(c.CTX) +func (c *Client) UpdateMachineLastHeartBeat(ctx context.Context, machineID string) error { + _, err := c.Ent.Machine.Update().Where(machine.MachineIdEQ(machineID)).SetLastHeartbeat(time.Now().UTC()).Save(ctx) if err != nil { return errors.Wrapf(UpdateFail, "updating machine last_heartbeat: %s", err) } @@ -205,11 +206,11 @@ func (c *Client) UpdateMachineLastHeartBeat(machineID string) error { return nil } -func (c *Client) UpdateMachineScenarios(scenarios string, id int) error { +func (c *Client) UpdateMachineScenarios(ctx 
context.Context, scenarios string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetUpdatedAt(time.Now().UTC()). SetScenarios(scenarios). - Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine in database: %w", err) } @@ -217,10 +218,10 @@ func (c *Client) UpdateMachineScenarios(scenarios string, id int) error { return nil } -func (c *Client) UpdateMachineIP(ipAddr string, id int) error { +func (c *Client) UpdateMachineIP(ctx context.Context, ipAddr string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetIpAddress(ipAddr). - Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine IP in database: %w", err) } @@ -228,10 +229,10 @@ func (c *Client) UpdateMachineIP(ipAddr string, id int) error { return nil } -func (c *Client) UpdateMachineVersion(ipAddr string, id int) error { +func (c *Client) UpdateMachineVersion(ctx context.Context, ipAddr string, id int) error { _, err := c.Ent.Machine.UpdateOneID(id). SetVersion(ipAddr). 
- Save(c.CTX) + Save(ctx) if err != nil { return fmt.Errorf("unable to update machine version in database: %w", err) } @@ -239,8 +240,8 @@ func (c *Client) UpdateMachineVersion(ipAddr string, id int) error { return nil } -func (c *Client) IsMachineRegistered(machineID string) (bool, error) { - exist, err := c.Ent.Machine.Query().Where().Select(machine.FieldMachineId).Strings(c.CTX) +func (c *Client) IsMachineRegistered(ctx context.Context, machineID string) (bool, error) { + exist, err := c.Ent.Machine.Query().Where().Select(machine.FieldMachineId).Strings(ctx) if err != nil { return false, err } @@ -256,11 +257,11 @@ func (c *Client) IsMachineRegistered(machineID string) (bool, error) { return false, nil } -func (c *Client) QueryMachinesInactiveSince(t time.Time) ([]*ent.Machine, error) { +func (c *Client) QueryMachinesInactiveSince(ctx context.Context, t time.Time) ([]*ent.Machine, error) { return c.Ent.Machine.Query().Where( machine.Or( machine.And(machine.LastHeartbeatLT(t), machine.IsValidatedEQ(true)), machine.And(machine.LastHeartbeatIsNil(), machine.CreatedAtLT(t)), ), - ).All(c.CTX) + ).All(ctx) } diff --git a/pkg/database/metrics.go b/pkg/database/metrics.go index 7626c39f6f1..eb4c472821e 100644 --- a/pkg/database/metrics.go +++ b/pkg/database/metrics.go @@ -1,6 +1,7 @@ package database import ( + "context" "fmt" "time" @@ -8,15 +9,15 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/metric" ) -func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) (*ent.Metric, error) { +func (c *Client) CreateMetric(ctx context.Context, generatedType metric.GeneratedType, generatedBy string, receivedAt time.Time, payload string) (*ent.Metric, error) { metric, err := c.Ent.Metric. Create(). SetGeneratedType(generatedType). SetGeneratedBy(generatedBy). SetReceivedAt(receivedAt). SetPayload(payload). 
- Save(c.CTX) - if err != nil { + Save(ctx) + if err != nil { c.Log.Warningf("CreateMetric: %s", err) return nil, fmt.Errorf("storing metrics snapshot for '%s' at %s: %w", generatedBy, receivedAt, InsertFail) } @@ -24,14 +25,14 @@ func (c *Client) CreateMetric(generatedType metric.GeneratedType, generatedBy st return metric, nil } -func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, error) { +func (c *Client) GetLPUsageMetricsByMachineID(ctx context.Context, machineId string) ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( metric.GeneratedTypeEQ(metric.GeneratedTypeLP), metric.GeneratedByEQ(machineId), metric.PushedAtIsNil(), ). - All(c.CTX) + All(ctx) if err != nil { c.Log.Warningf("GetLPUsageMetricsByOrigin: %s", err) return nil, fmt.Errorf("getting LP usage metrics by origin %s: %w", machineId, err) @@ -40,14 +41,14 @@ func (c *Client) GetLPUsageMetricsByMachineID(machineId string) ([]*ent.Metric, return metrics, nil } -func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric, error) { +func (c *Client) GetBouncerUsageMetricsByName(ctx context.Context, bouncerName string) ([]*ent.Metric, error) { metrics, err := c.Ent.Metric.Query(). Where( metric.GeneratedTypeEQ(metric.GeneratedTypeRC), metric.GeneratedByEQ(bouncerName), metric.PushedAtIsNil(), ). - All(c.CTX) + All(ctx) if err != nil { c.Log.Warningf("GetBouncerUsageMetricsByName: %s", err) return nil, fmt.Errorf("getting bouncer usage metrics by name %s: %w", bouncerName, err) @@ -56,11 +57,11 @@ func (c *Client) GetBouncerUsageMetricsByName(bouncerName string) ([]*ent.Metric return metrics, nil } -func (c *Client) MarkUsageMetricsAsSent(ids []int) error { +func (c *Client) MarkUsageMetricsAsSent(ctx context.Context, ids []int) error { _, err := c.Ent.Metric.Update(). Where(metric.IDIn(ids...)). SetPushedAt(time.Now().UTC()). 
- Save(c.CTX) + Save(ctx) if err != nil { c.Log.Warningf("MarkUsageMetricsAsSent: %s", err) return fmt.Errorf("marking usage metrics as sent: %w", err) diff --git a/pkg/database/utils.go b/pkg/database/utils.go index f1c06565635..8148df56f24 100644 --- a/pkg/database/utils.go +++ b/pkg/database/utils.go @@ -42,7 +42,8 @@ func LastAddress(n *net.IPNet) net.IP { ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], - ip[15] | ^n.Mask[15]} + ip[15] | ^n.Mask[15], + } } return net.IPv4( @@ -74,7 +75,7 @@ func ParseDuration(d string) (time.Duration, error) { if strings.HasSuffix(d, "d") { days := strings.Split(d, "d")[0] - if len(days) == 0 { + if days == "" { return 0, fmt.Errorf("'%s' can't be parsed as duration", d) } diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index d43f3cdc1b9..bc8f78dc203 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -259,7 +259,7 @@ func (t *tree) displayResults(opts DumpOpts) { } if updated > 0 { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } @@ -267,7 +267,7 @@ func (t *tree) displayResults(opts DumpOpts) { } if deleted > 0 { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } @@ -275,7 +275,7 @@ func (t *tree) displayResults(opts DumpOpts) { } if whitelisted { - if len(changeStr) > 0 { + if changeStr != "" { changeStr += " " } diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 711aa491078..2e47af6d1de 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -53,9 +53,8 @@ type OpOutput struct { } func (o *OpOutput) String() string { - ret := fmt.Sprintf("%*c", o.CodeDepth, ' ') - if len(o.Code) != 0 { + if o.Code != "" { ret += fmt.Sprintf("[%s]", o.Code) } ret += " " @@ -70,7 +69,7 @@ func (o *OpOutput) String() string { indent = 0 } ret = fmt.Sprintf("%*cBLOCK_END [%s]", indent, 
' ', o.Code) - if len(o.StrConditionResult) > 0 { + if o.StrConditionResult != "" { ret += fmt.Sprintf(" -> %s", o.StrConditionResult) } return ret diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index efdcbc1a769..32144454084 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -26,6 +26,7 @@ type ExprDbgTest struct { func UpperTwo(params ...any) (any, error) { s := params[0].(string) v := params[1].(string) + return strings.ToUpper(s) + strings.ToUpper(v), nil } @@ -33,6 +34,7 @@ func UpperThree(params ...any) (any, error) { s := params[0].(string) v := params[1].(string) x := params[2].(string) + return strings.ToUpper(s) + strings.ToUpper(v) + strings.ToUpper(x), nil } @@ -41,6 +43,7 @@ func UpperN(params ...any) (any, error) { v := params[1].(string) x := params[2].(string) y := params[3].(string) + return strings.ToUpper(s) + strings.ToUpper(v) + strings.ToUpper(x) + strings.ToUpper(y), nil } @@ -76,9 +79,9 @@ func TestBaseDbg(t *testing.T) { // use '%#v' to dump in golang syntax // use regexp to clear empty/default fields: // [a-z]+: (false|\[\]string\(nil\)|""), - //ConditionResult:(*bool) + // ConditionResult:(*bool) - //Missing multi parametes function + // Missing multi parametes function tests := []ExprDbgTest{ { Name: "nil deref", @@ -272,6 +275,7 @@ func TestBaseDbg(t *testing.T) { } logger := log.WithField("test", "exprhelpers") + for _, test := range tests { if test.LogLevel != 0 { log.SetLevel(test.LogLevel) @@ -308,10 +312,13 @@ func TestBaseDbg(t *testing.T) { t.Fatalf("test %s : unexpected compile error : %s", test.Name, err) } } + if test.Name == "nil deref" { test.Env["nilvar"] = nil } + outdbg, ret, err := RunWithDebug(prog, test.Env, logger) + if test.ExpectedFailRuntime { if err == nil { t.Fatalf("test %s : expected runtime error", test.Name) @@ -321,25 +328,30 @@ func TestBaseDbg(t *testing.T) { t.Fatalf("test %s : unexpected runtime error : %s", test.Name, err) } } + 
log.SetLevel(log.DebugLevel) DisplayExprDebug(prog, outdbg, logger, ret) + if len(outdbg) != len(test.ExpectedOutputs) { t.Errorf("failed test %s", test.Name) t.Errorf("%#v", outdbg) - //out, _ := yaml.Marshal(outdbg) - //fmt.Printf("%s", string(out)) + // out, _ := yaml.Marshal(outdbg) + // fmt.Printf("%s", string(out)) t.Fatalf("test %s : expected %d outputs, got %d", test.Name, len(test.ExpectedOutputs), len(outdbg)) - } + for i, out := range outdbg { - if !reflect.DeepEqual(out, test.ExpectedOutputs[i]) { - spew.Config.DisableMethods = true - t.Errorf("failed test %s", test.Name) - t.Errorf("expected : %#v", test.ExpectedOutputs[i]) - t.Errorf("got : %#v", out) - t.Fatalf("%d/%d : mismatch", i, len(outdbg)) + if reflect.DeepEqual(out, test.ExpectedOutputs[i]) { + // DisplayExprDebug(prog, outdbg, logger, ret) + continue } - //DisplayExprDebug(prog, outdbg, logger, ret) + + spew.Config.DisableMethods = true + + t.Errorf("failed test %s", test.Name) + t.Errorf("expected : %#v", test.ExpectedOutputs[i]) + t.Errorf("got : %#v", out) + t.Fatalf("%d/%d : mismatch", i, len(outdbg)) } } } diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 17ce468f623..9bc991a8f2d 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -2,6 +2,7 @@ package exprhelpers import ( "bufio" + "context" "encoding/base64" "errors" "fmt" @@ -128,7 +129,7 @@ func Init(databaseClient *database.Client) error { dataFileRegex = make(map[string][]*regexp.Regexp) dataFileRe2 = make(map[string][]*re2.Regexp) dbClient = databaseClient - + XMLCacheInit() return nil } @@ -213,7 +214,7 @@ func FileInit(fileFolder string, filename string, fileType string) error { if strings.HasPrefix(scanner.Text(), "#") { // allow comments continue } - if len(scanner.Text()) == 0 { //skip empty lines + if scanner.Text() == "" { //skip empty lines continue } @@ -254,7 +255,6 @@ func Distinct(params ...any) (any, error) { } } return ret, nil - } func FlattenDistinct(params ...any) 
(any, error) { @@ -280,6 +280,7 @@ func flatten(args []interface{}, v reflect.Value) []interface{} { return args } + func existsInFileMaps(filename string, ftype string) (bool, error) { ok := false var err error @@ -592,7 +593,10 @@ func GetDecisionsCount(params ...any) (any, error) { return 0, nil } - count, err := dbClient.CountDecisionsByValue(value) + + ctx := context.TODO() + + count, err := dbClient.CountDecisionsByValue(ctx, value) if err != nil { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility @@ -613,8 +617,11 @@ func GetDecisionsSinceCount(params ...any) (any, error) { log.Errorf("Failed to parse since parameter '%s' : %s", since, err) return 0, nil } + + ctx := context.TODO() sinceTime := time.Now().UTC().Add(-sinceDuration) - count, err := dbClient.CountDecisionsSinceByValue(value, sinceTime) + + count, err := dbClient.CountDecisionsSinceByValue(ctx, value, sinceTime) if err != nil { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility @@ -628,7 +635,8 @@ func GetActiveDecisionsCount(params ...any) (any, error) { log.Error("No database config to call GetActiveDecisionsCount()") return 0, nil } - count, err := dbClient.CountActiveDecisionsByValue(value) + ctx := context.TODO() + count, err := dbClient.CountActiveDecisionsByValue(ctx, value) if err != nil { log.Errorf("Failed to get active decisions count from value '%s'", value) return 0, err @@ -642,7 +650,8 @@ func GetActiveDecisionsTimeLeft(params ...any) (any, error) { log.Error("No database config to call GetActiveDecisionsTimeLeft()") return 0, nil } - timeLeft, err := dbClient.GetActiveDecisionsTimeLeftByValue(value) + ctx := context.TODO() + timeLeft, err := 
dbClient.GetActiveDecisionsTimeLeftByValue(ctx, value) if err != nil { log.Errorf("Failed to get active decisions time left from value '%s'", value) return 0, err @@ -765,7 +774,6 @@ func B64Decode(params ...any) (any, error) { } func ParseKV(params ...any) (any, error) { - blob := params[0].(string) target := params[1].(map[string]interface{}) prefix := params[2].(string) diff --git a/pkg/exprhelpers/xml.go b/pkg/exprhelpers/xml.go index 75758e18316..0b550bdb641 100644 --- a/pkg/exprhelpers/xml.go +++ b/pkg/exprhelpers/xml.go @@ -1,43 +1,103 @@ package exprhelpers import ( + "errors" + "sync" + "time" + "github.com/beevik/etree" + "github.com/bluele/gcache" + "github.com/cespare/xxhash/v2" log "github.com/sirupsen/logrus" ) -var pathCache = make(map[string]etree.Path) +var ( + pathCache = make(map[string]etree.Path) + rwMutex = sync.RWMutex{} + xmlDocumentCache gcache.Cache +) + +func compileOrGetPath(path string) (etree.Path, error) { + rwMutex.RLock() + compiledPath, ok := pathCache[path] + rwMutex.RUnlock() + + if !ok { + var err error + compiledPath, err = etree.CompilePath(path) + if err != nil { + return etree.Path{}, err + } + + rwMutex.Lock() + pathCache[path] = compiledPath + rwMutex.Unlock() + } + + return compiledPath, nil +} + +func getXMLDocumentFromCache(xmlString string) (*etree.Document, error) { + cacheKey := xxhash.Sum64String(xmlString) + cacheObj, err := xmlDocumentCache.Get(cacheKey) + + if err != nil && !errors.Is(err, gcache.KeyNotFoundError) { + return nil, err + } + + doc, ok := cacheObj.(*etree.Document) + if !ok || cacheObj == nil { + doc = etree.NewDocument() + if err := doc.ReadFromString(xmlString); err != nil { + return nil, err + } + if err := xmlDocumentCache.Set(cacheKey, doc); err != nil { + log.Warnf("Could not set XML document in cache: %s", err) + } + } + + return doc, nil +} + +func XMLCacheInit() { + gc := gcache.New(50) + // Short cache expiration because we each line we read is different, but we can call multiple times XML 
helpers on each of them + gc.Expiration(5 * time.Second) + gc = gc.LRU() + + xmlDocumentCache = gc.Build() +} // func XMLGetAttributeValue(xmlString string, path string, attributeName string) string { func XMLGetAttributeValue(params ...any) (any, error) { xmlString := params[0].(string) path := params[1].(string) attributeName := params[2].(string) - if _, ok := pathCache[path]; !ok { - compiledPath, err := etree.CompilePath(path) - if err != nil { - log.Errorf("Could not compile path %s: %s", path, err) - return "", nil - } - pathCache[path] = compiledPath + + compiledPath, err := compileOrGetPath(path) + if err != nil { + log.Errorf("Could not compile path %s: %s", path, err) + return "", nil } - compiledPath := pathCache[path] - doc := etree.NewDocument() - err := doc.ReadFromString(xmlString) + doc, err := getXMLDocumentFromCache(xmlString) if err != nil { log.Tracef("Could not parse XML: %s", err) return "", nil } + elem := doc.FindElementPath(compiledPath) if elem == nil { log.Debugf("Could not find element %s", path) return "", nil } + attr := elem.SelectAttr(attributeName) if attr == nil { log.Debugf("Could not find attribute %s", attributeName) return "", nil } + return attr.Value, nil } @@ -45,26 +105,24 @@ func XMLGetAttributeValue(params ...any) (any, error) { func XMLGetNodeValue(params ...any) (any, error) { xmlString := params[0].(string) path := params[1].(string) - if _, ok := pathCache[path]; !ok { - compiledPath, err := etree.CompilePath(path) - if err != nil { - log.Errorf("Could not compile path %s: %s", path, err) - return "", nil - } - pathCache[path] = compiledPath + + compiledPath, err := compileOrGetPath(path) + if err != nil { + log.Errorf("Could not compile path %s: %s", path, err) + return "", nil } - compiledPath := pathCache[path] - doc := etree.NewDocument() - err := doc.ReadFromString(xmlString) + doc, err := getXMLDocumentFromCache(xmlString) if err != nil { log.Tracef("Could not parse XML: %s", err) return "", nil } + elem := 
doc.FindElementPath(compiledPath) if elem == nil { log.Debugf("Could not find element %s", path) return "", nil } + return elem.Text(), nil } diff --git a/pkg/fflag/features.go b/pkg/fflag/features.go index 3a106984a66..c8a3d7755ea 100644 --- a/pkg/fflag/features.go +++ b/pkg/fflag/features.go @@ -97,7 +97,7 @@ type FeatureRegister struct { features map[string]*Feature } -var featureNameRexp = regexp.MustCompile(`^[a-z0-9_\.]+$`) +var featureNameRexp = regexp.MustCompile(`^[a-z0-9_.]+$`) func validateFeatureName(featureName string) error { if featureName == "" { diff --git a/pkg/hubtest/coverage.go b/pkg/hubtest/coverage.go index 4156def06d7..e42c1e23455 100644 --- a/pkg/hubtest/coverage.go +++ b/pkg/hubtest/coverage.go @@ -57,7 +57,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { - return nil, fmt.Errorf("unmarshal: %v", err) + return nil, fmt.Errorf("parsing: %v", err) } for _, appsecRulesFile := range configFileData.AppsecRules { @@ -70,7 +70,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { err = yaml.Unmarshal(yamlFile, appsecRuleData) if err != nil { - return nil, fmt.Errorf("unmarshal: %v", err) + return nil, fmt.Errorf("parsing: %v", err) } appsecRuleName := appsecRuleData.Name diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index a4ca275c310..93f5abaa879 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -83,7 +83,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT } if isAppsecTest { - HubTestPath := filepath.Join(hubPath, "./.appsec-tests/") + HubTestPath := filepath.Join(hubPath, ".appsec-tests") hubIndexFile := filepath.Join(hubPath, ".index.json") local := &csconfig.LocalHubCfg{ @@ -119,7 +119,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT }, nil } - HubTestPath := filepath.Join(hubPath, "./.tests/") + HubTestPath := filepath.Join(hubPath, ".tests") 
hubIndexFile := filepath.Join(hubPath, ".index.json") diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index da4969ee8dd..bc9c8955d0d 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -111,7 +111,7 @@ func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { - return nil, fmt.Errorf("unmarshal: %w", err) + return nil, fmt.Errorf("parsing: %w", err) } parserAssertFilePath := filepath.Join(testPath, ParserAssertFileName) @@ -201,7 +201,7 @@ func (t *HubTestItem) InstallHub() error { b, err := yaml.Marshal(n) if err != nil { - return fmt.Errorf("unable to marshal overrides: %w", err) + return fmt.Errorf("unable to serialize overrides: %w", err) } tgtFilename := fmt.Sprintf("%s/parsers/s00-raw/00_overrides.yaml", t.RuntimePath) @@ -223,39 +223,30 @@ func (t *HubTestItem) InstallHub() error { ctx := context.Background() // install data for parsers if needed - ret := hub.GetItemMap(cwhub.PARSERS) - for parserName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", parserName, err) - } - - log.Debugf("parser '%s' installed successfully in runtime environment", parserName) + for _, item := range hub.GetInstalledByType(cwhub.PARSERS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("parser '%s' installed successfully in runtime environment", item.Name) } // install data for scenarios if needed - ret = hub.GetItemMap(cwhub.SCENARIOS) - for scenarioName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", scenarioName, err) - } - - log.Debugf("scenario '%s' installed successfully in 
runtime environment", scenarioName) + for _, item := range hub.GetInstalledByType(cwhub.SCENARIOS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("scenario '%s' installed successfully in runtime environment", item.Name) } // install data for postoverflows if needed - ret = hub.GetItemMap(cwhub.POSTOVERFLOWS) - for postoverflowName, item := range ret { - if item.State.Installed { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", postoverflowName, err) - } - - log.Debugf("postoverflow '%s' installed successfully in runtime environment", postoverflowName) + for _, item := range hub.GetInstalledByType(cwhub.POSTOVERFLOWS, true) { + if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } + + log.Debugf("postoverflow '%s' installed successfully in runtime environment", item.Name) } return nil diff --git a/pkg/hubtest/nucleirunner.go b/pkg/hubtest/nucleirunner.go index 0bf2013dd8d..32c81eb64d8 100644 --- a/pkg/hubtest/nucleirunner.go +++ b/pkg/hubtest/nucleirunner.go @@ -42,11 +42,11 @@ func (nc *NucleiConfig) RunNucleiTemplate(testName string, templatePath string, err := cmd.Run() - if err := os.WriteFile(outputPrefix+"_stdout.txt", out.Bytes(), 0644); err != nil { + if err := os.WriteFile(outputPrefix+"_stdout.txt", out.Bytes(), 0o644); err != nil { log.Warningf("Error writing stdout: %s", err) } - if err := os.WriteFile(outputPrefix+"_stderr.txt", outErr.Bytes(), 0644); err != nil { + if err := os.WriteFile(outputPrefix+"_stderr.txt", outErr.Bytes(), 0o644); err != nil { log.Warningf("Error writing stderr: %s", err) } @@ -56,7 +56,7 @@ func (nc *NucleiConfig) RunNucleiTemplate(testName string, templatePath string, log.Warningf("Stderr saved to %s", outputPrefix+"_stderr.txt") 
log.Warningf("Nuclei generated output saved to %s", outputPrefix+".json") return err - } else if len(out.String()) == 0 { + } else if out.String() == "" { log.Warningf("Stdout saved to %s", outputPrefix+"_stdout.txt") log.Warningf("Stderr saved to %s", outputPrefix+"_stderr.txt") log.Warningf("Nuclei generated output saved to %s", outputPrefix+".json") diff --git a/pkg/hubtest/regexp.go b/pkg/hubtest/regexp.go index f9165eae3d1..8b2fcc928dd 100644 --- a/pkg/hubtest/regexp.go +++ b/pkg/hubtest/regexp.go @@ -5,7 +5,7 @@ import ( ) var ( - variableRE = regexp.MustCompile(`(?P[^ =]+) == .*`) - parserResultRE = regexp.MustCompile(`^results\["[^"]+"\]\["(?P[^"]+)"\]\[[0-9]+\]\.Evt\..*`) + variableRE = regexp.MustCompile(`(?P[^ =]+) == .*`) + parserResultRE = regexp.MustCompile(`^results\["[^"]+"\]\["(?P[^"]+)"\]\[[0-9]+\]\.Evt\..*`) scenarioResultRE = regexp.MustCompile(`^results\[[0-9]+\].Overflow.Alert.GetScenario\(\) == "(?P[^"]+)"`) ) diff --git a/pkg/hubtest/utils.go b/pkg/hubtest/utils.go index a7373fcc0bf..b42a73461f3 100644 --- a/pkg/hubtest/utils.go +++ b/pkg/hubtest/utils.go @@ -91,7 +91,7 @@ func CopyDir(src string, dest string) error { return errors.New("Source " + file.Name() + " is not a directory!") } - err = os.MkdirAll(dest, 0755) + err = os.MkdirAll(dest, 0o755) if err != nil { return err } diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 989e03944c3..1da906cb555 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -136,7 +136,7 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er } if err := yaml.UnmarshalStrict(out.Bytes(), &stages); err != nil { - t.Fatalf("failed unmarshaling %s : %s", stagecfg, err) + t.Fatalf("failed to parse %s : %s", stagecfg, err) } files := []string{} @@ -201,7 +201,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res var ts time.Time if err := ts.UnmarshalText([]byte(in.MarshaledTime)); err != 
nil { - t.Fatalf("Failed to unmarshal time from input event : %s", err) + t.Fatalf("Failed to parse time from input event : %s", err) } if latest_ts.IsZero() { diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index ca2e4d17d99..b8310b8cb17 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -45,12 +45,12 @@ type BucketFactory struct { Debug bool `yaml:"debug"` // Debug, when set to true, will enable debugging for _this_ scenario specifically Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow Blackhole string `yaml:"blackhole,omitempty"` // Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration - logger *log.Entry `yaml:"-"` // logger is bucket-specific logger (used by Debug as well) - Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain - CacheSize int `yaml:"cache_size"` // CacheSize, if > 0, limits the size of in-memory cache of the bucket - Profiling bool `yaml:"profiling"` // Profiling, if true, will make the bucket record pours/overflows/etc. 
- OverflowFilter string `yaml:"overflow_filter"` // OverflowFilter if present, is a filter that must return true for the overflow to go through - ConditionalOverflow string `yaml:"condition"` // condition if present, is an expression that must return true for the bucket to overflow + logger *log.Entry // logger is bucket-specific logger (used by Debug as well) + Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain + CacheSize int `yaml:"cache_size"` // CacheSize, if > 0, limits the size of in-memory cache of the bucket + Profiling bool `yaml:"profiling"` // Profiling, if true, will make the bucket record pours/overflows/etc. + OverflowFilter string `yaml:"overflow_filter"` // OverflowFilter if present, is a filter that must return true for the overflow to go through + ConditionalOverflow string `yaml:"condition"` // condition if present, is an expression that must return true for the bucket to overflow BayesianPrior float32 `yaml:"bayesian_prior"` BayesianThreshold float32 `yaml:"bayesian_threshold"` BayesianConditions []RawBayesianCondition `yaml:"bayesian_conditions"` // conditions for the bayesian bucket @@ -68,95 +68,136 @@ type BucketFactory struct { processors []Processor // processors is the list of hooks for pour/overflow/create (cf. uniq, blackhole etc.) output bool // ?? 
ScenarioVersion string `yaml:"version,omitempty"` - hash string `yaml:"-"` - Simulated bool `yaml:"simulated"` // Set to true if the scenario instantiating the bucket was in the exclusion list - tomb *tomb.Tomb `yaml:"-"` - wgPour *sync.WaitGroup `yaml:"-"` - wgDumpState *sync.WaitGroup `yaml:"-"` + hash string + Simulated bool `yaml:"simulated"` // Set to true if the scenario instantiating the bucket was in the exclusion list + tomb *tomb.Tomb + wgPour *sync.WaitGroup + wgDumpState *sync.WaitGroup orderEvent bool } // we use one NameGenerator for all the future buckets var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) -func ValidateFactory(bucketFactory *BucketFactory) error { - if bucketFactory.Name == "" { - return errors.New("bucket must have name") +func validateLeakyType(bucketFactory *BucketFactory) error { + if bucketFactory.Capacity <= 0 { // capacity must be a positive int + return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) } - if bucketFactory.Description == "" { - return errors.New("description is mandatory") + if bucketFactory.LeakSpeed == "" { + return errors.New("leakspeed can't be empty for leaky") } - if bucketFactory.Type == "leaky" { - if bucketFactory.Capacity <= 0 { // capacity must be a positive int - return fmt.Errorf("bad capacity for leaky '%d'", bucketFactory.Capacity) - } + if bucketFactory.leakspeed == 0 { + return fmt.Errorf("bad leakspeed for leaky '%s'", bucketFactory.LeakSpeed) + } - if bucketFactory.LeakSpeed == "" { - return errors.New("leakspeed can't be empty for leaky") - } + return nil +} - if bucketFactory.leakspeed == 0 { - return fmt.Errorf("bad leakspeed for leaky '%s'", bucketFactory.LeakSpeed) - } - } else if bucketFactory.Type == "counter" { - if bucketFactory.Duration == "" { - return errors.New("duration can't be empty for counter") - } +func validateCounterType(bucketFactory *BucketFactory) error { + if bucketFactory.Duration == "" { + return 
errors.New("duration can't be empty for counter") + } - if bucketFactory.duration == 0 { - return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) - } + if bucketFactory.duration == 0 { + return fmt.Errorf("bad duration for counter bucket '%d'", bucketFactory.duration) + } - if bucketFactory.Capacity != -1 { - return errors.New("counter bucket must have -1 capacity") - } - } else if bucketFactory.Type == "trigger" { - if bucketFactory.Capacity != 0 { - return errors.New("trigger bucket must have 0 capacity") - } - } else if bucketFactory.Type == "conditional" { - if bucketFactory.ConditionalOverflow == "" { - return errors.New("conditional bucket must have a condition") - } + if bucketFactory.Capacity != -1 { + return errors.New("counter bucket must have -1 capacity") + } - if bucketFactory.Capacity != -1 { - bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows") - } + return nil +} - if bucketFactory.LeakSpeed == "" { - return errors.New("leakspeed can't be empty for conditional bucket") - } +func validateTriggerType(bucketFactory *BucketFactory) error { + if bucketFactory.Capacity != 0 { + return errors.New("trigger bucket must have 0 capacity") + } - if bucketFactory.leakspeed == 0 { - return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed) - } - } else if bucketFactory.Type == "bayesian" { - if bucketFactory.BayesianConditions == nil { - return errors.New("bayesian bucket must have bayesian conditions") - } + return nil +} - if bucketFactory.BayesianPrior == 0 { - return errors.New("bayesian bucket must have a valid, non-zero prior") - } +func validateConditionalType(bucketFactory *BucketFactory) error { + if bucketFactory.ConditionalOverflow == "" { + return errors.New("conditional bucket must have a condition") + } - if bucketFactory.BayesianThreshold == 0 { - return errors.New("bayesian bucket must have a valid, non-zero 
threshold") - } + if bucketFactory.Capacity != -1 { + bucketFactory.logger.Warnf("Using a value different than -1 as capacity for conditional bucket, this may lead to unexpected overflows") + } - if bucketFactory.BayesianPrior > 1 { - return errors.New("bayesian bucket must have a valid, non-zero prior") - } + if bucketFactory.LeakSpeed == "" { + return errors.New("leakspeed can't be empty for conditional bucket") + } - if bucketFactory.BayesianThreshold > 1 { - return errors.New("bayesian bucket must have a valid, non-zero threshold") - } + if bucketFactory.leakspeed == 0 { + return fmt.Errorf("bad leakspeed for conditional bucket '%s'", bucketFactory.LeakSpeed) + } + + return nil +} + +func validateBayesianType(bucketFactory *BucketFactory) error { + if bucketFactory.BayesianConditions == nil { + return errors.New("bayesian bucket must have bayesian conditions") + } + + if bucketFactory.BayesianPrior == 0 { + return errors.New("bayesian bucket must have a valid, non-zero prior") + } + + if bucketFactory.BayesianThreshold == 0 { + return errors.New("bayesian bucket must have a valid, non-zero threshold") + } + + if bucketFactory.BayesianPrior > 1 { + return errors.New("bayesian bucket must have a valid, non-zero prior") + } + + if bucketFactory.BayesianThreshold > 1 { + return errors.New("bayesian bucket must have a valid, non-zero threshold") + } + + if bucketFactory.Capacity != -1 { + return errors.New("bayesian bucket must have capacity -1") + } + + return nil +} + +func ValidateFactory(bucketFactory *BucketFactory) error { + if bucketFactory.Name == "" { + return errors.New("bucket must have name") + } + + if bucketFactory.Description == "" { + return errors.New("description is mandatory") + } - if bucketFactory.Capacity != -1 { - return errors.New("bayesian bucket must have capacity -1") + switch bucketFactory.Type { + case "leaky": + if err := validateLeakyType(bucketFactory); err != nil { + return err } - } else { + case "counter": + if err := 
validateCounterType(bucketFactory); err != nil { + return err + } + case "trigger": + if err := validateTriggerType(bucketFactory); err != nil { + return err + } + case "conditional": + if err := validateConditionalType(bucketFactory); err != nil { + return err + } + case "bayesian": + if err := validateBayesianType(bucketFactory); err != nil { + return err + } + default: return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) } @@ -230,8 +271,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = dec.Decode(&bucketFactory) if err != nil { if !errors.Is(err, io.EOF) { - log.Errorf("Bad yaml in %s : %v", f, err) - return nil, nil, fmt.Errorf("bad yaml in %s : %v", f, err) + log.Errorf("Bad yaml in %s: %v", f, err) + return nil, nil, fmt.Errorf("bad yaml in %s: %w", f, err) } log.Tracef("End of yaml file") @@ -251,13 +292,13 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str bucketFactory.FormatVersion = "1.0" } - ok, err := cwversion.Satisfies(bucketFactory.FormatVersion, cwversion.Constraint_scenario) + ok, err := constraint.Satisfies(bucketFactory.FormatVersion, constraint.Scenario) if err != nil { return nil, nil, fmt.Errorf("failed to check version: %w", err) } if !ok { - log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, cwversion.Constraint_scenario) + log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, constraint.Scenario) continue } @@ -282,8 +323,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = LoadBucket(&bucketFactory, tomb) if err != nil { - log.Errorf("Failed to load bucket %s : %v", bucketFactory.Name, err) - return nil, nil, fmt.Errorf("loading of %s failed : %v", bucketFactory.Name, err) + log.Errorf("Failed to load bucket %s: %v", bucketFactory.Name, err) + return nil, nil, 
fmt.Errorf("loading of %s failed: %w", bucketFactory.Name, err) } bucketFactory.orderEvent = orderEvent @@ -326,7 +367,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.LeakSpeed != "" { if bucketFactory.leakspeed, err = time.ParseDuration(bucketFactory.LeakSpeed); err != nil { - return fmt.Errorf("bad leakspeed '%s' in %s : %v", bucketFactory.LeakSpeed, bucketFactory.Filename, err) + return fmt.Errorf("bad leakspeed '%s' in %s: %w", bucketFactory.LeakSpeed, bucketFactory.Filename, err) } } else { bucketFactory.leakspeed = time.Duration(0) @@ -334,7 +375,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Duration != "" { if bucketFactory.duration, err = time.ParseDuration(bucketFactory.Duration); err != nil { - return fmt.Errorf("invalid Duration '%s' in %s : %v", bucketFactory.Duration, bucketFactory.Filename, err) + return fmt.Errorf("invalid Duration '%s' in %s: %w", bucketFactory.Duration, bucketFactory.Filename, err) } } @@ -345,13 +386,13 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { - return fmt.Errorf("invalid filter '%s' in %s : %v", bucketFactory.Filter, bucketFactory.Filename, err) + return fmt.Errorf("invalid filter '%s' in %s: %w", bucketFactory.Filter, bucketFactory.Filename, err) } if bucketFactory.GroupBy != "" { bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { - return fmt.Errorf("invalid groupby '%s' in %s : %v", bucketFactory.GroupBy, bucketFactory.Filename, err) + return fmt.Errorf("invalid groupby '%s' in %s: %w", bucketFactory.GroupBy, bucketFactory.Filename, err) } } @@ -370,7 +411,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { case "bayesian": bucketFactory.processors = append(bucketFactory.processors, &DumbProcessor{}) default: - return fmt.Errorf("invalid type '%s' in %s : %v", bucketFactory.Type, bucketFactory.Filename, err) + return fmt.Errorf("invalid type '%s' in %s: %w", bucketFactory.Type, bucketFactory.Filename, err) } if bucketFactory.Distinct != "" { @@ -435,7 +476,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.output = false if err := ValidateFactory(bucketFactory); err != nil { - return fmt.Errorf("invalid bucket from %s : %v", bucketFactory.Filename, err) + return fmt.Errorf("invalid bucket from %s: %w", bucketFactory.Filename, err) } bucketFactory.tomb = tomb @@ -452,7 +493,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac } if err := json.Unmarshal(body, &state); err != nil { - return fmt.Errorf("can't unmarshal state file %s: %w", file, err) + return fmt.Errorf("can't parse state file %s: %w", file, err) } for k, v := range state { @@ -468,37 +509,39 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac found := false for _, h := range bucketFactories { - if h.Name == v.Name { - log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) - // check in which mode the bucket was - if v.Mode == types.TIMEMACHINE { - tbucket = NewTimeMachine(h) - } else if v.Mode == types.LIVE { - tbucket = NewLeaky(h) - } else { - log.Errorf("Unknown bucket type : %d", v.Mode) - } - /*Trying to restore queue state*/ - tbucket.Queue = v.Queue - /*Trying to set the limiter to the saved values*/ - tbucket.Limiter.Load(v.SerializedState) - tbucket.In = 
make(chan *types.Event) - tbucket.Mapkey = k - tbucket.Signal = make(chan bool, 1) - tbucket.First_ts = v.First_ts - tbucket.Last_ts = v.Last_ts - tbucket.Ovflw_ts = v.Ovflw_ts - tbucket.Total_count = v.Total_count - buckets.Bucket_map.Store(k, tbucket) - h.tomb.Go(func() error { - return LeakRoutine(tbucket) - }) - <-tbucket.Signal - - found = true + if h.Name != v.Name { + continue + } - break + log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) + // check in which mode the bucket was + if v.Mode == types.TIMEMACHINE { + tbucket = NewTimeMachine(h) + } else if v.Mode == types.LIVE { + tbucket = NewLeaky(h) + } else { + log.Errorf("Unknown bucket type : %d", v.Mode) } + /*Trying to restore queue state*/ + tbucket.Queue = v.Queue + /*Trying to set the limiter to the saved values*/ + tbucket.Limiter.Load(v.SerializedState) + tbucket.In = make(chan *types.Event) + tbucket.Mapkey = k + tbucket.Signal = make(chan bool, 1) + tbucket.First_ts = v.First_ts + tbucket.Last_ts = v.Last_ts + tbucket.Ovflw_ts = v.Ovflw_ts + tbucket.Total_count = v.Total_count + buckets.Bucket_map.Store(k, tbucket) + h.tomb.Go(func() error { + return LeakRoutine(tbucket) + }) + <-tbucket.Signal + + found = true + + break } if !found { diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 673b372d81e..2858d8b5635 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -132,7 +132,7 @@ func DumpBucketsStateAt(deadline time.Time, outputdir string, buckets *Buckets) }) bbuckets, err := json.MarshalIndent(serialized, "", " ") if err != nil { - return "", fmt.Errorf("failed to unmarshal buckets: %s", err) + return "", fmt.Errorf("failed to parse buckets: %s", err) } size, err := tmpFd.Write(bbuckets) if err != nil { @@ -203,7 +203,7 @@ func PourItemToBucket(bucket *Leaky, holder BucketFactory, buckets *Buckets, par var d time.Time err = d.UnmarshalText([]byte(parsed.MarshaledTime)) if err != nil { - 
holder.logger.Warningf("Failed unmarshaling event time (%s) : %v", parsed.MarshaledTime, err) + holder.logger.Warningf("Failed to parse event time (%s) : %v", parsed.MarshaledTime, err) } if d.After(lastTs.Add(bucket.Duration)) { bucket.logger.Tracef("bucket is expired (curr event: %s, bucket deadline: %s), kill", d, lastTs.Add(bucket.Duration)) @@ -298,7 +298,7 @@ func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buc BucketPourCache["OK"] = append(BucketPourCache["OK"], evt.(types.Event)) } //find the relevant holders (scenarios) - for idx := range len(holders) { + for idx := range holders { //for idx, holder := range holders { //evaluate bucket's condition diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 3ee067177ef..39b0e6a0ec4 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -19,66 +19,77 @@ import ( // SourceFromEvent extracts and formats a valid models.Source object from an Event func SourceFromEvent(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { - srcs := make(map[string]models.Source) /*if it's already an overflow, we have properly formatted sources. 
we can just twitch them to reflect the requested scope*/ if evt.Type == types.OVFLW { - for k, v := range evt.Overflow.Sources { - /*the scopes are already similar, nothing to do*/ - if leaky.scopeType.Scope == *v.Scope { - srcs[k] = v - continue - } + return overflowEventSources(evt, leaky) + } - /*The bucket requires a decision on scope Range */ - if leaky.scopeType.Scope == types.Range { - /*the original bucket was target IPs, check that we do have range*/ - if *v.Scope == types.Ip { - src := models.Source{} - src.AsName = v.AsName - src.AsNumber = v.AsNumber - src.Cn = v.Cn - src.Latitude = v.Latitude - src.Longitude = v.Longitude - src.Range = v.Range - src.Value = new(string) - src.Scope = new(string) - *src.Scope = leaky.scopeType.Scope - *src.Value = "" - - if v.Range != "" { - *src.Value = v.Range - } + return eventSources(evt, leaky) +} - if leaky.scopeType.RunTimeFilter != nil { - retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) - if err != nil { - return srcs, fmt.Errorf("while running scope filter: %w", err) - } +func overflowEventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { + srcs := make(map[string]models.Source) - value, ok := retValue.(string) - if !ok { - value = "" - } + for k, v := range evt.Overflow.Sources { + /*the scopes are already similar, nothing to do*/ + if leaky.scopeType.Scope == *v.Scope { + srcs[k] = v + continue + } - src.Value = &value + /*The bucket requires a decision on scope Range */ + if leaky.scopeType.Scope == types.Range { + /*the original bucket was target IPs, check that we do have range*/ + if *v.Scope == types.Ip { + src := models.Source{} + src.AsName = v.AsName + src.AsNumber = v.AsNumber + src.Cn = v.Cn + src.Latitude = v.Latitude + src.Longitude = v.Longitude + src.Range = v.Range + src.Value = new(string) + src.Scope = new(string) + *src.Scope = leaky.scopeType.Scope + *src.Value = "" + + if 
v.Range != "" { + *src.Value = v.Range + } + + if leaky.scopeType.RunTimeFilter != nil { + retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) + if err != nil { + return srcs, fmt.Errorf("while running scope filter: %w", err) } - if *src.Value != "" { - srcs[*src.Value] = src - } else { - log.Warningf("bucket %s requires scope Range, but none was provided. It seems that the %s wasn't enriched to include its range.", leaky.Name, *v.Value) + value, ok := retValue.(string) + if !ok { + value = "" } + + src.Value = &value + } + + if *src.Value != "" { + srcs[*src.Value] = src } else { - log.Warningf("bucket %s requires scope Range, but can't extrapolate from %s (%s)", - leaky.Name, *v.Scope, *v.Value) + log.Warningf("bucket %s requires scope Range, but none was provided. It seems that the %s wasn't enriched to include its range.", leaky.Name, *v.Value) } + } else { + log.Warningf("bucket %s requires scope Range, but can't extrapolate from %s (%s)", + leaky.Name, *v.Scope, *v.Value) } } - - return srcs, nil } + return srcs, nil +} + +func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, error) { + srcs := make(map[string]models.Source) + src := models.Source{} switch leaky.scopeType.Scope { @@ -220,7 +231,7 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { raw, err := evt.Time.MarshalText() if err != nil { - log.Warningf("while marshaling time '%s' : %s", evt.Time.String(), err) + log.Warningf("while serializing time '%s' : %s", evt.Time.String(), err) } else { *ovflwEvent.Timestamp = string(raw) } @@ -236,9 +247,10 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { // alertFormatSource iterates over the queue to collect sources func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Source, string, error) { - var sources = make(map[string]models.Source) var source_type string + sources := 
make(map[string]models.Source) + log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, leaky.scopeType.Scope, leaky.scopeType.Filter) for _, evt := range queue.Queue { @@ -274,12 +286,12 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { */ start_at, err := leaky.First_ts.MarshalText() if err != nil { - log.Warningf("failed to marshal start ts %s : %s", leaky.First_ts.String(), err) + log.Warningf("failed to serialize start ts %s : %s", leaky.First_ts.String(), err) } stop_at, err := leaky.Ovflw_ts.MarshalText() if err != nil { - log.Warningf("failed to marshal ovflw ts %s : %s", leaky.First_ts.String(), err) + log.Warningf("failed to serialize ovflw ts %s : %s", leaky.First_ts.String(), err) } capacity := int32(leaky.Capacity) @@ -299,6 +311,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { StopAt: &stopAt, Simulated: &leaky.Simulated, } + if leaky.BucketConfig == nil { return runtimeAlert, errors.New("leaky.BucketConfig is nil") } diff --git a/pkg/leakybucket/timemachine.go b/pkg/leakybucket/timemachine.go index e72bb1a464c..34073d1cc5c 100644 --- a/pkg/leakybucket/timemachine.go +++ b/pkg/leakybucket/timemachine.go @@ -24,7 +24,7 @@ func TimeMachinePour(l *Leaky, msg types.Event) { err = d.UnmarshalText([]byte(msg.MarshaledTime)) if err != nil { - log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) + log.Warningf("Failed to parse event time (%s) : %v", msg.MarshaledTime, err) return } diff --git a/pkg/leakybucket/trigger.go b/pkg/leakybucket/trigger.go index b6af1431888..d13e57856f9 100644 --- a/pkg/leakybucket/trigger.go +++ b/pkg/leakybucket/trigger.go @@ -16,25 +16,31 @@ func (t *Trigger) OnBucketPour(b *BucketFactory) func(types.Event, *Leaky) *type // Pour makes the bucket overflow all the time // TriggerPour unconditionally overflows return func(msg types.Event, l *Leaky) *types.Event { + now := time.Now().UTC() + if l.Mode == 
types.TIMEMACHINE { var d time.Time + err := d.UnmarshalText([]byte(msg.MarshaledTime)) if err != nil { - log.Warningf("Failed unmarshaling event time (%s) : %v", msg.MarshaledTime, err) - d = time.Now().UTC() + log.Warningf("Failed to parse event time (%s) : %v", msg.MarshaledTime, err) + + d = now } + l.logger.Debugf("yay timemachine overflow time : %s --> %s", d, msg.MarshaledTime) l.Last_ts = d l.First_ts = d l.Ovflw_ts = d } else { - l.Last_ts = time.Now().UTC() - l.First_ts = time.Now().UTC() - l.Ovflw_ts = time.Now().UTC() + l.Last_ts = now + l.First_ts = now + l.Ovflw_ts = now } + l.Total_count = 1 - l.logger.Infof("Bucket overflow") + l.logger.Debug("Bucket overflow") l.Queue.Add(msg) l.Out <- l.Queue diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 9fa3b4b3f9a..5c395185b20 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -1,6 +1,7 @@ package longpollclient import ( + "context" "encoding/json" "errors" "fmt" @@ -50,7 +51,7 @@ var errUnauthorized = errors.New("user is not authorized to use PAPI") const timeoutMessage = "no events before timeout" -func (c *LongPollClient) doQuery() (*http.Response, error) { +func (c *LongPollClient) doQuery(ctx context.Context) (*http.Response, error) { logger := c.logger.WithField("method", "doQuery") query := c.url.Query() query.Set("since_time", fmt.Sprintf("%d", c.since)) @@ -59,7 +60,7 @@ func (c *LongPollClient) doQuery() (*http.Response, error) { logger.Debugf("Query parameters: %s", c.url.RawQuery) - req, err := http.NewRequest(http.MethodGet, c.url.String(), nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.url.String(), nil) if err != nil { logger.Errorf("failed to create request: %s", err) return nil, err @@ -73,12 +74,10 @@ func (c *LongPollClient) doQuery() (*http.Response, error) { return resp, nil } -func (c *LongPollClient) poll() error { - +func (c *LongPollClient) poll(ctx context.Context) error { logger := 
c.logger.WithField("method", "poll") - resp, err := c.doQuery() - + resp, err := c.doQuery(ctx) if err != nil { return err } @@ -95,7 +94,7 @@ func (c *LongPollClient) poll() error { logger.Errorf("failed to read response body: %s", err) return err } - logger.Errorf(string(bodyContent)) + logger.Error(string(bodyContent)) return errUnauthorized } return fmt.Errorf("unexpected status code: %d", resp.StatusCode) @@ -122,7 +121,7 @@ func (c *LongPollClient) poll() error { logger.Tracef("got response: %+v", pollResp) - if len(pollResp.ErrorMessage) > 0 { + if pollResp.ErrorMessage != "" { if pollResp.ErrorMessage == timeoutMessage { logger.Debugf("got timeout message") return nil @@ -148,7 +147,7 @@ func (c *LongPollClient) poll() error { } } -func (c *LongPollClient) pollEvents() error { +func (c *LongPollClient) pollEvents(ctx context.Context) error { for { select { case <-c.t.Dying(): @@ -156,7 +155,7 @@ func (c *LongPollClient) pollEvents() error { return nil default: c.logger.Debug("Polling PAPI") - err := c.poll() + err := c.poll(ctx) if err != nil { c.logger.Errorf("failed to poll: %s", err) if errors.Is(err, errUnauthorized) { @@ -170,12 +169,12 @@ func (c *LongPollClient) pollEvents() error { } } -func (c *LongPollClient) Start(since time.Time) chan Event { +func (c *LongPollClient) Start(ctx context.Context, since time.Time) chan Event { c.logger.Infof("starting polling client") c.c = make(chan Event) c.since = since.Unix() * 1000 c.timeout = "45" - c.t.Go(c.pollEvents) + c.t.Go(func() error {return c.pollEvents(ctx)}) return c.c } @@ -184,11 +183,11 @@ func (c *LongPollClient) Stop() error { return nil } -func (c *LongPollClient) PullOnce(since time.Time) ([]Event, error) { +func (c *LongPollClient) PullOnce(ctx context.Context, since time.Time) ([]Event, error) { c.logger.Debug("Pulling PAPI once") c.since = since.Unix() * 1000 c.timeout = "1" - resp, err := c.doQuery() + resp, err := c.doQuery(ctx) if err != nil { return nil, err } @@ -209,7 +208,7 @@ func 
(c *LongPollClient) PullOnce(since time.Time) ([]Event, error) { c.logger.Tracef("got response: %+v", pollResp) - if len(pollResp.ErrorMessage) > 0 { + if pollResp.ErrorMessage != "" { if pollResp.ErrorMessage == timeoutMessage { c.logger.Debugf("got timeout message") break diff --git a/pkg/metabase/api.go b/pkg/metabase/api.go index 387e8d151e0..08e10188678 100644 --- a/pkg/metabase/api.go +++ b/pkg/metabase/api.go @@ -9,7 +9,7 @@ import ( "github.com/dghubble/sling" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" ) type MBClient struct { @@ -38,7 +38,7 @@ var ( func NewMBClient(url string) (*MBClient, error) { httpClient := &http.Client{Timeout: 20 * time.Second} return &MBClient{ - CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", cwversion.UserAgent()), + CTX: sling.New().Client(httpClient).Base(url).Set("User-Agent", useragent.Default()), Client: httpClient, }, nil } diff --git a/pkg/metabase/metabase.go b/pkg/metabase/metabase.go index 837bab796d5..324a05666a1 100644 --- a/pkg/metabase/metabase.go +++ b/pkg/metabase/metabase.go @@ -70,12 +70,12 @@ func (m *Metabase) Init(containerName string, image string) error { switch m.Config.Database.Type { case "mysql": - return fmt.Errorf("'mysql' is not supported yet for cscli dashboard") + return errors.New("'mysql' is not supported yet for cscli dashboard") //DBConnectionURI = fmt.Sprintf("MB_DB_CONNECTION_URI=mysql://%s:%d/%s?user=%s&password=%s&allowPublicKeyRetrieval=true", remoteDBAddr, m.Config.Database.Port, m.Config.Database.DbName, m.Config.Database.User, m.Config.Database.Password) case "sqlite": m.InternalDBURL = metabaseSQLiteDBURL case "postgresql", "postgres", "pgsql": - return fmt.Errorf("'postgresql' is not supported yet by cscli dashboard") + return errors.New("'postgresql' is not supported yet by cscli dashboard") default: return fmt.Errorf("database '%s' not supported", 
m.Config.Database.Type) } diff --git a/pkg/models/generate.go b/pkg/models/generate.go index ccacc409ab5..502d6f3d2cf 100644 --- a/pkg/models/generate.go +++ b/pkg/models/generate.go @@ -1,4 +1,4 @@ package models -//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.30.5 generate model --spec=./localapi_swagger.yaml --target=../ +//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate model --spec=./localapi_swagger.yaml --target=../ diff --git a/pkg/modelscapi/add_signals_request.go b/pkg/modelscapi/add_signals_request.go index 62fe590cb79..7bfe6ae80e0 100644 --- a/pkg/modelscapi/add_signals_request.go +++ b/pkg/modelscapi/add_signals_request.go @@ -56,6 +56,11 @@ func (m AddSignalsRequest) ContextValidate(ctx context.Context, formats strfmt.R for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/add_signals_request_item.go b/pkg/modelscapi/add_signals_request_item.go index f9c865b4c68..5f63b542d5a 100644 --- a/pkg/modelscapi/add_signals_request_item.go +++ b/pkg/modelscapi/add_signals_request_item.go @@ -65,6 +65,9 @@ type AddSignalsRequestItem struct { // stop at // Required: true StopAt *string `json:"stop_at"` + + // UUID of the alert + UUID string `json:"uuid,omitempty"` } // Validate validates this add signals request item @@ -257,6 +260,11 @@ func (m *AddSignalsRequestItem) contextValidateContext(ctx context.Context, form for i := 0; i < len(m.Context); i++ { if m.Context[i] != nil { + + if swag.IsZero(m.Context[i]) { // not required + return nil + } + if err := m.Context[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("context" + "." 
+ strconv.Itoa(i)) @@ -289,6 +297,7 @@ func (m *AddSignalsRequestItem) contextValidateDecisions(ctx context.Context, fo func (m *AddSignalsRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("source") diff --git a/pkg/modelscapi/add_signals_request_item_decisions.go b/pkg/modelscapi/add_signals_request_item_decisions.go index 54e123ab3f8..11ed27a496d 100644 --- a/pkg/modelscapi/add_signals_request_item_decisions.go +++ b/pkg/modelscapi/add_signals_request_item_decisions.go @@ -54,6 +54,11 @@ func (m AddSignalsRequestItemDecisions) ContextValidate(ctx context.Context, for for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/add_signals_request_item_decisions_item.go b/pkg/modelscapi/add_signals_request_item_decisions_item.go index 34dfeb5bce5..797c517e33f 100644 --- a/pkg/modelscapi/add_signals_request_item_decisions_item.go +++ b/pkg/modelscapi/add_signals_request_item_decisions_item.go @@ -49,6 +49,9 @@ type AddSignalsRequestItemDecisionsItem struct { // until Until string `json:"until,omitempty"` + // UUID of the decision + UUID string `json:"uuid,omitempty"` + // the value of the decision scope : an IP, a range, a username, etc // Required: true Value *string `json:"value"` diff --git a/pkg/modelscapi/centralapi_swagger.yaml b/pkg/modelscapi/centralapi_swagger.yaml new file mode 100644 index 00000000000..c75233809c8 --- /dev/null +++ b/pkg/modelscapi/centralapi_swagger.yaml @@ -0,0 +1,888 @@ +swagger: "2.0" +info: + description: + "API to manage machines using [crowdsec](https://github.com/crowdsecurity/crowdsec)\ + \ and bouncers.\n" + version: 
"2023-01-23T11:16:39Z" + title: "prod-capi-v3" + contact: + name: "Crowdsec team" + url: "https://github.com/crowdsecurity/crowdsec" + email: "support@crowdsec.net" +host: "api.crowdsec.net" +basePath: "/v3" +tags: + - name: "watchers" + description: "Operations about watchers: crowdsec & cscli" + - name: "bouncers" + description: "Operations about decisions : bans, captcha, rate-limit etc." +schemes: + - "https" +paths: + /decisions/delete: + post: + tags: + - "watchers" + summary: "delete decisions" + description: "delete provided decisions" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "DecisionsDeleteRequest" + required: true + schema: + $ref: "#/definitions/DecisionsDeleteRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /decisions/stream: + get: + tags: + - "bouncers" + - "watchers" + summary: "returns list of top decisions" + description: "returns list of top decisions to add or delete" + produces: + - "application/json" + parameters: + - in: query + name: "community_pull" + type: "boolean" + default: true + required: false + description: "Fetch the community blocklist content" + - in: query + name: "additional_pull" + type: "boolean" + default: true + required: false + description: "Fetch additional blocklists content" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/GetDecisionsStreamResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "404": + description: "404 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + options: + consumes: + - "application/json" + produces: + - "application/json" + 
responses: + "200": + description: "200 response" + headers: + Access-Control-Allow-Origin: + type: "string" + Access-Control-Allow-Methods: + type: "string" + Access-Control-Allow-Headers: + type: "string" + /decisions/sync: + post: + tags: + - "watchers" + summary: "sync decisions" + description: "sync provided decisions" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "DecisionsSyncRequest" + required: true + schema: + $ref: "#/definitions/DecisionsSyncRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /metrics: + post: + tags: + - "watchers" + summary: "receive metrics about enrolled machines and bouncers in APIL" + description: "receive metrics about enrolled machines and bouncers in APIL" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "MetricsRequest" + required: true + schema: + $ref: "#/definitions/MetricsRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /signals: + post: + tags: + - "watchers" + summary: "Push signals" + description: "to push signals" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "AddSignalsRequest" + required: true + schema: + $ref: "#/definitions/AddSignalsRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: 
"#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /watchers: + post: + tags: + - "watchers" + summary: "Register watcher" + description: "Register a watcher" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "RegisterRequest" + required: true + schema: + $ref: "#/definitions/RegisterRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + /watchers/enroll: + post: + tags: + - "watchers" + summary: "watcher enrollment" + description: "watcher enrollment : enroll watcher to crowdsec backoffice account" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "EnrollRequest" + required: true + schema: + $ref: "#/definitions/EnrollRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "403": + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - UserPoolAuthorizer: [] + /watchers/login: + post: + tags: + - "watchers" + summary: "watcher login" + description: "Sign-in to get a valid token" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "LoginRequest" + required: true + schema: + $ref: "#/definitions/LoginRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/LoginResponse" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + "403": + 
description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + /watchers/reset: + post: + tags: + - "watchers" + summary: "Reset Password" + description: "to reset a watcher password" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - in: "body" + name: "ResetPasswordRequest" + required: true + schema: + $ref: "#/definitions/ResetPasswordRequest" + responses: + "200": + description: "200 response" + schema: + $ref: "#/definitions/SuccessResponse" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + "400": + description: "400 response" + schema: + $ref: "#/definitions/ErrorResponse" + "500": + description: "500 response" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + "403": + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + "404": + description: "404 response" + headers: + Content-type: + type: "string" + Access-Control-Allow-Origin: + type: "string" + options: + consumes: + - "application/json" + produces: + - "application/json" + responses: + "200": + description: "200 response" + headers: + Access-Control-Allow-Origin: + type: "string" + Access-Control-Allow-Methods: + type: "string" + Access-Control-Allow-Headers: + type: "string" +securityDefinitions: + UserPoolAuthorizer: + type: "apiKey" + name: "Authorization" + in: "header" + x-amazon-apigateway-authtype: "cognito_user_pools" +definitions: + DecisionsDeleteRequest: + title: "delete decisions" + type: "array" + description: "delete decision model" + items: + $ref: "#/definitions/DecisionsDeleteRequestItem" + DecisionsSyncRequestItem: + type: "object" + required: + - "message" + - "scenario" + - "scenario_hash" + - "scenario_version" + - "source" + - "start_at" + - "stop_at" + properties: + scenario_trust: + type: "string" + scenario_hash: + type: "string" + scenario: + type: "string" + alert_id: 
+ type: "integer" + created_at: + type: "string" + machine_id: + type: "string" + decisions: + $ref: "#/definitions/DecisionsSyncRequestItemDecisions" + source: + $ref: "#/definitions/DecisionsSyncRequestItemSource" + scenario_version: + type: "string" + message: + type: "string" + description: "a human readable message" + start_at: + type: "string" + stop_at: + type: "string" + title: "Signal" + AddSignalsRequestItem: + type: "object" + required: + - "message" + - "scenario" + - "scenario_hash" + - "scenario_version" + - "source" + - "start_at" + - "stop_at" + properties: + created_at: + type: "string" + machine_id: + type: "string" + source: + $ref: "#/definitions/AddSignalsRequestItemSource" + scenario_version: + type: "string" + message: + type: "string" + description: "a human readable message" + uuid: + type: "string" + description: "UUID of the alert" + start_at: + type: "string" + scenario_trust: + type: "string" + scenario_hash: + type: "string" + scenario: + type: "string" + alert_id: + type: "integer" + context: + type: "array" + items: + type: "object" + properties: + value: + type: "string" + key: + type: "string" + decisions: + $ref: "#/definitions/AddSignalsRequestItemDecisions" + stop_at: + type: "string" + title: "Signal" + DecisionsSyncRequest: + title: "sync decisions request" + type: "array" + description: "sync decision model" + items: + $ref: "#/definitions/DecisionsSyncRequestItem" + LoginRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + minLength: 48 + maxLength: 48 + pattern: "^[a-zA-Z0-9]+$" + scenarios: + type: "array" + description: "all scenarios installed" + items: + type: "string" + title: "login request" + description: "Login request model" + GetDecisionsStreamResponseNewItem: + type: "object" + 
required: + - "scenario" + - "scope" + - "decisions" + properties: + scenario: + type: "string" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + decisions: + type: array + items: + type: object + required: + - value + - duration + properties: + duration: + type: "string" + value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "New Decisions" + GetDecisionsStreamResponseDeletedItem: + type: object + required: + - scope + - decisions + properties: + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + decisions: + type: array + items: + type: string + BlocklistLink: + type: object + required: + - name + - url + - remediation + - scope + - duration + properties: + name: + type: string + description: "the name of the blocklist" + url: + type: string + description: "the url from which the blocklist content can be downloaded" + remediation: + type: string + description: "the remediation that should be used for the blocklist" + scope: + type: string + description: "the scope of decisions in the blocklist" + duration: + type: string + AddSignalsRequestItemDecisionsItem: + type: "object" + required: + - "duration" + - "id" + - "origin" + - "scenario" + - "scope" + - "type" + - "value" + properties: + duration: + type: "string" + uuid: + type: "string" + description: "UUID of the decision" + scenario: + type: "string" + origin: + type: "string" + description: "the origin of the decision : cscli, crowdsec" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + simulated: + type: "boolean" + until: + type: "string" + id: + type: "integer" + description: "(only relevant for GET ops) the unique id" + type: + type: "string" + description: + "the type of decision, might be 'ban', 'captcha' or 
something\ + \ custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL." + value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "Decision" + EnrollRequest: + type: "object" + required: + - "attachment_key" + properties: + name: + type: "string" + description: "The name that will be display in the console for the instance" + overwrite: + type: "boolean" + description: "To force enroll the instance" + attachment_key: + type: "string" + description: + "attachment_key is generated in your crowdsec backoffice account\ + \ and allows you to enroll your machines to your BO account" + pattern: "^[a-zA-Z0-9]+$" + tags: + type: "array" + description: "Tags to apply on the console for the instance" + items: + type: "string" + title: "enroll request" + description: "enroll request model" + ResetPasswordRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + minLength: 48 + maxLength: 48 + pattern: "^[a-zA-Z0-9]+$" + title: "resetPassword" + description: "ResetPassword request model" + MetricsRequestBouncersItem: + type: "object" + properties: + last_pull: + type: "string" + description: "last bouncer pull date" + custom_name: + type: "string" + description: "bouncer name" + name: + type: "string" + description: "bouncer type (firewall, php...)" + version: + type: "string" + description: "bouncer version" + title: "MetricsBouncerInfo" + AddSignalsRequestItemSource: + type: "object" + required: + - "scope" + - "value" + properties: + scope: + type: "string" + description: "the scope of a source : ip,range,username,etc" + ip: + type: "string" + description: "provided as a convenience when the source is an IP" + latitude: + type: "number" + format: "float" + as_number: + 
type: "string" + description: "provided as a convenience when the source is an IP" + range: + type: "string" + description: "provided as a convenience when the source is an IP" + cn: + type: "string" + value: + type: "string" + description: "the value of a source : the ip, the range, the username,etc" + as_name: + type: "string" + description: "provided as a convenience when the source is an IP" + longitude: + type: "number" + format: "float" + title: "Source" + DecisionsSyncRequestItemDecisions: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/DecisionsSyncRequestItemDecisionsItem" + RegisterRequest: + type: "object" + required: + - "machine_id" + - "password" + properties: + password: + type: "string" + description: "Password, should respect the password policy (link to add)" + machine_id: + type: "string" + description: "machine_id is a (username) generated by crowdsec" + pattern: "^[a-zA-Z0-9]+$" + title: "register request" + description: "Register request model" + SuccessResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "message" + title: "success response" + description: "success response return by the API" + LoginResponse: + type: "object" + properties: + code: + type: "integer" + expire: + type: "string" + token: + type: "string" + title: "login response" + description: "Login request model" + DecisionsSyncRequestItemDecisionsItem: + type: "object" + required: + - "duration" + - "id" + - "origin" + - "scenario" + - "scope" + - "type" + - "value" + properties: + duration: + type: "string" + scenario: + type: "string" + origin: + type: "string" + description: "the origin of the decision : cscli, crowdsec" + scope: + type: "string" + description: + "the scope of decision : does it apply to an IP, a range, a username,\ + \ etc" + simulated: + type: "boolean" + until: + type: "string" + id: + type: "integer" + description: "(only relevant for GET ops) the unique id" + type: + 
type: "string" + description: + "the type of decision, might be 'ban', 'captcha' or something\ + \ custom. Ignored when watcher (cscli/crowdsec) is pushing to APIL." + value: + type: "string" + description: + "the value of the decision scope : an IP, a range, a username,\ + \ etc" + title: "Decision" + GetDecisionsStreamResponse: + type: "object" + properties: + new: + $ref: "#/definitions/GetDecisionsStreamResponseNew" + deleted: + $ref: "#/definitions/GetDecisionsStreamResponseDeleted" + links: + $ref: "#/definitions/GetDecisionsStreamResponseLinks" + title: "get decisions stream response" + description: "get decision response model" + DecisionsSyncRequestItemSource: + type: "object" + required: + - "scope" + - "value" + properties: + scope: + type: "string" + description: "the scope of a source : ip,range,username,etc" + ip: + type: "string" + description: "provided as a convenience when the source is an IP" + latitude: + type: "number" + format: "float" + as_number: + type: "string" + description: "provided as a convenience when the source is an IP" + range: + type: "string" + description: "provided as a convenience when the source is an IP" + cn: + type: "string" + value: + type: "string" + description: "the value of a source : the ip, the range, the username,etc" + as_name: + type: "string" + description: "provided as a convenience when the source is an IP" + longitude: + type: "number" + format: "float" + title: "Source" + AddSignalsRequestItemDecisions: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/AddSignalsRequestItemDecisionsItem" + MetricsRequestMachinesItem: + type: "object" + properties: + last_update: + type: "string" + description: "last agent update date" + name: + type: "string" + description: "agent name" + last_push: + type: "string" + description: "last agent push date" + version: + type: "string" + description: "agent version" + title: "MetricsAgentInfo" + MetricsRequest: + type: "object" + required: + - "bouncers" 
+ - "machines" + properties: + bouncers: + type: "array" + items: + $ref: "#/definitions/MetricsRequestBouncersItem" + machines: + type: "array" + items: + $ref: "#/definitions/MetricsRequestMachinesItem" + title: "metrics" + description: "push metrics model" + ErrorResponse: + type: "object" + required: + - "message" + properties: + message: + type: "string" + description: "Error message" + errors: + type: "string" + description: "more detail on individual errors" + title: "error response" + description: "error response return by the API" + AddSignalsRequest: + title: "add signals request" + type: "array" + description: "All signals request model" + items: + $ref: "#/definitions/AddSignalsRequestItem" + DecisionsDeleteRequestItem: + type: "string" + title: "decisionsIDs" + GetDecisionsStreamResponseNew: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/GetDecisionsStreamResponseNewItem" + GetDecisionsStreamResponseDeleted: + title: "Decisions list" + type: "array" + items: + $ref: "#/definitions/GetDecisionsStreamResponseDeletedItem" + GetDecisionsStreamResponseLinks: + title: "Decisions list" + type: "object" + properties: + blocklists: + type: array + items: + $ref: "#/definitions/BlocklistLink" + diff --git a/pkg/modelscapi/decisions_delete_request.go b/pkg/modelscapi/decisions_delete_request.go index e8718835027..0c93558adf1 100644 --- a/pkg/modelscapi/decisions_delete_request.go +++ b/pkg/modelscapi/decisions_delete_request.go @@ -11,6 +11,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" ) // DecisionsDeleteRequest delete decisions @@ -49,6 +50,10 @@ func (m DecisionsDeleteRequest) ContextValidate(ctx context.Context, formats str for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git 
a/pkg/modelscapi/decisions_sync_request.go b/pkg/modelscapi/decisions_sync_request.go index e3a95162519..c087d39ff62 100644 --- a/pkg/modelscapi/decisions_sync_request.go +++ b/pkg/modelscapi/decisions_sync_request.go @@ -56,6 +56,11 @@ func (m DecisionsSyncRequest) ContextValidate(ctx context.Context, formats strfm for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/decisions_sync_request_item.go b/pkg/modelscapi/decisions_sync_request_item.go index 5139ea2de4b..460fe4d430e 100644 --- a/pkg/modelscapi/decisions_sync_request_item.go +++ b/pkg/modelscapi/decisions_sync_request_item.go @@ -231,6 +231,7 @@ func (m *DecisionsSyncRequestItem) contextValidateDecisions(ctx context.Context, func (m *DecisionsSyncRequestItem) contextValidateSource(ctx context.Context, formats strfmt.Registry) error { if m.Source != nil { + if err := m.Source.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("source") diff --git a/pkg/modelscapi/decisions_sync_request_item_decisions.go b/pkg/modelscapi/decisions_sync_request_item_decisions.go index 76316e43c5e..bdc8e77e2b6 100644 --- a/pkg/modelscapi/decisions_sync_request_item_decisions.go +++ b/pkg/modelscapi/decisions_sync_request_item_decisions.go @@ -54,6 +54,11 @@ func (m DecisionsSyncRequestItemDecisions) ContextValidate(ctx context.Context, for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/generate.go b/pkg/modelscapi/generate.go new file mode 100644 index 00000000000..66dc2a34b7e --- /dev/null +++ 
b/pkg/modelscapi/generate.go @@ -0,0 +1,4 @@ +package modelscapi + +//go:generate go run -mod=mod github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0 generate model --spec=./centralapi_swagger.yaml --target=../ --model-package=modelscapi + diff --git a/pkg/modelscapi/get_decisions_stream_response.go b/pkg/modelscapi/get_decisions_stream_response.go index af19b85c4d3..5ebf29c5d93 100644 --- a/pkg/modelscapi/get_decisions_stream_response.go +++ b/pkg/modelscapi/get_decisions_stream_response.go @@ -144,6 +144,11 @@ func (m *GetDecisionsStreamResponse) contextValidateDeleted(ctx context.Context, func (m *GetDecisionsStreamResponse) contextValidateLinks(ctx context.Context, formats strfmt.Registry) error { if m.Links != nil { + + if swag.IsZero(m.Links) { // not required + return nil + } + if err := m.Links.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("links") diff --git a/pkg/modelscapi/get_decisions_stream_response_deleted.go b/pkg/modelscapi/get_decisions_stream_response_deleted.go index d218bf87e4e..78292860f22 100644 --- a/pkg/modelscapi/get_decisions_stream_response_deleted.go +++ b/pkg/modelscapi/get_decisions_stream_response_deleted.go @@ -54,6 +54,11 @@ func (m GetDecisionsStreamResponseDeleted) ContextValidate(ctx context.Context, for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_links.go b/pkg/modelscapi/get_decisions_stream_response_links.go index 85cc9af9b48..6b9054574f1 100644 --- a/pkg/modelscapi/get_decisions_stream_response_links.go +++ b/pkg/modelscapi/get_decisions_stream_response_links.go @@ -82,6 +82,11 @@ func (m *GetDecisionsStreamResponseLinks) contextValidateBlocklists(ctx context. 
for i := 0; i < len(m.Blocklists); i++ { if m.Blocklists[i] != nil { + + if swag.IsZero(m.Blocklists[i]) { // not required + return nil + } + if err := m.Blocklists[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("blocklists" + "." + strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_new.go b/pkg/modelscapi/get_decisions_stream_response_new.go index e9525bf6fa7..8e09f1b20e7 100644 --- a/pkg/modelscapi/get_decisions_stream_response_new.go +++ b/pkg/modelscapi/get_decisions_stream_response_new.go @@ -54,6 +54,11 @@ func (m GetDecisionsStreamResponseNew) ContextValidate(ctx context.Context, form for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/pkg/modelscapi/get_decisions_stream_response_new_item.go b/pkg/modelscapi/get_decisions_stream_response_new_item.go index a3592d0ab61..77cc06732ce 100644 --- a/pkg/modelscapi/get_decisions_stream_response_new_item.go +++ b/pkg/modelscapi/get_decisions_stream_response_new_item.go @@ -119,6 +119,11 @@ func (m *GetDecisionsStreamResponseNewItem) contextValidateDecisions(ctx context for i := 0; i < len(m.Decisions); i++ { if m.Decisions[i] != nil { + + if swag.IsZero(m.Decisions[i]) { // not required + return nil + } + if err := m.Decisions[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("decisions" + "." 
+ strconv.Itoa(i)) diff --git a/pkg/modelscapi/metrics_request.go b/pkg/modelscapi/metrics_request.go index d5b7d058fc1..5d663cf1750 100644 --- a/pkg/modelscapi/metrics_request.go +++ b/pkg/modelscapi/metrics_request.go @@ -126,6 +126,11 @@ func (m *MetricsRequest) contextValidateBouncers(ctx context.Context, formats st for i := 0; i < len(m.Bouncers); i++ { if m.Bouncers[i] != nil { + + if swag.IsZero(m.Bouncers[i]) { // not required + return nil + } + if err := m.Bouncers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("bouncers" + "." + strconv.Itoa(i)) @@ -146,6 +151,11 @@ func (m *MetricsRequest) contextValidateMachines(ctx context.Context, formats st for i := 0; i < len(m.Machines); i++ { if m.Machines[i] != nil { + + if swag.IsZero(m.Machines[i]) { // not required + return nil + } + if err := m.Machines[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("machines" + "." 
+ strconv.Itoa(i)) diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go index 748a466d7c3..40c8de39da5 100644 --- a/pkg/parser/enrich_date.go +++ b/pkg/parser/enrich_date.go @@ -18,7 +18,7 @@ func parseDateWithFormat(date, format string) (string, time.Time) { } retstr, err := t.MarshalText() if err != nil { - log.Warningf("Failed marshaling '%v'", t) + log.Warningf("Failed to serialize '%v'", t) return "", time.Time{} } return string(retstr), t @@ -98,7 +98,7 @@ func ParseDate(in string, p *types.Event, plog *log.Entry) (map[string]string, e now := time.Now().UTC() retstr, err := now.MarshalText() if err != nil { - plog.Warning("Failed marshaling current time") + plog.Warning("Failed to serialize current time") return ret, err } ret["MarshaledTime"] = string(retstr) diff --git a/pkg/parser/enrich_unmarshal.go b/pkg/parser/enrich_unmarshal.go index 7ff91b70aea..dbdd9d3f583 100644 --- a/pkg/parser/enrich_unmarshal.go +++ b/pkg/parser/enrich_unmarshal.go @@ -11,7 +11,7 @@ import ( func unmarshalJSON(field string, p *types.Event, plog *log.Entry) (map[string]string, error) { err := json.Unmarshal([]byte(p.Line.Raw), &p.Unmarshaled) if err != nil { - plog.Errorf("could not unmarshal JSON: %s", err) + plog.Errorf("could not parse JSON: %s", err) return nil, err } plog.Tracef("unmarshaled JSON: %+v", p.Unmarshaled) diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 0542c69c049..269d51a1ba2 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -132,7 +132,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } if err = yaml.UnmarshalStrict(out.Bytes(), &parser_configs); err != nil { - return fmt.Errorf("failed unmarshaling %s: %w", parser_cfg_file, err) + return fmt.Errorf("failed to parse %s: %w", parser_cfg_file, err) } pnodes, err = LoadStages(parser_configs, pctx, ectx) diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index fe538023b61..b98db350254 100644 --- 
a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -21,7 +21,7 @@ import ( log "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v2" - "github.com/crowdsecurity/crowdsec/pkg/cwversion" + "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" ) @@ -85,12 +85,12 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ( log.Tracef("no version in %s, assuming '1.0'", node.Name) node.FormatVersion = "1.0" } - ok, err := cwversion.Satisfies(node.FormatVersion, cwversion.Constraint_parser) + ok, err := constraint.Satisfies(node.FormatVersion, constraint.Parser) if err != nil { return nil, fmt.Errorf("failed to check version : %s", err) } if !ok { - log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, cwversion.Constraint_parser) + log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, constraint.Parser) continue } diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go index 280d122ecc1..f0f26a06645 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -43,7 +43,7 @@ func Init(c map[string]interface{}) (*UnixParserCtx, error) { } r.DataFolder = c["data"].(string) for _, f := range files { - if strings.Contains(f.Name(), ".") { + if strings.Contains(f.Name(), ".") || f.IsDir() { continue } if err := r.Grok.AddFromFile(filepath.Join(c["patterns"].(string), f.Name())); err != nil { @@ -66,21 +66,20 @@ func NewParsers(hub *cwhub.Hub) *Parsers { } for _, itemType := range []string{cwhub.PARSERS, cwhub.POSTOVERFLOWS} { - for _, hubParserItem := range hub.GetItemMap(itemType) { - if hubParserItem.State.Installed { - stagefile := Stagefile{ - Filename: hubParserItem.State.LocalPath, - Stage: hubParserItem.Stage, - } - if itemType == cwhub.PARSERS { - parsers.StageFiles = append(parsers.StageFiles, stagefile) - } - if itemType == cwhub.POSTOVERFLOWS { - parsers.PovfwStageFiles = 
append(parsers.PovfwStageFiles, stagefile) - } + for _, hubParserItem := range hub.GetInstalledByType(itemType, false) { + stagefile := Stagefile{ + Filename: hubParserItem.State.LocalPath, + Stage: hubParserItem.Stage, + } + if itemType == cwhub.PARSERS { + parsers.StageFiles = append(parsers.StageFiles, stagefile) + } + if itemType == cwhub.POSTOVERFLOWS { + parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile) } } } + if parsers.StageFiles != nil { sort.Slice(parsers.StageFiles, func(i, j int) bool { return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename @@ -101,13 +100,17 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { patternsDir := cConfig.ConfigPaths.PatternDir log.Infof("Loading grok library %s", patternsDir) /* load base regexps for two grok parsers */ - parsers.Ctx, err = Init(map[string]interface{}{"patterns": patternsDir, - "data": cConfig.ConfigPaths.DataDir}) + parsers.Ctx, err = Init(map[string]interface{}{ + "patterns": patternsDir, + "data": cConfig.ConfigPaths.DataDir, + }) if err != nil { return parsers, fmt.Errorf("failed to load parser patterns : %v", err) } - parsers.Povfwctx, err = Init(map[string]interface{}{"patterns": patternsDir, - "data": cConfig.ConfigPaths.DataDir}) + parsers.Povfwctx, err = Init(map[string]interface{}{ + "patterns": patternsDir, + "data": cConfig.ConfigPaths.DataDir, + }) if err != nil { return parsers, fmt.Errorf("failed to load postovflw parser patterns : %v", err) } diff --git a/pkg/protobufs/generate.go b/pkg/protobufs/generate.go new file mode 100644 index 00000000000..0e90d65b643 --- /dev/null +++ b/pkg/protobufs/generate.go @@ -0,0 +1,14 @@ +package protobufs + +// Dependencies: +// +// apt install protobuf-compiler +// +// keep this in sync with go.mod +// go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 +// +// Not the same versions as google.golang.org/grpc +// go list -m -versions google.golang.org/grpc/cmd/protoc-gen-go-grpc 
+// go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1 + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative notifier.proto diff --git a/pkg/protobufs/notifier.pb.go b/pkg/protobufs/notifier.pb.go index b5dc8113568..8c4754da773 100644 --- a/pkg/protobufs/notifier.pb.go +++ b/pkg/protobufs/notifier.pb.go @@ -1,16 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.12.4 +// protoc-gen-go v1.34.2 +// protoc v3.21.12 // source: notifier.proto package protobufs import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -198,7 +194,7 @@ func file_notifier_proto_rawDescGZIP() []byte { } var file_notifier_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_notifier_proto_goTypes = []interface{}{ +var file_notifier_proto_goTypes = []any{ (*Notification)(nil), // 0: proto.Notification (*Config)(nil), // 1: proto.Config (*Empty)(nil), // 2: proto.Empty @@ -221,7 +217,7 @@ func file_notifier_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_notifier_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_notifier_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Notification); i { case 0: return &v.state @@ -233,7 +229,7 @@ func file_notifier_proto_init() { return nil } } - file_notifier_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_notifier_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Config); i { case 0: return &v.state @@ -245,7 +241,7 @@ func file_notifier_proto_init() { return nil } } - file_notifier_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + 
file_notifier_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*Empty); i { case 0: return &v.state @@ -277,119 +273,3 @@ func file_notifier_proto_init() { file_notifier_proto_goTypes = nil file_notifier_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// NotifierClient is the client API for Notifier service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type NotifierClient interface { - Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) - Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) -} - -type notifierClient struct { - cc grpc.ClientConnInterface -} - -func NewNotifierClient(cc grpc.ClientConnInterface) NotifierClient { - return ¬ifierClient{cc} -} - -func (c *notifierClient) Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/proto.Notifier/Notify", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *notifierClient) Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/proto.Notifier/Configure", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// NotifierServer is the server API for Notifier service. -type NotifierServer interface { - Notify(context.Context, *Notification) (*Empty, error) - Configure(context.Context, *Config) (*Empty, error) -} - -// UnimplementedNotifierServer can be embedded to have forward compatible implementations. 
-type UnimplementedNotifierServer struct { -} - -func (*UnimplementedNotifierServer) Notify(context.Context, *Notification) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Notify not implemented") -} -func (*UnimplementedNotifierServer) Configure(context.Context, *Config) (*Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") -} - -func RegisterNotifierServer(s *grpc.Server, srv NotifierServer) { - s.RegisterService(&_Notifier_serviceDesc, srv) -} - -func _Notifier_Notify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Notification) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotifierServer).Notify(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Notifier/Notify", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotifierServer).Notify(ctx, req.(*Notification)) - } - return interceptor(ctx, in, info, handler) -} - -func _Notifier_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Config) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NotifierServer).Configure(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Notifier/Configure", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NotifierServer).Configure(ctx, req.(*Config)) - } - return interceptor(ctx, in, info, handler) -} - -var _Notifier_serviceDesc = grpc.ServiceDesc{ - ServiceName: "proto.Notifier", - HandlerType: (*NotifierServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Notify", - Handler: _Notifier_Notify_Handler, - }, - { - MethodName: "Configure", - 
Handler: _Notifier_Configure_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "notifier.proto", -} diff --git a/pkg/protobufs/notifier_grpc.pb.go b/pkg/protobufs/notifier_grpc.pb.go new file mode 100644 index 00000000000..5141e83f98b --- /dev/null +++ b/pkg/protobufs/notifier_grpc.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.21.12 +// source: notifier.proto + +package protobufs + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Notifier_Notify_FullMethodName = "/proto.Notifier/Notify" + Notifier_Configure_FullMethodName = "/proto.Notifier/Configure" +) + +// NotifierClient is the client API for Notifier service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type NotifierClient interface { + Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) + Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) +} + +type notifierClient struct { + cc grpc.ClientConnInterface +} + +func NewNotifierClient(cc grpc.ClientConnInterface) NotifierClient { + return ¬ifierClient{cc} +} + +func (c *notifierClient) Notify(ctx context.Context, in *Notification, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Empty) + err := c.cc.Invoke(ctx, Notifier_Notify_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *notifierClient) Configure(ctx context.Context, in *Config, opts ...grpc.CallOption) (*Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(Empty) + err := c.cc.Invoke(ctx, Notifier_Configure_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NotifierServer is the server API for Notifier service. +// All implementations must embed UnimplementedNotifierServer +// for forward compatibility. +type NotifierServer interface { + Notify(context.Context, *Notification) (*Empty, error) + Configure(context.Context, *Config) (*Empty, error) + mustEmbedUnimplementedNotifierServer() +} + +// UnimplementedNotifierServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedNotifierServer struct{} + +func (UnimplementedNotifierServer) Notify(context.Context, *Notification) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Notify not implemented") +} +func (UnimplementedNotifierServer) Configure(context.Context, *Config) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} +func (UnimplementedNotifierServer) mustEmbedUnimplementedNotifierServer() {} +func (UnimplementedNotifierServer) testEmbeddedByValue() {} + +// UnsafeNotifierServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to NotifierServer will +// result in compilation errors. 
+type UnsafeNotifierServer interface { + mustEmbedUnimplementedNotifierServer() +} + +func RegisterNotifierServer(s grpc.ServiceRegistrar, srv NotifierServer) { + // If the following call pancis, it indicates UnimplementedNotifierServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Notifier_ServiceDesc, srv) +} + +func _Notifier_Notify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Notification) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotifierServer).Notify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Notifier_Notify_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotifierServer).Notify(ctx, req.(*Notification)) + } + return interceptor(ctx, in, info, handler) +} + +func _Notifier_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Config) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NotifierServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Notifier_Configure_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NotifierServer).Configure(ctx, req.(*Config)) + } + return interceptor(ctx, in, info, handler) +} + +// Notifier_ServiceDesc is the grpc.ServiceDesc for Notifier service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Notifier_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Notifier", + HandlerType: (*NotifierServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Notify", + Handler: _Notifier_Notify_Handler, + }, + { + MethodName: "Configure", + Handler: _Notifier_Configure_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "notifier.proto", +} diff --git a/pkg/protobufs/plugin_interface.go b/pkg/protobufs/plugin_interface.go deleted file mode 100644 index fc89b2fa009..00000000000 --- a/pkg/protobufs/plugin_interface.go +++ /dev/null @@ -1,47 +0,0 @@ -package protobufs - -import ( - "context" - - plugin "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" -) - -type Notifier interface { - Notify(ctx context.Context, notification *Notification) (*Empty, error) - Configure(ctx context.Context, config *Config) (*Empty, error) -} - -// This is the implementation of plugin.NotifierPlugin so we can serve/consume this. -type NotifierPlugin struct { - // GRPCPlugin must still implement the Plugin interface - plugin.Plugin - // Concrete implementation, written in Go. This is only used for plugins - // that are written in Go. 
- Impl Notifier -} - -type GRPCClient struct{ client NotifierClient } - -func (m *GRPCClient) Notify(ctx context.Context, notification *Notification) (*Empty, error) { - _, err := m.client.Notify(context.Background(), notification) - return &Empty{}, err -} - -func (m *GRPCClient) Configure(ctx context.Context, config *Config) (*Empty, error) { - _, err := m.client.Configure(context.Background(), config) - return &Empty{}, err -} - -type GRPCServer struct { - Impl Notifier -} - -func (p *NotifierPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - RegisterNotifierServer(s, p.Impl) - return nil -} - -func (p *NotifierPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCClient{client: NewNotifierClient(c)}, nil -} diff --git a/pkg/setup/detect.go b/pkg/setup/detect.go index 55af951bf89..073b221b10c 100644 --- a/pkg/setup/detect.go +++ b/pkg/setup/detect.go @@ -73,9 +73,9 @@ func validateDataSource(opaqueDS DataSourceItem) error { // source must be known - ds := acquisition.GetDataSourceIface(commonDS.Source) - if ds == nil { - return fmt.Errorf("unknown source '%s'", commonDS.Source) + ds, err := acquisition.GetDataSourceIface(commonDS.Source) + if err != nil { + return err } // unmarshal and validate the rest with the specific implementation @@ -545,7 +545,7 @@ func Detect(detectReader io.Reader, opts DetectOptions) (Setup, error) { // } // err = yaml.Unmarshal(svc.AcquisYAML, svc.DataSource) // if err != nil { - // return Setup{}, fmt.Errorf("while unmarshaling datasource for service %s: %w", name, err) + // return Setup{}, fmt.Errorf("while parsing datasource for service %s: %w", name, err) // } // } diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index c744e7d6796..588e74dab54 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -184,7 +184,6 @@ func TestNormalizeVersion(t *testing.T) { } for _, tc := range tests { - tc := tc 
t.Run(tc.version, func(t *testing.T) { t.Parallel() actual := setup.NormalizeVersion(tc.version) @@ -871,7 +870,7 @@ func TestDetectDatasourceValidation(t *testing.T) { datasource: source: wombat`, expected: setup.Setup{Setup: []setup.ServiceSetup{}}, - expectedErr: "invalid datasource for foobar: unknown source 'wombat'", + expectedErr: "invalid datasource for foobar: unknown data source wombat", }, { name: "source is misplaced", config: ` diff --git a/pkg/setup/install.go b/pkg/setup/install.go index fc5bd380fd9..d63a1ee1775 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -40,7 +40,7 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { dec2.KnownFields(true) if err := dec2.Decode(&ret); err != nil { - return ret, fmt.Errorf("while unmarshaling setup file: %w", err) + return ret, fmt.Errorf("while parsing setup file: %w", err) } return ret, nil diff --git a/pkg/setup/units.go b/pkg/setup/units.go index ab1eec6f33e..861513d3f1d 100644 --- a/pkg/setup/units.go +++ b/pkg/setup/units.go @@ -35,7 +35,7 @@ func systemdUnitList() ([]string, error) { for scanner.Scan() { line := scanner.Text() - if len(line) == 0 { + if line == "" { break // the rest of the output is footer } diff --git a/pkg/types/appsec_event.go b/pkg/types/appsec_event.go index dc81c63b344..11d70ad368d 100644 --- a/pkg/types/appsec_event.go +++ b/pkg/types/appsec_event.go @@ -18,7 +18,9 @@ len(evt.Waf.ByTagRx("*CVE*").ByConfidence("high").ByAction("block")) > 1 */ -type MatchedRules []map[string]interface{} +type MatchedRules []MatchedRule + +type MatchedRule map[string]interface{} type AppsecEvent struct { HasInBandMatches, HasOutBandMatches bool @@ -45,6 +47,10 @@ const ( Kind Field = "kind" ) +func NewMatchedRule() *MatchedRule { + return &MatchedRule{} +} + func (w AppsecEvent) GetVar(varName string) string { if w.Vars == nil { return "" diff --git a/pkg/types/event.go b/pkg/types/event.go index 76a447bdc8c..9300626b927 100644 --- a/pkg/types/event.go +++ 
b/pkg/types/event.go @@ -2,6 +2,7 @@ package types import ( "net" + "strings" "time" "github.com/expr-lang/expr/vm" @@ -19,11 +20,11 @@ const ( // Event is the structure representing a runtime event (log or overflow) type Event struct { /* is it a log or an overflow */ - Type int `yaml:"Type,omitempty" json:"Type,omitempty"` //Can be types.LOG (0) or types.OVFLOW (1) - ExpectMode int `yaml:"ExpectMode,omitempty" json:"ExpectMode,omitempty"` //how to buckets should handle event : types.TIMEMACHINE or types.LIVE + Type int `yaml:"Type,omitempty" json:"Type,omitempty"` // Can be types.LOG (0) or types.OVFLOW (1) + ExpectMode int `yaml:"ExpectMode,omitempty" json:"ExpectMode,omitempty"` // how to buckets should handle event : types.TIMEMACHINE or types.LIVE Whitelisted bool `yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` WhitelistReason string `yaml:"WhitelistReason,omitempty" json:"whitelist_reason,omitempty"` - //should add whitelist reason ? + // should add whitelist reason ? 
/* the current stage of the line being parsed */ Stage string `yaml:"Stage,omitempty" json:"Stage,omitempty"` /* original line (produced by acquisition) */ @@ -36,21 +37,39 @@ type Event struct { Unmarshaled map[string]interface{} `yaml:"Unmarshaled,omitempty" json:"Unmarshaled,omitempty"` /* Overflow */ Overflow RuntimeAlert `yaml:"Overflow,omitempty" json:"Alert,omitempty"` - Time time.Time `yaml:"Time,omitempty" json:"Time,omitempty"` //parsed time `json:"-"` `` + Time time.Time `yaml:"Time,omitempty" json:"Time,omitempty"` // parsed time `json:"-"` `` StrTime string `yaml:"StrTime,omitempty" json:"StrTime,omitempty"` StrTimeFormat string `yaml:"StrTimeFormat,omitempty" json:"StrTimeFormat,omitempty"` MarshaledTime string `yaml:"MarshaledTime,omitempty" json:"MarshaledTime,omitempty"` - Process bool `yaml:"Process,omitempty" json:"Process,omitempty"` //can be set to false to avoid processing line + Process bool `yaml:"Process,omitempty" json:"Process,omitempty"` // can be set to false to avoid processing line Appsec AppsecEvent `yaml:"Appsec,omitempty" json:"Appsec,omitempty"` /* Meta is the only part that will make it to the API - it should be normalized */ Meta map[string]string `yaml:"Meta,omitempty" json:"Meta,omitempty"` } +func MakeEvent(timeMachine bool, evtType int, process bool) Event { + evt := Event{ + Parsed: make(map[string]string), + Meta: make(map[string]string), + Unmarshaled: make(map[string]interface{}), + Enriched: make(map[string]string), + ExpectMode: LIVE, + Process: process, + Type: evtType, + } + if timeMachine { + evt.ExpectMode = TIMEMACHINE + } + return evt +} + func (e *Event) SetMeta(key string, value string) bool { if e.Meta == nil { e.Meta = make(map[string]string) } + e.Meta[key] = value + return true } @@ -58,7 +77,9 @@ func (e *Event) SetParsed(key string, value string) bool { if e.Parsed == nil { e.Parsed = make(map[string]string) } + e.Parsed[key] = value + return true } @@ -90,11 +111,13 @@ func (e *Event) GetMeta(key string) 
string { } } } + return "" } func (e *Event) ParseIPSources() []net.IP { var srcs []net.IP + switch e.Type { case LOG: if _, ok := e.Meta["source_ip"]; ok { @@ -105,6 +128,7 @@ func (e *Event) ParseIPSources() []net.IP { srcs = append(srcs, net.ParseIP(k)) } } + return srcs } @@ -131,8 +155,8 @@ type RuntimeAlert struct { Whitelisted bool `yaml:"Whitelisted,omitempty" json:"Whitelisted,omitempty"` Reprocess bool `yaml:"Reprocess,omitempty" json:"Reprocess,omitempty"` Sources map[string]models.Source `yaml:"Sources,omitempty" json:"Sources,omitempty"` - Alert *models.Alert `yaml:"Alert,omitempty" json:"Alert,omitempty"` //this one is a pointer to APIAlerts[0] for convenience. - //APIAlerts will be populated at the end when there is more than one source + Alert *models.Alert `yaml:"Alert,omitempty" json:"Alert,omitempty"` // this one is a pointer to APIAlerts[0] for convenience. + // APIAlerts will be populated at the end when there is more than one source APIAlerts []models.Alert `yaml:"APIAlerts,omitempty" json:"APIAlerts,omitempty"` } @@ -141,5 +165,21 @@ func (r RuntimeAlert) GetSources() []string { for key := range r.Sources { ret = append(ret, key) } + return ret } + +func NormalizeScope(scope string) string { + switch strings.ToLower(scope) { + case "ip": + return Ip + case "range": + return Range + case "as": + return AS + case "country": + return Country + default: + return scope + } +} diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index ab71b650d11..ac438ad0c14 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -12,7 +12,7 @@ Patch0: user.patch BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildRequires: systemd -Requires: crontabs +Requires: (crontabs or cron) %{?fc33:BuildRequires: systemd-rpm-macros} %{?fc34:BuildRequires: systemd-rpm-macros} %{?fc35:BuildRequires: systemd-rpm-macros} diff --git a/test/ansible/vagrant/fedora-40/Vagrantfile b/test/ansible/vagrant/fedora-40/Vagrantfile index 
ec03661fe39..5541d453acf 100644 --- a/test/ansible/vagrant/fedora-40/Vagrantfile +++ b/test/ansible/vagrant/fedora-40/Vagrantfile @@ -1,7 +1,7 @@ # frozen_string_literal: true Vagrant.configure('2') do |config| - config.vm.box = "fedora/39-cloud-base" + config.vm.box = "fedora/40-cloud-base" config.vm.provision "shell", inline: <<-SHELL SHELL end diff --git a/test/ansible/vagrant/fedora-41/Vagrantfile b/test/ansible/vagrant/fedora-41/Vagrantfile new file mode 100644 index 00000000000..3f905f51671 --- /dev/null +++ b/test/ansible/vagrant/fedora-41/Vagrantfile @@ -0,0 +1,13 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = "fedora/40-cloud-base" + config.vm.provision "shell", inline: <<-SHELL + SHELL + config.vm.provision "shell" do |s| + s.inline = "sudo dnf upgrade --refresh -y && sudo dnf install dnf-plugin-system-upgrade -y && sudo dnf system-upgrade download --releasever=41 -y && sudo dnf system-upgrade reboot -y" + end +end + +common = '../common' +load common if File.exist?(common) diff --git a/test/ansible/vagrant/fedora-41/skip b/test/ansible/vagrant/fedora-41/skip new file mode 100644 index 00000000000..4f1a9063d2b --- /dev/null +++ b/test/ansible/vagrant/fedora-41/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/test/ansible/vagrant/opensuse-leap-15/Vagrantfile b/test/ansible/vagrant/opensuse-leap-15/Vagrantfile new file mode 100644 index 00000000000..d10e68a50a7 --- /dev/null +++ b/test/ansible/vagrant/opensuse-leap-15/Vagrantfile @@ -0,0 +1,10 @@ +# frozen_string_literal: true + +Vagrant.configure('2') do |config| + config.vm.box = "opensuse/Leap-15.6.x86_64" + config.vm.provision "shell", inline: <<-SHELL + SHELL +end + +common = '../common' +load common if File.exist?(common) diff --git a/test/ansible/vagrant/opensuse-leap-15/skip b/test/ansible/vagrant/opensuse-leap-15/skip new file 
mode 100644 index 00000000000..4f1a9063d2b --- /dev/null +++ b/test/ansible/vagrant/opensuse-leap-15/skip @@ -0,0 +1,9 @@ +#!/bin/sh + +die() { + echo "$@" >&2 + exit 1 +} + +[ "${DB_BACKEND}" = "mysql" ] && die "mysql role does not support this distribution" +exit 0 diff --git a/test/bats.mk b/test/bats.mk index 8f507cb659b..72ac8863f72 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -38,6 +38,7 @@ define ENV := export TEST_DIR="$(TEST_DIR)" export LOCAL_DIR="$(LOCAL_DIR)" export BIN_DIR="$(BIN_DIR)" +# append .min to the binary names to use the minimal profile export CROWDSEC="$(CROWDSEC)" export CSCLI="$(CSCLI)" export CONFIG_YAML="$(CONFIG_DIR)/config.yaml" @@ -66,8 +67,8 @@ bats-check-requirements: ## Check dependencies for functional tests @$(TEST_DIR)/bin/check-requirements bats-update-tools: ## Install/update tools required for functional tests - # yq v4.43.1 - GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@c35ec752e38ea0c096d3c44e13cfc0797ac394d8 + # yq v4.44.3 + GOBIN=$(TEST_DIR)/tools go install github.com/mikefarah/yq/v4@bbdd97482f2d439126582a59689eb1c855944955 # cfssl v1.6.5 GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssl@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda GOBIN=$(TEST_DIR)/tools go install github.com/cloudflare/cfssl/cmd/cfssljson@96259aa29c9cc9b2f4e04bad7d4bc152e5405dda @@ -75,6 +76,11 @@ bats-update-tools: ## Install/update tools required for functional tests # Build and installs crowdsec in a local directory. Rebuilds if already exists. 
bats-build: bats-environment ## Build binaries for functional tests @$(MKDIR) $(BIN_DIR) $(LOG_DIR) $(PID_DIR) $(BATS_PLUGIN_DIR) + # minimal profile + @$(MAKE) build DEBUG=1 TEST_COVERAGE=$(TEST_COVERAGE) DEFAULT_CONFIGDIR=$(CONFIG_DIR) DEFAULT_DATADIR=$(DATA_DIR) BUILD_PROFILE=minimal + @install -m 0755 cmd/crowdsec/crowdsec $(BIN_DIR)/crowdsec.min + @install -m 0755 cmd/crowdsec-cli/cscli $(BIN_DIR)/cscli.min + # default profile @$(MAKE) build DEBUG=1 TEST_COVERAGE=$(TEST_COVERAGE) DEFAULT_CONFIGDIR=$(CONFIG_DIR) DEFAULT_DATADIR=$(DATA_DIR) @install -m 0755 cmd/crowdsec/crowdsec cmd/crowdsec-cli/cscli $(BIN_DIR)/ @install -m 0755 cmd/notification-*/notification-* $(BATS_PLUGIN_DIR)/ diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 83072b0f159..aa5830a6bae 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -199,7 +199,42 @@ teardown() { assert_stderr --partial "crowdsec init: while loading acquisition config: no datasource enabled" } -@test "crowdsec (disabled datasources)" { +@test "crowdsec (datasource not built)" { + config_set '.common.log_media="stdout"' + + # a datasource cannot run - it's not built in the log processor executable + + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + mkdir -p "$ACQUIS_DIR" + cat >"$ACQUIS_DIR"/foo.yaml <<-EOT + source: journalctl + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + #shellcheck disable=SC2016 + rune -1 wait-for \ + --err "crowdsec init: while loading acquisition config: in file $ACQUIS_DIR/foo.yaml (position: 0) - data source journalctl is not built in this version of crowdsec" \ + env PATH='' "$CROWDSEC".min + + # auto-detection of journalctl_filter still works + cat >"$ACQUIS_DIR"/foo.yaml <<-EOT + source: whatever + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + #shellcheck disable=SC2016 + rune -1 wait-for \ + --err "crowdsec init: while loading acquisition config: in 
file $ACQUIS_DIR/foo.yaml (position: 0) - data source journalctl is not built in this version of crowdsec" \ + env PATH='' "$CROWDSEC".min +} + +@test "crowdsec (disabled datasource)" { if is_package_testing; then # we can't hide journalctl in package testing # because crowdsec is run from systemd diff --git a/test/bats/01_cscli_lapi.bats b/test/bats/01_cscli_lapi.bats index a503dfff8cf..6e876576a6e 100644 --- a/test/bats/01_cscli_lapi.bats +++ b/test/bats/01_cscli_lapi.bats @@ -29,9 +29,9 @@ teardown() { rune -0 ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "Loaded credentials from" - assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "Loaded credentials from" + assert_output --partial "Trying to authenticate with username" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli - missing LAPI credentials file" { @@ -76,7 +76,7 @@ teardown() { rune -0 ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" rm "$LOCAL_API_CREDENTIALS".local @@ -88,7 +88,7 @@ teardown() { config_set "$LOCAL_API_CREDENTIALS" '.password="$PASSWORD"' rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" # but if a variable is not defined, there is no specific error message unset URL @@ -115,7 +115,7 @@ teardown() { rune -1 cscli lapi status -o json rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parsing api url: parse "http://127.0.0.1:-80/": invalid port ":-80" after host' + assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid 
port ":-80" after host' } @test "cscli - bad LAPI password" { diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index 60731b90713..6be5101cee2 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats @@ -76,7 +76,7 @@ teardown() { config_disable_agent ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli metrics" { diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index d5154c1a0d7..7ba6bfa4428 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -46,19 +46,32 @@ setup() { assert_stderr --regexp "no configuration for Central API \(CAPI\) in '$(echo $CONFIG_YAML|sed s#//#/#g)'" } -@test "cscli capi status" { +@test "cscli {capi,papi} status" { ./instance-data load config_enable_capi + + # should not panic with no credentials, but return an error + rune -1 cscli papi status + assert_stderr --partial "the Central API (CAPI) must be configured with 'cscli capi register'" + rune -0 cscli capi register --schmilblick githubciXXXXXXXXXXXXXXXXXXXXXXXX rune -1 cscli capi status - assert_stderr --partial "no scenarios installed, abort" + assert_stderr --partial "no scenarios or appsec-rules installed, abort" + + rune -1 cscli papi status + assert_stderr --partial "no PAPI URL in configuration" + + rune -0 cscli console enable console_management + rune -1 cscli papi status + assert_stderr --partial "unable to get PAPI permissions" + assert_stderr --partial "Forbidden for plan" rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli capi status - assert_stderr --partial "Loaded credentials from" - assert_stderr --partial "Trying to authenticate with username" - assert_stderr --partial " on https://api.crowdsec.net/" - assert_stderr --partial "You can successfully interact with Central API (CAPI)" + assert_output --partial "Loaded credentials 
from" + assert_output --partial "Trying to authenticate with username" + assert_output --partial " on https://api.crowdsec.net/" + assert_output --partial "You can successfully interact with Central API (CAPI)" } @test "cscli alerts list: receive a community pull when capi is enabled" { @@ -85,7 +98,7 @@ setup() { config_disable_agent ./instance-crowdsec start rune -0 cscli capi status - assert_stderr --partial "You can successfully interact with Central API (CAPI)" + assert_output --partial "You can successfully interact with Central API (CAPI)" } @test "capi register must be run from lapi" { diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index c02a75810b9..d22a6f0a953 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -66,7 +66,7 @@ teardown() { config_disable_capi ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli metrics" { diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index 2106d3ab6b2..f832ac572d2 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -819,6 +819,6 @@ update-notifier-motd.timer enabled enabled setup: alsdk al; sdf EOT - assert_output "while unmarshaling setup file: yaml: line 2: could not find expected ':'" + assert_output "while parsing setup file: yaml: line 2: could not find expected ':'" assert_stderr --partial "invalid setup file" } diff --git a/test/bats/09_socket.bats b/test/bats/09_socket.bats index f770abaad2e..f861d8a40dc 100644 --- a/test/bats/09_socket.bats +++ b/test/bats/09_socket.bats @@ -37,22 +37,22 @@ teardown() { ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on $socket" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with username 
.* on $socket" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "crowdsec - listen on both socket and TCP" { ./instance-crowdsec start rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on http://127.0.0.1:8080/" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with username .* on http://127.0.0.1:8080/" + assert_output --partial "You can successfully interact with Local API (LAPI)" config_set "$LOCAL_API_CREDENTIALS" ".url=strenv(socket)" rune -0 cscli lapi status - assert_stderr --regexp "Trying to authenticate with username .* on $socket" - assert_stderr --partial "You can successfully interact with Local API (LAPI)" + assert_output --regexp "Trying to authenticate with username .* on $socket" + assert_output --partial "You can successfully interact with Local API (LAPI)" } @test "cscli - authenticate new machine with socket" { diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index f99913dcee5..b1c90116dd2 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -63,7 +63,7 @@ teardown() { @test "delete non-existent bouncer" { # this is a fatal error, which is not consistent with "machines delete" rune -1 cscli bouncers delete something - assert_stderr --partial "unable to delete bouncer: 'something' does not exist" + assert_stderr --partial "unable to delete bouncer something: ent: bouncer not found" rune -0 cscli bouncers delete something --ignore-missing refute_stderr } @@ -144,3 +144,56 @@ teardown() { rune -0 cscli bouncers prune assert_output 'No bouncers to prune.' 
} + +curl_localhost() { + [[ -z "$API_KEY" ]] && { fail "${FUNCNAME[0]}: missing API_KEY"; } + local path=$1 + shift + curl "localhost:8080$path" -sS --fail-with-body -H "X-Api-Key: $API_KEY" "$@" +} + +# We can't use curl-with-key here, as we want to query localhost, not 127.0.0.1 +@test "multiple bouncers sharing api key" { + export API_KEY=bouncerkey + + # crowdsec needs to listen on all interfaces + rune -0 ./instance-crowdsec stop + rune -0 config_set 'del(.api.server.listen_socket) | del(.api.server.listen_uri)' + echo "{'api':{'server':{'listen_uri':0.0.0.0:8080}}}" >"${CONFIG_YAML}.local" + + rune -0 ./instance-crowdsec start + + # add a decision for our bouncers + rune -0 cscli decisions add -i '1.2.3.5' + + rune -0 cscli bouncers add test-auto -k "$API_KEY" + + # query with 127.0.0.1 as source ip + rune -0 curl_localhost "/v1/decisions/stream" -4 + rune -0 jq -r '.new' <(output) + assert_output --partial '1.2.3.5' + + # now with ::1, we should get the same IP, even though we are using the same key + rune -0 curl_localhost "/v1/decisions/stream" -6 + rune -0 jq -r '.new' <(output) + assert_output --partial '1.2.3.5' + + rune -0 cscli bouncers list -o json + rune -0 jq -c '[.[] | [.name,.revoked,.ip_address,.auto_created]]' <(output) + assert_json '[["test-auto",false,"127.0.0.1",false],["test-auto@::1",false,"::1",true]]' + + # check the 2nd bouncer was created automatically + rune -0 cscli bouncers inspect "test-auto@::1" -o json + rune -0 jq -r '.ip_address' <(output) + assert_output --partial '::1' + + # attempt to delete the auto-created bouncer, it should fail + rune -0 cscli bouncers delete 'test-auto@::1' + assert_stderr --partial 'cannot be deleted' + + # delete the "real" bouncer, it should delete both + rune -0 cscli bouncers delete 'test-auto' + + rune -0 cscli bouncers list -o json + assert_json [] +} diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 214d07d927f..4b390c90ed4 100644 --- a/test/bats/20_hub_items.bats 
+++ b/test/bats/20_hub_items.bats @@ -176,7 +176,7 @@ teardown() { rune -0 mkdir -p "$CONFIG_DIR/collections" rune -0 ln -s /this/does/not/exist.yaml "$CONFIG_DIR/collections/foobar.yaml" rune -0 cscli hub list - assert_stderr --partial "link target does not exist: $CONFIG_DIR/collections/foobar.yaml -> /this/does/not/exist.yaml" + assert_stderr --partial "Ignoring file $CONFIG_DIR/collections/foobar.yaml: lstat /this/does/not/exist.yaml: no such file or directory" rune -0 cscli hub list -o json rune -0 jq '.collections' <(output) assert_json '[]' @@ -194,9 +194,89 @@ teardown() { assert_output 'false' } -@test "skip files if we can't guess their type" { - rune -0 mkdir -p "$CONFIG_DIR/scenarios/foo" - rune -0 touch "$CONFIG_DIR/scenarios/foo/bar.yaml" - rune -0 cscli hub list - assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/foo/bar.yaml: unknown configuration type" +@test "don't traverse hidden directories (starting with a dot)" { + rune -0 mkdir -p "$CONFIG_DIR/scenarios/.foo" + rune -0 touch "$CONFIG_DIR/scenarios/.foo/bar.yaml" + rune -0 cscli hub list --trace + assert_stderr --partial "skipping hidden directory $CONFIG_DIR/scenarios/.foo" +} + +@test "allow symlink to target inside a hidden directory" { + # k8s config maps use hidden directories and links when mounted + rune -0 mkdir -p "$CONFIG_DIR/scenarios/.foo" + + # ignored + rune -0 touch "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # real file + rune -0 touch "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 1 + + rune -0 rm "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # link to ignored is not ignored, and the name comes from the link + rune -0 ln -s "$CONFIG_DIR/scenarios/.foo/hidden.yaml" 
"$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq -c '[.scenarios[].name] | sort' <(output) + assert_json '["myfoo.yaml"]' +} + +@test "item files can be links to links" { + rune -0 mkdir -p "$CONFIG_DIR"/scenarios/{.foo,.bar} + + rune -0 ln -s "$CONFIG_DIR/scenarios/.foo/hidden.yaml" "$CONFIG_DIR/scenarios/.bar/hidden.yaml" + + # link to a danling link + rune -0 ln -s "$CONFIG_DIR/scenarios/.bar/hidden.yaml" "$CONFIG_DIR/scenarios/myfoo.yaml" + rune -0 cscli scenarios list + assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/myfoo.yaml: lstat $CONFIG_DIR/scenarios/.foo/hidden.yaml: no such file or directory" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 0 + + # detect link loops + rune -0 ln -s "$CONFIG_DIR/scenarios/.bar/hidden.yaml" "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list + assert_stderr --partial "Ignoring file $CONFIG_DIR/scenarios/myfoo.yaml: too many levels of symbolic links" + + rune -0 rm "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 touch "$CONFIG_DIR/scenarios/.foo/hidden.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq '.scenarios | length' <(output) + assert_output 1 +} + +@test "item files can be in a subdirectory" { + rune -0 mkdir -p "$CONFIG_DIR/scenarios/sub/sub2/sub3" + rune -0 touch "$CONFIG_DIR/scenarios/sub/imlocal.yaml" + # subdir name is now part of the item name + rune -0 cscli scenarios inspect sub/imlocal.yaml -o json + rune -0 jq -e '[.tainted,.local==false,true]' <(output) + rune -0 rm "$CONFIG_DIR/scenarios/sub/imlocal.yaml" + + rune -0 ln -s "$HUB_DIR/scenarios/crowdsecurity/smb-bf.yaml" "$CONFIG_DIR/scenarios/sub/smb-bf.yaml" + rune -0 cscli scenarios inspect crowdsecurity/smb-bf -o json + rune -0 jq -e '[.tainted,.local==false,false]' <(output) + rune -0 rm "$CONFIG_DIR/scenarios/sub/smb-bf.yaml" + + rune -0 ln -s "$HUB_DIR/scenarios/crowdsecurity/smb-bf.yaml" 
"$CONFIG_DIR/scenarios/sub/sub2/sub3/smb-bf.yaml" + rune -0 cscli scenarios inspect crowdsecurity/smb-bf -o json + rune -0 jq -e '[.tainted,.local==false,false]' <(output) +} + +@test "same file name for local items in different subdirectories" { + rune -0 mkdir -p "$CONFIG_DIR"/scenarios/{foo,bar} + rune -0 touch "$CONFIG_DIR/scenarios/foo/local.yaml" + rune -0 touch "$CONFIG_DIR/scenarios/bar/local.yaml" + rune -0 cscli scenarios list -o json + rune -0 jq -c '[.scenarios[].name] | sort' <(output) + assert_json '["bar/local.yaml","foo/local.yaml"]' } diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index c7ed214ffc9..8601414db48 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -78,13 +78,13 @@ teardown() { # invalid defaults rune -1 cscli decisions import --duration "" -i - <<<'value\n5.6.7.8' --format csv - assert_stderr --partial "--duration cannot be empty" + assert_stderr --partial "default duration cannot be empty" rune -1 cscli decisions import --scope "" -i - <<<'value\n5.6.7.8' --format csv - assert_stderr --partial "--scope cannot be empty" + assert_stderr --partial "default scope cannot be empty" rune -1 cscli decisions import --reason "" -i - <<<'value\n5.6.7.8' --format csv - assert_stderr --partial "--reason cannot be empty" + assert_stderr --partial "default reason cannot be empty" rune -1 cscli decisions import --type "" -i - <<<'value\n5.6.7.8' --format csv - assert_stderr --partial "--type cannot be empty" + assert_stderr --partial "default type cannot be empty" #---------- # JSON @@ -108,12 +108,12 @@ teardown() { # invalid json rune -1 cscli decisions import -i - <<<'{"blah":"blah"}' --format json assert_stderr --partial 'Parsing json' - assert_stderr --partial 'json: cannot unmarshal object into Go value of type []main.decisionRaw' + assert_stderr --partial 'json: cannot unmarshal object into Go value of type []clidecision.decisionRaw' # json with extra data rune -1 cscli decisions import -i - 
<<<'{"values":"1.2.3.4","blah":"blah"}' --format json assert_stderr --partial 'Parsing json' - assert_stderr --partial 'json: cannot unmarshal object into Go value of type []main.decisionRaw' + assert_stderr --partial 'json: cannot unmarshal object into Go value of type []clidecision.decisionRaw' #---------- # CSV diff --git a/test/instance-data b/test/instance-data index e4e76d3980a..e7fd05a9e54 100755 --- a/test/instance-data +++ b/test/instance-data @@ -1,16 +1,26 @@ #!/usr/bin/env bash +set -eu + +die() { + echo >&2 "$@" + exit 1 +} + #shellcheck disable=SC1007 THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) cd "$THIS_DIR" || exit 1 # shellcheck disable=SC1091 . ./.environment.sh +if [[ -f "$LOCAL_INIT_DIR/.lock" ]] && [[ "$1" != "unlock" ]]; then + die "init data is locked: are you doing some manual test? if so, please finish what you are doing, run 'instance-data unlock' and retry" +fi + backend_script="./lib/config/config-${CONFIG_BACKEND}" if [[ ! -x "$backend_script" ]]; then - echo "unknown config backend '${CONFIG_BACKEND}'" >&2 - exit 1 + die "unknown config backend '${CONFIG_BACKEND}'" fi exec "$backend_script" "$@" diff --git a/test/lib/init/crowdsec-daemon b/test/lib/init/crowdsec-daemon index a232f344b6a..ba8e98992db 100755 --- a/test/lib/init/crowdsec-daemon +++ b/test/lib/init/crowdsec-daemon @@ -51,7 +51,11 @@ stop() { PGID="$(ps -o pgid= -p "$(cat "${DAEMON_PID}")" | tr -d ' ')" # ps above should work on linux, freebsd, busybox.. if [[ -n "${PGID}" ]]; then - kill -- "-${PGID}" + kill -- "-${PGID}" + + while pgrep -g "${PGID}" >/dev/null; do + sleep .05 + done fi rm -f -- "${DAEMON_PID}" diff --git a/test/run-tests b/test/run-tests index 6fe3bd004e2..957eb663b9c 100755 --- a/test/run-tests +++ b/test/run-tests @@ -10,12 +10,12 @@ die() { # shellcheck disable=SC1007 TEST_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # shellcheck source=./.environment.sh -. "${TEST_DIR}/.environment.sh" +. 
"$TEST_DIR/.environment.sh" -"${TEST_DIR}/bin/check-requirements" +"$TEST_DIR/bin/check-requirements" echo "Running tests..." -echo "DB_BACKEND: ${DB_BACKEND}" +echo "DB_BACKEND: $DB_BACKEND" if [[ -z "$TEST_COVERAGE" ]]; then echo "Coverage report: no" else @@ -24,23 +24,23 @@ fi [[ -f "$LOCAL_INIT_DIR/.lock" ]] && die "init data is locked: are you doing some manual test? if so, please finish what you are doing, run 'instance-data unlock' and retry" -dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" +dump_backend="$(cat "$LOCAL_INIT_DIR/.backend")" if [[ "$DB_BACKEND" != "$dump_backend" ]]; then - die "Can't run with backend '${DB_BACKEND}' because the test data was build with '${dump_backend}'" + die "Can't run with backend '$DB_BACKEND' because the test data was build with '$dump_backend'" fi if [[ $# -ge 1 ]]; then echo "test files: $*" - "${TEST_DIR}/lib/bats-core/bin/bats" \ + "$TEST_DIR/lib/bats-core/bin/bats" \ --jobs 1 \ --timing \ --print-output-on-failure \ "$@" else - echo "test files: ${TEST_DIR}/bats ${TEST_DIR}/dyn-bats" - "${TEST_DIR}/lib/bats-core/bin/bats" \ + echo "test files: $TEST_DIR/bats $TEST_DIR/dyn-bats" + "$TEST_DIR/lib/bats-core/bin/bats" \ --jobs 1 \ --timing \ --print-output-on-failure \ - "${TEST_DIR}/bats" "${TEST_DIR}/dyn-bats" + "$TEST_DIR/bats" "$TEST_DIR/dyn-bats" fi From 2ab93f79a106035a804a3362be0e3a6aa78fb6ec Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 25 Nov 2024 17:35:21 +0100 Subject: [PATCH 356/581] appsec: missing err check when initializing out-of-band engine (#3344) --- pkg/acquisition/modules/appsec/appsec_runner.go | 8 ++++---- .../modules/appsec/appsec_runner_test.go | 14 ++++++++++++++ pkg/acquisition/modules/appsec/appsec_test.go | 6 ++++++ 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index 7ce43779591..c6ca4fa5870 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ 
b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -91,6 +91,10 @@ func (r *AppsecRunner) Init(datadir string) error { } r.AppsecOutbandEngine, err = coraza.NewWAF(outbandCfg) + if err != nil { + return fmt.Errorf("unable to initialize outband engine : %w", err) + } + if r.AppsecRuntime.DisabledInBandRulesTags != nil { for _, tag := range r.AppsecRuntime.DisabledInBandRulesTags { r.AppsecInbandEngine.GetRuleGroup().DeleteByTag(tag) @@ -118,10 +122,6 @@ func (r *AppsecRunner) Init(datadir string) error { r.logger.Tracef("Loaded inband rules: %+v", r.AppsecInbandEngine.GetRuleGroup().GetRules()) r.logger.Tracef("Loaded outband rules: %+v", r.AppsecOutbandEngine.GetRuleGroup().GetRules()) - if err != nil { - return fmt.Errorf("unable to initialize outband engine : %w", err) - } - return nil } diff --git a/pkg/acquisition/modules/appsec/appsec_runner_test.go b/pkg/acquisition/modules/appsec/appsec_runner_test.go index 2027cf1d2c0..d07fb153186 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner_test.go +++ b/pkg/acquisition/modules/appsec/appsec_runner_test.go @@ -130,6 +130,20 @@ func TestAppsecRuleLoad(t *testing.T) { require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 4) }, }, + { + name: "invalid inband rule", + expected_load_ok: false, + inband_native_rules: []string{ + "this_is_not_a_rule", + }, + }, + { + name: "invalid outofband rule", + expected_load_ok: false, + outofband_native_rules: []string{ + "this_is_not_a_rule", + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 1534f5cb7fa..142b5174940 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -96,8 +96,14 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { } err = runner.Init("/tmp/") if err != nil { + if !test.expected_load_ok { + return + } t.Fatalf("unable to initialize 
runner : %s", err) } + if !test.expected_load_ok { + t.Fatalf("expected load to fail but it didn't") + } if test.afterload_asserts != nil { //afterload asserts are just to evaluate the state of the runner after the rules have been loaded From bbe77529670664acee12780fa33ff3c0f52bf885 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:14:56 +0100 Subject: [PATCH 357/581] update golangci-lint to 1.62 (#3332) - ensure consistent pointer/value receivers - testify: json assertions with dedicated methods --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 13 ++++++++-- .../clinotifications/notifications.go | 2 +- cmd/crowdsec/main.go | 4 ++-- pkg/apiserver/alerts_test.go | 24 +++++++++---------- pkg/apiserver/decisions_test.go | 16 ++++++------- pkg/apiserver/jwt_test.go | 10 ++++---- pkg/apiserver/machines_test.go | 6 ++--- pkg/appsec/coraza_logger.go | 2 +- pkg/cache/cache_test.go | 13 +++++----- 11 files changed, 52 insertions(+), 42 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 2966b999a4a..3276dbb1bfd 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -61,6 +61,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.61 + version: v1.62 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3f4aa67e139..3638696b4f6 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -190,6 +190,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.61 + version: v1.62 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.golangci.yml b/.golangci.yml index acde901dbe6..fd595994e7c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -211,9 +211,7 @@ 
linters: # # DEPRECATED by golangi-lint # - - execinquery - exportloopref - - gomnd # # Redundant @@ -456,3 +454,14 @@ issues: - revive path: "cmd/crowdsec/win_service.go" text: "deep-exit: .*" + + - linters: + - recvcheck + path: "pkg/csplugin/hclog_adapter.go" + text: 'the methods of "HCLogAdapter" use pointer receiver and non-pointer receiver.' + + # encoding to json/yaml requires value receivers + - linters: + - recvcheck + path: "pkg/cwhub/item.go" + text: 'the methods of "Item" use pointer receiver and non-pointer receiver.' diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index baf899c10cf..80ffebeaa23 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -260,7 +260,7 @@ func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args [ return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli cliNotifications) newTestCmd() *cobra.Command { +func (cli *cliNotifications) newTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 6d8ca24c335..e414f59f3e2 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -148,14 +148,14 @@ func (l *labelsMap) String() string { return "labels" } -func (l labelsMap) Set(label string) error { +func (l *labelsMap) Set(label string) error { for _, pair := range strings.Split(label, ",") { split := strings.Split(pair, ":") if len(split) != 2 { return fmt.Errorf("invalid format for label '%s', must be key:value", pair) } - l[split[0]] = split[1] + (*l)[split[0]] = split[1] } return nil diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index d86234e4813..346619bf691 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -121,14 +121,14 @@ func TestCreateAlert(t *testing.T) { w := lapi.RecordResponse(t, ctx, http.MethodPost, 
"/v1/alerts", strings.NewReader("test"), "password") assert.Equal(t, 400, w.Code) - assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create Alert with invalid input alertContent := GetAlertReaderFromFile(t, "./tests/invalidAlert_sample.json") w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, + assert.JSONEq(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) @@ -176,7 +176,7 @@ func TestAlertListFilters(t *testing.T) { w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", alertContent, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // get without filters @@ -242,7 +242,7 @@ func TestAlertListFilters(t *testing.T) { w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) // test range (ok) @@ -261,7 +261,7 @@ func TestAlertListFilters(t *testing.T) { w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=ratata", emptyBody, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + 
assert.JSONEq(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) // test since (ok) @@ -332,7 +332,7 @@ func TestAlertListFilters(t *testing.T) { w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) } func TestAlertBulkInsert(t *testing.T) { @@ -354,7 +354,7 @@ func TestListAlert(t *testing.T) { w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", emptyBody, "password") assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // List Alert @@ -397,7 +397,7 @@ func TestDeleteAlert(t *testing.T) { req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) // Delete Alert w = httptest.NewRecorder() @@ -406,7 +406,7 @@ func TestDeleteAlert(t *testing.T) { req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteAlertByID(t *testing.T) { @@ -421,7 +421,7 @@ func TestDeleteAlertByID(t *testing.T) { req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access 
forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) // Delete Alert w = httptest.NewRecorder() @@ -430,7 +430,7 @@ func TestDeleteAlertByID(t *testing.T) { req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteAlertTrustedIPS(t *testing.T) { @@ -475,7 +475,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index a0af6956443..cb5d2e1c4f1 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -22,19 +22,19 @@ func TestDeleteDecisionRange(t *testing.T) { // delete by ip wrong w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by range w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"2"}`, w.Body.String()) // delete by range : ensure it was already deleted w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) } func TestDeleteDecisionFilter(t *testing.T) { @@ -48,19 +48,19 @@ func TestDeleteDecisionFilter(t 
*testing.T) { w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by ip good w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) // delete by scope/value w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteDecisionFilterByScenario(t *testing.T) { @@ -74,13 +74,13 @@ func TestDeleteDecisionFilterByScenario(t *testing.T) { w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by scenario good w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"2"}`, w.Body.String()) } func TestGetDecisionFilters(t *testing.T) { diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index f6f51763975..72ae0302ae4 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -23,7 +23,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"machine test not validated"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"machine test not validated"}`, w.Body.String()) // Login with machine not exist w = 
httptest.NewRecorder() @@ -32,7 +32,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"ent: machine not found"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"ent: machine not found"}`, w.Body.String()) // Login with invalid body w = httptest.NewRecorder() @@ -41,7 +41,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"missing: invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"missing: invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Login with invalid format w = httptest.NewRecorder() @@ -50,7 +50,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) // Validate machine ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) @@ -62,7 +62,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"incorrect Username or Password"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"incorrect Username or Password"}`, w.Body.String()) // Login with valid machine w = httptest.NewRecorder() diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 969f75707d6..57b96f54ddd 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -25,7 +25,7 @@ func TestCreateMachine(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusBadRequest, w.Code) - assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"invalid 
character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create machine with invalid input w = httptest.NewRecorder() @@ -34,7 +34,7 @@ func TestCreateMachine(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnprocessableEntity, w.Code) - assert.Equal(t, `{"message":"validation failure list:\nmachine_id in body is required\npassword in body is required"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"validation failure list:\nmachine_id in body is required\npassword in body is required"}`, w.Body.String()) // Create machine b, err := json.Marshal(MachineTest) @@ -144,7 +144,7 @@ func TestCreateMachineAlreadyExist(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusForbidden, w.Code) - assert.Equal(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) } func TestAutoRegistration(t *testing.T) { diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index d2c1612cbd7..93e31be5876 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -124,7 +124,7 @@ func (e *crzLogEvent) Stringer(key string, val fmt.Stringer) dbg.Event { return e } -func (e crzLogEvent) IsEnabled() bool { +func (e *crzLogEvent) IsEnabled() bool { return !e.muted } diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index a4e0bd0127a..4da9fd5bf7b 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -5,26 +5,27 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCreateSetGet(t *testing.T) { err := CacheInit(CacheCfg{Name: "test", Size: 100, TTL: 1 * time.Second}) - assert.Empty(t, err) + require.NoError(t, err) //set & get err = SetKey("test", "testkey0", "testvalue1", nil) - assert.Empty(t, err) + require.NoError(t, err) ret, err := GetKey("test", "testkey0") assert.Equal(t, "testvalue1", ret) - assert.Empty(t, err) + 
require.NoError(t, err) //re-set err = SetKey("test", "testkey0", "testvalue2", nil) - assert.Empty(t, err) + require.NoError(t, err) assert.Equal(t, "testvalue1", ret) - assert.Empty(t, err) + require.NoError(t, err) //expire time.Sleep(1500 * time.Millisecond) ret, err = GetKey("test", "testkey0") assert.Equal(t, "", ret) - assert.Empty(t, err) + require.NoError(t, err) } From 7a1ad8376a6bc36e0007d5c7c75630ac8c81508c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 5 Dec 2024 10:40:48 +0100 Subject: [PATCH 358/581] lint: style, autofix (#3354) --- cmd/crowdsec-cli/dashboard.go | 4 +- cmd/crowdsec-cli/setup.go | 1 + cmd/crowdsec/appsec.go | 2 +- cmd/notification-email/main.go | 2 +- .../configuration/configuration.go | 10 ++-- .../modules/appsec/appsec_hooks_test.go | 4 +- .../modules/appsec/appsec_runner.go | 2 - pkg/acquisition/modules/appsec/appsec_test.go | 6 +- .../modules/appsec/bodyprocessors/raw.go | 7 +-- pkg/acquisition/modules/http/http.go | 4 +- pkg/acquisition/modules/http/http_test.go | 3 +- .../syslog/internal/parser/rfc3164/parse.go | 1 - .../syslog/internal/parser/rfc5424/parse.go | 3 - .../internal/parser/rfc5424/parse_test.go | 58 +++++++++++++----- .../syslog/internal/server/syslogserver.go | 1 - pkg/alertcontext/alertcontext_test.go | 1 - pkg/apiclient/auth_jwt.go | 3 - pkg/apiserver/apic.go | 1 - pkg/apiserver/middlewares/v1/api_key.go | 1 - pkg/appsec/appsec.go | 1 - pkg/appsec/appsec_rule/appsec_rule.go | 1 - pkg/appsec/appsec_rule/modsec_rule_test.go | 2 - pkg/appsec/request_test.go | 3 - pkg/csplugin/broker.go | 4 +- pkg/cticlient/types.go | 2 - pkg/exprhelpers/crowdsec_cti.go | 20 ++++--- pkg/exprhelpers/geoip.go | 3 - pkg/fflag/crowdsec.go | 14 +++-- pkg/leakybucket/blackhole.go | 2 - pkg/leakybucket/bucket.go | 1 - pkg/leakybucket/buckets.go | 1 - pkg/leakybucket/conditional.go | 6 +- pkg/leakybucket/manager_load_test.go | 59 ++++++++----------- pkg/leakybucket/manager_run.go | 13 ++-- 
pkg/leakybucket/processor.go | 3 +- pkg/leakybucket/reset_filter.go | 10 ++-- pkg/leakybucket/uniq.go | 6 +- pkg/parser/enrich.go | 6 +- pkg/parser/enrich_geoip.go | 3 - pkg/parser/parsing_test.go | 4 +- pkg/parser/runtime.go | 14 +++-- pkg/types/appsec_event.go | 1 - pkg/types/constants.go | 34 ++++++----- pkg/types/event_test.go | 2 - pkg/types/getfstype.go | 1 - pkg/types/ip.go | 3 +- pkg/types/ip_test.go | 5 +- pkg/types/utils.go | 8 ++- 48 files changed, 177 insertions(+), 169 deletions(-) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 53a7dff85a0..7ddac093dcd 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -243,7 +243,8 @@ func (cli *cliDashboard) newStopCmd() *cobra.Command { } func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { - cmd := &cobra.Command{Use: "show-password", + cmd := &cobra.Command{ + Use: "show-password", Short: "displays password of metabase.", Args: cobra.NoArgs, DisableAutoGenTag: true, @@ -457,7 +458,6 @@ func checkGroups(forceYes *bool) (*user.Group, error) { func (cli *cliDashboard) chownDatabase(gid string) error { cfg := cli.cfg() intID, err := strconv.Atoi(gid) - if err != nil { return fmt.Errorf("unable to convert group ID to int: %s", err) } diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 66c0d71e777..3581d69f052 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -1,4 +1,5 @@ //go:build !no_cscli_setup + package main import ( diff --git a/cmd/crowdsec/appsec.go b/cmd/crowdsec/appsec.go index cb02b137dcd..4320133b063 100644 --- a/cmd/crowdsec/appsec.go +++ b/cmd/crowdsec/appsec.go @@ -1,4 +1,4 @@ -// +build !no_datasource_appsec +//go:build !no_datasource_appsec package main diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 5fc02cdd1d7..b61644611b4 100644 --- a/cmd/notification-email/main.go +++ b/cmd/notification-email/main.go @@ -68,7 +68,7 @@ func (n *EmailPlugin) 
Configure(ctx context.Context, config *protobufs.Config) ( EncryptionType: "ssltls", AuthType: "login", SenderEmail: "crowdsec@crowdsec.local", - HeloHost: "localhost", + HeloHost: "localhost", } if err := yaml.Unmarshal(config.Config, &d); err != nil { diff --git a/pkg/acquisition/configuration/configuration.go b/pkg/acquisition/configuration/configuration.go index 3e27da1b9e6..a9d570d2788 100644 --- a/pkg/acquisition/configuration/configuration.go +++ b/pkg/acquisition/configuration/configuration.go @@ -13,12 +13,14 @@ type DataSourceCommonCfg struct { UseTimeMachine bool `yaml:"use_time_machine,omitempty"` UniqueId string `yaml:"unique_id,omitempty"` TransformExpr string `yaml:"transform,omitempty"` - Config map[string]interface{} `yaml:",inline"` //to keep the datasource-specific configuration directives + Config map[string]interface{} `yaml:",inline"` // to keep the datasource-specific configuration directives } -var TAIL_MODE = "tail" -var CAT_MODE = "cat" -var SERVER_MODE = "server" // No difference with tail, just a bit more verbose +var ( + TAIL_MODE = "tail" + CAT_MODE = "cat" + SERVER_MODE = "server" // No difference with tail, just a bit more verbose +) const ( METRICS_NONE = iota diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index c549d2ef1d1..d87384a0189 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -341,7 +341,6 @@ func TestAppsecOnMatchHooks(t *testing.T) { } func TestAppsecPreEvalHooks(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic pre_eval hook to disable inband rule", @@ -403,7 +402,6 @@ func TestAppsecPreEvalHooks(t *testing.T) { require.Len(t, responses, 1) require.True(t, responses[0].InBandInterrupt) - }, }, { @@ -670,7 +668,6 @@ func TestAppsecPreEvalHooks(t *testing.T) { } func TestAppsecRemediationConfigHooks(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic matching 
rule", @@ -759,6 +756,7 @@ func TestAppsecRemediationConfigHooks(t *testing.T) { }) } } + func TestOnMatchRemediationHooks(t *testing.T) { tests := []appsecRuleTest{ { diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index c6ca4fa5870..d4535d3f9a2 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -90,7 +90,6 @@ func (r *AppsecRunner) Init(datadir string) error { outbandCfg = outbandCfg.WithRequestBodyInMemoryLimit(*r.AppsecRuntime.Config.OutOfBandOptions.RequestBodyInMemoryLimit) } r.AppsecOutbandEngine, err = coraza.NewWAF(outbandCfg) - if err != nil { return fmt.Errorf("unable to initialize outband engine : %w", err) } @@ -379,7 +378,6 @@ func (r *AppsecRunner) handleRequest(request *appsec.ParsedRequest) { // time spent to process inband AND out of band rules globalParsingElapsed := time.Since(startGlobalParsing) AppsecGlobalParsingHistogram.With(prometheus.Labels{"source": request.RemoteAddrNormalized, "appsec_engine": request.AppsecEngine}).Observe(globalParsingElapsed.Seconds()) - } func (r *AppsecRunner) Run(t *tomb.Tomb) error { diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 142b5174940..c0af1002f49 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -66,7 +66,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { outofbandRules = append(outofbandRules, strRule) } - appsecCfg := appsec.AppsecConfig{Logger: logger, + appsecCfg := appsec.AppsecConfig{ + Logger: logger, OnLoad: test.on_load, PreEval: test.pre_eval, PostEval: test.post_eval, @@ -75,7 +76,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { UserBlockedHTTPCode: test.UserBlockedHTTPCode, UserPassedHTTPCode: test.UserPassedHTTPCode, DefaultRemediation: test.DefaultRemediation, - DefaultPassAction: test.DefaultPassAction} + 
DefaultPassAction: test.DefaultPassAction, + } AppsecRuntime, err := appsecCfg.Build() if err != nil { t.Fatalf("unable to build appsec runtime : %s", err) diff --git a/pkg/acquisition/modules/appsec/bodyprocessors/raw.go b/pkg/acquisition/modules/appsec/bodyprocessors/raw.go index e2e23eb57ae..aa467ecf048 100644 --- a/pkg/acquisition/modules/appsec/bodyprocessors/raw.go +++ b/pkg/acquisition/modules/appsec/bodyprocessors/raw.go @@ -9,8 +9,7 @@ import ( "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" ) -type rawBodyProcessor struct { -} +type rawBodyProcessor struct{} type setterInterface interface { Set(string) @@ -33,9 +32,7 @@ func (*rawBodyProcessor) ProcessResponse(reader io.Reader, v plugintypes.Transac return nil } -var ( - _ plugintypes.BodyProcessor = &rawBodyProcessor{} -) +var _ plugintypes.BodyProcessor = &rawBodyProcessor{} //nolint:gochecknoinits //Coraza recommends to use init() for registering plugins func init() { diff --git a/pkg/acquisition/modules/http/http.go b/pkg/acquisition/modules/http/http.go index 98af134c84e..6bb8228f32c 100644 --- a/pkg/acquisition/modules/http/http.go +++ b/pkg/acquisition/modules/http/http.go @@ -26,9 +26,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - dataSourceName = "http" -) +var dataSourceName = "http" var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go index f89ba7aa8ba..4d99134419f 100644 --- a/pkg/acquisition/modules/http/http_test.go +++ b/pkg/acquisition/modules/http/http_test.go @@ -18,9 +18,9 @@ import ( "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/tomb.v2" ) const ( @@ -257,7 +257,6 @@ basic_auth: h.Server.Close() tomb.Kill(nil) tomb.Wait() - } func 
TestStreamingAcquisitionUnknownPath(t *testing.T) { diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go index 66d842ed519..04c7053ef27 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go @@ -48,7 +48,6 @@ func WithStrictHostname() RFC3164Option { } func (r *RFC3164) parsePRI() error { - pri := 0 if r.buf[r.position] != '<' { diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go index 639e91e1224..c9aa89f7256 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go @@ -48,7 +48,6 @@ func WithStrictHostname() RFC5424Option { } func (r *RFC5424) parsePRI() error { - pri := 0 if r.buf[r.position] != '<' { @@ -94,7 +93,6 @@ func (r *RFC5424) parseVersion() error { } func (r *RFC5424) parseTimestamp() error { - timestamp := []byte{} if r.buf[r.position] == NIL_VALUE { @@ -121,7 +119,6 @@ func (r *RFC5424) parseTimestamp() error { } date, err := time.Parse(VALID_TIMESTAMP, string(timestamp)) - if err != nil { return errors.New("timestamp is not valid") } diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go index 0938e947fe7..d3a68c196db 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go @@ -94,7 +94,8 @@ func TestParse(t *testing.T) { }{ { "valid msg", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{ + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{ Timestamp: 
time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "sshd", @@ -102,11 +103,14 @@ func TestParse(t *testing.T) { MsgID: "", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with msgid", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{ + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{ Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "foobar", @@ -114,11 +118,14 @@ func TestParse(t *testing.T) { MsgID: "123123", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with repeating SD", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"][foo="bar][a] blabla`, expected{ + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"][foo="bar][a] blabla`, + expected{ Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "foobar", @@ -126,36 +133,53 @@ func TestParse(t *testing.T) { MsgID: "123123", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "invalid SD", - `<13>1 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality asd`, expected{}, "structured data must end with ']'", []RFC5424Option{}, + `<13>1 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality asd`, + expected{}, + "structured data must end with ']'", + []RFC5424Option{}, }, { "invalid version", - `<13>42 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{}, "version must be 1", []RFC5424Option{}, + `<13>42 
2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{}, + "version must be 1", + []RFC5424Option{}, }, { "invalid message", - `<13>1`, expected{}, "version must be followed by a space", []RFC5424Option{}, + `<13>1`, + expected{}, + "version must be followed by a space", + []RFC5424Option{}, }, { "valid msg with empty fields", - `<13>1 - foo - - - - blabla`, expected{ + `<13>1 - foo - - - - blabla`, + expected{ Timestamp: time.Now().UTC(), Hostname: "foo", PRI: 13, Message: "blabla", - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with empty fields", - `<13>1 - - - - - - blabla`, expected{ + `<13>1 - - - - - - blabla`, + expected{ Timestamp: time.Now().UTC(), PRI: 13, Message: "blabla", - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with escaped SD", @@ -167,7 +191,9 @@ func TestParse(t *testing.T) { Hostname: "testhostname", MsgID: `sn="msgid"`, Message: `testmessage`, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid complex msg", @@ -179,7 +205,9 @@ func TestParse(t *testing.T) { PRI: 13, MsgID: `sn="msgid"`, Message: `source: sn="www.foobar.com" | message: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 "https://www.foobar.com/sw.js" "Mozilla/5.0 (Linux; Android 9; ANE-LX1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.61 Mobile Safari/537.36" "-" "www.foobar.com" sn="www.foobar.com" rt=0.000 ua="-" us="-" ut="-" ul="-" cs=HIT { request: /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js | src_ip_geo_country: DE | MONTH: May | COMMONAPACHELOG: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 | auth: - | HOUR: 10 | gl2_remote_ip: 172.31.32.142 | ident: - | gl2_remote_port: 43375 | BASE10NUM: [2.0, 304, 0] | pid: -1 | program: nginx | gl2_source_input: 
623ed3440183476d61cff974 | INT: +0200 | is_private_ip: false | YEAR: 2022 | src_ip_geo_city: Achern | clientip: 1.1.1.1 | USERNAME:`, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "partial message", diff --git a/pkg/acquisition/modules/syslog/internal/server/syslogserver.go b/pkg/acquisition/modules/syslog/internal/server/syslogserver.go index 7118c295b54..83f5e5a57e5 100644 --- a/pkg/acquisition/modules/syslog/internal/server/syslogserver.go +++ b/pkg/acquisition/modules/syslog/internal/server/syslogserver.go @@ -25,7 +25,6 @@ type SyslogMessage struct { } func (s *SyslogServer) Listen(listenAddr string, port int) error { - s.listenAddr = listenAddr s.port = port udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", s.listenAddr, s.port)) diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index dc752ba8b09..284ff451bc2 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -239,7 +239,6 @@ func TestValidateContextExpr(t *testing.T) { } func TestAppsecEventToContext(t *testing.T) { - tests := []struct { name string contextToSend map[string][]string diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 193486ff065..c43e9fc291c 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -62,7 +62,6 @@ func (t *JWTTransport) refreshJwtToken() error { enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) err = enc.Encode(auth) - if err != nil { return fmt.Errorf("could not encode jwt auth body: %w", err) } @@ -169,7 +168,6 @@ func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) // RoundTrip implements the RoundTripper interface. 
func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { - var resp *http.Response attemptsCount := make(map[int]int) @@ -229,7 +227,6 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { } } return resp, nil - } func (t *JWTTransport) Client() *http.Client { diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 51a85b1ea23..32847f7489a 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -332,7 +332,6 @@ func getScenarioTrustOfAlert(alert *models.Alert) string { } func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig, shareSignals bool) bool { - if !shareSignals { log.Debugf("sharing signals is disabled") return false diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 3c154be4fab..df2f68930d6 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -174,7 +174,6 @@ func (a *APIKey) authPlain(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger.Infof("Creating bouncer %s", bouncerName) bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, clientIP, hashStr, types.ApiKeyAuthType, true) - if err != nil { logger.Errorf("while creating bouncer db entry: %s", err) return nil diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 553db205b5d..5f01f76d993 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -158,7 +158,6 @@ func (wc *AppsecConfig) SetUpLogger() { /* wc.Name is actually the datasource name.*/ wc.Logger = wc.Logger.Dup().WithField("name", wc.Name) wc.Logger.Logger.SetLevel(*wc.LogLevel) - } func (wc *AppsecConfig) LoadByPath(file string) error { diff --git a/pkg/appsec/appsec_rule/appsec_rule.go b/pkg/appsec/appsec_rule/appsec_rule.go index 136d8b11cb7..9d47c0eed5c 100644 --- a/pkg/appsec/appsec_rule/appsec_rule.go +++ b/pkg/appsec/appsec_rule/appsec_rule.go @@ -47,7 +47,6 @@ type CustomRule struct { } func (v *CustomRule) Convert(ruleType 
string, appsecRuleName string) (string, []uint32, error) { - if v.Zones == nil && v.And == nil && v.Or == nil { return "", nil, errors.New("no zones defined") } diff --git a/pkg/appsec/appsec_rule/modsec_rule_test.go b/pkg/appsec/appsec_rule/modsec_rule_test.go index ffb8a15ff1f..74e9b85426e 100644 --- a/pkg/appsec/appsec_rule/modsec_rule_test.go +++ b/pkg/appsec/appsec_rule/modsec_rule_test.go @@ -88,7 +88,6 @@ func TestVPatchRuleString(t *testing.T) { rule: CustomRule{ And: []CustomRule{ { - Zones: []string{"ARGS"}, Variables: []string{"foo"}, Match: Match{Type: "regex", Value: "[^a-zA-Z]"}, @@ -161,7 +160,6 @@ SecRule ARGS_GET:foo "@rx [^a-zA-Z]" "id:1519945803,phase:2,deny,log,msg:'OR AND for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { actual, _, err := tt.rule.Convert(ModsecurityRuleType, tt.name) - if err != nil { t.Errorf("Error converting rule: %s", err) } diff --git a/pkg/appsec/request_test.go b/pkg/appsec/request_test.go index f8333e4e5f9..8b457e24dab 100644 --- a/pkg/appsec/request_test.go +++ b/pkg/appsec/request_test.go @@ -3,7 +3,6 @@ package appsec import "testing" func TestBodyDumper(t *testing.T) { - tests := []struct { name string req *ParsedRequest @@ -159,7 +158,6 @@ func TestBodyDumper(t *testing.T) { } for idx, test := range tests { - t.Run(test.name, func(t *testing.T) { orig_dr := test.req.DumpRequest() result := test.filter(orig_dr).GetFilteredRequest() @@ -177,5 +175,4 @@ func TestBodyDumper(t *testing.T) { } }) } - } diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index e996fa9b68c..f53c831e186 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -91,7 +91,6 @@ func (pb *PluginBroker) Init(ctx context.Context, pluginCfg *csconfig.PluginCfg, pb.watcher = PluginWatcher{} pb.watcher.Init(pb.pluginConfigByName, pb.alertsByPluginName) return nil - } func (pb *PluginBroker) Kill() { @@ -166,6 +165,7 @@ func (pb *PluginBroker) addProfileAlert(profileAlert ProfileAlert) { pb.watcher.Inserts <- 
pluginName } } + func (pb *PluginBroker) profilesContainPlugin(pluginName string) bool { for _, profileCfg := range pb.profileConfigs { for _, name := range profileCfg.Notifications { @@ -176,6 +176,7 @@ func (pb *PluginBroker) profilesContainPlugin(pluginName string) bool { } return false } + func (pb *PluginBroker) loadConfig(path string) error { files, err := listFilesAtPath(path) if err != nil { @@ -277,7 +278,6 @@ func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { } func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (protobufs.NotifierServer, error) { - handshake, err := getHandshake() if err != nil { return nil, err diff --git a/pkg/cticlient/types.go b/pkg/cticlient/types.go index 2ad0a6eb34e..954d24641b4 100644 --- a/pkg/cticlient/types.go +++ b/pkg/cticlient/types.go @@ -210,7 +210,6 @@ func (c *SmokeItem) GetFalsePositives() []string { } func (c *SmokeItem) IsFalsePositive() bool { - if c.Classifications.FalsePositives != nil { if len(c.Classifications.FalsePositives) > 0 { return true @@ -284,7 +283,6 @@ func (c *FireItem) GetFalsePositives() []string { } func (c *FireItem) IsFalsePositive() bool { - if c.Classifications.FalsePositives != nil { if len(c.Classifications.FalsePositives) > 0 { return true diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index ccd67b27a49..9b9eac4b95c 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -12,16 +12,20 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var CTIUrl = "https://cti.api.crowdsec.net" -var CTIUrlSuffix = "/v2/smoke/" -var CTIApiKey = "" +var ( + CTIUrl = "https://cti.api.crowdsec.net" + CTIUrlSuffix = "/v2/smoke/" + CTIApiKey = "" +) // this is set for non-recoverable errors, such as 403 when querying API or empty API key var CTIApiEnabled = false // when hitting quotas or auth errors, we temporarily disable the API -var CTIBackOffUntil time.Time -var CTIBackOffDuration = 5 
* time.Minute +var ( + CTIBackOffUntil time.Time + CTIBackOffDuration = 5 * time.Minute +) var ctiClient *cticlient.CrowdsecCTIClient @@ -62,8 +66,10 @@ func ShutdownCrowdsecCTI() { } // Cache for responses -var CTICache gcache.Cache -var CacheExpiration time.Duration +var ( + CTICache gcache.Cache + CacheExpiration time.Duration +) func CrowdsecCTIInitCache(size int, ttl time.Duration) { CTICache = gcache.New(size).LRU().Build() diff --git a/pkg/exprhelpers/geoip.go b/pkg/exprhelpers/geoip.go index fb0c344d884..6d8813dc0ad 100644 --- a/pkg/exprhelpers/geoip.go +++ b/pkg/exprhelpers/geoip.go @@ -14,7 +14,6 @@ func GeoIPEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) city, err := geoIPCityReader.City(parsedIP) - if err != nil { return nil, err } @@ -31,7 +30,6 @@ func GeoIPASNEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) asn, err := geoIPASNReader.ASN(parsedIP) - if err != nil { return nil, err } @@ -50,7 +48,6 @@ func GeoIPRangeEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) rangeIP, ok, err := geoIPRangeReader.LookupNetwork(parsedIP, &dummy) - if err != nil { return nil, err } diff --git a/pkg/fflag/crowdsec.go b/pkg/fflag/crowdsec.go index d42d6a05ef6..ea397bfe5bc 100644 --- a/pkg/fflag/crowdsec.go +++ b/pkg/fflag/crowdsec.go @@ -2,12 +2,14 @@ package fflag var Crowdsec = FeatureRegister{EnvPrefix: "CROWDSEC_FEATURE_"} -var CscliSetup = &Feature{Name: "cscli_setup", Description: "Enable cscli setup command (service detection)"} -var DisableHttpRetryBackoff = &Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"} -var ChunkedDecisionsStream = &Feature{Name: "chunked_decisions_stream", Description: "Enable chunked decisions stream"} -var PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client", State: DeprecatedState} -var Re2GrokSupport = &Feature{Name: "re2_grok_support", Description: "Enable RE2 support for GROK patterns"} -var Re2RegexpInfileSupport 
= &Feature{Name: "re2_regexp_in_file_support", Description: "Enable RE2 support for RegexpInFile expr helper"} +var ( + CscliSetup = &Feature{Name: "cscli_setup", Description: "Enable cscli setup command (service detection)"} + DisableHttpRetryBackoff = &Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"} + ChunkedDecisionsStream = &Feature{Name: "chunked_decisions_stream", Description: "Enable chunked decisions stream"} + PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client", State: DeprecatedState} + Re2GrokSupport = &Feature{Name: "re2_grok_support", Description: "Enable RE2 support for GROK patterns"} + Re2RegexpInfileSupport = &Feature{Name: "re2_regexp_in_file_support", Description: "Enable RE2 support for RegexpInFile expr helper"} +) func RegisterAllFeatures() error { err := Crowdsec.RegisterFeature(CscliSetup) diff --git a/pkg/leakybucket/blackhole.go b/pkg/leakybucket/blackhole.go index b12f169acd9..bda2e7c9ed1 100644 --- a/pkg/leakybucket/blackhole.go +++ b/pkg/leakybucket/blackhole.go @@ -49,7 +49,6 @@ func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, tmp = append(tmp, element) } else { leaky.logger.Debugf("%s left blackhole %s ago", element.key, leaky.Ovflw_ts.Sub(element.expiration)) - } } bl.hiddenKeys = tmp @@ -64,5 +63,4 @@ func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, leaky.logger.Debugf("Adding overflow to blackhole (%s)", leaky.First_ts) return alert, queue } - } diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go index e981551af8f..bc81a505925 100644 --- a/pkg/leakybucket/bucket.go +++ b/pkg/leakybucket/bucket.go @@ -204,7 +204,6 @@ func FromFactory(bucketFactory BucketFactory) *Leaky { /* for now mimic a leak routine */ //LeakRoutine us the life of a bucket. 
It dies when the bucket underflows or overflows func LeakRoutine(leaky *Leaky) error { - var ( durationTickerChan = make(<-chan time.Time) durationTicker *time.Ticker diff --git a/pkg/leakybucket/buckets.go b/pkg/leakybucket/buckets.go index cfe8d7c302e..72948da1ad7 100644 --- a/pkg/leakybucket/buckets.go +++ b/pkg/leakybucket/buckets.go @@ -25,5 +25,4 @@ func NewBuckets() *Buckets { func GetKey(bucketCfg BucketFactory, stackkey string) string { return fmt.Sprintf("%x", sha1.Sum([]byte(bucketCfg.Filter+stackkey+bucketCfg.Name))) - } diff --git a/pkg/leakybucket/conditional.go b/pkg/leakybucket/conditional.go index a203a639743..b3a84b07c21 100644 --- a/pkg/leakybucket/conditional.go +++ b/pkg/leakybucket/conditional.go @@ -11,8 +11,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var conditionalExprCache map[string]vm.Program -var conditionalExprCacheLock sync.Mutex +var ( + conditionalExprCache map[string]vm.Program + conditionalExprCacheLock sync.Mutex +) type ConditionalOverflow struct { ConditionalFilter string diff --git a/pkg/leakybucket/manager_load_test.go b/pkg/leakybucket/manager_load_test.go index 513f11ff373..9d207da164e 100644 --- a/pkg/leakybucket/manager_load_test.go +++ b/pkg/leakybucket/manager_load_test.go @@ -51,93 +51,86 @@ func TestBadBucketsConfig(t *testing.T) { } func TestLeakyBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //leaky with bad capacity + CfgTests := []cfgTest{ + // leaky with bad capacity {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 0}, false, false}, - //leaky with empty leakspeed + // leaky with empty leakspeed {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1}, false, false}, - //leaky with missing filter + // leaky with missing filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s"}, false, true}, - //leaky with invalid leakspeed + // leaky with invalid leakspeed {BucketFactory{Name: "test", 
Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "abs", Filter: "true"}, false, false}, - //leaky with valid filter + // leaky with valid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, - //leaky with invalid filter + // leaky with invalid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "xu"}, false, true}, - //leaky with valid filter + // leaky with valid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, - //leaky with bad overflow filter + // leaky with bad overflow filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", OverflowFilter: "xu"}, false, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestBlackholeConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //basic bh + CfgTests := []cfgTest{ + // basic bh {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true", Blackhole: "15s"}, true, true}, - //bad bh + // bad bh {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true", Blackhole: "abc"}, false, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestTriggerBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true"}, true, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestCounterBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Duration: "5s", Filter: "true"}, true, true}, - //missing 
duration + // missing duration {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Filter: "true"}, false, false}, - //bad duration + // bad duration {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Duration: "abc", Filter: "true"}, false, false}, - //capacity must be -1 + // capacity must be -1 {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: 0, Duration: "5s", Filter: "true"}, false, false}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestBayesianBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, true, true}, - //bad capacity + // bad capacity {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: 1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //missing prior + // missing prior {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //missing threshold + // missing threshold {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //bad prior + // bad prior {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: 
-1, Filter: "true", BayesianPrior: 1.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //bad threshold + // bad threshold {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 1.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 2858d8b5635..e6712e6e47e 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -17,9 +17,11 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var serialized map[string]Leaky -var BucketPourCache map[string][]types.Event -var BucketPourTrack bool +var ( + serialized map[string]Leaky + BucketPourCache map[string][]types.Event + BucketPourTrack bool +) /* The leaky routines lifecycle are based on "real" time. 
@@ -243,7 +245,6 @@ func PourItemToBucket(bucket *Leaky, holder BucketFactory, buckets *Buckets, par } func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder BucketFactory, expectMode int) (*Leaky, error) { - biface, ok := buckets.Bucket_map.Load(partitionKey) /* the bucket doesn't exist, create it !*/ @@ -283,9 +284,7 @@ func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder B var orderEvent map[string]*sync.WaitGroup func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buckets) (bool, error) { - var ( - ok, condition, poured bool - ) + var ok, condition, poured bool if BucketPourTrack { if BucketPourCache == nil { diff --git a/pkg/leakybucket/processor.go b/pkg/leakybucket/processor.go index 81af3000c1c..dc5330a612e 100644 --- a/pkg/leakybucket/processor.go +++ b/pkg/leakybucket/processor.go @@ -10,8 +10,7 @@ type Processor interface { AfterBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event } -type DumbProcessor struct { -} +type DumbProcessor struct{} func (d *DumbProcessor) OnBucketInit(bucketFactory *BucketFactory) error { return nil diff --git a/pkg/leakybucket/reset_filter.go b/pkg/leakybucket/reset_filter.go index 452ccc085b1..3b9b876aff4 100644 --- a/pkg/leakybucket/reset_filter.go +++ b/pkg/leakybucket/reset_filter.go @@ -23,10 +23,12 @@ type CancelOnFilter struct { Debug bool } -var cancelExprCacheLock sync.Mutex -var cancelExprCache map[string]struct { - CancelOnFilter *vm.Program -} +var ( + cancelExprCacheLock sync.Mutex + cancelExprCache map[string]struct { + CancelOnFilter *vm.Program + } +) func (u *CancelOnFilter) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event { return func(msg types.Event, leaky *Leaky) *types.Event { diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index 0cc0583390b..3a4683ae309 100644 --- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -16,8 +16,10 @@ import ( // on overflow 
// on leak -var uniqExprCache map[string]vm.Program -var uniqExprCacheLock sync.Mutex +var ( + uniqExprCache map[string]vm.Program + uniqExprCacheLock sync.Mutex +) type Uniq struct { DistinctCompiled *vm.Program diff --git a/pkg/parser/enrich.go b/pkg/parser/enrich.go index 661410d20d3..a69cd963813 100644 --- a/pkg/parser/enrich.go +++ b/pkg/parser/enrich.go @@ -7,8 +7,10 @@ import ( ) /* should be part of a package shared with enrich/geoip.go */ -type EnrichFunc func(string, *types.Event, *log.Entry) (map[string]string, error) -type InitFunc func(map[string]string) (interface{}, error) +type ( + EnrichFunc func(string, *types.Event, *log.Entry) (map[string]string, error) + InitFunc func(map[string]string) (interface{}, error) +) type EnricherCtx struct { Registered map[string]*Enricher diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 1756927bc4b..79a70077283 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -18,7 +18,6 @@ func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string } r, err := exprhelpers.GeoIPRangeEnrich(field) - if err != nil { plog.Errorf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr @@ -47,7 +46,6 @@ func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, } r, err := exprhelpers.GeoIPASNEnrich(field) - if err != nil { plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr @@ -81,7 +79,6 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string } r, err := exprhelpers.GeoIPEnrich(field) - if err != nil { plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 269d51a1ba2..5f6f924e7df 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -151,7 +151,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing b.ResetTimer() } - for 
range(count) { + for range count { if !testFile(tests, *pctx, pnodes) { return errors.New("test failed") } @@ -285,7 +285,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo valid = true - for mapIdx := range(len(expectMaps)) { + for mapIdx := range len(expectMaps) { for expKey, expVal := range expectMaps[mapIdx] { outVal, ok := outMaps[mapIdx][expKey] if !ok { diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 8068690b68f..db7f467615a 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -248,14 +248,18 @@ func stageidx(stage string, stages []string) int { return -1 } -var ParseDump bool -var DumpFolder string +var ( + ParseDump bool + DumpFolder string +) -var StageParseCache dumps.ParserResults -var StageParseMutex sync.Mutex +var ( + StageParseCache dumps.ParserResults + StageParseMutex sync.Mutex +) func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) { - var event = xp + event := xp /* the stage is undefined, probably line is freshly acquired, set to first stage !*/ if event.Stage == "" && len(ctx.Stages) > 0 { diff --git a/pkg/types/appsec_event.go b/pkg/types/appsec_event.go index 11d70ad368d..54163f53fef 100644 --- a/pkg/types/appsec_event.go +++ b/pkg/types/appsec_event.go @@ -60,7 +60,6 @@ func (w AppsecEvent) GetVar(varName string) string { } log.Infof("var %s not found. 
Available variables: %+v", varName, w.Vars) return "" - } // getters diff --git a/pkg/types/constants.go b/pkg/types/constants.go index acb5b5bfacf..2421b076b97 100644 --- a/pkg/types/constants.go +++ b/pkg/types/constants.go @@ -1,23 +1,29 @@ package types -const ApiKeyAuthType = "api-key" -const TlsAuthType = "tls" -const PasswordAuthType = "password" +const ( + ApiKeyAuthType = "api-key" + TlsAuthType = "tls" + PasswordAuthType = "password" +) -const PAPIBaseURL = "https://papi.api.crowdsec.net/" -const PAPIVersion = "v1" -const PAPIPollUrl = "/decisions/stream/poll" -const PAPIPermissionsUrl = "/permissions" +const ( + PAPIBaseURL = "https://papi.api.crowdsec.net/" + PAPIVersion = "v1" + PAPIPollUrl = "/decisions/stream/poll" + PAPIPermissionsUrl = "/permissions" +) const CAPIBaseURL = "https://api.crowdsec.net/" -const CscliOrigin = "cscli" -const CrowdSecOrigin = "crowdsec" -const ConsoleOrigin = "console" -const CscliImportOrigin = "cscli-import" -const ListOrigin = "lists" -const CAPIOrigin = "CAPI" -const CommunityBlocklistPullSourceScope = "crowdsecurity/community-blocklist" +const ( + CscliOrigin = "cscli" + CrowdSecOrigin = "crowdsec" + ConsoleOrigin = "console" + CscliImportOrigin = "cscli-import" + ListOrigin = "lists" + CAPIOrigin = "CAPI" + CommunityBlocklistPullSourceScope = "crowdsecurity/community-blocklist" +) const DecisionTypeBan = "ban" diff --git a/pkg/types/event_test.go b/pkg/types/event_test.go index 97b13f96d9a..638e42fe757 100644 --- a/pkg/types/event_test.go +++ b/pkg/types/event_test.go @@ -46,7 +46,6 @@ func TestSetParsed(t *testing.T) { assert.Equal(t, tt.value, tt.evt.Parsed[tt.key]) }) } - } func TestSetMeta(t *testing.T) { @@ -86,7 +85,6 @@ func TestSetMeta(t *testing.T) { assert.Equal(t, tt.value, tt.evt.GetMeta(tt.key)) }) } - } func TestParseIPSources(t *testing.T) { diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 728e986bed0..c16fe86ec9c 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ 
-100,7 +100,6 @@ func GetFSType(path string) (string, error) { var buf unix.Statfs_t err := unix.Statfs(path, &buf) - if err != nil { return "", err } diff --git a/pkg/types/ip.go b/pkg/types/ip.go index 9d08afd8809..47fb3fc83a5 100644 --- a/pkg/types/ip.go +++ b/pkg/types/ip.go @@ -23,7 +23,8 @@ func LastAddress(n net.IPNet) net.IP { ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], - ip[15] | ^n.Mask[15]} + ip[15] | ^n.Mask[15], + } } return net.IPv4( diff --git a/pkg/types/ip_test.go b/pkg/types/ip_test.go index f8c14b12e3c..b9298ba487f 100644 --- a/pkg/types/ip_test.go +++ b/pkg/types/ip_test.go @@ -8,21 +8,20 @@ import ( ) func TestIP2Int(t *testing.T) { - tEmpty := net.IP{} _, _, _, err := IP2Ints(tEmpty) if !strings.Contains(err.Error(), "unexpected len 0 for ") { t.Fatalf("unexpected: %s", err) } } + func TestRange2Int(t *testing.T) { tEmpty := net.IPNet{} - //empty item + // empty item _, _, _, _, _, err := Range2Ints(tEmpty) if !strings.Contains(err.Error(), "converting first ip in range") { t.Fatalf("unexpected: %s", err) } - } func TestAdd2Int(t *testing.T) { diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 712d44ba12d..3e1ae4f7547 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -10,9 +10,11 @@ import ( "gopkg.in/natefinch/lumberjack.v2" ) -var logFormatter log.Formatter -var LogOutput *lumberjack.Logger //io.Writer -var logLevel log.Level +var ( + logFormatter log.Formatter + LogOutput *lumberjack.Logger // io.Writer + logLevel log.Level +) func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, compress *bool, forceColors bool) error { /*Configure logs*/ From 411bb48a815c6de0ac6a3700b88eac19bbeb1eef Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 5 Dec 2024 18:04:26 +0100 Subject: [PATCH 
359/581] loop performance optimizations / 1 (#3313) * rangeValCopy: each iteration copies 248 bytes * rangeValCopy: each iteration copies 576 bytes * rangeValCopy: each iteration copies 376 bytes * rangeValCopy: each iteration copies 312 bytes * enable linter: gocritic/rangeValCopy --- .golangci.yml | 21 ++++++++++- .../modules/kubernetesaudit/k8s_audit.go | 23 ++++++++++-- pkg/leakybucket/overflows.go | 27 +++++++------- pkg/parser/node.go | 14 ++++---- pkg/parser/runtime.go | 35 ++++++++++++------- pkg/types/event.go | 6 ++-- 6 files changed, 89 insertions(+), 37 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index fd595994e7c..7217c6da2b1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -183,7 +183,6 @@ linters-settings: - ifElseChain - importShadow - hugeParam - - rangeValCopy - commentedOutCode - commentedOutImport - unnamedResult @@ -465,3 +464,23 @@ issues: - recvcheck path: "pkg/cwhub/item.go" text: 'the methods of "Item" use pointer receiver and non-pointer receiver.' + + - linters: + - gocritic + path: "cmd/crowdsec-cli" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "pkg/(cticlient|hubtest)" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "(.+)_test.go" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "pkg/(appsec|acquisition|dumps|alertcontext|leakybucket|exprhelpers)" + text: "rangeValCopy: .*" diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 1fa6c894a32..aaa83a3bbb2 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -66,6 +66,7 @@ func (ka *KubernetesAuditSource) GetAggregMetrics() []prometheus.Collector { func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { k8sConfig := KubernetesAuditConfiguration{} + err := yaml.UnmarshalStrict(yamlConfig, &k8sConfig) if err != nil { return fmt.Errorf("cannot parse k8s-audit 
configuration: %w", err) @@ -92,6 +93,7 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { if ka.config.Mode == "" { ka.config.Mode = configuration.TAIL_MODE } + return nil } @@ -116,6 +118,7 @@ func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, Met } ka.mux.HandleFunc(ka.config.WebhookPath, ka.webhookHandler) + return nil } @@ -137,6 +140,7 @@ func (ka *KubernetesAuditSource) OneShotAcquisition(_ context.Context, _ chan ty func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { ka.outChan = out + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/k8s-audit/live") ka.logger.Infof("Starting k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) @@ -145,13 +149,16 @@ func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out c if err != nil && err != http.ErrServerClosed { return fmt.Errorf("k8s-audit server failed: %w", err) } + return nil }) <-t.Dying() ka.logger.Infof("Stopping k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) ka.server.Shutdown(ctx) + return nil }) + return nil } @@ -167,42 +174,52 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R if ka.metricsLevel != configuration.METRICS_NONE { requestCount.WithLabelValues(ka.addr).Inc() } + if r.Method != http.MethodPost { w.WriteHeader(http.StatusMethodNotAllowed) return } + ka.logger.Tracef("webhookHandler called") + var auditEvents audit.EventList jsonBody, err := io.ReadAll(r.Body) if err != nil { ka.logger.Errorf("Error reading request body: %v", err) w.WriteHeader(http.StatusInternalServerError) + return } + ka.logger.Tracef("webhookHandler receveid: %s", string(jsonBody)) + err = json.Unmarshal(jsonBody, &auditEvents) if err != nil { ka.logger.Errorf("Error decoding audit events: %s", err) w.WriteHeader(http.StatusInternalServerError) + 
return } remoteIP := strings.Split(r.RemoteAddr, ":")[0] - for _, auditEvent := range auditEvents.Items { + + for idx := range auditEvents.Items { if ka.metricsLevel != configuration.METRICS_NONE { eventCount.WithLabelValues(ka.addr).Inc() } - bytesEvent, err := json.Marshal(auditEvent) + + bytesEvent, err := json.Marshal(auditEvents.Items[idx]) if err != nil { ka.logger.Errorf("Error serializing audit event: %s", err) continue } + ka.logger.Tracef("Got audit event: %s", string(bytesEvent)) l := types.Line{ Raw: string(bytesEvent), Labels: ka.config.Labels, - Time: auditEvent.StageTimestamp.Time, + Time: auditEvents.Items[idx].StageTimestamp.Time, Src: remoteIP, Process: true, Module: ka.GetName(), diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 39b0e6a0ec4..126bcd05685 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -198,22 +198,24 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro func EventsFromQueue(queue *types.Queue) []*models.Event { events := []*models.Event{} - for _, evt := range queue.Queue { - if evt.Meta == nil { + qEvents := queue.GetQueue() + + for idx := range qEvents { + if qEvents[idx].Meta == nil { continue } meta := models.Meta{} // we want consistence - skeys := make([]string, 0, len(evt.Meta)) - for k := range evt.Meta { + skeys := make([]string, 0, len(qEvents[idx].Meta)) + for k := range qEvents[idx].Meta { skeys = append(skeys, k) } sort.Strings(skeys) for _, k := range skeys { - v := evt.Meta[k] + v := qEvents[idx].Meta[k] subMeta := models.MetaItems0{Key: k, Value: v} meta = append(meta, &subMeta) } @@ -223,15 +225,15 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { Meta: meta, } // either MarshaledTime is present and is extracted from log - if evt.MarshaledTime != "" { - tmpTimeStamp := evt.MarshaledTime + if qEvents[idx].MarshaledTime != "" { + tmpTimeStamp := qEvents[idx].MarshaledTime ovflwEvent.Timestamp = &tmpTimeStamp - } 
else if !evt.Time.IsZero() { // or .Time has been set during parse as time.Now().UTC() + } else if !qEvents[idx].Time.IsZero() { // or .Time has been set during parse as time.Now().UTC() ovflwEvent.Timestamp = new(string) - raw, err := evt.Time.MarshalText() + raw, err := qEvents[idx].Time.MarshalText() if err != nil { - log.Warningf("while serializing time '%s' : %s", evt.Time.String(), err) + log.Warningf("while serializing time '%s' : %s", qEvents[idx].Time.String(), err) } else { *ovflwEvent.Timestamp = string(raw) } @@ -253,8 +255,9 @@ func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Sour log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, leaky.scopeType.Scope, leaky.scopeType.Filter) - for _, evt := range queue.Queue { - srcs, err := SourceFromEvent(evt, leaky) + qEvents := queue.GetQueue() + for idx := range qEvents { + srcs, err := SourceFromEvent(qEvents[idx], leaky) if err != nil { return nil, "", fmt.Errorf("while extracting scope from bucket %s: %w", leaky.Name, err) } diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 26046ae4fd6..62a1ff6c4e2 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -3,6 +3,7 @@ package parser import ( "errors" "fmt" + "strconv" "strings" "time" @@ -236,7 +237,7 @@ func (n *Node) processGrok(p *types.Event, cachedExprEnv map[string]any) (bool, case string: gstr = out case int: - gstr = fmt.Sprintf("%d", out) + gstr = strconv.Itoa(out) case float64, float32: gstr = fmt.Sprintf("%f", out) default: @@ -357,16 +358,17 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri } // Iterate on leafs - for _, leaf := range n.LeavesNodes { - ret, err := leaf.process(p, ctx, cachedExprEnv) + leaves := n.LeavesNodes + for idx := range leaves { + ret, err := leaves[idx].process(p, ctx, cachedExprEnv) if err != nil { - clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) + clog.Tracef("\tNode (%s) failed : %v", leaves[idx].rn, err) 
clog.Debugf("Event leaving node : ko") return false, err } - clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) + clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaves[idx].rn, ret, n.OnSuccess) if ret { NodeState = true @@ -593,7 +595,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* compile leafs if present */ for idx := range n.LeavesNodes { if n.LeavesNodes[idx].Name == "" { - n.LeavesNodes[idx].Name = fmt.Sprintf("child-%s", n.Name) + n.LeavesNodes[idx].Name = "child-" + n.Name } /*propagate debug/stats to child nodes*/ if !n.LeavesNodes[idx].Debug && n.Debug { diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index db7f467615a..7af82a71535 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -29,10 +29,11 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { return false } - //it's a hack, we do it for the user + // it's a hack, we do it for the user target = strings.TrimPrefix(target, "evt.") log.Debugf("setting target %s to %s", target, value) + defer func() { if r := recover(); r != nil { log.Errorf("Runtime error while trying to set '%s': %+v", target, r) @@ -46,6 +47,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { //event is nil return false } + for _, f := range strings.Split(target, ".") { /* ** According to current Event layout we only have to handle struct and map @@ -57,7 +59,9 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { if (tmp == reflect.Value{}) || tmp.IsZero() { log.Debugf("map entry is zero in '%s'", target) } + iter.SetMapIndex(reflect.ValueOf(f), reflect.ValueOf(value)) + return true case reflect.Struct: tmp := iter.FieldByName(f) @@ -65,9 +69,11 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { log.Debugf("'%s' is not a valid target because '%s' is not valid", target, f) return false } + if tmp.Kind() == reflect.Ptr { tmp = 
reflect.Indirect(tmp) } + iter = tmp case reflect.Ptr: tmp := iter.Elem() @@ -82,11 +88,14 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { log.Errorf("'%s' can't be set", target) return false } + if iter.Kind() != reflect.String { log.Errorf("Expected string, got %v when handling '%s'", iter.Kind(), target) return false } + iter.Set(reflect.ValueOf(value)) + return true } @@ -321,46 +330,46 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) } isStageOK := false - for idx, node := range nodes { + for idx := range nodes { //Only process current stage's nodes - if event.Stage != node.Stage { + if event.Stage != nodes[idx].Stage { continue } clog := log.WithFields(log.Fields{ - "node-name": node.rn, + "node-name": nodes[idx].rn, "stage": event.Stage, }) - clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), node.rn) + clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), nodes[idx].rn) if ctx.Profiling { - node.Profiling = true + nodes[idx].Profiling = true } - ret, err := node.process(&event, ctx, map[string]interface{}{"evt": &event}) + ret, err := nodes[idx].process(&event, ctx, map[string]interface{}{"evt": &event}) if err != nil { clog.Errorf("Error while processing node : %v", err) return event, err } - clog.Tracef("node (%s) ret : %v", node.rn, ret) + clog.Tracef("node (%s) ret : %v", nodes[idx].rn, ret) if ParseDump { var parserIdxInStage int StageParseMutex.Lock() - if len(StageParseCache[stage][node.Name]) == 0 { - StageParseCache[stage][node.Name] = make([]dumps.ParserResult, 0) + if len(StageParseCache[stage][nodes[idx].Name]) == 0 { + StageParseCache[stage][nodes[idx].Name] = make([]dumps.ParserResult, 0) parserIdxInStage = len(StageParseCache[stage]) } else { - parserIdxInStage = StageParseCache[stage][node.Name][0].Idx + parserIdxInStage = StageParseCache[stage][nodes[idx].Name][0].Idx } StageParseMutex.Unlock() evtcopy := deepcopy.Copy(event) parserInfo := 
dumps.ParserResult{Evt: evtcopy.(types.Event), Success: ret, Idx: parserIdxInStage} StageParseMutex.Lock() - StageParseCache[stage][node.Name] = append(StageParseCache[stage][node.Name], parserInfo) + StageParseCache[stage][nodes[idx].Name] = append(StageParseCache[stage][nodes[idx].Name], parserInfo) StageParseMutex.Unlock() } if ret { isStageOK = true } - if ret && node.OnSuccess == "next_stage" { + if ret && nodes[idx].OnSuccess == "next_stage" { clog.Debugf("node successful, stop end stage %s", stage) break } diff --git a/pkg/types/event.go b/pkg/types/event.go index 9300626b927..0b09bf7cbdf 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -60,6 +60,7 @@ func MakeEvent(timeMachine bool, evtType int, process bool) Event { if timeMachine { evt.ExpectMode = TIMEMACHINE } + return evt } @@ -97,8 +98,9 @@ func (e *Event) GetType() string { func (e *Event) GetMeta(key string) string { if e.Type == OVFLW { - for _, alert := range e.Overflow.APIAlerts { - for _, event := range alert.Events { + alerts := e.Overflow.APIAlerts + for idx := range alerts { + for _, event := range alerts[idx].Events { if event.GetMeta(key) != "" { return event.GetMeta(key) } From 88c5f8506478a594da558bf4563bc3d1c006dcc6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 10 Dec 2024 13:19:56 +0100 Subject: [PATCH 360/581] sigmahq: fix functional test (#3359) --- cmd/crowdsec-cli/clihub/items.go | 4 ++-- test/bats/20_hub_scenarios.bats | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index f86fe65a2a1..ef3127033ac 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -97,7 +97,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st Name: item.Name, LocalVersion: item.State.LocalVersion, LocalPath: item.State.LocalPath, - Description: item.Description, + Description: 
strings.TrimSpace(item.Description), Status: status, UTF8Status: fmt.Sprintf("%v %s", statusEmo, status), } @@ -128,7 +128,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st item.Name, item.State.Text(), item.State.LocalVersion, - item.Description, + strings.TrimSpace(item.Description), } if len(itemTypes) > 1 { row = append(row, itemType) diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats index 3ab3d944c93..b5f3a642233 100644 --- a/test/bats/20_hub_scenarios.bats +++ b/test/bats/20_hub_scenarios.bats @@ -85,7 +85,8 @@ teardown() { list_human=$(cscli scenarios list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) list_json=$(cscli scenarios list -o json -a | jq -r '.scenarios[].name') - rune -0 sort -f <<<"$list_raw" + # use python to sort because it handles "_" like go + rune -0 python3 -c 'import sys; print("".join(sorted(sys.stdin.readlines(), key=str.casefold)), end="")' <<<"$list_raw" assert_output "$list_raw" assert_equal "$list_raw" "$list_json" From dada07b2dac08ab79c176f36048649b7c16b717c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 11 Dec 2024 14:02:22 +0100 Subject: [PATCH 361/581] cscli: display expired decisions' expiration time in red (#3357) * cscli decisions: display negative expiration times in red * honor color flag --- cmd/crowdsec-cli/clidecision/table.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/cmd/crowdsec-cli/clidecision/table.go b/cmd/crowdsec-cli/clidecision/table.go index 189eb80b8e5..4beda572d8e 100644 --- a/cmd/crowdsec-cli/clidecision/table.go +++ b/cmd/crowdsec-cli/clidecision/table.go @@ -3,13 +3,17 @@ package clidecision import ( "io" "strconv" + "strings" + + "github.com/fatih/color" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/models" ) func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, 
printMachine bool) { - t := cstable.New(out, cli.cfg().Cscli.Color) + wantColor := cli.cfg().Cscli.Color + t := cstable.New(out, wantColor) t.SetRowLines(false) header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"} @@ -25,6 +29,11 @@ func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsR *decisionItem.Type = "(simul)" + *decisionItem.Type } + duration := *decisionItem.Duration + if strings.HasPrefix(duration, "-") && wantColor != "no" { + duration = color.RedString(duration) + } + row := []string{ strconv.Itoa(int(decisionItem.ID)), *decisionItem.Origin, @@ -34,7 +43,7 @@ func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsR alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), strconv.Itoa(int(*alertItem.EventsCount)), - *decisionItem.Duration, + duration, strconv.Itoa(int(alertItem.ID)), } From d35d01fd9a7c69fe6848d2c32bf7748ad6d8d056 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 11 Dec 2024 17:57:07 +0100 Subject: [PATCH 362/581] support dump: generate pprof files with debug=1 to avoid generating an unusable heap dump (#3361) --- cmd/crowdsec-cli/clisupport/support.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 4474f5c8f11..a21d00e3b20 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -314,7 +314,7 @@ func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, prometheus ctx, http.MethodGet, fmt.Sprintf( - "http://%s/debug/pprof/%s?debug=1", + "http://%s/debug/pprof/%s", net.JoinHostPort( prometheusCfg.ListenAddr, strconv.Itoa(prometheusCfg.ListenPort), From 08296d9cfaaf851bba505c11ab598c1e51b365b2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 13 Dec 2024 11:00:41 +0100 Subject: [PATCH 363/581] acquisition: add some test and 
warning for wrong source type (#3362) --- .gitignore | 3 ++ pkg/acquisition/acquisition.go | 28 ++++++----- pkg/acquisition/acquisition_test.go | 9 ++-- test/bats/01_crowdsec.bats | 5 +- test/bats/crowdsec-acquisition.bats | 78 +++++++++++++++++++++++++++++ 5 files changed, 105 insertions(+), 18 deletions(-) create mode 100644 test/bats/crowdsec-acquisition.bats diff --git a/.gitignore b/.gitignore index 6e6624fd282..cba570fdb84 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,9 @@ # Test dependencies test/tools/* +# Saved test status +test/bats/.bats/run-logs + # VMs used for dev/test .vagrant diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index ef5a413b91f..6ac47a8cdf1 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -140,7 +140,7 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics } /* configure the actual datasource */ if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { - return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) + return nil, err } return &dataSrc, nil @@ -164,8 +164,6 @@ func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string { } func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr string) ([]DataSource, error) { - var sources []DataSource - frags := strings.Split(dsn, ":") if len(frags) == 1 { return nil, fmt.Errorf("%s isn't valid dsn (no protocol)", dsn) @@ -197,9 +195,7 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr return nil, fmt.Errorf("while configuration datasource for %s: %w", dsn, err) } - sources = append(sources, dataSrc) - - return sources, nil + return []DataSource{dataSrc}, nil } func GetMetricsLevelFromPromCfg(prom *csconfig.PrometheusCfg) int { @@ -249,7 +245,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig err = dec.Decode(&sub) if err != nil { if 
!errors.Is(err, io.EOF) { - return nil, fmt.Errorf("failed to yaml decode %s: %w", acquisFile, err) + return nil, fmt.Errorf("failed to parse %s: %w", acquisFile, err) } log.Tracef("End of yaml file") @@ -259,6 +255,12 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig // for backward compat ('type' was not mandatory, detect it) if guessType := detectBackwardCompatAcquis(sub); guessType != "" { + log.Debugf("datasource type missing in %s (position %d): detected 'source=%s'", acquisFile, idx, guessType) + + if sub.Source != "" && sub.Source != guessType { + log.Warnf("datasource type mismatch in %s (position %d): found '%s' but should probably be '%s'", acquisFile, idx, sub.Source, guessType) + } + sub.Source = guessType } // it's an empty item, skip it @@ -270,18 +272,18 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig if sub.Source != "docker" { // docker is the only source that can be empty - return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) + return nil, fmt.Errorf("missing labels in %s (position %d)", acquisFile, idx) } } if sub.Source == "" { - return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx) + return nil, fmt.Errorf("data source type is empty ('source') in %s (position %d)", acquisFile, idx) } // pre-check that the source is valid _, err := GetDataSourceIface(sub.Source) if err != nil { - return nil, fmt.Errorf("in file %s (position: %d) - %w", acquisFile, idx, err) + return nil, fmt.Errorf("in file %s (position %d) - %w", acquisFile, idx, err) } uniqueId := uuid.NewString() @@ -295,13 +297,13 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig continue } - return nil, fmt.Errorf("while configuring datasource of type %s from %s (position: %d): %w", sub.Source, acquisFile, idx, err) + return nil, fmt.Errorf("while configuring datasource of type %s from %s (position %d): %w", 
sub.Source, acquisFile, idx, err) } if sub.TransformExpr != "" { vm, err := expr.Compile(sub.TransformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { - return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position: %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) + return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) } transformRuntimes[uniqueId] = vm @@ -344,6 +346,7 @@ func copyEvent(evt types.Event, line string) types.Event { evtCopy.Line = evt.Line evtCopy.Line.Raw = line evtCopy.Line.Labels = make(map[string]string) + for k, v := range evt.Line.Labels { evtCopy.Line.Labels[k] = v } @@ -386,6 +389,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo if !ok { logger.Errorf("transform expression returned []interface{}, but cannot assert an element to string") output <- evt + continue } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index dd70172cf62..671426d344b 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -140,7 +140,7 @@ log_level: debug source: mock toto: test_value1 `, - ExpectedError: "failed to configure datasource mock: mode ratata is not supported", + ExpectedError: "mode ratata is not supported", }, { TestName: "bad_type_config", @@ -182,7 +182,8 @@ wowo: ajsajasjas for _, tc := range tests { t.Run(tc.TestName, func(t *testing.T) { common := configuration.DataSourceCommonCfg{} - yaml.Unmarshal([]byte(tc.String), &common) + err := yaml.Unmarshal([]byte(tc.String), &common) + require.NoError(t, err) ds, err := DataSourceConfigure(common, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.ExpectedError) @@ -236,7 +237,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ 
AcquisitionFiles: []string{"test_files/badyaml.yaml"}, }, - ExpectedError: "failed to yaml decode test_files/badyaml.yaml: yaml: unmarshal errors", + ExpectedError: "failed to parse test_files/badyaml.yaml: yaml: unmarshal errors", ExpectedLen: 0, }, { @@ -272,7 +273,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ AcquisitionFiles: []string{"test_files/bad_source.yaml"}, }, - ExpectedError: "in file test_files/bad_source.yaml (position: 0) - unknown data source does_not_exist", + ExpectedError: "in file test_files/bad_source.yaml (position 0) - unknown data source does_not_exist", }, { TestName: "invalid_filetype_config", diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index aa5830a6bae..a768a8d4d28 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u @@ -138,6 +137,8 @@ teardown() { rune -0 ./instance-crowdsec stop } +# TODO: move acquisition tests to test/bats/crowdsec-acquisition.bats + @test "crowdsec (error if the acquisition_path file is defined but missing)" { ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') rm -f "$ACQUIS_YAML" @@ -278,7 +279,7 @@ teardown() { # if filenames are missing, it won't be able to detect source type config_set "$ACQUIS_YAML" '.source="file"' rune -1 wait-for "$CROWDSEC" - assert_stderr --partial "failed to configure datasource file: no filename or filenames configuration provided" + assert_stderr --partial "while configuring datasource of type file from $ACQUIS_YAML (position 0): no filename or filenames configuration provided" config_set "$ACQUIS_YAML" '.filenames=["file.log"]' config_set "$ACQUIS_YAML" '.meh=3' diff --git a/test/bats/crowdsec-acquisition.bats b/test/bats/crowdsec-acquisition.bats new file mode 100644 index 00000000000..5189790f01f --- /dev/null +++ b/test/bats/crowdsec-acquisition.bats @@ -0,0 +1,78 @@ +#!/usr/bin/env bats + 
+set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + mkdir -p "$ACQUIS_DIR" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "malformed acqusition file" { + cat >"$ACQUIS_DIR/file.yaml" <<-EOT + filename: + - /path/to/file.log + labels: + type: syslog + EOT + + rune -1 "$CROWDSEC" -t + assert_stderr --partial "crowdsec init: while loading acquisition config: while configuring datasource of type file from $ACQUIS_DIR/file.yaml (position 0): cannot parse FileAcquisition configuration: yaml: unmarshal errors:\n line 6: cannot unmarshal !!seq into string" +} + +@test "datasource type detection" { + config_set '.common.log_level="debug" | .common.log_media="stdout"' + + # for backward compatibility, a missing source type is not a problem if it can be detected by the presence of other fields + + cat >"$ACQUIS_DIR/file.yaml" <<-EOT + filename: /path/to/file.log + labels: + type: syslog + --- + filenames: + - /path/to/file.log + labels: + type: syslog + EOT + + cat >"$ACQUIS_DIR"/journal.yaml <<-EOT + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + # However, a wrong source type will raise a brow. + # This is currently not a fatal error because it has been tolerated in the past. 
+ + cat >"$ACQUIS_DIR"/bad.yaml <<-EOT + source: docker + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + rune -0 "$CROWDSEC" -t + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/file.yaml (position 0): detected 'source=file'" + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/file.yaml (position 1): detected 'source=file'" + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/journal.yaml (position 0): detected 'source=journalctl'" + assert_stderr --partial "datasource type mismatch in $ACQUIS_DIR/bad.yaml (position 0): found 'docker' but should probably be 'journalctl'" +} From 118275fd03ad7d56ddba4e391e119e741f7cc82b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 13 Dec 2024 15:42:55 +0100 Subject: [PATCH 364/581] lint: enable more gocritic checks (#3363) * lint: gocritic - avoid unnecessary byte->string conversions * lint: gocritic - use %q instead of "%s" * lint: return struct from DataSource constructor, not pointer (ptrToRefParam) * lint: gocritic - check use of append() --- .golangci.yml | 4 --- pkg/acquisition/acquisition.go | 6 ++-- pkg/acquisition/acquisition_test.go | 6 ++-- .../modules/journalctl/journalctl.go | 33 ++++++++++++++++--- .../modules/journalctl/journalctl_test.go | 5 +-- pkg/apiserver/apiserver.go | 2 +- pkg/csprofiles/csprofiles_test.go | 7 +++- pkg/hubtest/parser_assert.go | 6 ++-- 8 files changed, 47 insertions(+), 22 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 7217c6da2b1..a1b215ae8fd 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -192,13 +192,9 @@ linters-settings: - typeUnparen - commentFormatting - deferInLoop # - - sprintfQuotedString # - whyNoLint - equalFold # - unnecessaryBlock # - - ptrToRefParam # - - stringXbytes # - - appendAssign # - tooManyResultsChecker - unnecessaryDefer - docStub diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 
6ac47a8cdf1..4e233aad616 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -116,7 +116,7 @@ func setupLogger(source, name string, level *log.Level) (*log.Entry, error) { // if the configuration is not valid it returns an error. // If the datasource can't be run (eg. journalctl not available), it still returns an error which // can be checked for the appropriate action. -func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metricsLevel int) (*DataSource, error) { +func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metricsLevel int) (DataSource, error) { // we dump it back to []byte, because we want to decode the yaml blob twice: // once to DataSourceCommonCfg, and then later to the dedicated type of the datasource yamlConfig, err := yaml.Marshal(commonConfig) @@ -143,7 +143,7 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics return nil, err } - return &dataSrc, nil + return dataSrc, nil } // detectBackwardCompatAcquis: try to magically detect the type for backward compat (type was not mandatory then) @@ -309,7 +309,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig transformRuntimes[uniqueId] = vm } - sources = append(sources, *src) + sources = append(sources, src) } } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index 671426d344b..cfe1e74c612 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -193,19 +193,19 @@ wowo: ajsajasjas switch tc.TestName { case "basic_valid_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "cat", mock.Mode) assert.Equal(t, log.InfoLevel, mock.logger.Logger.Level) assert.Equal(t, map[string]string{"test": "foobar"}, mock.Labels) case "basic_debug_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) 
assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "cat", mock.Mode) assert.Equal(t, log.DebugLevel, mock.logger.Logger.Level) assert.Equal(t, map[string]string{"test": "foobar"}, mock.Labels) case "basic_tailmode_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "tail", mock.Mode) assert.Equal(t, log.DebugLevel, mock.logger.Logger.Level) diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 27f20b9f446..47d90e2b3a0 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -53,15 +53,18 @@ func readLine(scanner *bufio.Scanner, out chan string, errChan chan error) error txt := scanner.Text() out <- txt } + if errChan != nil && scanner.Err() != nil { errChan <- scanner.Err() close(errChan) // the error is already consumed by runJournalCtl return nil //nolint:nilerr } + if errChan != nil { close(errChan) } + return nil } @@ -69,15 +72,17 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve ctx, cancel := context.WithCancel(ctx) cmd := exec.CommandContext(ctx, journalctlCmd, j.args...) 
+ stdout, err := cmd.StdoutPipe() if err != nil { cancel() - return fmt.Errorf("could not get journalctl stdout: %s", err) + return fmt.Errorf("could not get journalctl stdout: %w", err) } + stderr, err := cmd.StderrPipe() if err != nil { cancel() - return fmt.Errorf("could not get journalctl stderr: %s", err) + return fmt.Errorf("could not get journalctl stderr: %w", err) } stderrChan := make(chan string) @@ -87,6 +92,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger := j.logger.WithField("src", j.src) logger.Infof("Running journalctl command: %s %s", cmd.Path, cmd.Args) + err = cmd.Start() if err != nil { cancel() @@ -109,9 +115,11 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve cmd.Wait() return errors.New("failed to create stderr scanner") } + t.Go(func() error { return readLine(stdoutscanner, stdoutChan, errChan) }) + t.Go(func() error { // looks like journalctl closes stderr quite early, so ignore its status (but not its output) return readLine(stderrScanner, stderrChan, nil) @@ -123,6 +131,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger.Infof("journalctl datasource %s stopping", j.src) cancel() cmd.Wait() // avoid zombie process + return nil case stdoutLine := <-stdoutChan: l := types.Line{} @@ -133,6 +142,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve l.Src = j.src l.Process = true l.Module = j.GetName() + if j.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": j.src}).Inc() } @@ -149,6 +159,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger.Debugf("errChan is closed, quitting") t.Kill(nil) } + if errScanner != nil { t.Kill(errScanner) } @@ -170,6 +181,7 @@ func (j *JournalCtlSource) GetAggregMetrics() []prometheus.Collector { func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { j.config = 
JournalCtlConfiguration{} + err := yaml.UnmarshalStrict(yamlConfig, &j.config) if err != nil { return fmt.Errorf("cannot parse JournalCtlSource configuration: %w", err) @@ -189,8 +201,11 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { if len(j.config.Filters) == 0 { return errors.New("journalctl_filter is required") } - j.args = append(args, j.config.Filters...) - j.src = fmt.Sprintf("journalctl-%s", strings.Join(j.config.Filters, ".")) + + args = append(args, j.config.Filters...) + + j.args = args + j.src = "journalctl-%s" + strings.Join(j.config.Filters, ".") return nil } @@ -226,8 +241,9 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, params, err := url.ParseQuery(qs) if err != nil { - return fmt.Errorf("could not parse journalctl DSN : %s", err) + return fmt.Errorf("could not parse journalctl DSN: %w", err) } + for key, value := range params { switch key { case "filters": @@ -236,10 +252,12 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, if len(value) != 1 { return errors.New("expected zero or one value for 'log_level'") } + lvl, err := log.ParseLevel(value[0]) if err != nil { return fmt.Errorf("unknown level %s: %w", value[0], err) } + j.logger.Logger.SetLevel(lvl) case "since": j.args = append(j.args, "--since", value[0]) @@ -247,7 +265,9 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, return fmt.Errorf("unsupported key %s in journalctl DSN", key) } } + j.args = append(j.args, j.config.Filters...) 
+ return nil } @@ -261,8 +281,10 @@ func (j *JournalCtlSource) GetName() string { func (j *JournalCtlSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { defer trace.CatchPanic("crowdsec/acquis/journalctl/oneshot") + err := j.runJournalCtl(ctx, out, t) j.logger.Debug("Oneshot journalctl acquisition is done") + return err } @@ -271,6 +293,7 @@ func (j *JournalCtlSource) StreamingAcquisition(ctx context.Context, out chan ty defer trace.CatchPanic("crowdsec/acquis/journalctl/streaming") return j.runJournalCtl(ctx, out, t) }) + return nil } diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 687067c1881..fedbed6b707 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -81,7 +81,7 @@ func TestConfigureDSN(t *testing.T) { }, { dsn: "journalctl://filters=%ZZ", - expectedErr: "could not parse journalctl DSN : invalid URL escape \"%ZZ\"", + expectedErr: "could not parse journalctl DSN: invalid URL escape \"%ZZ\"", }, { dsn: "journalctl://filters=_UID=42?log_level=warn", @@ -191,6 +191,7 @@ journalctl_filter: func TestStreaming(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -270,7 +271,7 @@ journalctl_filter: tomb.Wait() output, _ := exec.Command("pgrep", "-x", "journalctl").CombinedOutput() - if string(output) != "" { + if len(output) != 0 { t.Fatalf("Found a journalctl process after killing the tomb !") } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 05f9150b037..e1d9ce95349 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -209,7 +209,7 @@ func NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APISer gin.DefaultWriter = clog.Writer() router.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string { - return fmt.Sprintf("%s - 
[%s] \"%s %s %s %d %s \"%s\" %s\"\n", + return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s %q %s\"\n", param.ClientIP, param.TimeStamp.Format(time.RFC1123), param.Method, diff --git a/pkg/csprofiles/csprofiles_test.go b/pkg/csprofiles/csprofiles_test.go index 0247243ddd3..d09bf25d95b 100644 --- a/pkg/csprofiles/csprofiles_test.go +++ b/pkg/csprofiles/csprofiles_test.go @@ -132,7 +132,7 @@ func TestEvaluateProfile(t *testing.T) { name: "simple pass single expr", args: args{ profileCfg: &csconfig.ProfileCfg{ - Filters: []string{fmt.Sprintf("Alert.GetScenario() == \"%s\"", scenario)}, + Filters: []string{fmt.Sprintf("Alert.GetScenario() == %q", scenario)}, Debug: &boolFalse, }, Alert: &models.Alert{Remediation: true, Scenario: &scenario}, @@ -199,17 +199,22 @@ func TestEvaluateProfile(t *testing.T) { profilesCfg := []*csconfig.ProfileCfg{ tt.args.profileCfg, } + profile, err := NewProfile(profilesCfg) if err != nil { t.Errorf("failed to get newProfile : %+v", err) } + got, got1, _ := profile[0].EvaluateProfile(tt.args.Alert) + if !reflect.DeepEqual(len(got), tt.expectedDecisionCount) { t.Errorf("EvaluateProfile() got = %+v, want %+v", got, tt.expectedDecisionCount) } + if got1 != tt.expectedMatchStatus { t.Errorf("EvaluateProfile() got1 = %v, want %v", got1, tt.expectedMatchStatus) } + if tt.expectedDuration != "" { require.Equal(t, tt.expectedDuration, *got[0].Duration, "The two durations should be the same") } diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index be4fdbdb5e6..90d952506d1 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -270,7 +270,7 @@ func (p *ParserAssert) AutoGenParserAssert() string { continue } - base := fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Unmarshaled["%s"]`, stage, parser, pidx, ukey) + base := fmt.Sprintf("results[%q][%q][%d].Evt.Unmarshaled[%q]", stage, parser, pidx, ukey) for _, line := range p.buildUnmarshaledAssert(base, uval) { ret += line @@ -295,11 +295,11 @@ func (p 
*ParserAssert) buildUnmarshaledAssert(ekey string, eval interface{}) []s switch val := eval.(type) { case map[string]interface{}: for k, v := range val { - ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf(`%s["%s"]`, ekey, k), v)...) + ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf("%s[%q]", ekey, k), v)...) } case map[interface{}]interface{}: for k, v := range val { - ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf(`%s["%s"]`, ekey, k), v)...) + ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf("%s[%q]", ekey, k), v)...) } case []interface{}: case string: From 082c1dde71dad5564960bb0059c1715186c62da4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 13 Dec 2024 15:44:21 +0100 Subject: [PATCH 365/581] lint/deep-exit: avoid log.Fatal (#3360) * deep-exit: bubble up error from item_metrics.go * deep-exit: bubble up error from password.go --- .golangci.yml | 10 ---- cmd/crowdsec-cli/clicapi/capi.go | 7 ++- cmd/crowdsec-cli/clihub/item_metrics.go | 67 ++++++++++++++++++------- cmd/crowdsec-cli/clilapi/register.go | 7 ++- cmd/crowdsec-cli/climachine/add.go | 5 +- cmd/crowdsec-cli/dashboard.go | 6 ++- cmd/crowdsec-cli/idgen/machineid.go | 6 ++- cmd/crowdsec-cli/idgen/password.go | 9 ++-- pkg/leakybucket/overflows.go | 2 + 9 files changed, 80 insertions(+), 39 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a1b215ae8fd..097cc86d20c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -420,16 +420,6 @@ issues: path: "cmd/crowdsec-cli/main.go" text: "deep-exit: .*" - - linters: - - revive - path: "cmd/crowdsec-cli/clihub/item_metrics.go" - text: "deep-exit: .*" - - - linters: - - revive - path: "cmd/crowdsec-cli/idgen/password.go" - text: "deep-exit: .*" - - linters: - revive path: "pkg/leakybucket/overflows.go" diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 61d59836fdd..2cce01c7d3e 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ 
b/cmd/crowdsec-cli/clicapi/capi.go @@ -66,7 +66,12 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF return fmt.Errorf("unable to generate machine id: %w", err) } - password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) + pstr, err := idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } + + password := strfmt.Password(pstr) apiurl, err := url.Parse(types.CAPIBaseURL) if err != nil { diff --git a/cmd/crowdsec-cli/clihub/item_metrics.go b/cmd/crowdsec-cli/clihub/item_metrics.go index f4af8f635db..ac9c18640fa 100644 --- a/cmd/crowdsec-cli/clihub/item_metrics.go +++ b/cmd/crowdsec-cli/clihub/item_metrics.go @@ -1,6 +1,7 @@ package clihub import ( + "fmt" "net/http" "strconv" "strings" @@ -19,10 +20,16 @@ import ( func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: - metrics := getParserMetric(prometheusURL, hubItem.Name) + metrics, err := getParserMetric(prometheusURL, hubItem.Name) + if err != nil { + return err + } parserMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.SCENARIOS: - metrics := getScenarioMetric(prometheusURL, hubItem.Name) + metrics, err := getScenarioMetric(prometheusURL, hubItem.Name) + if err != nil { + return err + } scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: for _, sub := range hubItem.SubItems() { @@ -31,7 +38,10 @@ func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) er } } case cwhub.APPSEC_RULES: - metrics := getAppsecRuleMetric(prometheusURL, hubItem.Name) + metrics, err := getAppsecRuleMetric(prometheusURL, hubItem.Name) + if err != nil { + return err + } appsecMetricsTable(color.Output, wantColor, hubItem.Name, metrics) default: // no metrics for this item type } @@ -40,11 +50,15 @@ func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) er } // getParserMetric is a 
complete rip from prom2json -func getParserMetric(url string, itemName string) map[string]map[string]int { +func getParserMetric(url string, itemName string) (map[string]map[string]int, error) { stats := make(map[string]map[string]int) - result := getPrometheusMetric(url) - for idx, fam := range result { + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue } @@ -128,10 +142,10 @@ func getParserMetric(url string, itemName string) map[string]map[string]int { } } - return stats + return stats, nil } -func getScenarioMetric(url string, itemName string) map[string]int { +func getScenarioMetric(url string, itemName string) (map[string]int, error) { stats := make(map[string]int) stats["instantiation"] = 0 @@ -140,8 +154,12 @@ func getScenarioMetric(url string, itemName string) map[string]int { stats["pour"] = 0 stats["underflow"] = 0 - result := getPrometheusMetric(url) - for idx, fam := range result { + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue } @@ -192,16 +210,20 @@ func getScenarioMetric(url string, itemName string) map[string]int { } } - return stats + return stats, nil } -func getAppsecRuleMetric(url string, itemName string) map[string]int { +func getAppsecRuleMetric(url string, itemName string) (map[string]int, error) { stats := make(map[string]int) stats["inband_hits"] = 0 stats["outband_hits"] = 0 - results := getPrometheusMetric(url) + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -257,10 +279,10 @@ func getAppsecRuleMetric(url string, itemName string) map[string]int { } } - return stats + return stats, nil } -func getPrometheusMetric(url string) []*prom2json.Family { +func getPrometheusMetric(url string) 
([]*prom2json.Family, error) { mfChan := make(chan *dto.MetricFamily, 1024) // Start with the DefaultTransport for sane defaults. @@ -271,12 +293,15 @@ func getPrometheusMetric(url string) []*prom2json.Family { // Timeout early if the server doesn't even return the headers. transport.ResponseHeaderTimeout = time.Minute + var fetchErr error + go func() { defer trace.CatchPanic("crowdsec/GetPrometheusMetric") - err := prom2json.FetchMetricFamilies(url, mfChan, transport) - if err != nil { - log.Fatalf("failed to fetch prometheus metrics : %v", err) + // mfChan is closed by prom2json.FetchMetricFamilies in all cases. + if err := prom2json.FetchMetricFamilies(url, mfChan, transport); err != nil { + fetchErr = fmt.Errorf("failed to fetch prometheus metrics: %w", err) + return } }() @@ -285,7 +310,11 @@ func getPrometheusMetric(url string) []*prom2json.Family { result = append(result, prom2json.NewFamily(mf)) } + if fetchErr != nil { + return nil, fetchErr + } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) - return result + return result, nil } diff --git a/cmd/crowdsec-cli/clilapi/register.go b/cmd/crowdsec-cli/clilapi/register.go index 4c9b0f39903..e8eb7ddc543 100644 --- a/cmd/crowdsec-cli/clilapi/register.go +++ b/cmd/crowdsec-cli/clilapi/register.go @@ -28,7 +28,12 @@ func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile stri } } - password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) + pstr, err := idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } + + password := strfmt.Password(pstr) apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) if err != nil { diff --git a/cmd/crowdsec-cli/climachine/add.go b/cmd/crowdsec-cli/climachine/add.go index afddb4e4b65..4f28119dde6 100644 --- a/cmd/crowdsec-cli/climachine/add.go +++ b/cmd/crowdsec-cli/climachine/add.go @@ -65,7 +65,10 @@ func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword return 
errors.New("please specify a password with --password or use --auto") } - machinePassword = idgen.GeneratePassword(idgen.PasswordLength) + machinePassword, err = idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } } else if machinePassword == "" && interactive { qs := &survey.Password{ Message: "Please provide a password for the machine:", diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 7ddac093dcd..a653fcb3a47 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -144,7 +144,11 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if metabasePassword == "" { isValid := passwordIsValid(metabasePassword) for !isValid { - metabasePassword = idgen.GeneratePassword(16) + var err error + metabasePassword, err = idgen.GeneratePassword(16) + if err != nil { + return err + } isValid = passwordIsValid(metabasePassword) } } diff --git a/cmd/crowdsec-cli/idgen/machineid.go b/cmd/crowdsec-cli/idgen/machineid.go index 4bd356b3abc..434f79128e9 100644 --- a/cmd/crowdsec-cli/idgen/machineid.go +++ b/cmd/crowdsec-cli/idgen/machineid.go @@ -42,7 +42,11 @@ func GenerateMachineID(prefix string) (string, error) { } prefix = strings.ReplaceAll(prefix, "-", "")[:32] - suffix := GeneratePassword(16) + + suffix, err := GeneratePassword(16) + if err != nil { + return "", err + } return prefix + suffix, nil } diff --git a/cmd/crowdsec-cli/idgen/password.go b/cmd/crowdsec-cli/idgen/password.go index e0faa4daacc..9f1925288ce 100644 --- a/cmd/crowdsec-cli/idgen/password.go +++ b/cmd/crowdsec-cli/idgen/password.go @@ -2,14 +2,13 @@ package idgen import ( saferand "crypto/rand" + "fmt" "math/big" - - log "github.com/sirupsen/logrus" ) const PasswordLength = 64 -func GeneratePassword(length int) string { +func GeneratePassword(length int) (string, error) { upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" lower := "abcdefghijklmnopqrstuvwxyz" digits := "0123456789" @@ -22,11 +21,11 @@ func GeneratePassword(length int) 
string { for i := range length { rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) if err != nil { - log.Fatalf("failed getting data from prng for password generation : %s", err) + return "", fmt.Errorf("prng failed to generate unique id or password: %w", err) } buf[i] = charset[rInt.Int64()] } - return string(buf) + return string(buf), nil } diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 126bcd05685..62ba3bc9a81 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -149,6 +149,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro leaky.logger.Tracef("Valid range from %s : %s", src.IP, src.Range) } } + if leaky.scopeType.Scope == types.Ip { src.Value = &src.IP } else if leaky.scopeType.Scope == types.Range { @@ -364,6 +365,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { if err := newApiAlert.Validate(strfmt.Default); err != nil { log.Errorf("Generated alerts isn't valid") log.Errorf("->%s", spew.Sdump(newApiAlert)) + // XXX: deep-exit - note other errors returned from this function are not fatal log.Fatalf("error : %s", err) } From b1e2b9523cabc755527364413b970ada85f12ec3 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 13 Dec 2024 15:45:07 +0100 Subject: [PATCH 366/581] cscli: print errors in plain text with -o json (#2973) --- cmd/crowdsec-cli/main.go | 4 +++- test/bats/01_cscli.bats | 8 ++++---- test/bats/01_cscli_lapi.bats | 16 ++++++++-------- test/bats/10_bouncers.bats | 7 ++----- test/bats/20_hub_items.bats | 5 ++--- test/bats/30_machines.bats | 5 ++--- test/bats/90_decisions.bats | 13 +++---------- 7 files changed, 24 insertions(+), 34 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 1cca03b1d3d..8b3077a579e 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -302,6 +302,8 @@ func main() { } if err := 
cmd.Execute(); err != nil { - log.Fatal(err) + red := color.New(color.FgRed).SprintFunc() + fmt.Fprintln(os.Stderr, red("Error:"), err) + os.Exit(1) } } diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 264870501a5..63c204a9e86 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -33,9 +33,9 @@ teardown() { # no "usage" output after every error rune -1 cscli blahblah - # error is displayed as log entry, not with print - assert_stderr --partial 'level=fatal msg="unknown command \"blahblah\" for \"cscli\""' - refute_stderr --partial 'unknown command "blahblah" for "cscli"' + # error is displayed with print, not as a log entry + assert_stderr --partial 'unknown command "blahblah" for "cscli"' + refute_stderr --partial 'level=fatal' } @test "cscli version" { @@ -294,7 +294,7 @@ teardown() { # it is possible to enable subcommands with feature flags defined in feature.yaml rune -1 cscli setup - assert_stderr --partial 'unknown command \"setup\" for \"cscli\"' + assert_stderr --partial 'unknown command "setup" for "cscli"' CONFIG_DIR=$(dirname "$CONFIG_YAML") echo ' - cscli_setup' >> "$CONFIG_DIR"/feature.yaml rune -0 cscli setup diff --git a/test/bats/01_cscli_lapi.bats b/test/bats/01_cscli_lapi.bats index 6e876576a6e..005eb15e141 100644 --- a/test/bats/01_cscli_lapi.bats +++ b/test/bats/01_cscli_lapi.bats @@ -113,9 +113,8 @@ teardown() { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' + rune -1 cscli lapi status + assert_stderr 'Error: failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' } @test "cscli - bad LAPI password" { @@ -123,9 +122,8 @@ teardown() { LOCAL_API_CREDENTIALS=$(config_get 
'.api.client.credentials_path') config_set "$LOCAL_API_CREDENTIALS" '.password="meh"' - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' + rune -1 cscli lapi status + assert_stderr 'Error: failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' } @test "cscli lapi register / machines validate" { @@ -189,8 +187,10 @@ teardown() { rune -1 cscli lapi register --machine malicious --token 123456789012345678901234badtoken assert_stderr --partial "401 Unauthorized: API error: invalid token for auto registration" - rune -1 cscli machines inspect malicious -o json - assert_stderr --partial "unable to read machine data 'malicious': user 'malicious': user doesn't exist" + rune -1 cscli machines inspect malicious + # XXX: we may want to remove this warning + assert_stderr --partial 'QueryMachineByID : ent: machine not found' + assert_stderr --partial "Error: unable to read machine data 'malicious': user 'malicious': user doesn't exist" rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 assert_stderr --partial "Successfully registered to Local API" diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index b1c90116dd2..c9ee1b0cd0c 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -117,12 +117,9 @@ teardown() { @test "we can't add the same bouncer twice" { rune -0 cscli bouncers add ciTestBouncer - rune -1 cscli bouncers add ciTestBouncer -o json + rune -1 cscli bouncers add ciTestBouncer - # XXX temporary hack to filter out unwanted log lines that may appear before - # log configuration (= not json) - rune -0 jq -c '[.level,.msg]' <(stderr | grep "^{") - assert_output '["fatal","unable to create bouncer: bouncer ciTestBouncer already exists"]' + assert_stderr 'Error: unable to create bouncer: bouncer ciTestBouncer already exists' rune -0 cscli 
bouncers list -o json rune -0 jq '. | length' <(output) diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 4b390c90ed4..d29a7d2c14c 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -80,10 +80,9 @@ teardown() { echo "$new_hub" >"$INDEX_PATH" rune -0 cscli collections install crowdsecurity/sshd - rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics -o json + rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics # XXX: we are on the verbose side here... - rune -0 jq -r ".msg" <(stderr) - assert_output --regexp "failed to read Hub index: failed to sync hub items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. Run 'sudo cscli hub update' to download the index again" + assert_stderr --regexp "Error: failed to read Hub index: failed to sync hub items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. Run 'sudo cscli hub update' to download the index again" } @test "removing or purging an item already removed by hand" { diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index d4cce67d0b0..3d73bd096ae 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -30,9 +30,8 @@ teardown() { } @test "don't overwrite local credentials by default" { - rune -1 cscli machines add local -a -o json - rune -0 jq -r '.msg' <(stderr) - assert_output --partial 'already exists: please remove it, use "--force" or specify a different file with "-f"' + rune -1 cscli machines add local -a + assert_stderr --partial 'already exists: please remove it, use "--force" or specify a different file with "-f"' rune -0 cscli machines add local -a --force assert_stderr --partial "Machine 'local' successfully added to the local API." 
} diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index 8601414db48..c8f5139faf8 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -31,11 +31,7 @@ teardown() { @test "'decisions add' requires parameters" { rune -1 cscli decisions add - assert_stderr --partial "missing arguments, a value is required (--ip, --range or --scope and --value)" - - rune -1 cscli decisions add -o json - rune -0 jq -c '[ .level, .msg]' <(stderr | grep "^{") - assert_output '["fatal","missing arguments, a value is required (--ip, --range or --scope and --value)"]' + assert_stderr "Error: missing arguments, a value is required (--ip, --range or --scope and --value)" } @test "cscli decisions list, with and without --machine" { @@ -61,16 +57,13 @@ teardown() { @test "cscli decisions list, incorrect parameters" { rune -1 cscli decisions list --until toto - assert_stderr --partial 'unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration \"toto\"' - rune -1 cscli decisions list --until toto -o json - rune -0 jq -c '[.level, .msg]' <(stderr | grep "^{") - assert_output '["fatal","unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration \"toto\""]' + assert_stderr 'Error: unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration "toto"' } @test "cscli decisions import" { # required input rune -1 cscli decisions import - assert_stderr --partial 'required flag(s) \"input\" not set"' + assert_stderr 'Error: required flag(s) "input" not set' # unsupported format rune -1 cscli decisions import -i - <<<'value\n5.6.7.8' --format xml From 99552f4a5391bd28d8cf9318abeda79f7bb88879 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:35:00 +0100 Subject: [PATCH 367/581] update dependency on docker (#3175) * update dependency on docker since docker doesn't 
have a go.mod, the opentelemetry dependencies in go.sum must be updated by hand according to the vendor.mod file of the related docker release ex. https://github.com/docker/cli/blob/v27.1.1/vendor.mod --- .github/workflows/go-tests.yml | 1 - go.mod | 74 +++++----- go.sum | 119 ++++++++++------ pkg/acquisition/modules/docker/docker.go | 128 ++++++++++++------ pkg/acquisition/modules/docker/docker_test.go | 81 ++++++----- pkg/metabase/container.go | 12 +- test/localstack/docker-compose.yml | 1 - 7 files changed, 258 insertions(+), 158 deletions(-) diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3638696b4f6..3a194e1084a 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -42,7 +42,6 @@ jobs: DEBUG: "" LAMBDA_EXECUTOR: "" KINESIS_ERROR_PROBABILITY: "" - DOCKER_HOST: unix:///var/run/docker.sock KINESIS_INITIALIZE_STREAMS: ${{ env.KINESIS_INITIALIZE_STREAMS }} LOCALSTACK_HOST: ${{ env.AWS_HOST }} # Required so that resource urls are provided properly # e.g sqs url will get localhost if we don't set this env to map our service diff --git a/go.mod b/go.mod index f4bd9379a2d..43f0ed4e6f2 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 github.com/appleboy/gin-jwt/v2 v2.9.2 @@ -22,7 +23,10 @@ require ( github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.8 github.com/cespare/xxhash/v2 v2.3.0 + github.com/containerd/log v0.1.0 // indirect github.com/corazawaf/libinjection-go v0.1.2 + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/creack/pty v1.1.21 // indirect github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 
github.com/crowdsecurity/go-cs-lib v0.0.15 @@ -30,8 +34,10 @@ require ( github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/dghubble/sling v1.4.2 - github.com/docker/docker v24.0.9+incompatible - github.com/docker/go-connections v0.4.0 + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v27.3.1+incompatible + github.com/docker/go-connections v0.5.0 + github.com/docker/go-units v0.5.0 // indirect github.com/expr-lang/expr v1.16.9 github.com/fatih/color v1.16.0 github.com/fsnotify/fsnotify v1.7.0 @@ -44,7 +50,10 @@ require ( github.com/go-sql-driver/mysql v1.6.0 github.com/goccy/go-yaml v1.11.0 github.com/gofrs/uuid v4.0.0+incompatible + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 github.com/google/uuid v1.6.0 github.com/google/winops v0.0.0-20230712152054-af9b550d0601 @@ -59,17 +68,25 @@ require ( github.com/jarcoal/httpmock v1.1.0 github.com/jedib0t/go-pretty/v6 v6.5.9 github.com/jszwec/csvutil v1.5.1 + github.com/klauspost/compress v1.17.9 // indirect github.com/lithammer/dedent v1.1.0 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 + github.com/miekg/dns v1.1.57 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/term v0.5.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 + github.com/morikuni/aec v1.0.0 // indirect github.com/nxadm/tail v1.4.8 + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/oschwald/geoip2-golang v1.9.0 github.com/oschwald/maxminddb-golang v1.12.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 - 
github.com/prometheus/client_model v0.4.0 + github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_model v0.5.0 github.com/prometheus/prom2json v1.3.0 github.com/r3labs/diff/v2 v2.14.1 github.com/sanity-io/litter v1.5.5 @@ -77,28 +94,38 @@ require ( github.com/shirou/gopsutil/v3 v3.23.5 github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.12.2 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.9.0 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/crypto v0.26.0 - golang.org/x/mod v0.17.0 + golang.org/x/mod v0.20.0 + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.24.0 golang.org/x/text v0.17.0 + golang.org/x/time v0.6.0 // indirect google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.34.2 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 + gotest.tools/v3 v3.5.1 // indirect k8s.io/apiserver v0.28.4 + ) require ( ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect @@ -106,14 +133,12 @@ require ( github.com/bytedance/sonic v1.10.2 // indirect 
github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect github.com/chenzhuoyu/iasm v0.9.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/creack/pty v1.1.18 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/go-units v0.5.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.19.16 // indirect github.com/go-openapi/inflect v0.19.0 // indirect @@ -127,10 +152,7 @@ require ( github.com/go-playground/validator/v10 v10.17.0 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect @@ -148,7 +170,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.3 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect @@ -158,33 +179,28 @@ require ( github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/oklog/run v1.0.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/pelletier/go-toml/v2 v2.1.1 // indirect github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect github.com/tetratelabs/wazero v1.8.0 // indirect github.com/tidwall/gjson v1.17.0 // indirect github.com/tidwall/match v1.1.1 // indirect @@ -199,19 +215,15 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect 
go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect golang.org/x/term v0.23.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.6.7 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gotest.tools/v3 v3.5.0 // indirect k8s.io/api v0.28.4 // indirect k8s.io/apimachinery v0.28.4 // indirect k8s.io/klog/v2 v2.100.1 // indirect diff --git a/go.sum b/go.sum index b2bd77c9915..80f8a079bae 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/PuerkitoBio/purell v1.1.0/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -56,8 +56,6 @@ github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7Rfg github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.52.0 h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI= github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU= -github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= github.com/beevik/etree v1.4.1 h1:PmQJDDYahBGNKDcpdX8uPy1xRCwoCGVUiW669MEirVI= github.com/beevik/etree v1.4.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -76,6 +74,8 @@ github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZF github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -87,6 +87,8 @@ github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0 github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd 
v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/corazawaf/libinjection-go v0.1.2 h1:oeiV9pc5rvJ+2oqOqXEAMJousPpGiup6f7Y3nZj5GoM= github.com/corazawaf/libinjection-go v0.1.2/go.mod h1:OP4TM7xdJ2skyXqNX1AN1wN5nNZEmJNuWbNPOItn7aw= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -94,13 +96,13 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h8clMcRL2u5ooZ3tmwnmJftmhb9Ws1MKmavvI= 
github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= @@ -117,12 +119,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dghubble/sling v1.4.2 h1:vs1HIGBbSl2SEALyU+irpYFLZMfc49Fp+jYryFebQjM= github.com/dghubble/sling v1.4.2/go.mod h1:o0arCOz0HwfqYQJLrRtqunaWOn4X6jxE/6ORKRpVTD4= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= -github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 
h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -132,6 +134,8 @@ github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -154,8 +158,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis 
v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= @@ -303,8 +310,9 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -334,6 +342,9 @@ github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod h1:A github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 
github.com/hashicorp/go-plugin v1.4.10 h1:xUbmA4jC6Dq163/fWcp8P3JuHilrHHMLNRxzGQJ9hNk= @@ -434,8 +445,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -504,8 +515,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 
h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -522,6 +533,8 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -543,8 +556,8 @@ github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= 
github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= @@ -573,21 +586,21 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= 
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM= github.com/r3labs/diff/v2 v2.14.1 h1:wRZ3jB44Ny50DSXsoIcFQ27l2x+n5P31K/Pk+b9B0Ic= @@ -601,8 +614,8 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -637,8 +650,8 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -729,6 +742,22 @@ go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.9.4 h1:qXWlnK2WCOWSxJ/Hm3XyYOGKv3ujA2btBsCyuIFvQjc= go.mongodb.org/mongo-driver v1.9.4/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod 
h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -774,8 +803,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -872,8 +901,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -902,12 +931,16 @@ golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= @@ -945,8 +978,8 @@ gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= k8s.io/api v0.28.4/go.mod 
h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index b27255ec13f..798eba29440 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -12,6 +12,7 @@ import ( "time" dockerTypes "github.com/docker/docker/api/types" + dockerContainer "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" @@ -56,7 +57,7 @@ type DockerSource struct { logger *log.Entry Client client.CommonAPIClient t *tomb.Tomb - containerLogsOptions *dockerTypes.ContainerLogsOptions + containerLogsOptions *dockerContainer.LogsOptions } type ContainerConfig struct { @@ -104,6 +105,7 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { if d.Config.Mode == "" { d.Config.Mode = configuration.TAIL_MODE } + if d.Config.Mode != configuration.CAT_MODE && d.Config.Mode != configuration.TAIL_MODE { return fmt.Errorf("unsupported mode %s for docker datasource", d.Config.Mode) } @@ -120,7 +122,7 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { d.Config.Since = time.Now().UTC().Format(time.RFC3339) } - d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{ + d.containerLogsOptions = &dockerContainer.LogsOptions{ ShowStdout: d.Config.FollowStdout, ShowStderr: d.Config.FollowStdErr, Follow: true, @@ -137,6 +139,7 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { d.logger = logger d.metricsLevel = MetricsLevel + err := d.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -146,18 +149,19 @@ func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe d.logger.Tracef("Actual DockerAcquisition configuration %+v", d.Config) - dockerClient, err := client.NewClientWithOpts(client.FromEnv, 
client.WithAPIVersionNegotiation()) - if err != nil { - return err + opts := []client.Opt{ + client.FromEnv, + client.WithAPIVersionNegotiation(), } if d.Config.DockerHost != "" { - err = client.WithHost(d.Config.DockerHost)(dockerClient) - if err != nil { - return err - } + opts = append(opts, client.WithHost(d.Config.DockerHost)) + } + + d.Client, err = client.NewClientWithOpts(opts...) + if err != nil { + return err } - d.Client = dockerClient _, err = d.Client.Info(context.Background()) if err != nil { @@ -170,7 +174,12 @@ func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { var err error - if !strings.HasPrefix(dsn, d.GetName()+"://") { + parsedURL, err := url.Parse(dsn) + if err != nil { + return fmt.Errorf("failed to parse DSN %s: %w", dsn, err) + } + + if parsedURL.Scheme != d.GetName() { return fmt.Errorf("invalid DSN %s for docker source, must start with %s://", dsn, d.GetName()) } @@ -187,40 +196,28 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg d.logger = logger d.Config.Labels = labels - dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err + opts := []client.Opt{ + client.FromEnv, + client.WithAPIVersionNegotiation(), } - d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{ + d.containerLogsOptions = &dockerContainer.LogsOptions{ ShowStdout: d.Config.FollowStdout, ShowStderr: d.Config.FollowStdErr, Follow: false, } - dsn = strings.TrimPrefix(dsn, d.GetName()+"://") - args := strings.Split(dsn, "?") - if len(args) == 0 { - return fmt.Errorf("invalid dsn: %s", dsn) - } + containerNameOrID := parsedURL.Host - if len(args) == 1 && args[0] == "" { + if containerNameOrID == "" { return fmt.Errorf("empty %s DSN", d.GetName()+"://") } - d.Config.ContainerName = append(d.Config.ContainerName, args[0]) 
+ + d.Config.ContainerName = append(d.Config.ContainerName, containerNameOrID) // we add it as an ID also so user can provide docker name or docker ID - d.Config.ContainerID = append(d.Config.ContainerID, args[0]) + d.Config.ContainerID = append(d.Config.ContainerID, containerNameOrID) - // no parameters - if len(args) == 1 { - d.Client = dockerClient - return nil - } - - parameters, err := url.ParseQuery(args[1]) - if err != nil { - return fmt.Errorf("while parsing parameters %s: %w", dsn, err) - } + parameters := parsedURL.Query() for k, v := range parameters { switch k { @@ -267,12 +264,15 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg if len(v) != 1 { return errors.New("only one 'docker_host' parameters is required, not many") } - if err := client.WithHost(v[0])(dockerClient); err != nil { - return err - } + opts = append(opts, client.WithHost(v[0])) } } - d.Client = dockerClient + + d.Client, err = client.NewClientWithOpts(opts...) + if err != nil { + return err + } + return nil } @@ -288,33 +288,42 @@ func (d *DockerSource) SupportedModes() []string { // OneShotAcquisition reads a set of file and returns when done func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { d.logger.Debug("In oneshot") - runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) + + runningContainers, err := d.Client.ContainerList(ctx, dockerContainer.ListOptions{}) if err != nil { return err } + foundOne := false - for _, container := range runningContainer { + + for _, container := range runningContainers { if _, ok := d.runningContainerState[container.ID]; ok { d.logger.Debugf("container with id %s is already being read from", container.ID) continue } + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { d.logger.Infof("reading logs from container %s", containerConfig.Name) d.logger.Debugf("logs options: %+v", *d.containerLogsOptions) 
+ dockerReader, err := d.Client.ContainerLogs(ctx, containerConfig.ID, *d.containerLogsOptions) if err != nil { d.logger.Errorf("unable to read logs from container: %+v", err) return err } + // we use this library to normalize docker API logs (cf. https://ahmet.im/blog/docker-logs-api-binary-format-explained/) foundOne = true + var scanner *bufio.Scanner + if containerConfig.Tty { scanner = bufio.NewScanner(dockerReader) } else { reader := dlog.NewReader(dockerReader) scanner = bufio.NewScanner(reader) } + for scanner.Scan() { select { case <-t.Dying(): @@ -324,6 +333,7 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev if line == "" { continue } + l := types.Line{} l.Raw = line l.Labels = d.Config.Labels @@ -331,9 +341,11 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev l.Src = containerConfig.Name l.Process = true l.Module = d.GetName() + if d.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() } + evt := types.MakeEvent(true, types.LOG, true) evt.Line = l evt.Process = true @@ -342,10 +354,12 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) } } + err = scanner.Err() if err != nil { d.logger.Errorf("Got error from docker read: %s", err) } + d.runningContainerState[container.ID] = containerConfig } } @@ -380,6 +394,7 @@ func (d *DockerSource) getContainerTTY(ctx context.Context, containerId string) if err != nil { return false } + return containerDetails.Config.Tty } @@ -388,6 +403,7 @@ func (d *DockerSource) getContainerLabels(ctx context.Context, containerId strin if err != nil { return map[string]interface{}{} } + return parseLabels(containerDetails.Config.Labels) } @@ -403,6 +419,7 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. 
if strings.HasPrefix(name, "/") && name != "" { name = name[1:] } + if name == containerName { return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } @@ -429,38 +446,49 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. d.logger.Tracef("container has no 'crowdsec' labels set, ignoring container: %s", container.ID) return nil } + if _, ok := parsedLabels["enable"]; !ok { d.logger.Errorf("container has 'crowdsec' labels set but no 'crowdsec.enable' key found") return nil } + enable, ok := parsedLabels["enable"].(string) if !ok { d.logger.Error("container has 'crowdsec.enable' label set but it's not a string") return nil } + if strings.ToLower(enable) != "true" { d.logger.Debugf("container has 'crowdsec.enable' label not set to true ignoring container: %s", container.ID) return nil } + if _, ok = parsedLabels["labels"]; !ok { d.logger.Error("container has 'crowdsec.enable' label set to true but no 'labels' keys found") return nil } + labelsTypeCast, ok := parsedLabels["labels"].(map[string]interface{}) if !ok { d.logger.Error("container has 'crowdsec.enable' label set to true but 'labels' is not a map") return nil } + d.logger.Debugf("container labels %+v", labelsTypeCast) + labels := make(map[string]string) + for k, v := range labelsTypeCast { if v, ok := v.(string); ok { log.Debugf("label %s is a string with value %s", k, v) labels[k] = v + continue } + d.logger.Errorf("label %s is not a string", k) } + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(ctx, container.ID)} } @@ -470,6 +498,7 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. 
func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { ticker := time.NewTicker(d.CheckIntervalDuration) d.logger.Infof("Container watcher started, interval: %s", d.CheckIntervalDuration.String()) + for { select { case <-d.t.Dying(): @@ -478,32 +507,37 @@ func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *Conta case <-ticker.C: // to track for garbage collection runningContainersID := make(map[string]bool) - runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) + + runningContainers, err := d.Client.ContainerList(ctx, dockerContainer.ListOptions{}) if err != nil { if strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon at") { for idx, container := range d.runningContainerState { if d.runningContainerState[idx].t.Alive() { d.logger.Infof("killing tail for container %s", container.Name) d.runningContainerState[idx].t.Kill(nil) + if err := d.runningContainerState[idx].t.Wait(); err != nil { d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) } } + delete(d.runningContainerState, idx) } } else { log.Errorf("container list err: %s", err) } + continue } - for _, container := range runningContainer { + for _, container := range runningContainers { runningContainersID[container.ID] = true // don't need to re eval an already monitored container if _, ok := d.runningContainerState[container.ID]; ok { continue } + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { monitChan <- containerConfig } @@ -514,6 +548,7 @@ func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *Conta deleteChan <- containerConfig } } + d.logger.Tracef("Reading logs from %d containers", len(d.runningContainerState)) ticker.Reset(d.CheckIntervalDuration) @@ -525,7 +560,9 @@ func (d *DockerSource) StreamingAcquisition(ctx context.Context, out chan types. 
d.t = t monitChan := make(chan *ContainerConfig) deleteChan := make(chan *ContainerConfig) + d.logger.Infof("Starting docker acquisition") + t.Go(func() error { return d.DockerManager(ctx, monitChan, deleteChan, out) }) @@ -546,6 +583,7 @@ func ReadTailScanner(scanner *bufio.Scanner, out chan string, t *tomb.Tomb) erro func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfig, outChan chan types.Event, deleteChan chan *ContainerConfig) error { container.logger.Infof("start tail for container %s", container.Name) + dockerReader, err := d.Client.ContainerLogs(ctx, container.ID, *d.containerLogsOptions) if err != nil { container.logger.Errorf("unable to read logs from container: %+v", err) @@ -560,11 +598,13 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi reader := dlog.NewReader(dockerReader) scanner = bufio.NewScanner(reader) } + readerChan := make(chan string) readerTomb := &tomb.Tomb{} readerTomb.Go(func() error { return ReadTailScanner(scanner, readerChan, readerTomb) }) + for { select { case <-container.t.Dying(): @@ -595,6 +635,7 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi // Also reset the Since to avoid re-reading logs d.Config.Since = time.Now().UTC().Format(time.RFC3339) d.containerLogsOptions.Since = d.Config.Since + return nil } } @@ -602,6 +643,7 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConfig, deleteChan chan *ContainerConfig, outChan chan types.Event) error { d.logger.Info("DockerSource Manager started") + for { select { case newContainer := <-in: @@ -611,6 +653,7 @@ func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConf newContainer.t.Go(func() error { return d.TailDocker(ctx, newContainer, outChan, deleteChan) }) + d.runningContainerState[newContainer.ID] = newContainer } case containerToDelete := 
<-deleteChan: @@ -624,13 +667,16 @@ func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConf if d.runningContainerState[idx].t.Alive() { d.logger.Infof("killing tail for container %s", container.Name) d.runningContainerState[idx].t.Kill(nil) + if err := d.runningContainerState[idx].t.Wait(); err != nil { d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) } } } + d.runningContainerState = nil d.logger.Debugf("routine cleanup done, return") + return nil } } diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index 5d8208637e8..73e26b1e497 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -82,6 +82,11 @@ func TestConfigureDSN(t *testing.T) { }{ { name: "invalid DSN", + dsn: "asdfasdf", + expectedErr: "invalid DSN asdfasdf for docker source, must start with docker://", + }, + { + name: "invalid DSN scheme", dsn: "asd://", expectedErr: "invalid DSN asd:// for docker source, must start with docker://", }, @@ -102,16 +107,18 @@ func TestConfigureDSN(t *testing.T) { }, { name: "DSN ok with multiple parameters", - dsn: fmt.Sprintf("docker://test_docker?since=42min&docker_host=%s", dockerHost), + dsn: "docker://test_docker?since=42min&docker_host=" + dockerHost, expectedErr: "", }, } subLogger := log.WithField("type", "docker") for _, test := range tests { - f := DockerSource{} - err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") - cstest.AssertErrorContains(t, err, test.expectedErr) + t.Run(test.name, func(t *testing.T) { + f := DockerSource{} + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") + cstest.AssertErrorContains(t, err, test.expectedErr) + }) } } @@ -121,6 +128,7 @@ type mockDockerCli struct { func TestStreamingAcquisition(t *testing.T) { ctx := context.Background() + log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) 
log.Info("Test 'TestStreamingAcquisition'") @@ -191,6 +199,7 @@ container_name_regexp: readerTomb.Go(func() error { time.Sleep(1 * time.Second) ticker := time.NewTicker(1 * time.Second) + for { select { case <-out: @@ -205,7 +214,7 @@ container_name_regexp: }) cstest.AssertErrorContains(t, err, ts.expectedErr) - if err := readerTomb.Wait(); err != nil { + if err = readerTomb.Wait(); err != nil { t.Fatal(err) } @@ -220,7 +229,7 @@ container_name_regexp: } } -func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error) { +func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerContainer.ListOptions) ([]dockerTypes.Container, error) { if readLogs { return []dockerTypes.Container{}, nil } @@ -235,7 +244,7 @@ func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes return containers, nil } -func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerTypes.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerContainer.LogsOptions) (io.ReadCloser, error) { if readLogs { return io.NopCloser(strings.NewReader("")), nil } @@ -298,38 +307,40 @@ func TestOneShot(t *testing.T) { } for _, ts := range tests { - var ( - subLogger *log.Entry - logger *log.Logger - ) - - if ts.expectedOutput != "" { - logger.SetLevel(ts.logLevel) - subLogger = logger.WithField("type", "docker") - } else { - log.SetLevel(ts.logLevel) - subLogger = log.WithField("type", "docker") - } + t.Run(ts.dsn, func(t *testing.T) { + var ( + subLogger *log.Entry + logger *log.Logger + ) + + if ts.expectedOutput != "" { + logger.SetLevel(ts.logLevel) + subLogger = logger.WithField("type", "docker") + } else { + log.SetLevel(ts.logLevel) + subLogger = log.WithField("type", "docker") + } - readLogs = false - dockerClient := &DockerSource{} - labels := make(map[string]string) - 
labels["type"] = ts.logType + readLogs = false + dockerClient := &DockerSource{} + labels := make(map[string]string) + labels["type"] = ts.logType - if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger, ""); err != nil { - t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err) - } + if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger, ""); err != nil { + t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err) + } - dockerClient.Client = new(mockDockerCli) - out := make(chan types.Event, 100) - tomb := tomb.Tomb{} - err := dockerClient.OneShotAcquisition(ctx, out, &tomb) - cstest.AssertErrorContains(t, err, ts.expectedErr) + dockerClient.Client = new(mockDockerCli) + out := make(chan types.Event, 100) + tomb := tomb.Tomb{} + err := dockerClient.OneShotAcquisition(ctx, out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) - // else we do the check before actualLines is incremented ... - if ts.expectedLines != 0 { - assert.Len(t, out, ts.expectedLines) - } + // else we do the check before actualLines is incremented ... 
+ if ts.expectedLines != 0 { + assert.Len(t, out, ts.expectedLines) + } + }) } } diff --git a/pkg/metabase/container.go b/pkg/metabase/container.go index 8b3dd4084c0..73e4596fcde 100644 --- a/pkg/metabase/container.go +++ b/pkg/metabase/container.go @@ -5,8 +5,8 @@ import ( "context" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" + typesImage "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/client" "github.com/docker/go-connections/nat" @@ -47,7 +47,7 @@ func NewContainer(listenAddr string, listenPort string, sharedFolder string, con func (c *Container) Create() error { ctx := context.Background() log.Printf("Pulling docker image %s", c.Image) - reader, err := c.CLI.ImagePull(ctx, c.Image, types.ImagePullOptions{}) + reader, err := c.CLI.ImagePull(ctx, c.Image, typesImage.PullOptions{}) if err != nil { return fmt.Errorf("failed to pull docker image : %s", err) } @@ -105,7 +105,7 @@ func (c *Container) Create() error { func (c *Container) Start() error { ctx := context.Background() - if err := c.CLI.ContainerStart(ctx, c.Name, types.ContainerStartOptions{}); err != nil { + if err := c.CLI.ContainerStart(ctx, c.Name, container.StartOptions{}); err != nil { return fmt.Errorf("failed while starting %s : %s", c.ID, err) } @@ -118,7 +118,7 @@ func StartContainer(name string) error { return fmt.Errorf("failed to create docker client : %s", err) } ctx := context.Background() - if err := cli.ContainerStart(ctx, name, types.ContainerStartOptions{}); err != nil { + if err := cli.ContainerStart(ctx, name, container.StartOptions{}); err != nil { return fmt.Errorf("failed while starting %s : %s", name, err) } @@ -146,7 +146,7 @@ func RemoveContainer(name string) error { } ctx := context.Background() log.Printf("Removing docker metabase %s", name) - if err := cli.ContainerRemove(ctx, name, types.ContainerRemoveOptions{}); err != nil { + if err := cli.ContainerRemove(ctx, 
name, container.RemoveOptions{}); err != nil { return fmt.Errorf("failed to remove container %s : %s", name, err) } return nil @@ -159,7 +159,7 @@ func RemoveImageContainer(image string) error { } ctx := context.Background() log.Printf("Removing docker image '%s'", image) - if _, err := cli.ImageRemove(ctx, image, types.ImageRemoveOptions{}); err != nil { + if _, err := cli.ImageRemove(ctx, image, typesImage.RemoveOptions{}); err != nil { return fmt.Errorf("failed to remove image container %s : %s", image, err) } return nil diff --git a/test/localstack/docker-compose.yml b/test/localstack/docker-compose.yml index f58f3c7f263..9f0a690353b 100644 --- a/test/localstack/docker-compose.yml +++ b/test/localstack/docker-compose.yml @@ -15,7 +15,6 @@ services: AWS_HOST: localstack DEBUG: "" KINESYS_ERROR_PROBABILITY: "" - DOCKER_HOST: "unix://var/run/docker.sock" LOCALSTACK_HOST: "localstack" AWS_REGION: "us-east-1" From 2c95a24f69e842ca34f85f6538909c8827d3d082 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:40:40 +0100 Subject: [PATCH 368/581] loop performance optimizations / 2 (#3364) --- pkg/alertcontext/alertcontext.go | 24 +++++++++++++----------- pkg/dumps/parser_dump.go | 20 ++++++++++---------- pkg/leakybucket/manager_load.go | 26 +++++++++++++------------- 3 files changed, 36 insertions(+), 34 deletions(-) diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 1b7d1e20018..0afcb2abd3f 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -16,9 +16,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const ( - MaxContextValueLen = 4000 -) +const MaxContextValueLen = 4000 var alertContext = Context{} @@ -34,7 +32,8 @@ func ValidateContextExpr(key string, expressions []string) error { _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{ "evt": &types.Event{}, "match": &types.MatchedRule{}, - "req": 
&http.Request{}})...) + "req": &http.Request{}, + })...) if err != nil { return fmt.Errorf("compilation of '%s' failed: %w", expression, err) } @@ -79,7 +78,8 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{ "evt": &types.Event{}, "match": &types.MatchedRule{}, - "req": &http.Request{}})...) + "req": &http.Request{}, + })...) if err != nil { return fmt.Errorf("compilation of '%s' context value failed: %w", value, err) } @@ -114,6 +114,7 @@ func TruncateContextMap(contextMap map[string][]string, contextValueLen int) ([] } metas = append(metas, &meta) } + return metas, errors } @@ -150,20 +151,19 @@ func TruncateContext(values []string, contextValueLen int) (string, error) { } func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *http.Request, tmpContext map[string][]string) []error { - var errors []error - //if we're evaluating context for appsec event, match and request will be present. - //otherwise, only evt will be. + // if we're evaluating context for appsec event, match and request will be present. + // otherwise, only evt will be. 
if match == nil { match = types.NewMatchedRule() } + if request == nil { request = &http.Request{} } for key, values := range alertContext.ContextToSendCompiled { - if _, ok := tmpContext[key]; !ok { tmpContext[key] = make([]string, 0) } @@ -176,6 +176,7 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h errors = append(errors, fmt.Errorf("failed to get value for %s: %w", key, err)) continue } + switch out := output.(type) { case string: val = out @@ -208,6 +209,7 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h } } } + return errors } @@ -237,8 +239,8 @@ func EventToContext(events []types.Event) (models.Meta, []error) { tmpContext := make(map[string][]string) - for _, evt := range events { - tmpErrors := EvalAlertContextRules(evt, nil, nil, tmpContext) + for i := range events { + tmpErrors := EvalAlertContextRules(events[i], nil, nil, tmpContext) errors = append(errors, tmpErrors...) } diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index bc8f78dc203..bd385bec194 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -145,25 +145,25 @@ func (t *tree) processEvents(parserResults ParserResults) { } func (t *tree) processBuckets(bucketPour BucketPourInfo) { - for bname, evtlist := range bucketPour { - for _, evt := range evtlist { - if evt.Line.Raw == "" { + for bname, events := range bucketPour { + for i := range events { + if events[i].Line.Raw == "" { continue } // it might be bucket overflow being reprocessed, skip this - if _, ok := t.state[evt.Line.Time]; !ok { - t.state[evt.Line.Time] = make(map[string]map[string]ParserResult) - t.assoc[evt.Line.Time] = evt.Line.Raw + if _, ok := t.state[events[i].Line.Time]; !ok { + t.state[events[i].Line.Time] = make(map[string]map[string]ParserResult) + t.assoc[events[i].Line.Time] = events[i].Line.Raw } - // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // 
there is a trick: to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered - if _, ok := t.state[evt.Line.Time]["buckets"]; !ok { - t.state[evt.Line.Time]["buckets"] = make(map[string]ParserResult) + if _, ok := t.state[events[i].Line.Time]["buckets"]; !ok { + t.state[events[i].Line.Time]["buckets"] = make(map[string]ParserResult) } - t.state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true} + t.state[events[i].Line.Time]["buckets"][bname] = ParserResult{Success: true} } } } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index b8310b8cb17..bc907ac257b 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -348,7 +348,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Debug { clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err = types.ConfigureLogger(clog); err != nil { return fmt.Errorf("while creating bucket-specific logger: %w", err) } @@ -496,7 +496,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac return fmt.Errorf("can't parse state file %s: %w", file, err) } - for k, v := range state { + for k := range state { var tbucket *Leaky log.Debugf("Reloading bucket %s", k) @@ -509,30 +509,30 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac found := false for _, h := range bucketFactories { - if h.Name != v.Name { + if h.Name != state[k].Name { continue } log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) // check in which mode the bucket was - if v.Mode == types.TIMEMACHINE { + if state[k].Mode == types.TIMEMACHINE { tbucket = NewTimeMachine(h) - } else if v.Mode == types.LIVE { + } else if state[k].Mode == types.LIVE { tbucket = NewLeaky(h) } else { - log.Errorf("Unknown bucket type : %d", v.Mode) + 
log.Errorf("Unknown bucket type : %d", state[k].Mode) } /*Trying to restore queue state*/ - tbucket.Queue = v.Queue + tbucket.Queue = state[k].Queue /*Trying to set the limiter to the saved values*/ - tbucket.Limiter.Load(v.SerializedState) + tbucket.Limiter.Load(state[k].SerializedState) tbucket.In = make(chan *types.Event) tbucket.Mapkey = k tbucket.Signal = make(chan bool, 1) - tbucket.First_ts = v.First_ts - tbucket.Last_ts = v.Last_ts - tbucket.Ovflw_ts = v.Ovflw_ts - tbucket.Total_count = v.Total_count + tbucket.First_ts = state[k].First_ts + tbucket.Last_ts = state[k].Last_ts + tbucket.Ovflw_ts = state[k].Ovflw_ts + tbucket.Total_count = state[k].Total_count buckets.Bucket_map.Store(k, tbucket) h.tomb.Go(func() error { return LeakRoutine(tbucket) @@ -545,7 +545,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac } if !found { - return fmt.Errorf("unable to find holder for bucket %s: %s", k, spew.Sdump(v)) + return fmt.Errorf("unable to find holder for bucket %s: %s", k, spew.Sdump(state[k])) } } From ecf34c2fa12e54798c7ef08ee5280332a82f1804 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:43:19 +0100 Subject: [PATCH 369/581] lint/deep-exit: avoid log.Fatal (#3367) * lint/deep-exit: don't fail on invalid alert * lint/deep-exit: kinesis_test.go * lint/deep-exit: watcher_test.go * lint/deep-exit: parsing_test.go * lint/deep-exit: client_test.go --- .golangci.yml | 11 --- .../modules/kinesis/kinesis_test.go | 89 ++++++++++--------- pkg/apiclient/client_test.go | 12 ++- pkg/csplugin/watcher_test.go | 13 ++- pkg/leakybucket/bucket.go | 2 +- pkg/leakybucket/overflows.go | 5 +- pkg/parser/parsing_test.go | 79 ++++++---------- pkg/setup/detect_test.go | 2 +- 8 files changed, 87 insertions(+), 126 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 097cc86d20c..d0fdd3b37f4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -402,12 +402,6 @@ issues: path: 
"pkg/(.+)_test.go" text: "line-length-limit: .*" - # tolerate deep exit in tests, for now - - linters: - - revive - path: "pkg/(.+)_test.go" - text: "deep-exit: .*" - # we use t,ctx instead of ctx,t in tests - linters: - revive @@ -420,11 +414,6 @@ issues: path: "cmd/crowdsec-cli/main.go" text: "deep-exit: .*" - - linters: - - revive - path: "pkg/leakybucket/overflows.go" - text: "deep-exit: .*" - - linters: - revive path: "cmd/crowdsec/crowdsec.go" diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 027cbde9240..778dda4a681 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -9,6 +9,7 @@ import ( "net" "os" "runtime" + "strconv" "strings" "testing" "time" @@ -18,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -28,17 +30,20 @@ import ( func getLocalStackEndpoint() (string, error) { endpoint := "http://localhost:4566" + if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { v = strings.TrimPrefix(v, "http://") + _, err := net.Dial("tcp", v) if err != nil { return "", fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) } } + return endpoint, nil } -func GenSubObject(i int) []byte { +func GenSubObject(t *testing.T, i int) []byte { r := CloudWatchSubscriptionRecord{ MessageType: "subscription", Owner: "test", @@ -48,15 +53,14 @@ func GenSubObject(i int) []byte { LogEvents: []CloudwatchSubscriptionLogEvent{ { ID: "testid", - Message: fmt.Sprintf("%d", i), + Message: strconv.Itoa(i), Timestamp: time.Now().UTC().Unix(), }, }, } body, err := json.Marshal(r) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) + var b bytes.Buffer gz := gzip.NewWriter(&b) gz.Write(body) @@ -66,33 +70,33 @@ func GenSubObject(i int) []byte { 
return b.Bytes() } -func WriteToStream(streamName string, count int, shards int, sub bool) { +func WriteToStream(t *testing.T, streamName string, count int, shards int, sub bool) { endpoint, err := getLocalStackEndpoint() - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) + sess := session.Must(session.NewSession()) kinesisClient := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1")) + for i := range count { partition := "partition" if shards != 1 { partition = fmt.Sprintf("partition-%d", i%shards) } + var data []byte + if sub { - data = GenSubObject(i) + data = GenSubObject(t, i) } else { - data = []byte(fmt.Sprintf("%d", i)) + data = []byte(strconv.Itoa(i)) } + _, err = kinesisClient.PutRecord(&kinesis.PutRecordInput{ Data: data, PartitionKey: aws.String(partition), StreamName: aws.String(streamName), }) - if err != nil { - fmt.Printf("Error writing to stream: %s\n", err) - log.Fatal(err) - } + require.NoError(t, err) } } @@ -111,6 +115,7 @@ func TestBadConfiguration(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string expectedErr string @@ -142,6 +147,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } subLogger := log.WithField("type", "kinesis") + for _, test := range tests { f := KinesisSource{} err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -151,9 +157,11 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, func TestReadFromStream(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string count int @@ -169,26 +177,26 @@ stream_name: stream-1-shard`, }, } endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), 
configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) + tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, false) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, false) + for i := range test.count { e := <-out - assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) + assert.Equal(t, strconv.Itoa(i), e.Line.Raw) } + tomb.Kill(nil) tomb.Wait() } @@ -196,9 +204,11 @@ stream_name: stream-1-shard`, func TestReadFromMultipleShards(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string count int @@ -214,23 +224,22 @@ stream_name: stream-2-shards`, }, } endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, false) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, false) + c := 0 + for range test.count { <-out c += 1 @@ -243,9 +252,11 @@ stream_name: stream-2-shards`, func TestFromSubscription(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + 
tests := []struct { config string count int @@ -266,18 +277,14 @@ from_subscription: true`, f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, true) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, true) for i := range test.count { e := <-out assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) @@ -310,15 +317,11 @@ use_enhanced_fanout: true`, f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis")) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) //Allow the datasource to start listening to the stream time.Sleep(10 * time.Second) WriteToStream("stream-1-shard", test.count, test.shards) diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index d1f58f33ad2..327bf8fbd9f 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -56,13 +56,11 @@ func toUNCPath(path string) (string, error) { return uncPath, nil } -func setupUnixSocketWithPrefix(socket string, urlPrefix string) (mux *http.ServeMux, serverURL string, teardown func()) { +func setupUnixSocketWithPrefix(t *testing.T, socket string, urlPrefix string) (mux *http.ServeMux, serverURL string, teardown func()) { var err 
error if runtime.GOOS == "windows" { socket, err = toUNCPath(socket) - if err != nil { - log.Fatalf("converting to UNC path: %s", err) - } + require.NoError(t, err, "converting to UNC path") } mux = http.NewServeMux() @@ -120,7 +118,7 @@ func TestNewClientOk_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() apiURL, err := url.Parse(urlx) @@ -215,7 +213,7 @@ func TestNewDefaultClient_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() apiURL, err := url.Parse(urlx) @@ -293,7 +291,7 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() /*mock login*/ diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index 84e63ec6493..9868b8433c3 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -15,13 +15,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -func resetTestTomb(testTomb *tomb.Tomb, pw *PluginWatcher) { +func resetTestTomb(t *testing.T, testTomb *tomb.Tomb, pw *PluginWatcher) { testTomb.Kill(nil) <-pw.PluginEvents - if err := testTomb.Wait(); err != nil { - log.Fatal(err) - } + err := testTomb.Wait() + require.NoError(t, err) } func resetWatcherAlertCounter(pw *PluginWatcher) { @@ -72,7 +71,7 @@ func TestPluginWatcherInterval(t *testing.T) { err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) 
testTomb = tomb.Tomb{} pw.Start(&testTomb) @@ -81,7 +80,7 @@ func TestPluginWatcherInterval(t *testing.T) { err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) // This is to avoid the int complaining } @@ -130,5 +129,5 @@ func TestPluginAlertCountWatcher(t *testing.T) { err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) } diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go index bc81a505925..e7ea6e3e240 100644 --- a/pkg/leakybucket/bucket.go +++ b/pkg/leakybucket/bucket.go @@ -316,7 +316,7 @@ func LeakRoutine(leaky *Leaky) error { alert, err = NewAlert(leaky, ofw) if err != nil { - log.Errorf("%s", err) + log.Error(err) } for _, f := range leaky.BucketConfig.processors { alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw) diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 62ba3bc9a81..9357caefaff 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -363,10 +363,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { } if err := newApiAlert.Validate(strfmt.Default); err != nil { - log.Errorf("Generated alerts isn't valid") - log.Errorf("->%s", spew.Sdump(newApiAlert)) - // XXX: deep-exit - note other errors returned from this function are not fatal - log.Fatalf("error : %s", err) + return runtimeAlert, fmt.Errorf("invalid generated alert: %w: %s", err, spew.Sdump(newApiAlert)) } runtimeAlert.APIAlerts = append(runtimeAlert.APIAlerts, newApiAlert) diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 5f6f924e7df..84d5f4db743 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -13,6 +13,8 @@ import ( "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -33,14 +35,11 @@ func TestParser(t *testing.T) { envSetting := os.Getenv("TEST_ONLY") - pctx, ectx, err := prepTests() - if err != nil { - t.Fatalf("failed to load env : %s", err) - } + pctx, ectx := prepTests(t) // Init the enricher if envSetting != "" { - if err := testOneParser(pctx, ectx, envSetting, nil); err != nil { + if err := testOneParser(t, pctx, ectx, envSetting, nil); err != nil { t.Fatalf("Test '%s' failed : %s", envSetting, err) } } else { @@ -57,7 +56,7 @@ func TestParser(t *testing.T) { fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) - if err := testOneParser(pctx, ectx, fname, nil); err != nil { + if err := testOneParser(t, pctx, ectx, fname, nil); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } } @@ -71,22 +70,16 @@ func BenchmarkParser(t *testing.B) { log.SetLevel(log.ErrorLevel) - pctx, ectx, err := prepTests() - if err != nil { - t.Fatalf("failed to load env : %s", err) - } + pctx, ectx := prepTests(t) envSetting := os.Getenv("TEST_ONLY") if envSetting != "" { - if err := testOneParser(pctx, ectx, envSetting, t); err != nil { - t.Fatalf("Test '%s' failed : %s", envSetting, err) - } + err := testOneParser(t, pctx, ectx, envSetting, t) + require.NoError(t, err, "Test '%s' failed", envSetting) } else { fds, err := os.ReadDir("./tests/") - if err != nil { - t.Fatalf("Unable to read test directory : %s", err) - } + require.NoError(t, err, "Unable to read test directory") for _, fd := range fds { if !fd.IsDir() { @@ -96,14 +89,13 @@ func BenchmarkParser(t *testing.B) { fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) - if err := testOneParser(pctx, ectx, fname, t); err != nil { - t.Fatalf("Test '%s' failed : %s", fname, err) - } + err := testOneParser(t, pctx, ectx, fname, t) + require.NoError(t, err, "Test '%s' failed", fname) } } } -func testOneParser(pctx *UnixParserCtx, ectx 
EnricherCtx, dir string, b *testing.B) error { +func testOneParser(t require.TestingT, pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error { var ( err error pnodes []Node @@ -143,7 +135,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing // TBD: Load post overflows // func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { parser_test_file := fmt.Sprintf("%s/test.yaml", dir) - tests := loadTestFile(parser_test_file) + tests := loadTestFile(t, parser_test_file) count := 1 if b != nil { @@ -152,7 +144,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } for range count { - if !testFile(tests, *pctx, pnodes) { + if !testFile(t, tests, *pctx, pnodes) { return errors.New("test failed") } } @@ -161,7 +153,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } // prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. 
This is done here so that we don't redo it for each test -func prepTests() (*UnixParserCtx, EnricherCtx, error) { +func prepTests(t require.TestingT) (*UnixParserCtx, EnricherCtx) { var ( err error pctx *UnixParserCtx @@ -169,22 +161,16 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { ) err = exprhelpers.Init(nil) - if err != nil { - return nil, ectx, fmt.Errorf("exprhelpers init failed: %w", err) - } + require.NoError(t, err, "exprhelpers init failed") // Load enrichment datadir := "./test_data/" err = exprhelpers.GeoIPInit(datadir) - if err != nil { - log.Fatalf("unable to initialize GeoIP: %s", err) - } + require.NoError(t, err, "geoip init failed") ectx, err = Loadplugin() - if err != nil { - return nil, ectx, fmt.Errorf("failed to load plugin geoip: %v", err) - } + require.NoError(t, err, "load plugin failed") log.Printf("Loaded -> %+v", ectx) @@ -194,18 +180,14 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { /* this should be refactored to 2 lines :p */ // Init the parser pctx, err = Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) - if err != nil { - return nil, ectx, fmt.Errorf("failed to initialize parser: %v", err) - } + require.NoError(t, err, "parser init failed") - return pctx, ectx, nil + return pctx, ectx } -func loadTestFile(file string) []TestFile { +func loadTestFile(t require.TestingT, file string) []TestFile { yamlFile, err := os.Open(file) - if err != nil { - log.Fatalf("yamlFile.Get err #%v ", err) - } + require.NoError(t, err, "failed to open test file") dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) @@ -221,7 +203,7 @@ func loadTestFile(file string) []TestFile { break } - log.Fatalf("Failed to load testfile '%s' yaml error : %v", file, err) + require.NoError(t, err, "failed to load testfile '%s'", file) return nil } @@ -391,19 +373,14 @@ reCheck: return true, nil } -func testFile(testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { +func testFile(t 
require.TestingT, testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { log.Warning("Going to process one test set") for _, tf := range testSet { // func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { testOk, err := testSubSet(tf, pctx, nodes) - if err != nil { - log.Fatalf("test failed : %s", err) - } - - if !testOk { - log.Fatalf("failed test : %+v", tf) - } + require.NoError(t, err, "test failed") + assert.True(t, testOk, "failed test: %+v", tf) } return true @@ -427,9 +404,7 @@ func TestGeneratePatternsDoc(t *testing.T) { } pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) - if err != nil { - t.Fatalf("unable to load patterns : %s", err) - } + require.NoError(t, err, "unable to load patterns") log.Infof("-> %s", spew.Sdump(pctx)) /*don't judge me, we do it for the users*/ diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 588e74dab54..553617032a4 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -54,7 +54,7 @@ func TestSetupHelperProcess(t *testing.T) { } fmt.Fprint(os.Stdout, fakeSystemctlOutput) - os.Exit(0) + os.Exit(0) //nolint:revive,deep-exit } func tempYAML(t *testing.T, content string) os.File { From 26c15a126789b26781854ed0d98604d159d5a46f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 19 Dec 2024 17:01:16 +0100 Subject: [PATCH 370/581] refact pkg/database: clean up code and error messages (#3263) * refact pkg/database: extract alertfilter.go * refact pkg/database: extract function rollbackOnError(); dry error messages --- pkg/apiserver/alerts_test.go | 4 +- pkg/apiserver/controllers/v1/errors.go | 12 -- pkg/database/alertfilter.go | 258 ++++++++++++++++++++++ pkg/database/alerts.go | 285 ++----------------------- pkg/database/errors.go | 1 - pkg/types/ip.go | 7 +- pkg/types/ip_test.go | 2 +- test/bats/90_decisions.bats | 4 +- 8 files changed, 279 insertions(+), 294 deletions(-) 
create mode 100644 pkg/database/alertfilter.go diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 346619bf691..e51987ba71a 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -242,7 +242,7 @@ func TestAlertListFilters(t *testing.T) { w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") assert.Equal(t, 500, w.Code) - assert.JSONEq(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"invalid ip address 'gruueq'"}`, w.Body.String()) // test range (ok) @@ -261,7 +261,7 @@ func TestAlertListFilters(t *testing.T) { w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=ratata", emptyBody, "password") assert.Equal(t, 500, w.Code) - assert.JSONEq(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"invalid ip address 'ratata'"}`, w.Body.String()) // test since (ok) diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index d661de44b0e..d7b60c1a1b8 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -21,18 +21,6 @@ func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { case errors.Is(err, database.HashError): gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) return - case errors.Is(err, database.InsertFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.QueryFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.ParseTimeFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.ParseDurationFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return default: 
gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return diff --git a/pkg/database/alertfilter.go b/pkg/database/alertfilter.go new file mode 100644 index 00000000000..9e8cf53a450 --- /dev/null +++ b/pkg/database/alertfilter.go @@ -0,0 +1,258 @@ +package database + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { + /* the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok && v[0] == "false" { + *predicates = append(*predicates, alert.SimulatedEQ(false)) + } +} + +func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { + if _, ok := filter["origin"]; ok { + filter["include_capi"] = []string{"true"} + } +} + +func handleScopeFilter(scope string, predicates *[]predicate.Alert) { + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + + *predicates = append(*predicates, alert.SourceScopeEQ(scope)) +} + +func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { + duration, err := ParseDuration(value) + if err != nil { + return fmt.Errorf("while parsing duration: %w", err) + } + + timePoint := time.Now().UTC().Add(-duration) + if timePoint.IsZero() { + return fmt.Errorf("empty time now() - %s", timePoint.String()) + } + + switch param { + case "since": + *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) + case "created_before": + *predicates = 
append(*predicates, alert.CreatedAtLTE(timePoint)) + case "until": + *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) + } + + return nil +} + +func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } +} + +func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip < query.start_ip + alert.HasDecisionsWith(decision.StartIPLT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix <= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip > query.end_ip + alert.HasDecisionsWith(decision.EndIPGT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix >= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), + ), + ), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + 
alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip > query.start_ip + alert.HasDecisionsWith(decision.StartIPGT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix >= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip < query.end_ip + alert.HasDecisionsWith(decision.EndIPLT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix <= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), + ), + ), + )) + } +} + +func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { + if ip_sz == 4 { + handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz == 16 { + handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz != 0 { + return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + + return nil +} + +func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error { + if value == "false" { + *predicates = append(*predicates, alert.And( + // do not show alerts with active decisions having origin CAPI or lists + alert.And( + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), + ), + alert.Not( + alert.And( + // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI + alert.Not(alert.HasDecisions()), + alert.Or( + alert.SourceScopeHasPrefix(types.ListOrigin+":"), + alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), + ), + ), + ), + )) + } else if value != "true" { + log.Errorf("invalid bool '%s' for include_capi", value) + } + + return nil +} 
+ +func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { + predicates := make([]predicate.Alert, 0) + + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + hasActiveDecision bool + ip_sz int + ) + + contains := true + + /*if contains is true, return bans that *contains* the given value (value is the inner) + else, return bans that are *contained* by the given value (value is the outer)*/ + + handleSimulatedFilter(filter, &predicates) + handleOriginFilter(filter, &predicates) + + for param, value := range filter { + switch param { + case "contains": + contains, err = strconv.ParseBool(value[0]) + if err != nil { + return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) + } + case "scope": + handleScopeFilter(value[0], &predicates) + case "value": + predicates = append(predicates, alert.SourceValueEQ(value[0])) + case "scenario": + predicates = append(predicates, alert.HasDecisionsWith(decision.ScenarioEQ(value[0]))) + case "ip", "range": + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) + if err != nil { + return nil, err + } + case "since", "created_before", "until": + if err := handleTimeFilters(param, value[0], &predicates); err != nil { + return nil, err + } + case "decision_type": + predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) + case "origin": + predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) + case "include_capi": // allows to exclude one or more specific origins + if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { + return nil, err + } + case "has_active_decision": + if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { + return nil, errors.Wrapf(ParseType, "'%s' is not a boolean: %s", value[0], err) + } + + if hasActiveDecision { + predicates = append(predicates, alert.HasDecisionsWith(decision.UntilGTE(time.Now().UTC()))) + } else { + predicates = 
append(predicates, alert.Not(alert.HasDecisions())) + } + case "limit": + continue + case "sort": + continue + case "simulated": + continue + case "with_decisions": + continue + default: + return nil, errors.Wrapf(InvalidFilter, "Filter parameter '%s' is unknown (=%s)", param, value[0]) + } + } + + if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { + return nil, err + } + + return predicates, nil +} + +func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { + preds, err := AlertPredicatesFromFilter(filter) + if err != nil { + return nil, err + } + + return alerts.Where(preds...), nil +} diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index ede9c89fe9a..4e3f209b012 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -20,7 +20,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" - "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -32,6 +31,14 @@ const ( maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered ) +func rollbackOnError(tx *ent.Tx, err error, msg string) error { + if rbErr := tx.Rollback(); rbErr != nil { + log.Errorf("rollback error: %v", rbErr) + } + + return fmt.Errorf("%s: %w", msg, err) +} + // CreateOrUpdateAlert is specific to PAPI : It checks if alert already exists, otherwise inserts it // if alert already exists, it checks it associated decisions already exists // if some associated decisions are missing (ie. 
previous insert ended up in error) it inserts them @@ -285,12 +292,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models duration, err := time.ParseDuration(*decisionItem.Duration) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, errors.Wrapf(ParseDurationFail, "decision duration '%+v' : %s", *decisionItem.Duration, err) + return 0,0,0, rollbackOnError(txClient, err, "parsing decision duration") } if decisionItem.Scope == nil { @@ -302,12 +304,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" { sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, errors.Wrapf(InvalidIPOrRange, "invalid addr/range %s : %s", *decisionItem.Value, err) + return 0, 0, 0, rollbackOnError(txClient, err, "invalid ip addr/range") } } @@ -349,12 +346,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models decision.ValueIn(deleteChunk...), )).Exec(ctx) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("while deleting older community blocklist decisions: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "deleting older community blocklist decisions") } deleted += deletedDecisions @@ -365,12 +357,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models for _, builderChunk := range builderChunks { insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - 
log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("while bulk creating decisions: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "bulk creating decisions") } inserted += len(insertedDecisions) @@ -380,12 +367,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models err = txClient.Commit() if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("error committing transaction: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "error committing transaction") } return alertRef.ID, inserted, deleted, nil @@ -727,247 +709,6 @@ func (c *Client) CreateAlert(ctx context.Context, machineID string, alertList [] return alertIDs, nil } -func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { - /* the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ - if v, ok := filter["simulated"]; ok && v[0] == "false" { - *predicates = append(*predicates, alert.SimulatedEQ(false)) - } -} - -func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { - if _, ok := filter["origin"]; ok { - filter["include_capi"] = []string{"true"} - } -} - -func handleScopeFilter(scope string, predicates *[]predicate.Alert) { - if strings.ToLower(scope) == "ip" { - scope = types.Ip - } else if strings.ToLower(scope) == "range" { - scope = types.Range - } - - *predicates = append(*predicates, alert.SourceScopeEQ(scope)) -} - -func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { - duration, err := ParseDuration(value) - if err != nil { - return fmt.Errorf("while parsing duration: %w", err) - } - - timePoint := time.Now().UTC().Add(-duration) - if timePoint.IsZero() { - return fmt.Errorf("empty time now() - %s", timePoint.String()) - } - - switch param { - case 
"since": - *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) - case "created_before": - *predicates = append(*predicates, alert.CreatedAtLTE(timePoint)) - case "until": - *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) - } - - return nil -} - -func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { - if contains { // decision contains {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } else { // decision is contained within {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } -} - -func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { - if contains { // decision contains {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip < query.start_ip - alert.HasDecisionsWith(decision.StartIPLT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix <= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), - ), - ), - alert.Or( - // decision.end_ip > query.end_ip - alert.HasDecisionsWith(decision.EndIPGT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix >= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), - ), - ), - )) - } else { // decision is contained within 
{start_ip,end_ip} - *predicates = append(*predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip > query.start_ip - alert.HasDecisionsWith(decision.StartIPGT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix >= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), - ), - ), - alert.Or( - // decision.end_ip < query.end_ip - alert.HasDecisionsWith(decision.EndIPLT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix <= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), - ), - ), - )) - } -} - -func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { - if ip_sz == 4 { - handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) - } else if ip_sz == 16 { - handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) - } else if ip_sz != 0 { - return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) - } - - return nil -} - -func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error { - if value == "false" { - *predicates = append(*predicates, alert.And( - // do not show alerts with active decisions having origin CAPI or lists - alert.And( - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), - ), - alert.Not( - alert.And( - // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI - alert.Not(alert.HasDecisions()), - alert.Or( - alert.SourceScopeHasPrefix(types.ListOrigin+":"), - alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), - ), - ), - ), - )) - } else if value 
!= "true" { - log.Errorf("invalid bool '%s' for include_capi", value) - } - - return nil -} - -func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { - predicates := make([]predicate.Alert, 0) - - var ( - err error - start_ip, start_sfx, end_ip, end_sfx int64 - hasActiveDecision bool - ip_sz int - ) - - contains := true - - /*if contains is true, return bans that *contains* the given value (value is the inner) - else, return bans that are *contained* by the given value (value is the outer)*/ - - handleSimulatedFilter(filter, &predicates) - handleOriginFilter(filter, &predicates) - - for param, value := range filter { - switch param { - case "contains": - contains, err = strconv.ParseBool(value[0]) - if err != nil { - return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) - } - case "scope": - handleScopeFilter(value[0], &predicates) - case "value": - predicates = append(predicates, alert.SourceValueEQ(value[0])) - case "scenario": - predicates = append(predicates, alert.HasDecisionsWith(decision.ScenarioEQ(value[0]))) - case "ip", "range": - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) - if err != nil { - return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) - } - case "since", "created_before", "until": - if err := handleTimeFilters(param, value[0], &predicates); err != nil { - return nil, err - } - case "decision_type": - predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) - case "origin": - predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) - case "include_capi": // allows to exclude one or more specific origins - if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { - return nil, err - } - case "has_active_decision": - if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { - return nil, errors.Wrapf(ParseType, "'%s' is not a boolean: %s", 
value[0], err) - } - - if hasActiveDecision { - predicates = append(predicates, alert.HasDecisionsWith(decision.UntilGTE(time.Now().UTC()))) - } else { - predicates = append(predicates, alert.Not(alert.HasDecisions())) - } - case "limit": - continue - case "sort": - continue - case "simulated": - continue - case "with_decisions": - continue - default: - return nil, errors.Wrapf(InvalidFilter, "Filter parameter '%s' is unknown (=%s)", param, value[0]) - } - } - - if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { - return nil, err - } - - return predicates, nil -} - -func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { - preds, err := AlertPredicatesFromFilter(filter) - if err != nil { - return nil, err - } - - return alerts.Where(preds...), nil -} - func (c *Client) AlertsCountPerScenario(ctx context.Context, filters map[string][]string) (map[string]int, error) { var res []struct { Scenario string diff --git a/pkg/database/errors.go b/pkg/database/errors.go index 77f92707e51..e0223be95b8 100644 --- a/pkg/database/errors.go +++ b/pkg/database/errors.go @@ -14,7 +14,6 @@ var ( ParseTimeFail = errors.New("unable to parse time") ParseDurationFail = errors.New("unable to parse duration") MarshalFail = errors.New("unable to serialize") - UnmarshalFail = errors.New("unable to parse") BulkError = errors.New("unable to insert bulk") ParseType = errors.New("unable to parse type") InvalidIPOrRange = errors.New("invalid ip address / range") diff --git a/pkg/types/ip.go b/pkg/types/ip.go index 47fb3fc83a5..3f52a7ccf18 100644 --- a/pkg/types/ip.go +++ b/pkg/types/ip.go @@ -2,7 +2,6 @@ package types import ( "encoding/binary" - "errors" "fmt" "math" "net" @@ -39,7 +38,7 @@ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { if strings.Contains(anyIP, "/") { _, net, err := net.ParseCIDR(anyIP) if err != nil { - return -1, 0, 0, 0, 0, 
fmt.Errorf("while parsing range %s: %w", anyIP, err) + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip range '%s': %w", anyIP, err) } return Range2Ints(*net) @@ -47,12 +46,12 @@ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { ip := net.ParseIP(anyIP) if ip == nil { - return -1, 0, 0, 0, 0, errors.New("invalid address") + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip address '%s'", anyIP) } sz, start, end, err := IP2Ints(ip) if err != nil { - return -1, 0, 0, 0, 0, fmt.Errorf("while parsing ip %s: %w", anyIP, err) + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip address '%s': %w", anyIP, err) } return sz, start, end, start, end, nil diff --git a/pkg/types/ip_test.go b/pkg/types/ip_test.go index b9298ba487f..ef7253f8a9b 100644 --- a/pkg/types/ip_test.go +++ b/pkg/types/ip_test.go @@ -180,7 +180,7 @@ func TestAdd2Int(t *testing.T) { }, { in_addr: "xxx2", - exp_error: "invalid address", + exp_error: "invalid ip address 'xxx2'", }, } diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index c8f5139faf8..3c3ab9987ca 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -165,7 +165,7 @@ teardown() { EOT assert_stderr --partial 'Parsing values' assert_stderr --partial 'Imported 1 decisions' - assert_file_contains "$LOGFILE" "invalid addr/range 'whatever': invalid address" + assert_file_contains "$LOGFILE" "invalid addr/range 'whatever': invalid ip address 'whatever'" rune -0 cscli decisions list -a -o json assert_json '[]' @@ -182,7 +182,7 @@ teardown() { EOT assert_stderr --partial 'Parsing values' assert_stderr --partial 'Imported 3 decisions' - assert_file_contains "$LOGFILE" "invalid addr/range 'bad-apple': invalid address" + assert_file_contains "$LOGFILE" "invalid addr/range 'bad-apple': invalid ip address 'bad-apple'" rune -0 cscli decisions list -a -o json rune -0 jq -r '.[0].decisions | length' <(output) From 4748720a07f59dbf734cf609248ed9a5306d109b Mon Sep 17 00:00:00 2001 From: mmetc 
<92726601+mmetc@users.noreply.github.com> Date: Fri, 20 Dec 2024 14:33:24 +0100 Subject: [PATCH 371/581] refactor pkg/leakybucket (#3371) * refact pkg/leakybucket - call LoadBuckets with Item instances * extract compileScopeFilter() --- cmd/crowdsec/main.go | 15 ++---- pkg/apiserver/alerts_test.go | 82 +++++++++++++++---------------- pkg/leakybucket/buckets_test.go | 38 +++++++++------ pkg/leakybucket/manager_load.go | 86 ++++++++++++++------------------- pkg/types/ip_test.go | 11 ++++- 5 files changed, 116 insertions(+), 116 deletions(-) diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index e414f59f3e2..518bd8e9c0d 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -86,20 +86,15 @@ func (f *Flags) haveTimeMachine() bool { type labelsMap map[string]string func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { - var ( - err error - files []string - ) - - for _, hubScenarioItem := range hub.GetInstalledByType(cwhub.SCENARIOS, false) { - files = append(files, hubScenarioItem.State.LocalPath) - } + var err error buckets = leakybucket.NewBuckets() - log.Infof("Loading %d scenario files", len(files)) + scenarios := hub.GetInstalledByType(cwhub.SCENARIOS, false) + + log.Infof("Loading %d scenario files", len(scenarios)) - holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) + holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, scenarios, &bucketsTomb, buckets, flags.OrderEvent) if err != nil { return fmt.Errorf("scenario loading failed: %w", err) } diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index e51987ba71a..4c5c6ef129c 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -103,13 +103,13 @@ func TestSimulatedAlert(t *testing.T) { // exclude decision in simulation mode w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", alertContent, "password") - assert.Equal(t, 
200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) // include decision in simulation mode w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", alertContent, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) } @@ -120,21 +120,21 @@ func TestCreateAlert(t *testing.T) { // Create Alert with invalid format w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") - assert.Equal(t, 400, w.Code) + assert.Equal(t, http.StatusBadRequest, w.Code) assert.JSONEq(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create Alert with invalid input alertContent := GetAlertReaderFromFile(t, "./tests/invalidAlert_sample.json") w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.JSONEq(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) // Create Valid Alert w = lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, `["1"]`, w.Body.String()) } @@ -175,13 +175,13 @@ func TestAlertListFilters(t *testing.T) { // bad filter w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", 
alertContent, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.JSONEq(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // get without filters w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) // check alert and decision assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) @@ -189,149 +189,149 @@ func TestAlertListFilters(t *testing.T) { // test decision_type filter (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test decision_type filter (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test scope (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scope (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test scenario (ok) 
w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scenario (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test ip (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.JSONEq(t, `{"message":"invalid ip address 'gruueq'"}`, w.Body.String()) // test range (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test range w = lapi.RecordResponse(t, ctx, 
"GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test range (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=ratata", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.JSONEq(t, `{"message":"invalid ip address 'ratata'"}`, w.Body.String()) // test since (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1h", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test since (ok but yields no results) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1ns", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test since (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test until (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1ns", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test until (ok but no return) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1m", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) 
assert.Equal(t, "null", w.Body.String()) // test until (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test simulated (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test simulated (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test has active decision (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, 
http.StatusInternalServerError, w.Code) assert.JSONEq(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) } @@ -343,7 +343,7 @@ func TestAlertBulkInsert(t *testing.T) { alertContent := GetAlertReaderFromFile(t, "./tests/alert_bulk.json") w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", alertContent, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) } func TestListAlert(t *testing.T) { @@ -353,13 +353,13 @@ func TestListAlert(t *testing.T) { // List Alert with invalid filter w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.JSONEq(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // List Alert w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "crowdsecurity/test") } @@ -374,7 +374,7 @@ func TestCreateAlertErrors(t *testing.T) { req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", "ratata")) lapi.router.ServeHTTP(w, req) - assert.Equal(t, 401, w.Code) + assert.Equal(t, http.StatusUnauthorized, w.Code) // test invalid bearer w = httptest.NewRecorder() @@ -382,7 +382,7 @@ func TestCreateAlertErrors(t *testing.T) { req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", lapi.loginResp.Token+"s")) lapi.router.ServeHTTP(w, req) - assert.Equal(t, 401, w.Code) + assert.Equal(t, http.StatusUnauthorized, w.Code) } func TestDeleteAlert(t *testing.T) { @@ -396,7 +396,7 @@ func TestDeleteAlert(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) + assert.Equal(t, 
http.StatusForbidden, w.Code) assert.JSONEq(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) // Delete Alert @@ -405,7 +405,7 @@ func TestDeleteAlert(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } @@ -420,7 +420,7 @@ func TestDeleteAlertByID(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) + assert.Equal(t, http.StatusForbidden, w.Code) assert.JSONEq(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) // Delete Alert @@ -429,7 +429,7 @@ func TestDeleteAlertByID(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } @@ -463,7 +463,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { req.RemoteAddr = ip + ":1234" router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) + assert.Equal(t, http.StatusForbidden, w.Code) assert.Contains(t, w.Body.String(), fmt.Sprintf(`{"message":"access forbidden from this IP (%s)"}`, ip)) } @@ -474,7 +474,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { req.RemoteAddr = ip + ":1234" router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 1da906cb555..8bb7a3d4c47 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -139,14 +139,24 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er t.Fatalf("failed to parse %s : %s", stagecfg, err) } - files := []string{} + 
scenarios := []*cwhub.Item{} for _, x := range stages { - files = append(files, x.Filename) + // XXX: LoadBuckets should take an interface, BucketProvider ScenarioProvider or w/e + item := &cwhub.Item{ + Name: x.Filename, + State: cwhub.ItemState{ + LocalVersion: "", + LocalPath: x.Filename, + LocalHash: "", + }, + } + + scenarios = append(scenarios, item) } cscfg := &csconfig.CrowdsecServiceCfg{} - holders, response, err := LoadBuckets(cscfg, hub, files, tomb, buckets, false) + holders, response, err := LoadBuckets(cscfg, hub, scenarios, tomb, buckets, false) if err != nil { t.Fatalf("failed loading bucket : %s", err) } @@ -184,7 +194,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res } dec := json.NewDecoder(yamlFile) dec.DisallowUnknownFields() - //dec.SetStrict(true) + // dec.SetStrict(true) tf := TestFile{} err = dec.Decode(&tf) if err != nil { @@ -196,7 +206,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res } var latest_ts time.Time for _, in := range tf.Lines { - //just to avoid any race during ingestion of funny scenarios + // just to avoid any race during ingestion of funny scenarios time.Sleep(50 * time.Millisecond) var ts time.Time @@ -226,7 +236,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res time.Sleep(1 * time.Second) - //Read results from chan + // Read results from chan POLL_AGAIN: fails := 0 for fails < 2 { @@ -287,37 +297,37 @@ POLL_AGAIN: log.Tracef("Checking next expected result.") - //empty overflow + // empty overflow if out.Overflow.Alert == nil && expected.Overflow.Alert == nil { - //match stuff + // match stuff } else { if out.Overflow.Alert == nil || expected.Overflow.Alert == nil { log.Printf("Here ?") continue } - //Scenario + // Scenario if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario { log.Errorf("(scenario) %v != %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) continue } 
log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) - //EventsCount + // EventsCount if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount { log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) continue } log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) - //Sources + // Sources if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) { log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) continue } log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) } - //Events + // Events // if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) { // log.Errorf("(Events %s != %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) // valid = false @@ -326,10 +336,10 @@ POLL_AGAIN: // log.Infof("(Events: %s == %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) // } - //CheckFailed: + // CheckFailed: log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx) - //don't do this at home : delete current element from list and redo + // don't do this at home : delete current element from list and redo results[eidx] = results[len(results)-1] results = results[:len(results)-1] tf.Results[ridx] = tf.Results[len(tf.Results)-1] diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index bc907ac257b..5e8bab8486e 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -7,7 +7,6 @@ import ( "io" "os" "path/filepath" - "strings" "sync" "time" @@ -201,44 +200,41 @@ func ValidateFactory(bucketFactory *BucketFactory) error { return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) } - switch 
bucketFactory.ScopeType.Scope { - case types.Undefined: + return compileScopeFilter(bucketFactory) +} + +func compileScopeFilter(bucketFactory *BucketFactory) error { + if bucketFactory.ScopeType.Scope == types.Undefined { bucketFactory.ScopeType.Scope = types.Ip - case types.Ip: - case types.Range: - var ( - runTimeFilter *vm.Program - err error - ) + } + if bucketFactory.ScopeType.Scope == types.Ip { if bucketFactory.ScopeType.Filter != "" { - if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("error compiling the scope filter: %w", err) - } - - bucketFactory.ScopeType.RunTimeFilter = runTimeFilter + return errors.New("filter is not allowed for IP scope") } - default: - // Compile the scope filter - var ( - runTimeFilter *vm.Program - err error - ) + return nil + } - if bucketFactory.ScopeType.Filter != "" { - if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("error compiling the scope filter: %w", err) - } + if bucketFactory.ScopeType.Scope == types.Range && bucketFactory.ScopeType.Filter == "" { + return nil + } - bucketFactory.ScopeType.RunTimeFilter = runTimeFilter - } + if bucketFactory.ScopeType.Filter == "" { + return errors.New("filter is mandatory for non-IP, non-Range scope") } + runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
+ if err != nil { + return fmt.Errorf("error compiling the scope filter: %w", err) + } + + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter + return nil } -func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []string, tomb *tomb.Tomb, buckets *Buckets, orderEvent bool) ([]BucketFactory, chan types.Event, error) { +func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, scenarios []*cwhub.Item, tomb *tomb.Tomb, buckets *Buckets, orderEvent bool) ([]BucketFactory, chan types.Event, error) { var ( ret = []BucketFactory{} response chan types.Event @@ -246,18 +242,15 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str response = make(chan types.Event, 1) - for _, f := range files { - log.Debugf("Loading '%s'", f) + for _, item := range scenarios { + log.Debugf("Loading '%s'", item.State.LocalPath) - if !strings.HasSuffix(f, ".yaml") && !strings.HasSuffix(f, ".yml") { - log.Debugf("Skipping %s : not a yaml file", f) - continue - } + itemPath := item.State.LocalPath // process the yaml - bucketConfigurationFile, err := os.Open(f) + bucketConfigurationFile, err := os.Open(itemPath) if err != nil { - log.Errorf("Can't access leaky configuration file %s", f) + log.Errorf("Can't access leaky configuration file %s", itemPath) return nil, nil, err } @@ -271,8 +264,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = dec.Decode(&bucketFactory) if err != nil { if !errors.Is(err, io.EOF) { - log.Errorf("Bad yaml in %s: %v", f, err) - return nil, nil, fmt.Errorf("bad yaml in %s: %w", f, err) + log.Errorf("Bad yaml in %s: %v", itemPath, err) + return nil, nil, fmt.Errorf("bad yaml in %s: %w", itemPath, err) } log.Tracef("End of yaml file") @@ -288,7 +281,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str } // check compat if bucketFactory.FormatVersion == "" { - log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, f) + 
log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, itemPath) bucketFactory.FormatVersion = "1.0" } @@ -302,22 +295,17 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str continue } - bucketFactory.Filename = filepath.Clean(f) + bucketFactory.Filename = filepath.Clean(itemPath) bucketFactory.BucketName = seed.Generate() bucketFactory.ret = response - hubItem := hub.GetItemByPath(bucketFactory.Filename) - if hubItem == nil { - log.Errorf("scenario %s (%s) could not be found in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) - } else { - if cscfg.SimulationConfig != nil { - bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name) - } - - bucketFactory.ScenarioVersion = hubItem.State.LocalVersion - bucketFactory.hash = hubItem.State.LocalHash + if cscfg.SimulationConfig != nil { + bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(item.Name) } + bucketFactory.ScenarioVersion = item.State.LocalVersion + bucketFactory.hash = item.State.LocalHash + bucketFactory.wgDumpState = buckets.wgDumpState bucketFactory.wgPour = buckets.wgPour diff --git a/pkg/types/ip_test.go b/pkg/types/ip_test.go index ef7253f8a9b..571163761d4 100644 --- a/pkg/types/ip_test.go +++ b/pkg/types/ip_test.go @@ -9,6 +9,7 @@ import ( func TestIP2Int(t *testing.T) { tEmpty := net.IP{} + _, _, _, err := IP2Ints(tEmpty) if !strings.Contains(err.Error(), "unexpected len 0 for ") { t.Fatalf("unexpected: %s", err) @@ -189,31 +190,37 @@ func TestAdd2Int(t *testing.T) { if err != nil && test.exp_error == "" { t.Fatalf("%d unexpected error : %s", idx, err) } + if test.exp_error != "" { if !strings.Contains(err.Error(), test.exp_error) { t.Fatalf("%d unmatched error : %s != %s", idx, err, test.exp_error) } - continue //we can skip this one + + continue // we can skip this one } + if sz != test.exp_sz { t.Fatalf("%d unexpected size %d != %d", idx, sz, test.exp_sz) } + if start_ip != test.exp_start_ip { 
t.Fatalf("%d unexpected start_ip %d != %d", idx, start_ip, test.exp_start_ip) } + if sz == 16 { if start_sfx != test.exp_start_sfx { t.Fatalf("%d unexpected start sfx %d != %d", idx, start_sfx, test.exp_start_sfx) } } + if end_ip != test.exp_end_ip { t.Fatalf("%d unexpected end ip %d != %d", idx, end_ip, test.exp_end_ip) } + if sz == 16 { if end_sfx != test.exp_end_sfx { t.Fatalf("%d unexpected end sfx %d != %d", idx, end_sfx, test.exp_end_sfx) } } - } } From 466f39b88027d77045fe724f6e80b6f28a2cec67 Mon Sep 17 00:00:00 2001 From: victoredvardsson <62360867+victoredvardsson@users.noreply.github.com> Date: Mon, 23 Dec 2024 11:08:33 +0100 Subject: [PATCH 372/581] Add possibility to configure log format #799 (#2941) * make it possible to enable json log * fix * fix typo * fix typo * fix typo * fix typo * fix typo * fix typo * Add error handling * Add log_format to default config * Fix syntax error in if statement * Fix typo * Fix typo * Fix some typos and change naming from native to text, makes more sense * Set same timestamp format for json logging * Fix formatting * Move in if statement under previous * Fix some formatting that got messed up * Default to text formatter, if log_format is not configured. 
* defining logFormatter outside if statement so that log.SetFormatter(logFormatter) is not undefined when function is called * Add variables that were undefined * Argument were missing when calling SetDefaultLoggerConfig function * Fix order of arguments passed * Fix order of arguments passed * Fix typo * Implicit log_format = "text" * functional test * ignore log_format in FatalHook * make it possible to enable json log * fix * fix typo * fix typo * fix typo * fix typo * fix typo * fix typo * Add error handling * Add log_format to default config * Fix syntax error in if statement * Fix typo * Fix typo * Fix some typos and change naming from native to text, makes more sense * Set same timestamp format for json logging * Fix formatting * Move in if statement under previous * Fix some formatting that got messed up * Default to text formatter, if log_format is not configured. * defining logFormatter outside if statement so that log.SetFormatter(logFormatter) is not undefined when function is called * Add variables that were undefined * Argument were missing when calling SetDefaultLoggerConfig function * Fix order of arguments passed * Fix order of arguments passed * Fix typo * Implicit log_format = "text" * functional test * ignore log_format in FatalHook * lint * fix func test * lint * remove < > characters from log --------- Co-authored-by: Victor Edvardsson Co-authored-by: marco Co-authored-by: Thibault "bui" Koechlin --- .golangci.yml | 5 +++++ cmd/crowdsec-cli/main.go | 5 ++++- cmd/crowdsec/fatalhook.go | 24 ++++++++++++++++++-- cmd/crowdsec/main.go | 7 ++---- pkg/apiserver/apiserver_test.go | 4 ++-- pkg/csconfig/api.go | 4 +++- pkg/csconfig/common.go | 8 +++++-- pkg/csconfig/fflag.go | 2 +- pkg/types/utils.go | 30 ++++++++++++++++++++++--- test/bats/01_crowdsec.bats | 34 +++++++++++++++++++++++++++++ test/bats/crowdsec-acquisition.bats | 2 +- 11 files changed, 107 insertions(+), 18 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 
d0fdd3b37f4..b51f17df489 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -459,3 +459,8 @@ issues: - gocritic path: "pkg/(appsec|acquisition|dumps|alertcontext|leakybucket|exprhelpers)" text: "rangeValCopy: .*" + + - linters: + - revive + path: "pkg/types/utils.go" + text: "argument-limit: .*" diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 8b3077a579e..87e9d82fea2 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -146,7 +146,10 @@ func (cli *cliRoot) initialize() error { return fmt.Errorf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) } - log.SetFormatter(&log.TextFormatter{DisableTimestamp: true}) + log.SetFormatter(&log.TextFormatter{ + DisableTimestamp: true, + DisableLevelTruncation: true, + }) if csConfig.Cscli.Output == "json" { log.SetFormatter(&log.JSONFormatter{}) diff --git a/cmd/crowdsec/fatalhook.go b/cmd/crowdsec/fatalhook.go index 84a57406a21..56e945c84a5 100644 --- a/cmd/crowdsec/fatalhook.go +++ b/cmd/crowdsec/fatalhook.go @@ -2,6 +2,7 @@ package main import ( "io" + "os" log "github.com/sirupsen/logrus" ) @@ -9,16 +10,35 @@ import ( // FatalHook is used to log fatal messages to stderr when the rest goes to a file type FatalHook struct { Writer io.Writer + Formatter log.Formatter LogLevels []log.Level } +func newFatalHook() *FatalHook { + return &FatalHook{ + Writer: os.Stderr, + Formatter: &log.TextFormatter{ + DisableTimestamp: true, + // XXX: logrus.TextFormatter has either key pairs with no colors, + // or "LEVEL [optional timestamp] message", with colors. + // We force colors to make sure we get the latter, even if + // the output is not a terminal. + // There are more flexible formatters that don't conflate the two concepts, + // or we can write our own. 
+ ForceColors: true, + DisableLevelTruncation: true, + }, + LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, + } +} + func (hook *FatalHook) Fire(entry *log.Entry) error { - line, err := entry.String() + line, err := hook.Formatter.Format(entry) if err != nil { return err } - _, err = hook.Writer.Write([]byte(line)) + _, err = hook.Writer.Write(line) return err } diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 518bd8e9c0d..02220e15216 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -249,16 +249,13 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if err := types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, cConfig.Common.LogDir, *cConfig.Common.LogLevel, cConfig.Common.LogMaxSize, cConfig.Common.LogMaxFiles, - cConfig.Common.LogMaxAge, cConfig.Common.CompressLogs, + cConfig.Common.LogMaxAge, cConfig.Common.LogFormat, cConfig.Common.CompressLogs, cConfig.Common.ForceColorLogs); err != nil { return nil, err } if cConfig.Common.LogMedia != "stdout" { - log.AddHook(&FatalHook{ - Writer: os.Stderr, - LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, - }) + log.AddHook(newFatalHook()) } if err := csconfig.LoadFeatureFlagsFile(configFile, log.StandardLogger()); err != nil { diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index cf4c91dedda..d8f24add75e 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -387,7 +387,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { cfg.LogLevel = ptr.Of(log.DebugLevel) // Configure logging - err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) + err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false) require.NoError(t, err) api, err := NewServer(ctx, &cfg) @@ -439,7 +439,7 @@ func 
TestLoggingErrorToFileConfig(t *testing.T) { cfg.LogLevel = ptr.Of(log.ErrorLevel) // Configure logging - err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) + err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false) require.NoError(t, err) api, err := NewServer(ctx, &cfg) diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 5f2f8f9248b..d94d90aaf19 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -271,6 +271,7 @@ type LocalApiServerCfg struct { LogMaxSize int `yaml:"-"` LogMaxAge int `yaml:"-"` LogMaxFiles int `yaml:"-"` + LogFormat string `yaml:"-"` TrustedIPs []string `yaml:"trusted_ips,omitempty"` PapiLogLevel *log.Level `yaml:"papi_log_level"` DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` @@ -351,7 +352,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { log.Printf("push and pull to Central API disabled") } - //Set default values for CAPI push/pull + // Set default values for CAPI push/pull if c.API.Server.OnlineClient != nil { if c.API.Server.OnlineClient.PullConfig.Community == nil { c.API.Server.OnlineClient.PullConfig.Community = ptr.Of(true) @@ -391,6 +392,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { c.API.Server.CompressLogs = c.Common.CompressLogs c.API.Server.LogMaxSize = c.Common.LogMaxSize c.API.Server.LogMaxAge = c.Common.LogMaxAge + c.API.Server.LogFormat = c.Common.LogFormat c.API.Server.LogMaxFiles = c.Common.LogMaxFiles if c.API.Server.UseForwardedForHeaders && c.API.Server.TrustedProxies == nil { diff --git a/pkg/csconfig/common.go b/pkg/csconfig/common.go index 7e1ef6e5c98..e312756ce20 100644 --- a/pkg/csconfig/common.go +++ b/pkg/csconfig/common.go @@ -12,11 +12,12 @@ type CommonCfg struct { Daemonize bool PidDir string `yaml:"pid_dir,omitempty"` // TODO: This is 
just for backward compat. Remove this later LogMedia string `yaml:"log_media"` - LogDir string `yaml:"log_dir,omitempty"` //if LogMedia = file + LogDir string `yaml:"log_dir,omitempty"` // if LogMedia = file LogLevel *log.Level `yaml:"log_level"` WorkingDir string `yaml:"working_dir,omitempty"` // TODO: This is just for backward compat. Remove this later CompressLogs *bool `yaml:"compress_logs,omitempty"` LogMaxSize int `yaml:"log_max_size,omitempty"` + LogFormat string `yaml:"log_format,omitempty"` LogMaxAge int `yaml:"log_max_age,omitempty"` LogMaxFiles int `yaml:"log_max_files,omitempty"` ForceColorLogs bool `yaml:"force_color_logs,omitempty"` @@ -24,6 +25,7 @@ type CommonCfg struct { func (c *Config) loadCommon() error { var err error + if c.Common == nil { c.Common = &CommonCfg{} } @@ -32,13 +34,15 @@ func (c *Config) loadCommon() error { c.Common.LogMedia = "stdout" } - var CommonCleanup = []*string{ + CommonCleanup := []*string{ &c.Common.LogDir, } + for _, k := range CommonCleanup { if *k == "" { continue } + *k, err = filepath.Abs(*k) if err != nil { return fmt.Errorf("failed to get absolute path of '%s': %w", *k, err) diff --git a/pkg/csconfig/fflag.go b/pkg/csconfig/fflag.go index c86686889eb..ec1282c5a04 100644 --- a/pkg/csconfig/fflag.go +++ b/pkg/csconfig/fflag.go @@ -38,7 +38,7 @@ func LoadFeatureFlagsFile(configPath string, logger *log.Logger) error { func ListFeatureFlags() string { enabledFeatures := fflag.Crowdsec.GetEnabledFeatures() - msg := "" + msg := "none" if len(enabledFeatures) > 0 { msg = strings.Join(enabledFeatures, ", ") } diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 3e1ae4f7547..d5e4ac6f986 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -16,21 +16,40 @@ var ( logLevel log.Level ) -func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, compress *bool, forceColors bool) error { - /*Configure logs*/ +func SetDefaultLoggerConfig(cfgMode string, 
cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, format string, compress *bool, forceColors bool) error { + if format == "" { + format = "text" + } + + switch format { + case "text": + logFormatter = &log.TextFormatter{ + TimestampFormat: time.RFC3339, + FullTimestamp: true, + ForceColors: forceColors, + } + case "json": + logFormatter = &log.JSONFormatter{TimestampFormat: time.RFC3339} + default: + return fmt.Errorf("unknown log_format '%s'", format) + } + if cfgMode == "file" { _maxsize := 500 if maxSize != 0 { _maxsize = maxSize } + _maxfiles := 3 if maxFiles != 0 { _maxfiles = maxFiles } + _maxage := 28 if maxAge != 0 { _maxage = maxAge } + _compress := true if compress != nil { _compress = *compress @@ -47,10 +66,11 @@ func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level } else if cfgMode != "stdout" { return fmt.Errorf("log mode '%s' unknown", cfgMode) } + logLevel = cfgLevel log.SetLevel(logLevel) - logFormatter = &log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true, ForceColors: forceColors} log.SetFormatter(logFormatter) + return nil } @@ -63,7 +83,9 @@ func ConfigureLogger(clog *log.Logger) error { if logFormatter != nil { clog.SetFormatter(logFormatter) } + clog.SetLevel(logLevel) + return nil } @@ -76,6 +98,8 @@ func IsNetworkFS(path string) (bool, string, error) { if err != nil { return false, "", err } + fsType = strings.ToLower(fsType) + return fsType == "nfs" || fsType == "cifs" || fsType == "smb" || fsType == "smb2", fsType, nil } diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index a768a8d4d28..3df0b42a0f2 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -67,6 +67,40 @@ teardown() { refute_output } +@test "crowdsec - log format" { + # fail early + config_disable_lapi + config_disable_agent + + config_set '.common.log_media="stdout"' + + config_set '.common.log_format=""' + rune -0 wait-for --err "you must run at least the API 
Server or crowdsec" "$CROWDSEC" + assert_stderr --partial 'level=fatal msg="you must run at least the API Server or crowdsec"' + + config_set '.common.log_format="text"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --partial 'level=fatal msg="you must run at least the API Server or crowdsec"' + + config_set '.common.log_format="json"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + rune -0 jq -c 'select(.msg=="you must run at least the API Server or crowdsec") | .level' <(stderr | grep "^{") + assert_output '"fatal"' + + # If log_media='file', a hook to stderr is added only for fatal messages, + # with a predefined formatter (level + msg, no timestamp, ignore log_format) + + config_set '.common.log_media="file"' + + config_set '.common.log_format="text"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --regexp 'FATAL.* you must run at least the API Server or crowdsec$' + + config_set '.common.log_format="json"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --regexp 'FATAL.* you must run at least the API Server or crowdsec$' +} + @test "CS_LAPI_SECRET not strong enough" { CS_LAPI_SECRET=foo rune -1 wait-for "$CROWDSEC" assert_stderr --partial "api server init: unable to run local API: controller init: CS_LAPI_SECRET not strong enough" diff --git a/test/bats/crowdsec-acquisition.bats b/test/bats/crowdsec-acquisition.bats index 5189790f01f..1a92624b4c4 100644 --- a/test/bats/crowdsec-acquisition.bats +++ b/test/bats/crowdsec-acquisition.bats @@ -33,7 +33,7 @@ teardown() { EOT rune -1 "$CROWDSEC" -t - assert_stderr --partial "crowdsec init: while loading acquisition config: while configuring datasource of type file from $ACQUIS_DIR/file.yaml (position 0): cannot parse FileAcquisition configuration: yaml: unmarshal errors:\n line 6: cannot unmarshal !!seq 
into string" + assert_stderr --partial "crowdsec init: while loading acquisition config: while configuring datasource of type file from $ACQUIS_DIR/file.yaml (position 0): cannot parse FileAcquisition configuration: yaml: unmarshal errors:" } @test "datasource type detection" { From a1d26bdc5be716a80a80861745c2aefbe518db3f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 26 Dec 2024 15:21:52 +0100 Subject: [PATCH 373/581] cscli: improved hub management (#3352) --- cmd/crowdsec-cli/clihub/hub.go | 69 ++-- cmd/crowdsec-cli/clihub/items.go | 43 -- cmd/crowdsec-cli/clihub/utils_table.go | 60 --- cmd/crowdsec-cli/cliitem/inspect.go | 57 +++ cmd/crowdsec-cli/cliitem/item.go | 213 ++++++---- .../item_metrics.go => cliitem/metrics.go} | 8 +- cmd/crowdsec-cli/cliitem/metrics_table.go | 71 ++++ cmd/crowdsec-cli/clisetup/setup.go | 15 +- cmd/crowdsec-cli/config_restore.go | 13 +- cmd/crowdsec-cli/require/require.go | 2 +- docker/test/tests/test_hub.py | 2 +- docker/test/tests/test_hub_collections.py | 13 +- go.mod | 2 +- go.sum | 6 +- pkg/csconfig/cscli.go | 1 + pkg/cwhub/cwhub.go | 8 +- pkg/cwhub/cwhub_test.go | 8 +- pkg/cwhub/dataset.go | 72 ---- pkg/cwhub/doc.go | 17 +- pkg/cwhub/fetch.go | 108 +++++ pkg/cwhub/hub.go | 38 +- pkg/cwhub/hub_test.go | 8 +- pkg/cwhub/item.go | 227 +++++++---- pkg/cwhub/iteminstall.go | 73 ---- pkg/cwhub/iteminstall_test.go | 10 +- pkg/cwhub/itemlink.go | 78 ---- pkg/cwhub/itemremove.go | 138 ------- pkg/cwhub/itemupgrade.go | 254 ------------ pkg/cwhub/itemupgrade_test.go | 4 + pkg/cwhub/remote.go | 13 +- pkg/cwhub/sync.go | 12 +- pkg/emoji/emoji.go | 4 + pkg/hubops/colorize.go | 38 ++ pkg/hubops/datarefresh.go | 75 ++++ pkg/hubops/disable.go | 121 ++++++ pkg/hubops/download.go | 212 ++++++++++ pkg/hubops/enable.go | 113 ++++++ pkg/hubops/plan.go | 250 ++++++++++++ pkg/hubops/purge.go | 88 ++++ pkg/hubtest/hubtest_item.go | 7 +- pkg/setup/detect_test.go | 2 +- pkg/setup/install.go | 51 +-- 
test/bats/07_setup.bats | 31 +- test/bats/20_hub.bats | 40 +- test/bats/20_hub_collections.bats | 381 ----------------- test/bats/20_hub_collections_dep.bats | 26 +- test/bats/20_hub_items.bats | 70 ++-- test/bats/20_hub_parsers.bats | 383 ------------------ test/bats/20_hub_postoverflows.bats | 383 ------------------ test/bats/20_hub_scenarios.bats | 383 ------------------ test/bats/cscli-hubtype-inspect.bats | 93 +++++ test/bats/cscli-hubtype-install.bats | 269 ++++++++++++ test/bats/cscli-hubtype-list.bats | 130 ++++++ test/bats/cscli-hubtype-remove.bats | 245 +++++++++++ test/bats/cscli-hubtype-upgrade.bats | 253 ++++++++++++ test/bats/cscli-parsers.bats | 44 ++ test/bats/cscli-postoverflows.bats | 44 ++ test/bats/hub-index.bats | 357 ++++++++++++++++ test/bin/remove-all-hub-items | 2 +- test/lib/config/config-local | 2 +- test/lib/setup_file.sh | 24 +- 61 files changed, 3115 insertions(+), 2649 deletions(-) create mode 100644 cmd/crowdsec-cli/cliitem/inspect.go rename cmd/crowdsec-cli/{clihub/item_metrics.go => cliitem/metrics.go} (96%) create mode 100644 cmd/crowdsec-cli/cliitem/metrics_table.go delete mode 100644 pkg/cwhub/dataset.go create mode 100644 pkg/cwhub/fetch.go delete mode 100644 pkg/cwhub/iteminstall.go delete mode 100644 pkg/cwhub/itemlink.go delete mode 100644 pkg/cwhub/itemremove.go delete mode 100644 pkg/cwhub/itemupgrade.go create mode 100644 pkg/hubops/colorize.go create mode 100644 pkg/hubops/datarefresh.go create mode 100644 pkg/hubops/disable.go create mode 100644 pkg/hubops/download.go create mode 100644 pkg/hubops/enable.go create mode 100644 pkg/hubops/plan.go create mode 100644 pkg/hubops/purge.go delete mode 100644 test/bats/20_hub_collections.bats delete mode 100644 test/bats/20_hub_parsers.bats delete mode 100644 test/bats/20_hub_postoverflows.bats delete mode 100644 test/bats/20_hub_scenarios.bats create mode 100644 test/bats/cscli-hubtype-inspect.bats create mode 100644 test/bats/cscli-hubtype-install.bats create mode 100644 
test/bats/cscli-hubtype-list.bats create mode 100644 test/bats/cscli-hubtype-remove.bats create mode 100644 test/bats/cscli-hubtype-upgrade.bats create mode 100644 test/bats/cscli-parsers.bats create mode 100644 test/bats/cscli-postoverflows.bats create mode 100644 test/bats/hub-index.bats diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index f189d6a2e13..49ccd761285 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -5,15 +5,18 @@ import ( "encoding/json" "fmt" "io" + "os" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" ) type configGetter = func() *csconfig.Config @@ -55,11 +58,11 @@ func (cli *cliHub) List(out io.Writer, hub *cwhub.Hub, all bool) error { cfg := cli.cfg() for _, v := range hub.Warnings { - log.Info(v) + fmt.Fprintln(os.Stderr, v) } for _, line := range hub.ItemStats() { - log.Info(line) + fmt.Fprintln(os.Stderr, line) } items := make(map[string][]*cwhub.Item) @@ -100,7 +103,7 @@ func (cli *cliHub) newListCmd() *cobra.Command { } flags := cmd.Flags() - flags.BoolVarP(&all, "all", "a", false, "List disabled items as well") + flags.BoolVarP(&all, "all", "a", false, "List all available items, including those not installed") return cmd } @@ -108,7 +111,6 @@ func (cli *cliHub) newListCmd() *cobra.Command { func (cli *cliHub) update(ctx context.Context, withContent bool) error { local := cli.cfg().Hub remote := require.RemoteHub(ctx, cli.cfg()) - remote.EmbedItemContent = withContent // don't use require.Hub because if there is no index file, it would fail hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) @@ -116,7 +118,7 @@ func (cli *cliHub) update(ctx 
context.Context, withContent bool) error { return err } - if err := hub.Update(ctx); err != nil { + if err := hub.Update(ctx, withContent); err != nil { return fmt.Errorf("failed to update hub: %w", err) } @@ -125,7 +127,7 @@ func (cli *cliHub) update(ctx context.Context, withContent bool) error { } for _, v := range hub.Warnings { - log.Info(v) + fmt.Fprintln(os.Stderr, v) } return nil @@ -140,10 +142,18 @@ func (cli *cliHub) newUpdateCmd() *cobra.Command { Long: ` Fetches the .index.json file from the hub, containing the list of available configs. `, + Example: `# Download the last version of the index file. +cscli hub update + +# Download a 4x bigger version with all item contents (effectively pre-caching item downloads, but not data files). +cscli hub update --with-content`, Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.update(cmd.Context(), withContent) + if cmd.Flags().Changed("with-content") { + return cli.update(cmd.Context(), withContent) + } + return cli.update(cmd.Context(), cli.cfg().Cscli.HubWithContent) }, } @@ -153,36 +163,43 @@ Fetches the .index.json file from the hub, containing the list of available conf return cmd } -func (cli *cliHub) upgrade(ctx context.Context, force bool) error { - hub, err := require.Hub(cli.cfg(), require.RemoteHub(ctx, cli.cfg()), log.StandardLogger()) +func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) if err != nil { return err } + plan := hubops.NewActionPlan(hub) + for _, itemType := range cwhub.ItemTypes { - updated := 0 + for _, item := range hub.GetInstalledByType(itemType, true) { + plan.AddCommand(hubops.NewDownloadCommand(item, force)) + } + } - log.Infof("Upgrading %s", itemType) + plan.AddCommand(hubops.NewDataRefreshCommand(force)) - for _, item := range hub.GetInstalledByType(itemType, true) { - didUpdate, 
err := item.Upgrade(ctx, force) - if err != nil { - return err - } + verbose := (cfg.Cscli.Output == "raw") - if didUpdate { - updated++ - } - } + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } - log.Infof("Upgraded %d %s", updated, itemType) + if plan.ReloadNeeded { + fmt.Println("\n" + reload.Message) } return nil } func (cli *cliHub) newUpgradeCmd() *cobra.Command { - var force bool + var ( + yes bool + dryRun bool + force bool + ) cmd := &cobra.Command{ Use: "upgrade", @@ -190,15 +207,19 @@ func (cli *cliHub) newUpgradeCmd() *cobra.Command { Long: ` Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if you want the latest versions available. `, + // TODO: Example Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.upgrade(cmd.Context(), force) + return cli.upgrade(cmd.Context(), yes, dryRun, force) }, } flags := cmd.Flags() - flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") + flags.BoolVar(&yes, "yes", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated items; always update data files") + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index ef3127033ac..f63dc4bedd7 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -5,13 +5,9 @@ import ( "encoding/json" "fmt" "io" - "os" - "path/filepath" "slices" "strings" - "gopkg.in/yaml.v3" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -145,42 +141,3 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st return nil } - -func InspectItem(item *cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor 
string) error { - switch output { - case "human", "raw": - enc := yaml.NewEncoder(os.Stdout) - enc.SetIndent(2) - - if err := enc.Encode(item); err != nil { - return fmt.Errorf("unable to encode item: %w", err) - } - case "json": - b, err := json.MarshalIndent(*item, "", " ") - if err != nil { - return fmt.Errorf("unable to serialize item: %w", err) - } - - fmt.Print(string(b)) - } - - if output != "human" { - return nil - } - - if item.State.Tainted { - fmt.Println() - fmt.Printf(`This item is tainted. Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) - fmt.Println() - } - - if wantMetrics { - fmt.Printf("\nCurrent metrics: \n") - - if err := showMetrics(prometheusURL, item, wantColor); err != nil { - return err - } - } - - return nil -} diff --git a/cmd/crowdsec-cli/clihub/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go index 98f14341b10..4693161005b 100644 --- a/cmd/crowdsec-cli/clihub/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -3,7 +3,6 @@ package clihub import ( "fmt" "io" - "strconv" "github.com/jedib0t/go-pretty/v6/table" @@ -24,62 +23,3 @@ func listHubItemTable(out io.Writer, wantColor string, title string, items []*cw io.WriteString(out, title+"\n") io.WriteString(out, t.Render()+"\n") } - -func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { - t := cstable.NewLight(out, wantColor).Writer - t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) - - t.AppendRow(table.Row{ - strconv.Itoa(metrics["inband_hits"]), - strconv.Itoa(metrics["outband_hits"]), - }) - - io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") -} - -func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { - if metrics["instantiation"] == 0 { - return - } - - t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"Current Count", "Overflows", "Instantiated", 
"Poured", "Expired"}) - - t.AppendRow(table.Row{ - strconv.Itoa(metrics["curr_count"]), - strconv.Itoa(metrics["overflow"]), - strconv.Itoa(metrics["instantiation"]), - strconv.Itoa(metrics["pour"]), - strconv.Itoa(metrics["underflow"]), - }) - - io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") -} - -func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { - t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) - - // don't show table if no hits - showTable := false - - for source, stats := range metrics { - if stats["hits"] > 0 { - t.AppendRow(table.Row{ - source, - strconv.Itoa(stats["hits"]), - strconv.Itoa(stats["parsed"]), - strconv.Itoa(stats["unparsed"]), - }) - - showTable = true - } - } - - if showTable { - io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") - } -} diff --git a/cmd/crowdsec-cli/cliitem/inspect.go b/cmd/crowdsec-cli/cliitem/inspect.go new file mode 100644 index 00000000000..596674aa788 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/inspect.go @@ -0,0 +1,57 @@ +package cliitem + +import ( + "fmt" + "encoding/json" + "os" + "path/filepath" + + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func inspectItem(hub *cwhub.Hub, item *cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { + // This is dirty... + // We want to show current dependencies (from content), not latest (from index). + // The item is modifed but after this function the whole hub should be thrown away. + // A cleaner way would be to copy the struct first. 
+ item.Dependencies = item.CurrentDependencies() + + switch output { + case "human", "raw": + enc := yaml.NewEncoder(os.Stdout) + enc.SetIndent(2) + + if err := enc.Encode(item); err != nil { + return fmt.Errorf("unable to encode item: %w", err) + } + case "json": + b, err := json.MarshalIndent(*item, "", " ") + if err != nil { + return fmt.Errorf("unable to serialize item: %w", err) + } + + fmt.Print(string(b)) + } + + if output != "human" { + return nil + } + + if item.State.Tainted { + fmt.Println() + fmt.Printf(`This item is tainted. Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) + fmt.Println() + } + + if wantMetrics { + fmt.Printf("\nCurrent metrics: \n") + + if err := showMetrics(prometheusURL, hub, item, wantColor); err != nil { + return err + } + } + + return nil +} diff --git a/cmd/crowdsec-cli/cliitem/item.go b/cmd/crowdsec-cli/cliitem/item.go index 28828eb9c95..05e52d18dd3 100644 --- a/cmd/crowdsec-cli/cliitem/item.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -20,6 +20,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" ) type cliHelp struct { @@ -67,7 +68,7 @@ func (cli cliItem) NewCommand() *cobra.Command { return cmd } -func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool, force bool, ignoreError bool) error { +func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { cfg := cli.cfg() hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) @@ -75,6 +76,8 @@ func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool return err } + plan := hubops.NewActionPlan(hub) + for _, name := range args { item := hub.GetItem(cli.name, name) if item == nil { @@ -88,22 +91,38 @@ func (cli cliItem) 
install(ctx context.Context, args []string, downloadOnly bool continue } - if err := item.Install(ctx, force, downloadOnly); err != nil { - if !ignoreError { - return fmt.Errorf("error while installing '%s': %w", item.Name, err) + if err = plan.AddCommand(hubops.NewDownloadCommand(item, force)); err != nil { + return err + } + + if !downloadOnly { + if err = plan.AddCommand(hubops.NewEnableCommand(item, force)); err != nil { + return err } + } + } + + verbose := (cfg.Cscli.Output == "raw") - log.Errorf("Error while installing '%s': %s", item.Name, err) + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + if !ignoreError { + return err } + + log.Error(err) } - log.Info(reload.Message) + if plan.ReloadNeeded { + fmt.Println("\n" + reload.Message) + } return nil } func (cli cliItem) newInstallCmd() *cobra.Command { var ( + yes bool + dryRun bool downloadOnly bool force bool ignoreError bool @@ -120,20 +139,23 @@ func (cli cliItem) newInstallCmd() *cobra.Command { return compAllItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), args, downloadOnly, force, ignoreError) + return cli.install(cmd.Context(), args, yes, dryRun, downloadOnly, force, ignoreError) }, } flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } // return the names of the installed parents of an item, used to check if we can remove it -func istalledParentNames(item *cwhub.Item) []string { +func 
installedParentNames(item *cwhub.Item) []string { ret := make([]string, 0) for _, parent := range item.Ancestors() { @@ -145,11 +167,8 @@ func istalledParentNames(item *cwhub.Item) []string { return ret } -func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error { - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) - if err != nil { - return err - } +func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force bool, all bool) (*hubops.ActionPlan, error) { + plan := hubops.NewActionPlan(hub) if all { itemGetter := hub.GetInstalledByType @@ -157,43 +176,31 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error itemGetter = hub.GetItemsByType } - removed := 0 - for _, item := range itemGetter(cli.name, true) { - didRemove, err := item.Remove(purge, force) - if err != nil { - return err + if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { + return nil, err } - - if didRemove { - log.Infof("Removed %s", item.Name) - - removed++ + if purge { + if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { + return nil, err + } } } - log.Infof("Removed %d %s", removed, cli.name) - - if removed > 0 { - log.Info(reload.Message) - } - - return nil + return plan, nil } if len(args) == 0 { - return fmt.Errorf("specify at least one %s to remove or '--all'", cli.singular) + return nil, fmt.Errorf("specify at least one %s to remove or '--all'", cli.singular) } - removed := 0 - for _, itemName := range args { item := hub.GetItem(cli.name, itemName) if item == nil { - return fmt.Errorf("can't find '%s' in %s", itemName, cli.name) + return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) } - parents := istalledParentNames(item) + parents := installedParentNames(item) if !force && len(parents) > 0 { log.Warningf("%s belongs to collections: %s", item.Name, parents) @@ -202,22 +209,43 @@ func (cli cliItem) remove(args []string, purge bool, force bool, 
all bool) error continue } - didRemove, err := item.Remove(purge, force) - if err != nil { - return err - } + if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { + return nil, err - if didRemove { - log.Infof("Removed %s", item.Name) + } + if purge { + if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { + return nil, err - removed++ + } } } - log.Infof("Removed %d %s", removed, cli.name) + return plan, nil +} - if removed > 0 { - log.Info(reload.Message) + +func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun bool, purge bool, force bool, all bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + if err != nil { + return err + } + + plan, err := cli.removePlan(hub, args, purge, force, all) + if err != nil { + return err + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if plan.ReloadNeeded { + fmt.Println("\n" + reload.Message) } return nil @@ -225,9 +253,11 @@ func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error func (cli cliItem) newRemoveCmd() *cobra.Command { var ( - purge bool - force bool - all bool + yes bool + dryRun bool + purge bool + force bool + all bool ) cmd := &cobra.Command{ @@ -240,76 +270,78 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.remove(args, purge, force, all) + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 && all { + return errors.New("can't specify items and '--all' at the same time") + } + + return cli.remove(cmd.Context(), args, yes, dryRun, purge, force, all) }, } flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", 
false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") flags.BoolVar(&purge, "purge", false, "Delete source file too") flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } -func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all bool) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) - if err != nil { - return err - } +func (cli cliItem) upgradePlan(hub *cwhub.Hub, args []string, force bool, all bool) (*hubops.ActionPlan, error) { + plan := hubops.NewActionPlan(hub) if all { - updated := 0 - for _, item := range hub.GetInstalledByType(cli.name, true) { - didUpdate, err := item.Upgrade(ctx, force) - if err != nil { - return err + if err := plan.AddCommand(hubops.NewDownloadCommand(item, force)); err != nil { + return nil, err } - - if didUpdate { - updated++ - } - } - - log.Infof("Updated %d %s", updated, cli.name) - - if updated > 0 { - log.Info(reload.Message) } - return nil + return plan, nil } if len(args) == 0 { - return fmt.Errorf("specify at least one %s to upgrade or '--all'", cli.singular) + return nil, fmt.Errorf("specify at least one %s to upgrade or '--all'", cli.singular) } - updated := 0 - for _, itemName := range args { item := hub.GetItem(cli.name, itemName) if item == nil { - return fmt.Errorf("can't find '%s' in %s", itemName, cli.name) + return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) } - didUpdate, err := item.Upgrade(ctx, force) - if err != nil { - return err + if err := plan.AddCommand(hubops.NewDownloadCommand(item, force)); err != nil { + return nil, err } + } - if didUpdate { - log.Infof("Updated %s", item.Name) + return plan, nil +} - updated++ - } +func (cli cliItem) upgrade(ctx 
context.Context, args []string, yes bool, dryRun bool, force bool, all bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) + if err != nil { + return err + } + + plan, err := cli.upgradePlan(hub, args, force, all) + if err != nil { + return err + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err } - if updated > 0 { - log.Info(reload.Message) + if plan.ReloadNeeded { + fmt.Println("\n" + reload.Message) } return nil @@ -317,6 +349,8 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all b func (cli cliItem) newUpgradeCmd() *cobra.Command { var ( + yes bool + dryRun bool all bool force bool ) @@ -331,13 +365,16 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(cmd *cobra.Command, args []string) error { - return cli.upgrade(cmd.Context(), args, force, all) + return cli.upgrade(cmd.Context(), args, yes, dryRun, force, all) }, } flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } @@ -376,7 +413,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff continue } - if err = clihub.InspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { + if err = inspectItem(hub, item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { return err } diff --git a/cmd/crowdsec-cli/clihub/item_metrics.go b/cmd/crowdsec-cli/cliitem/metrics.go similarity index 96% 
rename from cmd/crowdsec-cli/clihub/item_metrics.go rename to cmd/crowdsec-cli/cliitem/metrics.go index ac9c18640fa..4999ea38078 100644 --- a/cmd/crowdsec-cli/clihub/item_metrics.go +++ b/cmd/crowdsec-cli/cliitem/metrics.go @@ -1,4 +1,4 @@ -package clihub +package cliitem import ( "fmt" @@ -17,7 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { +func showMetrics(prometheusURL string, hub *cwhub.Hub, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: metrics, err := getParserMetric(prometheusURL, hubItem.Name) @@ -32,8 +32,8 @@ func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) er } scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: - for _, sub := range hubItem.SubItems() { - if err := showMetrics(prometheusURL, sub, wantColor); err != nil { + for sub := range hubItem.CurrentDependencies().SubItems(hub) { + if err := showMetrics(prometheusURL, hub, sub, wantColor); err != nil { return err } } diff --git a/cmd/crowdsec-cli/cliitem/metrics_table.go b/cmd/crowdsec-cli/cliitem/metrics_table.go new file mode 100644 index 00000000000..378394bad85 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/metrics_table.go @@ -0,0 +1,71 @@ +package cliitem + +import ( + "fmt" + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/table" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + + +func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { + t := cstable.NewLight(out, wantColor).Writer + t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) + + t.AppendRow(table.Row{ + strconv.Itoa(metrics["inband_hits"]), + strconv.Itoa(metrics["outband_hits"]), + }) + + io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) + io.WriteString(out, t.Render()+"\n") +} + +func scenarioMetricsTable(out io.Writer, 
wantColor string, itemName string, metrics map[string]int) { + if metrics["instantiation"] == 0 { + return + } + + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) + + t.AppendRow(table.Row{ + strconv.Itoa(metrics["curr_count"]), + strconv.Itoa(metrics["overflow"]), + strconv.Itoa(metrics["instantiation"]), + strconv.Itoa(metrics["pour"]), + strconv.Itoa(metrics["underflow"]), + }) + + io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) + io.WriteString(out, t.Render()+"\n") +} + +func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) + + // don't show table if no hits + showTable := false + + for source, stats := range metrics { + if stats["hits"] > 0 { + t.AppendRow(table.Row{ + source, + strconv.Itoa(stats["hits"]), + strconv.Itoa(stats["parsed"]), + strconv.Itoa(stats["unparsed"]), + }) + + showTable = true + } + } + + if showTable { + io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) + io.WriteString(out, t.Render()+"\n") + } +} diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go index 269cdfb78e9..4cb423e484c 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -94,7 +94,10 @@ func (cli *cliSetup) newDetectCmd() *cobra.Command { } func (cli *cliSetup) newInstallHubCmd() *cobra.Command { - var dryRun bool + var ( + yes bool + dryRun bool + ) cmd := &cobra.Command{ Use: "install-hub [setup_file] [flags]", @@ -102,12 +105,14 @@ func (cli *cliSetup) newInstallHubCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), dryRun, args[0]) + return cli.install(cmd.Context(), yes, dryRun, args[0]) }, } flags := 
cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "confirm execution without prompt") flags.BoolVar(&dryRun, "dry-run", false, "don't install anything; print out what would have been") + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } @@ -276,7 +281,7 @@ func (cli *cliSetup) dataSources(fromFile string, toDir string) error { return nil } -func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) error { +func (cli *cliSetup) install(ctx context.Context, yes bool, dryRun bool, fromFile string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading file %s: %w", fromFile, err) @@ -289,7 +294,9 @@ func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) return err } - return setup.InstallHubItems(ctx, hub, input, dryRun) + verbose := (cfg.Cscli.Output == "raw") + + return setup.InstallHubItems(ctx, hub, input, yes, dryRun, verbose) } func (cli *cliSetup) validate(fromFile string) error { diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index c32328485ec..8884fa448d2 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -12,6 +12,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" ) func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { @@ -50,7 +51,17 @@ func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { continue } - if err = item.Install(ctx, false, false); err != nil { + plan := hubops.NewActionPlan(hub) + + if err = plan.AddCommand(hubops.NewDownloadCommand(item, false)); err != nil { + return err + } + + if err = plan.AddCommand(hubops.NewEnableCommand(item, false)); err != nil { + return err + } + + if err = plan.Execute(ctx, true, false, false); err != nil { log.Errorf("Error while installing %s : %s", toinstall, err) } } 
diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 191eee55bc5..7b3410021c1 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -116,7 +116,7 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) } if err := hub.Load(); err != nil { - return nil, fmt.Errorf("failed to read Hub index: %w. Run 'sudo cscli hub update' to download the index again", err) + return nil, fmt.Errorf("failed to read hub index: %w. Run 'sudo cscli hub update' to download the index again", err) } return hub, nil diff --git a/docker/test/tests/test_hub.py b/docker/test/tests/test_hub.py index 2365e3a9cef..e70555ea855 100644 --- a/docker/test/tests/test_hub.py +++ b/docker/test/tests/test_hub.py @@ -17,7 +17,7 @@ def test_preinstalled_hub(crowdsec, flavor): with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli hub list -o json') + res = cs.cont.exec_run('cscli hub list -o json', stderr=False) assert res.exit_code == 0 j = json.loads(res.output) collections = {c['name']: c for c in j['collections']} diff --git a/docker/test/tests/test_hub_collections.py b/docker/test/tests/test_hub_collections.py index 962f8ff8df4..0d1b3ee5e94 100644 --- a/docker/test/tests/test_hub_collections.py +++ b/docker/test/tests/test_hub_collections.py @@ -28,10 +28,8 @@ def test_install_two_collections(crowdsec, flavor): assert items[it1]['status'] == 'enabled' assert items[it2]['status'] == 'enabled' cs.wait_for_log([ - # f'*collections install "{it1}"*' - # f'*collections install "{it2}"*' - f'*Enabled collections: {it1}*', - f'*Enabled collections: {it2}*', + f'*enabling collections:{it1}*', + f'*enabling collections:{it2}*', ]) @@ -50,8 +48,7 @@ def test_disable_collection(crowdsec, flavor): items = {c['name'] for c in j['collections']} assert it not in items cs.wait_for_log([ - # 
f'*collections remove "{it}*", - f'*Removed symlink [[]{it}[]]*', + f'*disabling collections:{it}*', ]) @@ -72,7 +69,7 @@ def test_install_and_disable_collection(crowdsec, flavor): assert it not in items logs = cs.log_lines() # check that there was no attempt to install - assert not any(f'Enabled collections: {it}' in line for line in logs) + assert not any(f'enabling collections:{it}' in line for line in logs) # already done in bats, prividing here as example of a somewhat complex test @@ -91,7 +88,7 @@ def test_taint_bubble_up(crowdsec, tmp_path_factory, flavor): # implicit check for tainted=False assert items[coll]['status'] == 'enabled' cs.wait_for_log([ - f'*Enabled collections: {coll}*', + f'*enabling collections:{coll}*', ]) scenario = 'crowdsecurity/http-crawl-non_statics' diff --git a/go.mod b/go.mod index 43f0ed4e6f2..e437bbd688a 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/creack/pty v1.1.21 // indirect github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.15 + github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241219154300-555e14e3988f github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index 80f8a079bae..d092956d0a8 100644 --- a/go.sum +++ b/go.sum @@ -107,8 +107,10 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= 
-github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= +github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241203101722-e557f9809413 h1:VIedap4s3mXM4+tM2NMm7R3E/kn79ayLZaLHDqPYVCc= +github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241203101722-e557f9809413/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= +github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241219154300-555e14e3988f h1:Pd+O4UK78uQtTqbvYX+nHvqZ7TffD51uC4q0RE/podk= +github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241219154300-555e14e3988f/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/csconfig/cscli.go b/pkg/csconfig/cscli.go index 9393156c0ed..ad119dc9e13 100644 --- a/pkg/csconfig/cscli.go +++ b/pkg/csconfig/cscli.go @@ -10,6 +10,7 @@ type CscliCfg struct { Color string `yaml:"color,omitempty"` HubBranch string `yaml:"hub_branch"` HubURLTemplate string `yaml:"__hub_url_template__,omitempty"` + HubWithContent bool `yaml:"hub_with_content,omitempty"` SimulationConfig *SimulationConfig `yaml:"-"` DbConfig *DatabaseCfg `yaml:"-"` diff --git a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 683f1853b43..b41d1d16312 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -20,14 +20,14 @@ func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { return t.RoundTripper.RoundTrip(req) } -// hubClient is the HTTP client used to communicate with the CrowdSec Hub. -var hubClient = &http.Client{ +// HubClient is the HTTP client used to communicate with the CrowdSec Hub. 
+var HubClient = &http.Client{ Timeout: 120 * time.Second, Transport: &hubTransport{http.DefaultTransport}, } -// safePath returns a joined path and ensures that it does not escape the base directory. -func safePath(dir, filePath string) (string, error) { +// SafePath returns a joined path and ensures that it does not escape the base directory. +func SafePath(dir, filePath string) (string, error) { absBaseDir, err := filepath.Abs(filepath.Clean(dir)) if err != nil { return "", err diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 17e7a0dc723..1b5dee34dd3 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -68,7 +68,7 @@ func testHub(t *testing.T, update bool) *Hub { if update { ctx := context.Background() - err := hub.Update(ctx) + err := hub.Update(ctx, false) require.NoError(t, err) } @@ -83,14 +83,14 @@ func envSetup(t *testing.T) *Hub { setResponseByPath() log.SetLevel(log.DebugLevel) - defaultTransport := hubClient.Transport + defaultTransport := HubClient.Transport t.Cleanup(func() { - hubClient.Transport = defaultTransport + HubClient.Transport = defaultTransport }) // Mock the http client - hubClient.Transport = newMockTransport() + HubClient.Transport = newMockTransport() hub := testHub(t, true) diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go deleted file mode 100644 index 90bc9e057f9..00000000000 --- a/pkg/cwhub/dataset.go +++ /dev/null @@ -1,72 +0,0 @@ -package cwhub - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/go-cs-lib/downloader" - - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -// The DataSet is a list of data sources required by an item (built from the data: section in the yaml). -type DataSet struct { - Data []types.DataSource `yaml:"data,omitempty"` -} - -// downloadDataSet downloads all the data files for an item. 
-func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { - dec := yaml.NewDecoder(reader) - - for { - data := &DataSet{} - - if err := dec.Decode(data); err != nil { - if errors.Is(err, io.EOF) { - break - } - - return fmt.Errorf("while reading file: %w", err) - } - - for _, dataS := range data.Data { - destPath, err := safePath(dataFolder, dataS.DestPath) - if err != nil { - return err - } - - d := downloader. - New(). - WithHTTPClient(hubClient). - ToFile(destPath). - CompareContent(). - WithLogger(logrus.WithField("url", dataS.SourceURL)) - - if !force { - d = d.WithLastModified(). - WithShelfLife(7 * 24 * time.Hour) - } - - downloaded, err := d.Download(ctx, dataS.SourceURL) - if err != nil { - return fmt.Errorf("while getting data: %w", err) - } - - if downloaded { - logger.Infof("Downloaded %s", destPath) - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", destPath) - } - } - } - - return nil -} diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index f86b95c6454..a1ee9d37ee7 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -1,4 +1,5 @@ -// Package cwhub is responsible for installing and upgrading the local hub files for CrowdSec. +// Package cwhub is responsible for providing the state of the local hub to the security engine and cscli command. +// Installation, upgrade and removal of items or data files has been moved to pkg/hubops. 
// // # Definitions // @@ -84,20 +85,6 @@ // return fmt.Errorf("collection not found") // } // -// You can also install items if they have already been downloaded: -// -// // install a parser -// force := false -// downloadOnly := false -// err := parser.Install(force, downloadOnly) -// if err != nil { -// return fmt.Errorf("unable to install parser: %w", err) -// } -// -// As soon as you try to install an item that is not downloaded or is not up-to-date (meaning its computed hash -// does not correspond to the latest version available in the index), a download will be attempted and you'll -// get the error "remote hub configuration is not provided". -// // To provide the remote hub configuration, use the second parameter of NewHub(): // // remoteHub := cwhub.RemoteHubCfg{ diff --git a/pkg/cwhub/fetch.go b/pkg/cwhub/fetch.go new file mode 100644 index 00000000000..92198e63ef1 --- /dev/null +++ b/pkg/cwhub/fetch.go @@ -0,0 +1,108 @@ +package cwhub + +// Install, upgrade and remove items from the hub to the local configuration + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/downloader" + +) + + +// writeEmbeddedContentTo writes the embedded content to the specified path and checks the hash. +// If the content is base64 encoded, it will be decoded before writing. Check for item.Content +// before calling this method. 
+func (i *Item) writeEmbeddedContentTo(destPath, wantHash string) error { + if i.Content == "" { + return fmt.Errorf("no embedded content for %s", i.Name) + } + + content, err := base64.StdEncoding.DecodeString(i.Content) + if err != nil { + content = []byte(i.Content) + } + + dir := filepath.Dir(destPath) + + if err := os.MkdirAll(dir, 0o755); err != nil { + return fmt.Errorf("while creating %s: %w", dir, err) + } + + // check sha256 + hash := crypto.SHA256.New() + if _, err := hash.Write(content); err != nil { + return fmt.Errorf("while hashing %s: %w", i.Name, err) + } + + gotHash := hex.EncodeToString(hash.Sum(nil)) + if gotHash != wantHash { + return fmt.Errorf("hash mismatch: expected %s, got %s. The index file is invalid, please run 'cscli hub update' and try again", wantHash, gotHash) + } + + if err := os.WriteFile(destPath, content, 0o600); err != nil { + return fmt.Errorf("while writing %s: %w", destPath, err) + } + + return nil +} + +// writeRemoteContentTo downloads the content to the specified path and checks the hash. +func (i *Item) writeRemoteContentTo(ctx context.Context, destPath, wantHash string) (bool, string, error) { + url, err := i.hub.remote.urlTo(i.RemotePath) + if err != nil { + return false, "", fmt.Errorf("failed to build request: %w", err) + } + + d := downloader. + New(). + WithHTTPClient(HubClient). + ToFile(destPath). + WithETagFn(downloader.SHA256). + WithMakeDirs(true). + WithLogger(logrus.WithField("url", url)). + CompareContent(). + VerifyHash("sha256", wantHash) + + hasherr := downloader.HashMismatchError{} + + downloaded, err := d.Download(ctx, url) + + switch { + case errors.As(err, &hasherr): + i.hub.logger.Warnf("%s. The index file is outdated, please run 'cscli hub update' and try again", err.Error()) + case err != nil: + return false, "", err + } + + return downloaded, url, nil +} + +// FetchContentTo writes the last version of the item's YAML file to the specified path. 
+// Returns whether the file was downloaded, and the remote url for feedback purposes. +func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { + wantHash := i.latestHash() + if wantHash == "" { + return false, "", fmt.Errorf("%s: latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again", i.FQName()) + } + + // Use the embedded content if available + if i.Content != "" { + if err := i.writeEmbeddedContentTo(destPath, wantHash); err != nil { + return false, "", err + } + + return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil + } + + return i.writeRemoteContentTo(ctx, destPath, wantHash) +} diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index f74a794a512..55469fed711 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -61,7 +61,7 @@ func (h *Hub) Load() error { h.logger.Debugf("loading hub idx %s", h.local.HubIndexFile) if err := h.parseIndex(); err != nil { - return fmt.Errorf("failed to load hub index: %w", err) + return err } if err := h.localSync(); err != nil { @@ -82,21 +82,25 @@ func (h *Hub) parseIndex() error { return fmt.Errorf("failed to parse index: %w", err) } - h.logger.Debugf("%d item types in hub index", len(ItemTypes)) - // Iterate over the different types to complete the struct for _, itemType := range ItemTypes { - h.logger.Tracef("%s: %d items", itemType, len(h.GetItemMap(itemType))) - for name, item := range h.GetItemMap(itemType) { - item.hub = h - item.Name = name + if item == nil { + // likely defined as empty object or null in the index file + return fmt.Errorf("%s:%s has no index metadata", itemType, name) + } - // if the item has no (redundant) author, take it from the json key - if item.Author == "" && strings.Contains(name, "/") { - item.Author = strings.Split(name, "/")[0] + if item.RemotePath == "" { + return fmt.Errorf("%s:%s has no download path", itemType, name) } + if (itemType == PARSERS || itemType == POSTOVERFLOWS) && 
item.Stage == "" { + return fmt.Errorf("%s:%s has no stage", itemType, name) + } + + item.hub = h + item.Name = name + item.Type = itemType item.FileName = path.Base(item.RemotePath) @@ -152,23 +156,21 @@ func (h *Hub) ItemStats() []string { return ret } -// Update downloads the latest version of the index and writes it to disk if it changed. It cannot be called after Load() -// unless the hub is completely empty. -func (h *Hub) Update(ctx context.Context) error { +// Update downloads the latest version of the index and writes it to disk if it changed. +// It cannot be called after Load() unless the hub is completely empty. +func (h *Hub) Update(ctx context.Context, withContent bool) error { if len(h.pathIndex) > 0 { // if this happens, it's a bug. return errors.New("cannot update hub after items have been loaded") } - downloaded, err := h.remote.fetchIndex(ctx, h.local.HubIndexFile) + downloaded, err := h.remote.fetchIndex(ctx, h.local.HubIndexFile, withContent) if err != nil { return err } - if downloaded { - h.logger.Infof("Wrote index to %s", h.local.HubIndexFile) - } else { - h.logger.Info("hub index is up to date") + if !downloaded { + fmt.Println("Nothing to do, the hub index is up to date.") } return nil diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 1c2c9ccceca..727b9a18fdf 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -24,7 +24,7 @@ func TestInitHubUpdate(t *testing.T) { ctx := context.Background() - err = hub.Update(ctx) + err = hub.Update(ctx, false) require.NoError(t, err) err = hub.Load() @@ -58,7 +58,7 @@ func TestUpdateIndex(t *testing.T) { ctx := context.Background() - err = hub.Update(ctx) + err = hub.Update(ctx, false) cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") // bad domain @@ -70,7 +70,7 @@ func TestUpdateIndex(t *testing.T) { IndexPath: ".index.json", } - err = hub.Update(ctx) + err = hub.Update(ctx, false) require.NoError(t, err) // XXX: this is not 
failing // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") @@ -86,6 +86,6 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = "/does/not/exist/index.json" - err = hub.Update(ctx) + err = hub.Update(ctx, false) cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 32d1acf94ff..8cdb88a18ed 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -2,11 +2,15 @@ package cwhub import ( "encoding/json" + "errors" "fmt" + "io/fs" + "os" "path/filepath" "slices" "github.com/Masterminds/semver/v3" + yaml "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/emoji" ) @@ -98,52 +102,91 @@ func (s *ItemState) Emoji() string { } } +type Dependencies struct { + Parsers []string `json:"parsers,omitempty" yaml:"parsers,omitempty"` + PostOverflows []string `json:"postoverflows,omitempty" yaml:"postoverflows,omitempty"` + Scenarios []string `json:"scenarios,omitempty" yaml:"scenarios,omitempty"` + Collections []string `json:"collections,omitempty" yaml:"collections,omitempty"` + Contexts []string `json:"contexts,omitempty" yaml:"contexts,omitempty"` + AppsecConfigs []string `json:"appsec-configs,omitempty" yaml:"appsec-configs,omitempty"` + AppsecRules []string `json:"appsec-rules,omitempty" yaml:"appsec-rules,omitempty"` +} + +// a group of items of the same type +type itemgroup struct { + typeName string + itemNames []string +} + +func (d Dependencies) byType() []itemgroup { + return []itemgroup{ + {PARSERS, d.Parsers}, + {POSTOVERFLOWS, d.PostOverflows}, + {SCENARIOS, d.Scenarios}, + {CONTEXTS, d.Contexts}, + {APPSEC_CONFIGS, d.AppsecConfigs}, + {APPSEC_RULES, d.AppsecRules}, + {COLLECTIONS, d.Collections}, + } +} + +// SubItems iterates over the sub-items in the struct, excluding the ones that were not found in the hub. 
+func (d Dependencies) SubItems(hub *Hub) func(func(*Item) bool) { + return func(yield func(*Item) bool) { + for _, typeGroup := range d.byType() { + for _, name := range typeGroup.itemNames { + s := hub.GetItem(typeGroup.typeName, name) + if s == nil { + continue + } + if !yield(s) { + return + } + } + } + } +} + // Item is created from an index file and enriched with local info. type Item struct { hub *Hub // back pointer to the hub, to retrieve other items and call install/remove methods State ItemState `json:"-" yaml:"-"` // local state, not stored in the index - Type string `json:"type,omitempty" yaml:"type,omitempty"` // one of the ItemTypes - Stage string `json:"stage,omitempty" yaml:"stage,omitempty"` // Stage for parser|postoverflow: s00-raw/s01-... - Name string `json:"name,omitempty" yaml:"name,omitempty"` // usually "author/name" - FileName string `json:"file_name,omitempty" yaml:"file_name,omitempty"` // eg. apache2-logs.yaml + Type string `json:"type,omitempty" yaml:"type,omitempty"` + Stage string `json:"stage,omitempty" yaml:"stage,omitempty"` // Stage for parser|postoverflow: s00-raw/s01-... + Name string `json:"name,omitempty" yaml:"name,omitempty"` // usually "author/name" + FileName string `json:"file_name,omitempty" yaml:"file_name,omitempty"` // eg. apache2-logs.yaml Description string `json:"description,omitempty" yaml:"description,omitempty"` - Content string `json:"content,omitempty" yaml:"-"` - Author string `json:"author,omitempty" yaml:"author,omitempty"` - References []string `json:"references,omitempty" yaml:"references,omitempty"` + Content string `json:"content,omitempty" yaml:"-"` + References []string `json:"references,omitempty" yaml:"references,omitempty"` + // NOTE: RemotePath could be derived from the other fields RemotePath string `json:"path,omitempty" yaml:"path,omitempty"` // path relative to the base URL eg. 
/parsers/stage/author/file.yaml Version string `json:"version,omitempty" yaml:"version,omitempty"` // the last available version Versions map[string]ItemVersion `json:"versions,omitempty" yaml:"-"` // all the known versions - // if it's a collection, it can have sub items - Parsers []string `json:"parsers,omitempty" yaml:"parsers,omitempty"` - PostOverflows []string `json:"postoverflows,omitempty" yaml:"postoverflows,omitempty"` - Scenarios []string `json:"scenarios,omitempty" yaml:"scenarios,omitempty"` - Collections []string `json:"collections,omitempty" yaml:"collections,omitempty"` - Contexts []string `json:"contexts,omitempty" yaml:"contexts,omitempty"` - AppsecConfigs []string `json:"appsec-configs,omitempty" yaml:"appsec-configs,omitempty"` - AppsecRules []string `json:"appsec-rules,omitempty" yaml:"appsec-rules,omitempty"` + // The index contains the dependencies of the "latest" version (collections only) + Dependencies } -// installPath returns the location of the symlink to the item in the hub, or the path of the item itself if it's local +// InstallPath returns the location of the symlink to the item in the hub, or the path of the item itself if it's local // (eg. /etc/crowdsec/collections/xyz.yaml). // Raises an error if the path goes outside of the install dir. -func (i *Item) installPath() (string, error) { +func (i *Item) InstallPath() (string, error) { p := i.Type if i.Stage != "" { p = filepath.Join(p, i.Stage) } - return safePath(i.hub.local.InstallDir, filepath.Join(p, i.FileName)) + return SafePath(i.hub.local.InstallDir, filepath.Join(p, i.FileName)) } -// downloadPath returns the location of the actual config file in the hub +// DownloadPath returns the location of the actual config file in the hub // (eg. /etc/crowdsec/hub/collections/author/xyz.yaml). // Raises an error if the path goes outside of the hub dir. 
-func (i *Item) downloadPath() (string, error) { - ret, err := safePath(i.hub.local.HubDir, i.RemotePath) +func (i *Item) DownloadPath() (string, error) { + ret, err := SafePath(i.hub.local.HubDir, i.RemotePath) if err != nil { return "", err } @@ -203,76 +246,50 @@ func (i Item) MarshalYAML() (interface{}, error) { }, nil } -// SubItems returns a slice of sub-items, excluding the ones that were not found. -func (i *Item) SubItems() []*Item { - sub := make([]*Item, 0) - - for _, name := range i.Parsers { - s := i.hub.GetItem(PARSERS, name) - if s == nil { - continue - } - - sub = append(sub, s) - } - - for _, name := range i.PostOverflows { - s := i.hub.GetItem(POSTOVERFLOWS, name) - if s == nil { - continue - } +// LatestDependencies returns a slice of sub-items of the "latest" available version of the item, as opposed to the version that is actually installed. The information comes from the index. +func (i *Item) LatestDependencies() Dependencies { + return i.Dependencies +} - sub = append(sub, s) +// CurrentSubItems returns a slice of sub-items of the installed version, excluding the ones that were not found. +// The list comes from the content file if parseable, otherwise from the index (same as LatestDependencies). 
+func (i *Item) CurrentDependencies() Dependencies { + if !i.HasSubItems() { + return Dependencies{} } - for _, name := range i.Scenarios { - s := i.hub.GetItem(SCENARIOS, name) - if s == nil { - continue - } - - sub = append(sub, s) + if i.State.UpToDate { + return i.Dependencies } - for _, name := range i.Contexts { - s := i.hub.GetItem(CONTEXTS, name) - if s == nil { - continue - } - - sub = append(sub, s) + contentPath, err := i.InstallPath() + if err != nil { + i.hub.logger.Warningf("can't access dependencies for %s, using index", i.FQName()) + return i.Dependencies } - for _, name := range i.AppsecConfigs { - s := i.hub.GetItem(APPSEC_CONFIGS, name) - if s == nil { - continue - } - - sub = append(sub, s) + currentContent, err := os.ReadFile(contentPath) + if errors.Is(err, fs.ErrNotExist) { + return i.Dependencies } - - for _, name := range i.AppsecRules { - s := i.hub.GetItem(APPSEC_RULES, name) - if s == nil { - continue - } - - sub = append(sub, s) + if err != nil { + // a file might be corrupted, or in development + i.hub.logger.Warningf("can't read dependencies for %s, using index", i.FQName()) + return i.Dependencies } - for _, name := range i.Collections { - s := i.hub.GetItem(COLLECTIONS, name) - if s == nil { - continue - } + var d Dependencies - sub = append(sub, s) + // XXX: assume collection content never has multiple documents + if err := yaml.Unmarshal(currentContent, &d); err != nil { + i.hub.logger.Warningf("can't parse dependencies for %s, using index", i.FQName()) + return i.Dependencies } - - return sub + + return d } + func (i *Item) logMissingSubItems() { if !i.HasSubItems() { return @@ -337,7 +354,59 @@ func (i *Item) Ancestors() []*Item { return ret } -// descendants returns a list of all (direct or indirect) dependencies of the item. +// SafeToRemoveDeps returns a slice of dependencies that can be safely removed when this item is removed. +// The returned slice can contain items that are not installed, or not downloaded. 
+func (i *Item) SafeToRemoveDeps() ([]*Item, error) { + ret := make([]*Item, 0) + + // can return err for circular dependencies + descendants, err := i.descendants() + if err != nil { + return nil, err + } + + ancestors := i.Ancestors() + + for sub := range i.CurrentDependencies().SubItems(i.hub) { + safe := true + + // if the sub depends on a collection that is not a direct or indirect dependency + // of the current item, it is not removed + for _, subParent := range sub.Ancestors() { + if !subParent.State.Installed { + continue + } + + // the ancestor that would block the removal of the sub item is also an ancestor + // of the item we are removing, so we don't want false warnings + // (e.g. crowdsecurity/sshd-logs was not removed because it also belongs to crowdsecurity/linux, + // while we are removing crowdsecurity/sshd) + if slices.Contains(ancestors, subParent) { + continue + } + + // the sub-item belongs to the item we are removing, but we already knew that + if subParent == i { + continue + } + + if !slices.Contains(descendants, subParent) { + // not removing %s because it also belongs to %s", sub.FQName(), subParent.FQName()) + safe = false + break + } + } + + if safe { + ret = append(ret, sub) + } + } + + return ret, nil +} + + +// descendants returns a list of all (direct or indirect) dependencies of the item's current version. 
func (i *Item) descendants() ([]*Item, error) { var collectSubItems func(item *Item, visited map[*Item]bool, result *[]*Item) error @@ -352,7 +421,7 @@ func (i *Item) descendants() ([]*Item, error) { visited[item] = true - for _, subItem := range item.SubItems() { + for subItem := range item.CurrentDependencies().SubItems(item.hub) { if subItem == i { return fmt.Errorf("circular dependency detected: %s depends on %s", item.Name, i.Name) } diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go deleted file mode 100644 index 912897d0d7e..00000000000 --- a/pkg/cwhub/iteminstall.go +++ /dev/null @@ -1,73 +0,0 @@ -package cwhub - -import ( - "context" - "fmt" -) - -// enable enables the item by creating a symlink to the downloaded content, and also enables sub-items. -func (i *Item) enable() error { - if i.State.Installed { - if i.State.Tainted { - return fmt.Errorf("%s is tainted, won't overwrite unless --force", i.Name) - } - - if i.State.IsLocal() { - return fmt.Errorf("%s is local, won't overwrite", i.Name) - } - - // if it's a collection, check sub-items even if the collection file itself is up-to-date - if i.State.UpToDate && !i.HasSubItems() { - i.hub.logger.Tracef("%s is installed and up-to-date, skip.", i.Name) - return nil - } - } - - for _, sub := range i.SubItems() { - if err := sub.enable(); err != nil { - return fmt.Errorf("while installing %s: %w", sub.Name, err) - } - } - - if err := i.createInstallLink(); err != nil { - return err - } - - i.hub.logger.Infof("Enabled %s: %s", i.Type, i.Name) - i.State.Installed = true - - return nil -} - -// Install installs the item from the hub, downloading it if needed. 
-func (i *Item) Install(ctx context.Context, force bool, downloadOnly bool) error { - if downloadOnly && i.State.Downloaded && i.State.UpToDate { - i.hub.logger.Infof("%s is already downloaded and up-to-date", i.Name) - - if !force { - return nil - } - } - - downloaded, err := i.downloadLatest(ctx, force, true) - if err != nil { - return err - } - - if downloadOnly && downloaded { - return nil - } - - if err := i.enable(); err != nil { - return fmt.Errorf("while enabling %s: %w", i.Name, err) - } - - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("installed %s\n", i.Name) - - i.hub.logger.Infof("Enabled %s", i.Name) - - return nil -} diff --git a/pkg/cwhub/iteminstall_test.go b/pkg/cwhub/iteminstall_test.go index 5bfc7e8148e..ba47f2f4b4a 100644 --- a/pkg/cwhub/iteminstall_test.go +++ b/pkg/cwhub/iteminstall_test.go @@ -1,5 +1,9 @@ package cwhub +// XXX: these tests are obsolete + +/* + import ( "context" "os" @@ -103,7 +107,7 @@ func TestInstallParser(t *testing.T) { - force update it - check its status - remove it - */ + * hub := envSetup(t) // map iteration is random by itself @@ -126,7 +130,7 @@ func TestInstallCollection(t *testing.T) { - force update it - check its status - remove it - */ + * hub := envSetup(t) // map iteration is random by itself @@ -139,3 +143,5 @@ func TestInstallCollection(t *testing.T) { break } } + +*/ diff --git a/pkg/cwhub/itemlink.go b/pkg/cwhub/itemlink.go deleted file mode 100644 index 8a78d6805b7..00000000000 --- a/pkg/cwhub/itemlink.go +++ /dev/null @@ -1,78 +0,0 @@ -package cwhub - -import ( - "fmt" - "os" - "path/filepath" -) - -// createInstallLink creates a symlink between the actual config file at hub.HubDir and hub.ConfigDir. 
-func (i *Item) createInstallLink() error { - dest, err := i.installPath() - if err != nil { - return err - } - - destDir := filepath.Dir(dest) - if err = os.MkdirAll(destDir, os.ModePerm); err != nil { - return fmt.Errorf("while creating %s: %w", destDir, err) - } - - if _, err = os.Lstat(dest); !os.IsNotExist(err) { - i.hub.logger.Infof("%s already exists.", dest) - return nil - } - - src, err := i.downloadPath() - if err != nil { - return err - } - - if err = os.Symlink(src, dest); err != nil { - return fmt.Errorf("while creating symlink from %s to %s: %w", src, dest, err) - } - - return nil -} - -// removeInstallLink removes the symlink to the downloaded content. -func (i *Item) removeInstallLink() error { - syml, err := i.installPath() - if err != nil { - return err - } - - stat, err := os.Lstat(syml) - if err != nil { - return err - } - - // if it's managed by hub, it's a symlink to csconfig.GConfig.hub.HubDir / ... - if stat.Mode()&os.ModeSymlink == 0 { - i.hub.logger.Warningf("%s (%s) isn't a symlink, can't disable", i.Name, syml) - return fmt.Errorf("%s isn't managed by hub", i.Name) - } - - hubpath, err := os.Readlink(syml) - if err != nil { - return fmt.Errorf("while reading symlink: %w", err) - } - - src, err := i.downloadPath() - if err != nil { - return err - } - - if hubpath != src { - i.hub.logger.Warningf("%s (%s) isn't a symlink to %s", i.Name, syml, src) - return fmt.Errorf("%s isn't managed by hub", i.Name) - } - - if err := os.Remove(syml); err != nil { - return fmt.Errorf("while removing symlink: %w", err) - } - - i.hub.logger.Infof("Removed symlink [%s]: %s", i.Name, syml) - - return nil -} diff --git a/pkg/cwhub/itemremove.go b/pkg/cwhub/itemremove.go deleted file mode 100644 index eca0c856237..00000000000 --- a/pkg/cwhub/itemremove.go +++ /dev/null @@ -1,138 +0,0 @@ -package cwhub - -import ( - "fmt" - "os" - "slices" -) - -// purge removes the actual config file that was downloaded. 
-func (i *Item) purge() (bool, error) { - if !i.State.Downloaded { - i.hub.logger.Debugf("removing %s: not downloaded -- no need to remove", i.Name) - return false, nil - } - - src, err := i.downloadPath() - if err != nil { - return false, err - } - - if err := os.Remove(src); err != nil { - if os.IsNotExist(err) { - i.hub.logger.Debugf("%s doesn't exist, no need to remove", src) - return false, nil - } - - return false, fmt.Errorf("while removing file: %w", err) - } - - i.State.Downloaded = false - i.hub.logger.Infof("Removed source file [%s]: %s", i.Name, src) - - return true, nil -} - -// disable removes the install link, and optionally the downloaded content. -func (i *Item) disable(purge bool, force bool) (bool, error) { - didRemove := true - - err := i.removeInstallLink() - if os.IsNotExist(err) { - if !purge && !force { - link, _ := i.installPath() - return false, fmt.Errorf("link %s does not exist (override with --force or --purge)", link) - } - - didRemove = false - } else if err != nil { - return false, err - } - - i.State.Installed = false - didPurge := false - - if purge { - if didPurge, err = i.purge(); err != nil { - return didRemove, err - } - } - - ret := didRemove || didPurge - - return ret, nil -} - -// Remove disables the item, optionally removing the downloaded content. 
-func (i *Item) Remove(purge bool, force bool) (bool, error) { - if i.State.IsLocal() { - i.hub.logger.Warningf("%s is a local item, please delete manually", i.Name) - return false, nil - } - - if i.State.Tainted && !force { - return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) - } - - if !i.State.Installed && !purge { - i.hub.logger.Infof("removing %s: not installed -- no need to remove", i.Name) - return false, nil - } - - removed := false - - descendants, err := i.descendants() - if err != nil { - return false, err - } - - ancestors := i.Ancestors() - - for _, sub := range i.SubItems() { - if !sub.State.Installed { - continue - } - - // if the sub depends on a collection that is not a direct or indirect dependency - // of the current item, it is not removed - for _, subParent := range sub.Ancestors() { - if !purge && !subParent.State.Installed { - continue - } - - // the ancestor that would block the removal of the sub item is also an ancestor - // of the item we are removing, so we don't want false warnings - // (e.g. 
crowdsecurity/sshd-logs was not removed because it also belongs to crowdsecurity/linux, - // while we are removing crowdsecurity/sshd) - if slices.Contains(ancestors, subParent) { - continue - } - - // the sub-item belongs to the item we are removing, but we already knew that - if subParent == i { - continue - } - - if !slices.Contains(descendants, subParent) { - i.hub.logger.Infof("%s was not removed because it also belongs to %s", sub.Name, subParent.Name) - continue - } - } - - subRemoved, err := sub.Remove(purge, force) - if err != nil { - return false, fmt.Errorf("unable to disable %s: %w", i.Name, err) - } - - removed = removed || subRemoved - } - - didDisable, err := i.disable(purge, force) - if err != nil { - return false, fmt.Errorf("while removing %s: %w", i.Name, err) - } - - removed = removed || didDisable - - return removed, nil -} diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go deleted file mode 100644 index 105e5ebec31..00000000000 --- a/pkg/cwhub/itemupgrade.go +++ /dev/null @@ -1,254 +0,0 @@ -package cwhub - -// Install, upgrade and remove items from the hub to the local configuration - -import ( - "context" - "crypto" - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/downloader" - - "github.com/crowdsecurity/crowdsec/pkg/emoji" -) - -// Upgrade downloads and applies the last version of the item from the hub. 
-func (i *Item) Upgrade(ctx context.Context, force bool) (bool, error) { - if i.State.IsLocal() { - i.hub.logger.Infof("not upgrading %s: local item", i.Name) - return false, nil - } - - if !i.State.Downloaded { - return false, fmt.Errorf("can't upgrade %s: not installed", i.Name) - } - - if !i.State.Installed { - return false, fmt.Errorf("can't upgrade %s: downloaded but not installed", i.Name) - } - - if i.State.UpToDate { - i.hub.logger.Infof("%s: up-to-date", i.Name) - - if err := i.DownloadDataIfNeeded(ctx, force); err != nil { - return false, fmt.Errorf("%s: download failed: %w", i.Name, err) - } - - if !force { - // no upgrade needed - return false, nil - } - } - - if _, err := i.downloadLatest(ctx, force, true); err != nil { - return false, fmt.Errorf("%s: download failed: %w", i.Name, err) - } - - if !i.State.UpToDate { - if i.State.Tainted { - i.hub.logger.Warningf("%v %s is tainted, --force to overwrite", emoji.Warning, i.Name) - } - - return false, nil - } - - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", i.Name) - i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) - - return true, nil -} - -// downloadLatest downloads the latest version of the item to the hub directory. 
-func (i *Item) downloadLatest(ctx context.Context, overwrite bool, updateOnly bool) (bool, error) { - i.hub.logger.Debugf("Downloading %s %s", i.Type, i.Name) - - for _, sub := range i.SubItems() { - if !sub.State.Installed && updateOnly && sub.State.Downloaded { - i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name) - continue - } - - i.hub.logger.Debugf("Download %s sub-item: %s %s (%t -> %t)", i.Name, sub.Type, sub.Name, i.State.Installed, updateOnly) - - // recurse as it's a collection - if sub.HasSubItems() { - i.hub.logger.Tracef("collection, recurse") - - if _, err := sub.downloadLatest(ctx, overwrite, updateOnly); err != nil { - return false, err - } - } - - downloaded := sub.State.Downloaded - - if _, err := sub.download(ctx, overwrite); err != nil { - return false, err - } - - // We need to enable an item when it has been added to a collection since latest release of the collection. - // We check if sub.Downloaded is false because maybe the item has been disabled by the user. - if !sub.State.Installed && !downloaded { - if err := sub.enable(); err != nil { - return false, fmt.Errorf("enabling '%s': %w", sub.Name, err) - } - } - } - - if !i.State.Installed && updateOnly && i.State.Downloaded && !overwrite { - i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name) - return false, nil - } - - return i.download(ctx, overwrite) -} - -// FetchContentTo downloads the last version of the item's YAML file to the specified path. -func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { - wantHash := i.latestHash() - if wantHash == "" { - return false, "", errors.New("latest hash missing from index. 
The index file is invalid, please run 'cscli hub update' and try again") - } - - // Use the embedded content if available - if i.Content != "" { - // the content was historically base64 encoded - content, err := base64.StdEncoding.DecodeString(i.Content) - if err != nil { - content = []byte(i.Content) - } - - dir := filepath.Dir(destPath) - - if err := os.MkdirAll(dir, 0o755); err != nil { - return false, "", fmt.Errorf("while creating %s: %w", dir, err) - } - - // check sha256 - hash := crypto.SHA256.New() - if _, err := hash.Write(content); err != nil { - return false, "", fmt.Errorf("while hashing %s: %w", i.Name, err) - } - - gotHash := hex.EncodeToString(hash.Sum(nil)) - if gotHash != wantHash { - return false, "", fmt.Errorf("hash mismatch: expected %s, got %s. The index file is invalid, please run 'cscli hub update' and try again", wantHash, gotHash) - } - - if err := os.WriteFile(destPath, content, 0o600); err != nil { - return false, "", fmt.Errorf("while writing %s: %w", destPath, err) - } - - i.hub.logger.Debugf("Wrote %s content from .index.json to %s", i.Name, destPath) - - return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil - } - - url, err := i.hub.remote.urlTo(i.RemotePath) - if err != nil { - return false, "", fmt.Errorf("failed to build request: %w", err) - } - - d := downloader. - New(). - WithHTTPClient(hubClient). - ToFile(destPath). - WithETagFn(downloader.SHA256). - WithMakeDirs(true). - WithLogger(logrus.WithField("url", url)). - CompareContent(). - VerifyHash("sha256", wantHash) - - // TODO: recommend hub update if hash does not match - - downloaded, err := d.Download(ctx, url) - if err != nil { - return false, "", err - } - - return downloaded, url, nil -} - -// download downloads the item from the hub and writes it to the hub directory. 
-func (i *Item) download(ctx context.Context, overwrite bool) (bool, error) { - // ensure that target file is within target dir - finalPath, err := i.downloadPath() - if err != nil { - return false, err - } - - if i.State.IsLocal() { - i.hub.logger.Warningf("%s is local, can't download", i.Name) - return false, nil - } - - // if user didn't --force, don't overwrite local, tainted, up-to-date files - if !overwrite { - if i.State.Tainted { - i.hub.logger.Debugf("%s: tainted, not updated", i.Name) - return false, nil - } - - if i.State.UpToDate { - // We still have to check if data files are present - i.hub.logger.Debugf("%s: up-to-date, not updated", i.Name) - } - } - - downloaded, _, err := i.FetchContentTo(ctx, finalPath) - if err != nil { - return false, err - } - - if downloaded { - i.hub.logger.Infof("Downloaded %s", i.Name) - } - - i.State.Downloaded = true - i.State.Tainted = false - i.State.UpToDate = true - - // read content to get the list of data files - reader, err := os.Open(finalPath) - if err != nil { - return false, fmt.Errorf("while opening %s: %w", finalPath, err) - } - - defer reader.Close() - - if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { - return false, fmt.Errorf("while downloading data for %s: %w", i.FileName, err) - } - - return true, nil -} - -// DownloadDataIfNeeded downloads the data set for the item. 
-func (i *Item) DownloadDataIfNeeded(ctx context.Context, force bool) error { - itemFilePath, err := i.installPath() - if err != nil { - return err - } - - itemFile, err := os.Open(itemFilePath) - if err != nil { - return fmt.Errorf("while opening %s: %w", itemFilePath, err) - } - - defer itemFile.Close() - - if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, force, itemFile, i.hub.logger); err != nil { - return fmt.Errorf("while downloading data for %s: %w", itemFilePath, err) - } - - return nil -} diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index 5f9e4d1944e..e523a222d69 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -1,5 +1,7 @@ package cwhub +/* + import ( "context" "testing" @@ -221,3 +223,5 @@ func pushUpdateToCollectionInHub() { responseByPath["/crowdsecurity/master/.index.json"] = fileToStringX("./testdata/index2.json") responseByPath["/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml"] = fileToStringX("./testdata/collection_v2.yaml") } + +*/ diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go index 8d2dc2dbb94..c96471b390c 100644 --- a/pkg/cwhub/remote.go +++ b/pkg/cwhub/remote.go @@ -3,6 +3,7 @@ package cwhub import ( "context" "fmt" + "net/http" "net/url" "github.com/sirupsen/logrus" @@ -15,7 +16,6 @@ type RemoteHubCfg struct { Branch string URLTemplate string IndexPath string - EmbedItemContent bool } // urlTo builds the URL to download a file from the remote hub. @@ -32,7 +32,7 @@ func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { return fmt.Sprintf(r.URLTemplate, r.Branch, remotePath), nil } -// addURLParam adds the "with_content=true" parameter to the URL if it's not already present. +// addURLParam adds a parameter with a value (ex. "with_content=true") to the URL if it's not already present. 
func addURLParam(rawURL string, param string, value string) (string, error) { parsedURL, err := url.Parse(rawURL) if err != nil { @@ -51,7 +51,7 @@ func addURLParam(rawURL string, param string, value string) (string, error) { } // fetchIndex downloads the index from the hub and returns the content. -func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, error) { +func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string, withContent bool) (bool, error) { if r == nil { return false, ErrNilRemoteHub } @@ -61,7 +61,7 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e return false, fmt.Errorf("failed to build hub index request: %w", err) } - if r.EmbedItemContent { + if withContent { url, err = addURLParam(url, "with_content", "true") if err != nil { return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) @@ -70,11 +70,14 @@ func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, e downloaded, err := downloader. New(). - WithHTTPClient(hubClient). + WithHTTPClient(HubClient). ToFile(destPath). WithETagFn(downloader.SHA256). CompareContent(). WithLogger(logrus.WithField("url", url)). + BeforeRequest(func(_ *http.Request) { + fmt.Println("Downloading "+destPath) + }). 
Download(ctx, url) if err != nil { return false, err diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index c82822e64ef..d2b59df35d6 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -105,6 +105,9 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo fname := subsHub[2] if ftype == PARSERS || ftype == POSTOVERFLOWS { + if len(subsHub) < 4 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) + } stage = subsHub[1] fauthor = subsHub[2] fname = subsHub[3] @@ -308,17 +311,12 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { // if we are walking hub dir, just mark present files as downloaded if info.inhub { - // wrong author - if info.fauthor != item.Author { - continue - } - // not the item we're looking for if !item.validPath(info.fauthor, info.fname) { continue } - src, err := item.downloadPath() + src, err := item.DownloadPath() if err != nil { return err } @@ -364,7 +362,7 @@ func (i *Item) checkSubItemVersions() []string { // ensure all the sub-items are installed, or tag the parent as tainted i.hub.logger.Tracef("checking submembers of %s installed:%t", i.Name, i.State.Installed) - for _, sub := range i.SubItems() { + for sub := range i.CurrentDependencies().SubItems(i.hub) { i.hub.logger.Tracef("check %s installed:%t", sub.Name, sub.State.Installed) if !i.State.Installed { diff --git a/pkg/emoji/emoji.go b/pkg/emoji/emoji.go index 51295a85411..9b939249bf0 100644 --- a/pkg/emoji/emoji.go +++ b/pkg/emoji/emoji.go @@ -11,4 +11,8 @@ const ( QuestionMark = "\u2753" // ❓ RedCircle = "\U0001f534" // 🔴 Warning = "\u26a0\ufe0f" // ⚠️ + InboxTray = "\U0001f4e5" // 📥 + DownArrow = "\u2b07" // ⬇️ + Wastebasket = "\U0001f5d1" // 🗑 + Sync = "\U0001F504" // 🔄 official name is Anticlockwise Downwards and Upwards Open Circle Arrows and I'm not even joking ) diff --git a/pkg/hubops/colorize.go b/pkg/hubops/colorize.go new file mode 100644 index 00000000000..3af2aecab93 --- /dev/null +++ 
b/pkg/hubops/colorize.go @@ -0,0 +1,38 @@ +package hubops + +import ( + "strings" + + "github.com/fatih/color" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +// colorizeItemName splits the input string on "/" and colorizes the second part. +func colorizeItemName(fullname string) string { + parts := strings.SplitN(fullname, "/", 2) + if len(parts) == 2 { + bold := color.New(color.Bold) + author := parts[0] + name := parts[1] + return author + "/" + bold.Sprint(name) + } + return fullname +} + +func colorizeOpType(opType string) string { + switch opType { + case (&DownloadCommand{}).OperationType(): + return emoji.InboxTray + " " + color.BlueString(opType) + case (&EnableCommand{}).OperationType(): + return emoji.CheckMarkButton + " " + color.GreenString(opType) + case (&DisableCommand{}).OperationType(): + return emoji.CrossMark + " " + color.RedString(opType) + case (&PurgeCommand{}).OperationType(): + return emoji.Wastebasket + " " + color.RedString(opType) + case (&DataRefreshCommand{}).OperationType(): + return emoji.Sync + " " + opType + } + + return opType +} diff --git a/pkg/hubops/datarefresh.go b/pkg/hubops/datarefresh.go new file mode 100644 index 00000000000..985db8c1a11 --- /dev/null +++ b/pkg/hubops/datarefresh.go @@ -0,0 +1,75 @@ +package hubops + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// XXX: TODO: temporary for hubtests, but will have to go. +// DownloadDataIfNeeded downloads the data set for the item. 
+func DownloadDataIfNeeded(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, force bool) (bool, error) {
+	itemFilePath, err := item.InstallPath()
+	if err != nil {
+		return false, err
+	}
+
+	itemFile, err := os.Open(itemFilePath)
+	if err != nil {
+		return false, fmt.Errorf("while opening %s: %w", itemFilePath, err)
+	}
+
+	defer itemFile.Close()
+
+	needReload, err := downloadDataSet(ctx, hub.GetDataDir(), force, itemFile)
+	if err != nil {
+		return needReload, fmt.Errorf("while downloading data for %s: %w", itemFilePath, err)
+	}
+
+	return needReload, nil
+}
+
+// DataRefreshCommand updates the data files associated with the installed hub items.
+type DataRefreshCommand struct {
+	Force bool
+}
+
+func NewDataRefreshCommand(force bool) *DataRefreshCommand {
+	return &DataRefreshCommand{Force: force}
+}
+
+func (c *DataRefreshCommand) Prepare(plan *ActionPlan) (bool, error) {
+	// we can't prepare much at this point because we don't know which data files are needed yet,
+	// and items need to be downloaded/updated
+	// everything will be done in Run()
+	return true, nil
+}
+
+func (c *DataRefreshCommand) Run(ctx context.Context, plan *ActionPlan) error {
+	for _, itemType := range cwhub.ItemTypes {
+		for _, item := range plan.hub.GetInstalledByType(itemType, true) {
+			needReload, err := DownloadDataIfNeeded(ctx, plan.hub, item, c.Force)
+			if err != nil {
+				return err
+			}
+
+			plan.ReloadNeeded = plan.ReloadNeeded || needReload
+		}
+	}
+
+	return nil
+}
+
+func (c *DataRefreshCommand) OperationType() string {
+	return "check & update data files"
+}
+
+func (c *DataRefreshCommand) ItemType() string {
+	return ""
+}
+
+func (c *DataRefreshCommand) Detail() string {
+	return ""
+}
diff --git a/pkg/hubops/disable.go b/pkg/hubops/disable.go
new file mode 100644
index 00000000000..b6368e85036
--- /dev/null
+++ b/pkg/hubops/disable.go
@@ -0,0 +1,121 @@
+package hubops
+
+import (
+	"context"
+	"fmt"
+	"os"
+
+	"github.com/crowdsecurity/crowdsec/pkg/cwhub"
+)
+
+// 
RemoveInstallLink removes the item's symlink between the installation directory and the local hub. +func RemoveInstallLink(i *cwhub.Item) error { + syml, err := i.InstallPath() + if err != nil { + return err + } + + stat, err := os.Lstat(syml) + if err != nil { + return err + } + + // if it's managed by hub, it's a symlink to csconfig.GConfig.hub.HubDir / ... + if stat.Mode()&os.ModeSymlink == 0 { + return fmt.Errorf("%s isn't managed by hub", i.Name) + } + + hubpath, err := os.Readlink(syml) + if err != nil { + return fmt.Errorf("while reading symlink: %w", err) + } + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if hubpath != src { + return fmt.Errorf("%s isn't managed by hub", i.Name) + } + + if err := os.Remove(syml); err != nil { + return fmt.Errorf("while removing symlink: %w", err) + } + + return nil +} + +// DisableCommand uninstalls an item and its dependencies, ensuring that no +// sub-item is left in an inconsistent state. +type DisableCommand struct { + Item *cwhub.Item + Force bool +} + +func NewDisableCommand(item *cwhub.Item, force bool) *DisableCommand { + return &DisableCommand{Item: item, Force: force} +} + +func (c *DisableCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + plan.Warning(i.FQName() + " is a local item, please delete manually") + return false, nil + } + + if i.State.Tainted && !c.Force { + return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) + } + + if !i.State.Installed { + return false, nil + } + + subsToRemove, err := i.SafeToRemoveDeps() + if err != nil { + return false, err + } + + for _, sub := range subsToRemove { + if !sub.State.Installed { + continue + } + + if err := plan.AddCommand(NewDisableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + return true, nil +} + +func (c *DisableCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("disabling " + colorizeItemName(i.FQName())) + + 
if err := RemoveInstallLink(i); err != nil { + return fmt.Errorf("while disabling %s: %w", i.FQName(), err) + } + + plan.ReloadNeeded = true + + i.State.Installed = false + i.State.Tainted = false + + return nil +} + +func (c *DisableCommand) OperationType() string { + return "disable" +} + +func (c *DisableCommand) ItemType() string { + return c.Item.Type +} + +func (c *DisableCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubops/download.go b/pkg/hubops/download.go new file mode 100644 index 00000000000..4a722efdb77 --- /dev/null +++ b/pkg/hubops/download.go @@ -0,0 +1,212 @@ +package hubops + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + "github.com/fatih/color" + + "github.com/crowdsecurity/go-cs-lib/downloader" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + + +// DownloadCommand handles the downloading of hub items. +// It ensures that items are fetched from the hub (or from the index file if it also has content) +// managing dependencies and verifying the integrity of downloaded content. +// This is used by "cscli install" and "cscli upgrade". +// Tainted items require the force parameter, local items are skipped. +type DownloadCommand struct { + Item *cwhub.Item + Force bool +} + +func NewDownloadCommand(item *cwhub.Item, force bool) *DownloadCommand { + return &DownloadCommand{Item: item, Force: force} +} + +func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + plan.Info(i.FQName() + " - not downloading local item") + return false, nil + } + + // XXX: if it's tainted do we upgrade the dependencies anyway? 
+ if i.State.Tainted && !c.Force { + plan.Warning(i.FQName() + " is tainted, use '--force' to overwrite") + return false, nil + } + + toDisable := make(map[*cwhub.Item]struct{}) + + var disableKeys []*cwhub.Item + + if i.State.Installed { + for sub := range i.CurrentDependencies().SubItems(plan.hub) { + disableKeys = append(disableKeys, sub) + toDisable[sub] = struct{}{} + } + } + + for sub := range i.LatestDependencies().SubItems(plan.hub) { + if err := plan.AddCommand(NewDownloadCommand(sub, c.Force)); err != nil { + return false, err + } + + if i.State.Installed { + // ensure the _new_ dependencies are installed too + if err := plan.AddCommand(NewEnableCommand(sub, c.Force)); err != nil { + return false, err + } + + for _, sub2 := range disableKeys { + if sub2 == sub { + delete(toDisable, sub) + } + } + } + } + + for sub := range toDisable { + if err := plan.AddCommand(NewDisableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if i.State.Downloaded && i.State.UpToDate { + return false, nil + } + + return true, nil +} + +// The DataSet is a list of data sources required by an item (built from the data: section in the yaml). +type DataSet struct { + Data []types.DataSource `yaml:"data,omitempty"` +} + +// downloadDataSet downloads all the data files for an item. +func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader io.Reader) (bool, error) { + needReload := false + + dec := yaml.NewDecoder(reader) + + for { + data := &DataSet{} + + if err := dec.Decode(data); err != nil { + if errors.Is(err, io.EOF) { + break + } + + return needReload, fmt.Errorf("while reading file: %w", err) + } + + for _, dataS := range data.Data { + // XXX: check context cancellation + destPath, err := cwhub.SafePath(dataFolder, dataS.DestPath) + if err != nil { + return needReload, err + } + + d := downloader. + New(). + WithHTTPClient(cwhub.HubClient). + ToFile(destPath). + CompareContent(). 
+ BeforeRequest(func(req *http.Request) { + fmt.Printf("downloading %s\n", req.URL) + }). + WithLogger(logrus.WithField("url", dataS.SourceURL)) + + if !force { + d = d.WithLastModified(). + WithShelfLife(7 * 24 * time.Hour) + } + + downloaded, err := d.Download(ctx, dataS.SourceURL) + if err != nil { + return needReload, fmt.Errorf("while getting data: %w", err) + } + + needReload = needReload || downloaded + } + } + + return needReload, nil +} + +func (c *DownloadCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Printf("downloading %s\n", colorizeItemName(i.FQName())) + + // ensure that target file is within target dir + finalPath, err := i.DownloadPath() + if err != nil { + return err + } + + downloaded, _, err := i.FetchContentTo(ctx, finalPath) + if err != nil { + return fmt.Errorf("%s: %w", i.FQName(), err) + } + + if downloaded { + plan.ReloadNeeded = true + } + + i.State.Downloaded = true + i.State.Tainted = false + i.State.UpToDate = true + + // read content to get the list of data files + reader, err := os.Open(finalPath) + if err != nil { + return fmt.Errorf("while opening %s: %w", finalPath, err) + } + + defer reader.Close() + + needReload, err := downloadDataSet(ctx, plan.hub.GetDataDir(), c.Force, reader) + if err != nil { + return fmt.Errorf("while downloading data for %s: %w", i.FileName, err) + } + + if needReload { + plan.ReloadNeeded = true + } + + return nil +} + +func (c *DownloadCommand) OperationType() string { + return "download" +} + +func (c *DownloadCommand) ItemType() string { + return c.Item.Type +} + +func (c *DownloadCommand) Detail() string { + i := c.Item + + version := color.YellowString(i.Version) + + if i.State.Downloaded { + version = c.Item.State.LocalVersion + " -> " + color.YellowString(i.Version) + } + + return colorizeItemName(c.Item.Name) + " (" + version + ")" +} diff --git a/pkg/hubops/enable.go b/pkg/hubops/enable.go new file mode 100644 index 00000000000..40de40c8662 --- /dev/null +++ 
b/pkg/hubops/enable.go @@ -0,0 +1,113 @@ +package hubops + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// EnableCommand installs a hub item and its dependencies. +// In case this command is called during an upgrade, the sub-items list it taken from the +// latest version in the index, otherwise from the version that is currently installed. +type EnableCommand struct { + Item *cwhub.Item + Force bool + FromLatest bool +} + +func NewEnableCommand(item *cwhub.Item, force bool) *EnableCommand { + return &EnableCommand{Item: item, Force: force} +} + +func (c *EnableCommand) Prepare(plan *ActionPlan) (bool, error) { + var dependencies cwhub.Dependencies + + i := c.Item + + if c.FromLatest { + // we are upgrading + dependencies = i.LatestDependencies() + } else { + dependencies = i.CurrentDependencies() + } + + for sub := range dependencies.SubItems(plan.hub) { + if err := plan.AddCommand(NewEnableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if i.State.Installed { + return false, nil + } + + return true, nil +} + +// CreateInstallLink creates a symlink between the actual config file at hub.HubDir and hub.ConfigDir. 
+func CreateInstallLink(i *cwhub.Item) error { + dest, err := i.InstallPath() + if err != nil { + return err + } + + destDir := filepath.Dir(dest) + if err = os.MkdirAll(destDir, os.ModePerm); err != nil { + return fmt.Errorf("while creating %s: %w", destDir, err) + } + + if _, err = os.Lstat(dest); err == nil { + // already exists + return nil + } else if !os.IsNotExist(err) { + return fmt.Errorf("failed to stat %s: %w", dest, err) + } + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if err = os.Symlink(src, dest); err != nil { + return fmt.Errorf("while creating symlink from %s to %s: %w", src, dest, err) + } + + return nil +} + +func (c *EnableCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("enabling " + colorizeItemName(i.FQName())) + + if !i.State.Downloaded { + // XXX: this a warning? + return fmt.Errorf("can't enable %s: not downloaded", i.FQName()) + } + + if err := CreateInstallLink(i); err != nil { + return fmt.Errorf("while enabling %s: %w", i.FQName(), err) + } + + plan.ReloadNeeded = true + + i.State.Installed = true + i.State.Tainted = false + + return nil +} + +func (c *EnableCommand) OperationType() string { + return "enable" +} + +func (c *EnableCommand) ItemType() string { + return c.Item.Type +} + +func (c *EnableCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubops/plan.go b/pkg/hubops/plan.go new file mode 100644 index 00000000000..1535bc41c64 --- /dev/null +++ b/pkg/hubops/plan.go @@ -0,0 +1,250 @@ +package hubops + +import ( + "context" + "fmt" + "os" + "slices" + "strings" + + "github.com/AlecAivazis/survey/v2" + isatty "github.com/mattn/go-isatty" + "github.com/fatih/color" + + "github.com/crowdsecurity/go-cs-lib/slicetools" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// Command represents an operation that can be performed on a CrowdSec hub item. 
+// +// Each concrete implementation defines a Prepare() method to check for errors and preconditions, +// decide which sub-commands are required (like installing dependencies) and add them to the action plan. +type Command interface { + // Prepare sets up the command for execution within the given + // ActionPlan. It may add additional commands to the ActionPlan based + // on dependencies or prerequisites. Returns a boolean indicating + // whether the command execution should be skipped (it can be + // redundant, like installing something that is already installed) and + // an error if the preparation failed. + // NOTE: Returning an error will bubble up from the plan.AddCommand() method, + // but Prepare() might already have modified the plan's command slice. + Prepare(*ActionPlan) (bool, error) + + // Run executes the command within the provided context and ActionPlan. + // It performs the actual operation and returns an error if execution fails. + // NOTE: Returning an error will currently stop the execution of the action plan. + Run(ctx context.Context, plan *ActionPlan) error + + // OperationType returns a unique string representing the type of operation to perform + // (e.g., "download", "enable"). + OperationType() string + + // ItemType returns the type of item the operation is performed on + // (e.g., "collections"). Used in confirmation prompt and dry-run. + ItemType() string + + // Detail provides further details on the operation, + // such as the item's name and version. + Detail() string +} + +// UniqueKey generates a unique string key for a Command based on its operation type, item type, and detail. +// Is is used to avoid adding duplicate commands to the action plan. +func UniqueKey(c Command) string { + return fmt.Sprintf("%s:%s:%s", c.OperationType(), c.ItemType(), c.Detail()) +} + +// ActionPlan orchestrates the sequence of operations (Commands) to manage CrowdSec hub items. 
+type ActionPlan struct { + // hold the list of Commands to be executed as part of the action plan. + // If a command is skipped (i.e. calling Prepare() returned false), it won't be included in the slice. + commands []Command + + // Tracks unique commands + commandsTracker map[string]struct{} + + // A reference to the Hub instance, required for dependency lookup. + hub *cwhub.Hub + + // Indicates whether a reload of the CrowdSec service is required after executing the action plan. + ReloadNeeded bool +} + +func NewActionPlan(hub *cwhub.Hub) *ActionPlan { + return &ActionPlan{ + hub: hub, + commandsTracker: make(map[string]struct{}), + } +} + +func (p *ActionPlan) AddCommand(c Command) error { + ok, err := c.Prepare(p) + if err != nil { + return err + } + + if ok { + key := UniqueKey(c) + if _, exists := p.commandsTracker[key]; !exists { + p.commands = append(p.commands, c) + p.commandsTracker[key] = struct{}{} + } + } + + return nil +} + +func (p *ActionPlan) Info(msg string) { + fmt.Println(msg) +} + +func (p *ActionPlan) Warning(msg string) { + fmt.Printf("%s %s\n", color.YellowString("WARN"), msg) +} + +// Description returns a string representation of the action plan. +// If verbose is false, the operations are grouped by item type and operation type. +// If verbose is true, they are listed as they appear in the command slice. +func (p *ActionPlan) Description(verbose bool) string { + if verbose { + return p.verboseDescription() + } + + return p.compactDescription() +} + +func (p *ActionPlan) verboseDescription() string { + sb := strings.Builder{} + + // Here we display the commands in the order they will be executed. + for _, cmd := range p.commands { + sb.WriteString(colorizeOpType(cmd.OperationType()) + " " + cmd.ItemType() + ":" + cmd.Detail() + "\n") + } + + return sb.String() +} + +// describe the operations of a given type in a compact way. 
+func describe(opType string, desc map[string]map[string][]string, sb *strings.Builder) { + if _, ok := desc[opType]; !ok { + return + } + + sb.WriteString(colorizeOpType(opType) + "\n") + + // iterate cwhub.ItemTypes in reverse order, so we have collections first + for _, itemType := range slicetools.Backward(cwhub.ItemTypes) { + if desc[opType][itemType] == nil { + continue + } + + details := desc[opType][itemType] + // Sorting for user convenience, but it's not the same order the commands will be carried out. + slices.Sort(details) + + if itemType != "" { + sb.WriteString(" " + itemType + ": ") + } + + if len(details) != 0 { + sb.WriteString(strings.Join(details, ", ")) + sb.WriteString("\n") + } + } +} + +func (p *ActionPlan) compactDescription() string { + desc := make(map[string]map[string][]string) + + for _, cmd := range p.commands { + opType := cmd.OperationType() + itemType := cmd.ItemType() + detail := cmd.Detail() + + if _, ok := desc[opType]; !ok { + desc[opType] = make(map[string][]string) + } + + desc[opType][itemType] = append(desc[opType][itemType], detail) + } + + sb := strings.Builder{} + + // Enforce presentation order. 
+ + describe("download", desc, &sb) + delete(desc, "download") + describe("enable", desc, &sb) + delete(desc, "enable") + describe("disable", desc, &sb) + delete(desc, "disable") + describe("remove", desc, &sb) + delete(desc, "remove") + + for optype := range desc { + describe(optype, desc, &sb) + } + + return sb.String() +} + +func (p *ActionPlan) Confirm(verbose bool) (bool, error) { + if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) { + return true, nil + } + + fmt.Println("The following actions will be performed:\n" + p.Description(verbose)) + + var answer bool + + prompt := &survey.Confirm{ + Message: "Do you want to continue?", + Default: true, + } + + if err := survey.AskOne(prompt, &answer); err != nil { + return false, err + } + + fmt.Println() + + return answer, nil +} + +func (p *ActionPlan) Execute(ctx context.Context, confirm bool, dryRun bool, verbose bool) error { + var err error + + if len(p.commands) == 0 { + // XXX: show skipped commands, warnings? + fmt.Println("Nothing to do.") + return nil + } + + if dryRun { + fmt.Println("Action plan:\n" + p.Description(verbose)) + fmt.Println("Dry run, no action taken.") + + return nil + } + + if !confirm { + confirm, err = p.Confirm(verbose) + if err != nil { + return err + } + } + + if !confirm { + fmt.Println("Operation canceled.") + return nil + } + + for _, c := range p.commands { + if err := c.Run(ctx, p); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/hubops/purge.go b/pkg/hubops/purge.go new file mode 100644 index 00000000000..3b415b27428 --- /dev/null +++ b/pkg/hubops/purge.go @@ -0,0 +1,88 @@ +package hubops + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// PurgeCommand removes the downloaded content of a hub item, effectively +// removing it from the local system. This command also removes the sub-items +// but not the associated data files. 
+type PurgeCommand struct { + Item *cwhub.Item + Force bool +} + +func NewPurgeCommand(item *cwhub.Item, force bool) *PurgeCommand { + return &PurgeCommand{Item: item, Force: force} +} + +func (c *PurgeCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + // not downloaded, by definition + return false, nil + } + + if i.State.Tainted && !c.Force { + return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) + } + + subsToRemove, err := i.SafeToRemoveDeps() + if err != nil { + return false, err + } + + for _, sub := range subsToRemove { + if err := plan.AddCommand(NewPurgeCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if !i.State.Downloaded { + return false, nil + } + + return true, nil +} + +func (c *PurgeCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("purging " + colorizeItemName(i.FQName())) + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if err := os.Remove(src); err != nil { + if os.IsNotExist(err) { + return nil + } + + return fmt.Errorf("while removing file: %w", err) + } + + i.State.Downloaded = false + i.State.Tainted = false + i.State.UpToDate = false + + return nil +} + +func (c *PurgeCommand) OperationType() string { + return "purge (delete source)" +} + +func (c *PurgeCommand) ItemType() string { + return c.Item.Type +} + +func (c *PurgeCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index bc9c8955d0d..d999d15ba6e 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" "github.com/crowdsecurity/crowdsec/pkg/parser" ) @@ -224,7 +225,7 @@ func (t *HubTestItem) InstallHub() error { // install data for parsers if needed for _, item := 
range hub.GetInstalledByType(cwhub.PARSERS, true) { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } @@ -233,7 +234,7 @@ func (t *HubTestItem) InstallHub() error { // install data for scenarios if needed for _, item := range hub.GetInstalledByType(cwhub.SCENARIOS, true) { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } @@ -242,7 +243,7 @@ func (t *HubTestItem) InstallHub() error { // install data for postoverflows if needed for _, item := range hub.GetInstalledByType(cwhub.POSTOVERFLOWS, true) { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 553617032a4..475f3af0928 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -54,7 +54,7 @@ func TestSetupHelperProcess(t *testing.T) { } fmt.Fprint(os.Stdout, fakeSystemctlOutput) - os.Exit(0) //nolint:revive,deep-exit + os.Exit(0) //nolint:revive } func tempYAML(t *testing.T, content string) os.File { diff --git a/pkg/setup/install.go b/pkg/setup/install.go index d63a1ee1775..dcefe744a76 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -13,6 +13,7 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" ) // AcquisDocument is created from a SetupItem. It represents a single YAML document, and can be part of a multi-document file. 
@@ -47,12 +48,14 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { } // InstallHubItems installs the objects recommended in a setup file. -func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, dryRun bool) error { +func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, yes, dryRun, verbose bool) error { setupEnvelope, err := decodeSetup(input, false) if err != nil { return err } + plan := hubops.NewActionPlan(hub) + for _, setupItem := range setupEnvelope.Setup { forceAction := false downloadOnly := false @@ -68,70 +71,50 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, dryRun b return fmt.Errorf("collection %s not found", collection) } - if dryRun { - fmt.Println("dry-run: would install collection", collection) - - continue - } - - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing collection %s: %w", item.Name, err) + plan.AddCommand(hubops.NewDownloadCommand(item, forceAction)) + if !downloadOnly { + plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) } } for _, parser := range setupItem.Install.Parsers { - if dryRun { - fmt.Println("dry-run: would install parser", parser) - - continue - } - item := hub.GetItem(cwhub.PARSERS, parser) if item == nil { return fmt.Errorf("parser %s not found", parser) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing parser %s: %w", item.Name, err) + plan.AddCommand(hubops.NewDownloadCommand(item, forceAction)) + if !downloadOnly { + plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) } } for _, scenario := range setupItem.Install.Scenarios { - if dryRun { - fmt.Println("dry-run: would install scenario", scenario) - - continue - } - item := hub.GetItem(cwhub.SCENARIOS, scenario) if item == nil { return fmt.Errorf("scenario %s not found", scenario) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - 
return fmt.Errorf("while installing scenario %s: %w", item.Name, err) + plan.AddCommand(hubops.NewDownloadCommand(item, forceAction)) + if !downloadOnly { + plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) } } for _, postoverflow := range setupItem.Install.PostOverflows { - if dryRun { - fmt.Println("dry-run: would install postoverflow", postoverflow) - - continue - } - item := hub.GetItem(cwhub.POSTOVERFLOWS, postoverflow) if item == nil { return fmt.Errorf("postoverflow %s not found", postoverflow) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) + plan.AddCommand(hubops.NewDownloadCommand(item, forceAction)) + if !downloadOnly { + plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) } } } - return nil + return plan.Execute(ctx, yes, dryRun, verbose) } // marshalAcquisDocuments creates the monolithic file, or itemized files (if a directory is provided) with the acquisition documents. 
diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index f832ac572d2..72a8b64a57a 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -511,8 +511,9 @@ update-notifier-motd.timer enabled enabled rune -0 jq -e '.installed == false' <(output) # we install it - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/apache2' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' + assert_line --regexp 'download collections:crowdsecurity/apache2' + assert_line --regexp 'enable collections:crowdsecurity/apache2' # still not installed rune -0 cscli collections inspect crowdsecurity/apache2 -o json @@ -520,8 +521,8 @@ update-notifier-motd.timer enabled enabled # same with dependencies rune -0 cscli collections remove --all - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/linux"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/linux' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/linux"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/linux' } @test "cscli setup install-hub (dry run: install multiple collections)" { @@ -530,8 +531,8 @@ update-notifier-motd.timer enabled enabled rune -0 jq -e '.installed == false' <(output) # we install it - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/apache2' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/apache2' # still not 
installed rune -0 cscli collections inspect crowdsecurity/apache2 -o json @@ -539,15 +540,15 @@ update-notifier-motd.timer enabled enabled } @test "cscli setup install-hub (dry run: install multiple collections, parsers, scenarios, postoverflows)" { - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/aws-console","crowdsecurity/caddy"],"parsers":["crowdsecurity/asterisk-logs"],"scenarios":["crowdsecurity/smb-fs"],"postoverflows":["crowdsecurity/cdn-whitelist","crowdsecurity/rdns"]}}]}' - assert_line 'dry-run: would install collection crowdsecurity/aws-console' - assert_line 'dry-run: would install collection crowdsecurity/caddy' - assert_line 'dry-run: would install parser crowdsecurity/asterisk-logs' - assert_line 'dry-run: would install scenario crowdsecurity/smb-fs' - assert_line 'dry-run: would install postoverflow crowdsecurity/cdn-whitelist' - assert_line 'dry-run: would install postoverflow crowdsecurity/rdns' - - rune -1 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/foo"]}}]}' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/aws-console","crowdsecurity/caddy"],"parsers":["crowdsecurity/asterisk-logs"],"scenarios":["crowdsecurity/smb-bf"],"postoverflows":["crowdsecurity/cdn-whitelist","crowdsecurity/rdns"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/aws-console' + assert_line --regexp 'enable collections:crowdsecurity/caddy' + assert_line --regexp 'enable parsers:crowdsecurity/asterisk-logs' + assert_line --regexp 'enable scenarios:crowdsecurity/smb-bf' + assert_line --regexp 'enable postoverflows:crowdsecurity/cdn-whitelist' + assert_line --regexp 'enable postoverflows:crowdsecurity/rdns' + + rune -1 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/foo"]}}]}' assert_stderr --partial 
'collection crowdsecurity/foo not found' } diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index b8fa1e9efca..03723ecc82b 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -20,7 +20,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -76,7 +75,7 @@ teardown() { assert_stderr --partial "invalid hub item appsec-rules:crowdsecurity/vpatch-laravel-debug-mode: latest version missing from index" rune -1 cscli appsec-rules install crowdsecurity/vpatch-laravel-debug-mode --force - assert_stderr --partial "error while installing 'crowdsecurity/vpatch-laravel-debug-mode': latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again" + assert_stderr --partial "appsec-rules:crowdsecurity/vpatch-laravel-debug-mode: latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again" } @test "missing reference in hub index" { @@ -108,47 +107,28 @@ teardown() { @test "cscli hub update" { rm -f "$INDEX_PATH" rune -0 cscli hub update - assert_stderr --partial "Wrote index to $INDEX_PATH" + assert_output "Downloading $INDEX_PATH" rune -0 cscli hub update - assert_stderr --partial "hub index is up to date" + assert_output "Nothing to do, the hub index is up to date." 
} -@test "cscli hub upgrade" { +@test "cscli hub upgrade (up to date)" { rune -0 cscli hub upgrade - assert_stderr --partial "Upgrading parsers" - assert_stderr --partial "Upgraded 0 parsers" - assert_stderr --partial "Upgrading postoverflows" - assert_stderr --partial "Upgraded 0 postoverflows" - assert_stderr --partial "Upgrading scenarios" - assert_stderr --partial "Upgraded 0 scenarios" - assert_stderr --partial "Upgrading contexts" - assert_stderr --partial "Upgraded 0 contexts" - assert_stderr --partial "Upgrading collections" - assert_stderr --partial "Upgraded 0 collections" - assert_stderr --partial "Upgrading appsec-configs" - assert_stderr --partial "Upgraded 0 appsec-configs" - assert_stderr --partial "Upgrading appsec-rules" - assert_stderr --partial "Upgraded 0 appsec-rules" - assert_stderr --partial "Upgrading collections" - assert_stderr --partial "Upgraded 0 collections" + refute_output rune -0 cscli parsers install crowdsecurity/syslog-logs - rune -0 cscli hub upgrade - assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" - rune -0 cscli hub upgrade --force - assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" - assert_stderr --partial "crowdsecurity/syslog-logs: updated" - assert_stderr --partial "Upgraded 1 parsers" - # this is used by the cron script to know if the hub was updated - assert_output --partial "updated crowdsecurity/syslog-logs" + refute_output + skip "todo: data files are re-downloaded with --force" } @test "cscli hub upgrade (with local items)" { mkdir -p "$CONFIG_DIR/collections" touch "$CONFIG_DIR/collections/foo.yaml" rune -0 cscli hub upgrade - assert_stderr --partial "not upgrading foo.yaml: local item" + assert_output - <<-EOT + collections:foo.yaml - not downloading local item + EOT } @test "cscli hub types" { diff --git a/test/bats/20_hub_collections.bats b/test/bats/20_hub_collections.bats deleted file mode 100644 index 6822339ae40..00000000000 --- a/test/bats/20_hub_collections.bats +++ /dev/null 
@@ -1,381 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli collections list" { - hub_purge_all - - # no items - rune -0 cscli collections list - assert_output --partial "COLLECTIONS" - rune -0 cscli collections list -o json - assert_json '{collections:[]}' - rune -0 cscli collections list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - rune -0 cscli collections list - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli collections list -o json - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 jq '.collections | length' <(output) - assert_output "2" - - rune -0 cscli collections list -o raw - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli collections list -a" { - expected=$(jq <"$INDEX_PATH" -r '.collections | length') - - rune -0 cscli collections list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli collections list -o json -a - rune -0 jq '.collections | length' <(output) - assert_output "$expected" - - rune -0 cscli collections list -o raw -a - rune -0 grep -vc 
'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli collections list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli collections list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli collections list -o json -a | jq -r '.collections[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli collections list [collection]..." { - # non-existent - rune -1 cscli collections install foo/bar - assert_stderr --partial "can't find 'foo/bar' in collections" - - # not installed - rune -0 cscli collections list crowdsecurity/smb - assert_output --regexp 'crowdsecurity/smb.*disabled' - - # install two items - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - # list an installed item - rune -0 cscli collections list crowdsecurity/sshd - assert_output --regexp "crowdsecurity/sshd" - refute_output --partial "crowdsecurity/smb" - - # list multiple installed and non installed items - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb crowdsecurity/nginx - assert_output --partial "crowdsecurity/sshd" - assert_output --partial "crowdsecurity/smb" - assert_output --partial "crowdsecurity/nginx" - - rune -0 cscli collections list crowdsecurity/sshd -o json - rune -0 jq '.collections | length' <(output) - assert_output "1" - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb crowdsecurity/nginx -o json - rune -0 jq '.collections | length' <(output) - assert_output "3" - - rune -0 cscli collections list crowdsecurity/sshd -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - 
assert_output "2" -} - -@test "cscli collections install" { - rune -1 cscli collections install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli collections install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in collections" - - # simple install - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli collections install crowdsecurity/ssshd - assert_stderr --partial "can't find 'crowdsecurity/ssshd' in collections, did you mean 'crowdsecurity/sshd'?" - - # install multiple - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'installed: true' - rune -0 cscli collections inspect crowdsecurity/smb --no-metrics - assert_output --partial 'crowdsecurity/smb' - assert_output --partial 'installed: true' -} - -@test "cscli collections install (file location and download-only)" { - rune -0 cscli collections install crowdsecurity/linux --download-only - rune -0 cscli collections inspect crowdsecurity/linux --no-metrics - assert_output --partial 'crowdsecurity/linux' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/collections/crowdsecurity/linux.yaml" - assert_file_not_exists "$CONFIG_DIR/collections/linux.yaml" - - rune -0 cscli collections install crowdsecurity/linux - rune -0 cscli collections inspect crowdsecurity/linux --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/collections/linux.yaml" -} - -@test "cscli collections install --force (tainted)" { - rune -0 cscli collections install crowdsecurity/sshd - echo "dirty" >"$CONFIG_DIR/collections/sshd.yaml" - - 
rune -1 cscli collections install crowdsecurity/sshd - assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is tainted, won't overwrite unless --force" - - rune -0 cscli collections install crowdsecurity/sshd --force - assert_stderr --partial "Enabled crowdsecurity/sshd" -} - -@test "cscli collections install --ignore (skip on errors)" { - rune -1 cscli collections install foo/bar crowdsecurity/sshd - assert_stderr --partial "can't find 'foo/bar' in collections" - refute_stderr --partial "Enabled collections: crowdsecurity/sshd" - - rune -0 cscli collections install foo/bar crowdsecurity/sshd --ignore - assert_stderr --partial "can't find 'foo/bar' in collections" - assert_stderr --partial "Enabled collections: crowdsecurity/sshd" -} - -@test "cscli collections inspect" { - rune -1 cscli collections inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli collections inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - - # one item - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_line 'type: collections' - assert_line 'name: crowdsecurity/sshd' - assert_line 'author: crowdsecurity' - assert_line 'path: collections/crowdsecurity/sshd.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli collections inspect crowdsecurity/sshd - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -c '[.type, .name, .author, .path, .installed]' <(output) - assert_json '["collections","crowdsecurity/sshd","crowdsecurity","collections/crowdsecurity/sshd.yaml",false]' - - # one item, raw - rune -0 cscli collections inspect crowdsecurity/sshd -o raw - assert_line 'type: collections' - assert_line 
'name: crowdsecurity/sshd' - assert_line 'author: crowdsecurity' - assert_line 'path: collections/crowdsecurity/sshd.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'crowdsecurity/smb' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb -o json - rune -0 jq -sc '[.[] | [.type, .name, .author, .path, .installed]]' <(output) - assert_json '[["collections","crowdsecurity/sshd","crowdsecurity","collections/crowdsecurity/sshd.yaml",false],["collections","crowdsecurity/smb","crowdsecurity","collections/crowdsecurity/smb.yaml",false]]' - - # multiple items, raw - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb -o raw - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'crowdsecurity/smb' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli collections remove" { - rune -1 cscli collections remove - assert_stderr --partial "specify at least one collection to remove or '--all'" - rune -1 cscli collections remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - - rune -0 cscli collections install crowdsecurity/sshd --download-only - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'removing crowdsecurity/sshd: not installed -- no need to remove' - - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'Removed crowdsecurity/sshd' - - rune -0 cscli 
collections remove crowdsecurity/sshd --purge - assert_stderr --partial 'Removed source file [crowdsecurity/sshd]' - - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'removing crowdsecurity/sshd: not installed -- no need to remove' - - rune -0 cscli collections remove crowdsecurity/sshd --purge --debug - assert_stderr --partial 'removing crowdsecurity/sshd: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/sshd]' - - # install, then remove, check files - rune -0 cscli collections install crowdsecurity/sshd - assert_file_exists "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections remove crowdsecurity/sshd - assert_file_not_exists "$CONFIG_DIR/collections/sshd.yaml" - - # delete is an alias for remove - rune -0 cscli collections install crowdsecurity/sshd - assert_file_exists "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections delete crowdsecurity/sshd - assert_file_not_exists "$CONFIG_DIR/collections/sshd.yaml" - - # purge - assert_file_exists "$HUB_DIR/collections/crowdsecurity/sshd.yaml" - rune -0 cscli collections remove crowdsecurity/sshd --purge - assert_file_not_exists "$HUB_DIR/collections/crowdsecurity/sshd.yaml" - - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - # --all - rune -0 cscli collections list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli collections remove --all - - rune -0 cscli collections list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli collections remove --force" { - # remove a collections that belongs to a collection - rune -0 cscli collections install crowdsecurity/linux - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial "crowdsecurity/sshd belongs to collections: [crowdsecurity/linux]" - assert_stderr --partial "Run 'sudo cscli collections remove 
crowdsecurity/sshd --force' if you want to force remove this collection" -} - -@test "cscli collections upgrade" { - rune -1 cscli collections upgrade - assert_stderr --partial "specify at least one collection to upgrade or '--all'" - rune -1 cscli collections upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - rune -0 cscli collections remove crowdsecurity/exim --purge - rune -1 cscli collections upgrade crowdsecurity/exim - assert_stderr --partial "can't upgrade crowdsecurity/exim: not installed" - rune -0 cscli collections install crowdsecurity/exim --download-only - rune -1 cscli collections upgrade crowdsecurity/exim - assert_stderr --partial "can't upgrade crowdsecurity/exim: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all collections - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.collections |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli collections install crowdsecurity/sshd - - echo "v0.0" > "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli collections upgrade crowdsecurity/sshd - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/collections/sshd.yaml" - # XXX: should return error - rune -0 cscli collections upgrade crowdsecurity/sshd - assert_stderr --partial "crowdsecurity/sshd is tainted, --force to overwrite" - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli collections upgrade crowdsecurity/sshd --force - rune -0 cscli collections inspect 
crowdsecurity/sshd -o json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli collections install crowdsecurity/smb - echo "v0.0" >"$CONFIG_DIR/collections/sshd.yaml" - echo "v0.0" >"$CONFIG_DIR/collections/smb.yaml" - rune -0 cscli collections list -o json - rune -0 jq -e '[.collections[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli collections upgrade crowdsecurity/sshd crowdsecurity/smb - rune -0 cscli collections list -o json - rune -0 jq -e 'any(.collections[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/collections/sshd.yaml" - echo "v0.0" >"$CONFIG_DIR/collections/smb.yaml" - rune -0 cscli collections list -o json - rune -0 jq -e '[.collections[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli collections upgrade --all - rune -0 cscli collections list -o json - rune -0 jq -e 'any(.collections[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_collections_dep.bats b/test/bats/20_hub_collections_dep.bats index 673b812dc0d..94a984709a8 100644 --- a/test/bats/20_hub_collections_dep.bats +++ b/test/bats/20_hub_collections_dep.bats @@ -20,7 +20,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -84,18 +83,32 @@ teardown() { assert_stderr --partial "crowdsecurity/smb is tainted, use '--force' to remove" } +@test "cscli collections inspect (dependencies)" { + rune -0 cscli collections install crowdsecurity/smb + + # The inspect command must show the dependencies of the local or older version. 
+ echo "{'collections': ['crowdsecurity/sshd']}" >"$CONFIG_DIR/collections/smb.yaml" + + rune -0 cscli collections inspect crowdsecurity/smb --no-metrics -o json + rune -0 jq -e '.collections' <(output) + assert_json '["crowdsecurity/sshd"]' +} + @test "cscli collections (dependencies II: the revenge)" { rune -0 cscli collections install crowdsecurity/wireguard baudneo/gotify rune -0 cscli collections remove crowdsecurity/wireguard - assert_stderr --partial "crowdsecurity/syslog-logs was not removed because it also belongs to baudneo/gotify" + assert_output --regexp 'disabling collections:crowdsecurity/wireguard' + refute_output --regexp 'disabling parsers:crowdsecurity/syslog-logs' rune -0 cscli collections inspect crowdsecurity/wireguard -o json rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/syslog-logs -o json + rune -0 jq -e '.installed==true' <(output) } @test "cscli collections (dependencies III: origins)" { # it is perfectly fine to remove an item belonging to a collection that we are removing anyway - # inject a dependency: sshd requires the syslog-logs parsers, but linux does too + # inject a direct dependency: sshd requires the syslog-logs parsers, but linux does too hub_dep=$(jq <"$INDEX_PATH" '. 
* {collections:{"crowdsecurity/sshd":{parsers:["crowdsecurity/syslog-logs"]}}}') echo "$hub_dep" >"$INDEX_PATH" @@ -108,11 +121,8 @@ teardown() { # removing linux should remove syslog-logs even though sshd depends on it rune -0 cscli collections remove crowdsecurity/linux - refute_stderr --partial "crowdsecurity/syslog-logs was not removed" - # we must also consider indirect dependencies - refute_stderr --partial "crowdsecurity/ssh-bf was not removed" - rune -0 cscli parsers list -o json - rune -0 jq -e '.parsers | length == 0' <(output) + rune -0 cscli hub list -o json + rune -0 jq -e 'add | length == 0' <(output) } @test "cscli collections (dependencies IV: looper)" { diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index d29a7d2c14c..2f1c952848b 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -22,7 +22,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -82,7 +81,7 @@ teardown() { rune -0 cscli collections install crowdsecurity/sshd rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics # XXX: we are on the verbose side here... - assert_stderr --regexp "Error: failed to read Hub index: failed to sync hub items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. Run 'sudo cscli hub update' to download the index again" + assert_stderr "Error: failed to read hub index: failed to sync hub items: failed to scan $CONFIG_DIR: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. 
Run 'sudo cscli hub update' to download the index again" } @test "removing or purging an item already removed by hand" { @@ -91,19 +90,21 @@ teardown() { rune -0 jq -r '.local_path' <(output) rune -0 rm "$(output)" - rune -0 cscli parsers remove crowdsecurity/syslog-logs --debug - assert_stderr --partial "removing crowdsecurity/syslog-logs: not installed -- no need to remove" + rune -0 cscli parsers remove crowdsecurity/syslog-logs + assert_output "Nothing to do." rune -0 cscli parsers inspect crowdsecurity/syslog-logs -o json rune -0 jq -r '.path' <(output) rune -0 rm "$HUB_DIR/$(output)" - rune -0 cscli parsers remove crowdsecurity/syslog-logs --purge --debug - assert_stderr --partial "removing crowdsecurity/syslog-logs: not downloaded -- no need to remove" + rune -0 cscli parsers remove crowdsecurity/syslog-logs --purge + assert_output "Nothing to do." - rune -0 cscli parsers remove crowdsecurity/linux --all --error --purge --force - rune -0 cscli collections remove crowdsecurity/linux --all --error --purge --force - refute_output + rune -0 cscli parsers remove --all --error --purge --force + assert_output "Nothing to do." + refute_stderr + rune -0 cscli collections remove --all --error --purge --force + assert_output "Nothing to do." 
refute_stderr } @@ -121,7 +122,7 @@ teardown() { # and not from hub update rune -0 cscli hub update - assert_stderr --partial "collection crowdsecurity/sshd is tainted" + assert_stderr --partial "collection crowdsecurity/sshd is tainted by local changes" refute_stderr --partial "collection foobar.yaml is tainted" } @@ -150,25 +151,42 @@ teardown() { @test "a local item cannot be downloaded by cscli" { rune -0 mkdir -p "$CONFIG_DIR/collections" rune -0 touch "$CONFIG_DIR/collections/foobar.yaml" - rune -1 cscli collections install foobar.yaml - assert_stderr --partial "foobar.yaml is local, can't download" - rune -1 cscli collections install foobar.yaml --force - assert_stderr --partial "foobar.yaml is local, can't download" + rune -0 cscli collections install foobar.yaml + assert_output --partial "Nothing to do." + rune -0 cscli collections install foobar.yaml --force + assert_output --partial "Nothing to do." + rune -0 cscli collections install --download-only foobar.yaml + assert_output --partial "Nothing to do." 
} @test "a local item cannot be removed by cscli" { - rune -0 mkdir -p "$CONFIG_DIR/collections" - rune -0 touch "$CONFIG_DIR/collections/foobar.yaml" - rune -0 cscli collections remove foobar.yaml - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove foobar.yaml --purge - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove foobar.yaml --force - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove --all - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove --all --purge - assert_stderr --partial "foobar.yaml is a local item, please delete manually" + rune -0 mkdir -p "$CONFIG_DIR/scenarios" + rune -0 touch "$CONFIG_DIR/scenarios/foobar.yaml" + rune -0 cscli scenarios remove foobar.yaml + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. + EOT + rune -0 cscli scenarios remove foobar.yaml --purge + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. + EOT + rune -0 cscli scenarios remove foobar.yaml --force + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. 
+ EOT + + rune -0 cscli scenarios install crowdsecurity/ssh-bf + + rune -0 cscli scenarios remove --all + assert_line "WARN scenarios:foobar.yaml is a local item, please delete manually" + assert_line "disabling scenarios:crowdsecurity/ssh-bf" + + rune -0 cscli scenarios remove --all --purge + assert_line "WARN scenarios:foobar.yaml is a local item, please delete manually" + assert_line "purging scenarios:crowdsecurity/ssh-bf" } @test "a dangling link is reported with a warning" { diff --git a/test/bats/20_hub_parsers.bats b/test/bats/20_hub_parsers.bats deleted file mode 100644 index 791b1a2177f..00000000000 --- a/test/bats/20_hub_parsers.bats +++ /dev/null @@ -1,383 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli parsers list" { - hub_purge_all - - # no items - rune -0 cscli parsers list - assert_output --partial "PARSERS" - rune -0 cscli parsers list -o json - assert_json '{parsers:[]}' - rune -0 cscli parsers list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - rune -0 cscli parsers list - assert_output --partial crowdsecurity/whitelists - assert_output --partial crowdsecurity/windows-auth - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli parsers list -o json - assert_output --partial crowdsecurity/whitelists - assert_output --partial 
crowdsecurity/windows-auth - rune -0 jq '.parsers | length' <(output) - assert_output "2" - - rune -0 cscli parsers list -o raw - assert_output --partial crowdsecurity/whitelists - assert_output --partial crowdsecurity/windows-auth - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli parsers list -a" { - expected=$(jq <"$INDEX_PATH" -r '.parsers | length') - - rune -0 cscli parsers list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli parsers list -o json -a - rune -0 jq '.parsers | length' <(output) - assert_output "$expected" - - rune -0 cscli parsers list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli parsers list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli parsers list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli parsers list -o json -a | jq -r '.parsers[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli parsers list [parser]..." 
{ - # non-existent - rune -1 cscli parsers install foo/bar - assert_stderr --partial "can't find 'foo/bar' in parsers" - - # not installed - rune -0 cscli parsers list crowdsecurity/whitelists - assert_output --regexp 'crowdsecurity/whitelists.*disabled' - - # install two items - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - # list an installed item - rune -0 cscli parsers list crowdsecurity/whitelists - assert_output --regexp "crowdsecurity/whitelists.*enabled" - refute_output --partial "crowdsecurity/windows-auth" - - # list multiple installed and non installed items - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs - assert_output --partial "crowdsecurity/whitelists" - assert_output --partial "crowdsecurity/windows-auth" - assert_output --partial "crowdsecurity/traefik-logs" - - rune -0 cscli parsers list crowdsecurity/whitelists -o json - rune -0 jq '.parsers | length' <(output) - assert_output "1" - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o json - rune -0 jq '.parsers | length' <(output) - assert_output "3" - - rune -0 cscli parsers list crowdsecurity/whitelists -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli parsers install" { - rune -1 cscli parsers install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli parsers install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in parsers" - - # simple install - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 
'crowdsecurity/whitelists' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli parsers install crowdsecurity/sshd-logz - assert_stderr --partial "can't find 'crowdsecurity/sshd-logz' in parsers, did you mean 'crowdsecurity/sshd-logs'?" - - # install multiple - rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs - rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics - assert_output --partial 'crowdsecurity/pgsql-logs' - assert_output --partial 'installed: true' - rune -0 cscli parsers inspect crowdsecurity/postfix-logs --no-metrics - assert_output --partial 'crowdsecurity/postfix-logs' - assert_output --partial 'installed: true' -} - -@test "cscli parsers install (file location and download-only)" { - rune -0 cscli parsers install crowdsecurity/whitelists --download-only - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 'crowdsecurity/whitelists' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" -} - -@test "cscli parsers install --force (tainted)" { - rune -0 cscli parsers install crowdsecurity/whitelists - echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - rune -1 cscli parsers install crowdsecurity/whitelists - assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't overwrite unless --force" - - rune -0 cscli parsers install crowdsecurity/whitelists --force - assert_stderr --partial "Enabled crowdsecurity/whitelists" -} - -@test "cscli parsers 
install --ignore (skip on errors)" { - rune -1 cscli parsers install foo/bar crowdsecurity/whitelists - assert_stderr --partial "can't find 'foo/bar' in parsers" - refute_stderr --partial "Enabled parsers: crowdsecurity/whitelists" - - rune -0 cscli parsers install foo/bar crowdsecurity/whitelists --ignore - assert_stderr --partial "can't find 'foo/bar' in parsers" - assert_stderr --partial "Enabled parsers: crowdsecurity/whitelists" -} - -@test "cscli parsers inspect" { - rune -1 cscli parsers inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli parsers inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - - # one item - rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics - assert_line 'type: parsers' - assert_line 'stage: s01-parse' - assert_line 'name: crowdsecurity/sshd-logs' - assert_line 'author: crowdsecurity' - assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli parsers inspect crowdsecurity/sshd-logs - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json - rune -0 jq -c '[.type, .stage, .name, .author, .path, .installed]' <(output) - assert_json '["parsers","s01-parse","crowdsecurity/sshd-logs","crowdsecurity","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false]' - - # one item, raw - rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o raw - assert_line 'type: parsers' - assert_line 'name: crowdsecurity/sshd-logs' - assert_line 'stage: s01-parse' - assert_line 'author: crowdsecurity' - assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli parsers inspect 
crowdsecurity/sshd-logs crowdsecurity/whitelists --no-metrics - assert_output --partial 'crowdsecurity/sshd-logs' - assert_output --partial 'crowdsecurity/whitelists' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o json - rune -0 jq -sc '[.[] | [.type, .stage, .name, .author, .path, .installed]]' <(output) - assert_json '[["parsers","s01-parse","crowdsecurity/sshd-logs","crowdsecurity","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false],["parsers","s02-enrich","crowdsecurity/whitelists","crowdsecurity","parsers/s02-enrich/crowdsecurity/whitelists.yaml",false]]' - - # multiple items, raw - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o raw - assert_output --partial 'crowdsecurity/sshd-logs' - assert_output --partial 'crowdsecurity/whitelists' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli parsers remove" { - rune -1 cscli parsers remove - assert_stderr --partial "specify at least one parser to remove or '--all'" - rune -1 cscli parsers remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - - rune -0 cscli parsers install crowdsecurity/whitelists --download-only - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_stderr --partial "removing crowdsecurity/whitelists: not installed -- no need to remove" - - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_stderr --partial "Removed crowdsecurity/whitelists" - - rune -0 cscli parsers remove crowdsecurity/whitelists --purge - assert_stderr --partial 'Removed source file [crowdsecurity/whitelists]' - - rune -0 cscli 
parsers remove crowdsecurity/whitelists - assert_stderr --partial "removing crowdsecurity/whitelists: not installed -- no need to remove" - - rune -0 cscli parsers remove crowdsecurity/whitelists --purge --debug - assert_stderr --partial 'removing crowdsecurity/whitelists: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/whitelists]' - - # install, then remove, check files - rune -0 cscli parsers install crowdsecurity/whitelists - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - # delete is an alias for remove - rune -0 cscli parsers install crowdsecurity/whitelists - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers delete crowdsecurity/whitelists - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - # purge - assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - rune -0 cscli parsers remove crowdsecurity/whitelists --purge - assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - # --all - rune -0 cscli parsers list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli parsers remove --all - - rune -0 cscli parsers list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli parsers remove --force" { - # remove a parser that belongs to a collection - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli parsers remove crowdsecurity/sshd-logs - assert_stderr --partial "crowdsecurity/sshd-logs belongs to collections: [crowdsecurity/sshd]" - assert_stderr --partial "Run 'sudo cscli parsers remove crowdsecurity/sshd-logs --force' if you want 
to force remove this parser" -} - -@test "cscli parsers upgrade" { - rune -1 cscli parsers upgrade - assert_stderr --partial "specify at least one parser to upgrade or '--all'" - rune -1 cscli parsers upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - rune -0 cscli parsers remove crowdsecurity/pam-logs --purge - rune -1 cscli parsers upgrade crowdsecurity/pam-logs - assert_stderr --partial "can't upgrade crowdsecurity/pam-logs: not installed" - rune -0 cscli parsers install crowdsecurity/pam-logs --download-only - rune -1 cscli parsers upgrade crowdsecurity/pam-logs - assert_stderr --partial "can't upgrade crowdsecurity/pam-logs: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all parsers - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.parsers |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli parsers install crowdsecurity/whitelists - - echo "v0.0" > "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli parsers upgrade crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - # XXX: should return error - rune -0 cscli parsers upgrade crowdsecurity/whitelists - assert_stderr --partial "crowdsecurity/whitelists is tainted, --force to overwrite" - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli parsers upgrade crowdsecurity/whitelists --force - rune -0 cscli parsers inspect crowdsecurity/whitelists -o 
json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli parsers install crowdsecurity/windows-auth - echo "v0.0" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - echo "v0.0" >"$CONFIG_DIR/parsers/s01-parse/windows-auth.yaml" - rune -0 cscli parsers list -o json - rune -0 jq -e '[.parsers[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/windows-auth - rune -0 cscli parsers list -o json - rune -0 jq -e 'any(.parsers[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - echo "v0.0" >"$CONFIG_DIR/parsers/s01-parse/windows-auth.yaml" - rune -0 cscli parsers list -o json - rune -0 jq -e '[.parsers[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli parsers upgrade --all - rune -0 cscli parsers list -o json - rune -0 jq -e 'any(.parsers[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_postoverflows.bats b/test/bats/20_hub_postoverflows.bats deleted file mode 100644 index 37337b08caa..00000000000 --- a/test/bats/20_hub_postoverflows.bats +++ /dev/null @@ -1,383 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli postoverflows list" { - hub_purge_all - - # no items - rune -0 cscli postoverflows list - assert_output --partial "POSTOVERFLOWS" - rune -0 cscli postoverflows list -o json - assert_json 
'{postoverflows:[]}' - rune -0 cscli postoverflows list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - rune -0 cscli postoverflows list - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli postoverflows list -o json - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 jq '.postoverflows | length' <(output) - assert_output "2" - - rune -0 cscli postoverflows list -o raw - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli postoverflows list -a" { - expected=$(jq <"$INDEX_PATH" -r '.postoverflows | length') - - rune -0 cscli postoverflows list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli postoverflows list -o json -a - rune -0 jq '.postoverflows | length' <(output) - assert_output "$expected" - - rune -0 cscli postoverflows list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli postoverflows list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli postoverflows list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli postoverflows list -o json -a | jq -r '.postoverflows[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli postoverflows list [postoverflow]..." 
{ - # non-existent - rune -1 cscli postoverflows install foo/bar - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - - # not installed - rune -0 cscli postoverflows list crowdsecurity/rdns - assert_output --regexp 'crowdsecurity/rdns.*disabled' - - # install two items - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - # list an installed item - rune -0 cscli postoverflows list crowdsecurity/rdns - assert_output --regexp "crowdsecurity/rdns.*enabled" - refute_output --partial "crowdsecurity/cdn-whitelist" - - # list multiple installed and non installed items - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range - assert_output --partial "crowdsecurity/rdns" - assert_output --partial "crowdsecurity/cdn-whitelist" - assert_output --partial "crowdsecurity/ipv6_to_range" - - rune -0 cscli postoverflows list crowdsecurity/rdns -o json - rune -0 jq '.postoverflows | length' <(output) - assert_output "1" - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range -o json - rune -0 jq '.postoverflows | length' <(output) - assert_output "3" - - rune -0 cscli postoverflows list crowdsecurity/rdns -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli postoverflows install" { - rune -1 cscli postoverflows install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli postoverflows install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in postoverflows" - - # simple install - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows inspect 
crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli postoverflows install crowdsecurity/rdnf - assert_stderr --partial "can't find 'crowdsecurity/rdnf' in postoverflows, did you mean 'crowdsecurity/rdns'?" - - # install multiple - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: true' - rune -0 cscli postoverflows inspect crowdsecurity/cdn-whitelist --no-metrics - assert_output --partial 'crowdsecurity/cdn-whitelist' - assert_output --partial 'installed: true' -} - -@test "cscli postoverflows install (file location and download-only)" { - rune -0 cscli postoverflows install crowdsecurity/rdns --download-only - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" -} - -@test "cscli postoverflows install --force (tainted)" { - rune -0 cscli postoverflows install crowdsecurity/rdns - echo "dirty" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - rune -1 cscli postoverflows install crowdsecurity/rdns - assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't overwrite unless --force" - - rune -0 cscli postoverflows install crowdsecurity/rdns --force - assert_stderr --partial "Enabled 
crowdsecurity/rdns" -} - -@test "cscli postoverflow install --ignore (skip on errors)" { - rune -1 cscli postoverflows install foo/bar crowdsecurity/rdns - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - refute_stderr --partial "Enabled postoverflows: crowdsecurity/rdns" - - rune -0 cscli postoverflows install foo/bar crowdsecurity/rdns --ignore - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - assert_stderr --partial "Enabled postoverflows: crowdsecurity/rdns" -} - -@test "cscli postoverflows inspect" { - rune -1 cscli postoverflows inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli postoverflows inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - - # one item - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_line 'type: postoverflows' - assert_line 'stage: s00-enrich' - assert_line 'name: crowdsecurity/rdns' - assert_line 'author: crowdsecurity' - assert_line 'path: postoverflows/s00-enrich/crowdsecurity/rdns.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli postoverflows inspect crowdsecurity/rdns - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -c '[.type, .stage, .name, .author, .path, .installed]' <(output) - assert_json '["postoverflows","s00-enrich","crowdsecurity/rdns","crowdsecurity","postoverflows/s00-enrich/crowdsecurity/rdns.yaml",false]' - - # one item, raw - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o raw - assert_line 'type: postoverflows' - assert_line 'name: crowdsecurity/rdns' - assert_line 'stage: s00-enrich' - assert_line 'author: crowdsecurity' - assert_line 'path: postoverflows/s00-enrich/crowdsecurity/rdns.yaml' - assert_line 'installed: false' - 
refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'crowdsecurity/cdn-whitelist' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist -o json - rune -0 jq -sc '[.[] | [.type, .stage, .name, .author, .path, .installed]]' <(output) - assert_json '[["postoverflows","s00-enrich","crowdsecurity/rdns","crowdsecurity","postoverflows/s00-enrich/crowdsecurity/rdns.yaml",false],["postoverflows","s01-whitelist","crowdsecurity/cdn-whitelist","crowdsecurity","postoverflows/s01-whitelist/crowdsecurity/cdn-whitelist.yaml",false]]' - - # multiple items, raw - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist -o raw - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'crowdsecurity/cdn-whitelist' - run -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli postoverflows remove" { - rune -1 cscli postoverflows remove - assert_stderr --partial "specify at least one postoverflow to remove or '--all'" - rune -1 cscli postoverflows remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - - rune -0 cscli postoverflows install crowdsecurity/rdns --download-only - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial "removing crowdsecurity/rdns: not installed -- no need to remove" - - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial 'Removed crowdsecurity/rdns' - - rune 
-0 cscli postoverflows remove crowdsecurity/rdns --purge - assert_stderr --partial 'Removed source file [crowdsecurity/rdns]' - - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial 'removing crowdsecurity/rdns: not installed -- no need to remove' - - rune -0 cscli postoverflows remove crowdsecurity/rdns --purge --debug - assert_stderr --partial 'removing crowdsecurity/rdns: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/rdns]' - - # install, then remove, check files - rune -0 cscli postoverflows install crowdsecurity/rdns - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - # delete is an alias for remove - rune -0 cscli postoverflows install crowdsecurity/rdns - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows delete crowdsecurity/rdns - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - # purge - assert_file_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - rune -0 cscli postoverflows remove crowdsecurity/rdns --purge - assert_file_not_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - # --all - rune -0 cscli postoverflows list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli postoverflows remove --all - - rune -0 cscli postoverflows list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli postoverflows remove --force" { - # remove a postoverflow that belongs to a collection - rune -0 cscli collections install crowdsecurity/auditd - rune -0 cscli postoverflows remove crowdsecurity/auditd-whitelisted-process - assert_stderr 
--partial "crowdsecurity/auditd-whitelisted-process belongs to collections: [crowdsecurity/auditd]" - assert_stderr --partial "Run 'sudo cscli postoverflows remove crowdsecurity/auditd-whitelisted-process --force' if you want to force remove this postoverflow" -} - -@test "cscli postoverflows upgrade" { - rune -1 cscli postoverflows upgrade - assert_stderr --partial "specify at least one postoverflow to upgrade or '--all'" - rune -1 cscli postoverflows upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - rune -0 cscli postoverflows remove crowdsecurity/discord-crawler-whitelist --purge - rune -1 cscli postoverflows upgrade crowdsecurity/discord-crawler-whitelist - assert_stderr --partial "can't upgrade crowdsecurity/discord-crawler-whitelist: not installed" - rune -0 cscli postoverflows install crowdsecurity/discord-crawler-whitelist --download-only - rune -1 cscli postoverflows upgrade crowdsecurity/discord-crawler-whitelist - assert_stderr --partial "can't upgrade crowdsecurity/discord-crawler-whitelist: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all postoverflows - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.postoverflows |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli postoverflows install crowdsecurity/rdns - - echo "v0.0" > "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli postoverflows upgrade crowdsecurity/rdns - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - # XXX: should return error - rune -0 
cscli postoverflows upgrade crowdsecurity/rdns - assert_stderr --partial "crowdsecurity/rdns is tainted, --force to overwrite" - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli postoverflows upgrade crowdsecurity/rdns --force - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli postoverflows install crowdsecurity/cdn-whitelist - echo "v0.0" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - echo "v0.0" >"$CONFIG_DIR/postoverflows/s01-whitelist/cdn-whitelist.yaml" - rune -0 cscli postoverflows list -o json - rune -0 jq -e '[.postoverflows[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli postoverflows upgrade crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 cscli postoverflows list -o json - rune -0 jq -e 'any(.postoverflows[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - echo "v0.0" >"$CONFIG_DIR/postoverflows/s01-whitelist/cdn-whitelist.yaml" - rune -0 cscli postoverflows list -o json - rune -0 jq -e '[.postoverflows[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli postoverflows upgrade --all - rune -0 cscli postoverflows list -o json - rune -0 jq -e 'any(.postoverflows[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats deleted file mode 100644 index b5f3a642233..00000000000 --- a/test/bats/20_hub_scenarios.bats +++ /dev/null @@ -1,383 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get 
'.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli scenarios list" { - hub_purge_all - - # no items - rune -0 cscli scenarios list - assert_output --partial "SCENARIOS" - rune -0 cscli scenarios list -o json - assert_json '{scenarios:[]}' - rune -0 cscli scenarios list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - rune -0 cscli scenarios list - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli scenarios list -o json - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 jq '.scenarios | length' <(output) - assert_output "2" - - rune -0 cscli scenarios list -o raw - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli scenarios list -a" { - expected=$(jq <"$INDEX_PATH" -r '.scenarios | length') - - rune -0 cscli scenarios list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli scenarios list -o json -a - rune -0 jq '.scenarios | length' <(output) - assert_output "$expected" - - rune -0 cscli scenarios list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli scenarios list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli scenarios list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli 
scenarios list -o json -a | jq -r '.scenarios[].name') - - # use python to sort because it handles "_" like go - rune -0 python3 -c 'import sys; print("".join(sorted(sys.stdin.readlines(), key=str.casefold)), end="")' <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli scenarios list [scenario]..." { - # non-existent - rune -1 cscli scenario install foo/bar - assert_stderr --partial "can't find 'foo/bar' in scenarios" - - # not installed - rune -0 cscli scenarios list crowdsecurity/ssh-bf - assert_output --regexp 'crowdsecurity/ssh-bf.*disabled' - - # install two items - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - # list an installed item - rune -0 cscli scenarios list crowdsecurity/ssh-bf - assert_output --regexp "crowdsecurity/ssh-bf.*enabled" - refute_output --partial "crowdsecurity/telnet-bf" - - # list multiple installed and non installed items - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf crowdsecurity/aws-bf - assert_output --partial "crowdsecurity/ssh-bf" - assert_output --partial "crowdsecurity/telnet-bf" - assert_output --partial "crowdsecurity/aws-bf" - - rune -0 cscli scenarios list crowdsecurity/ssh-bf -o json - rune -0 jq '.scenarios | length' <(output) - assert_output "1" - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf -o json - rune -0 jq '.scenarios | length' <(output) - assert_output "3" - - rune -0 cscli scenarios list crowdsecurity/ssh-bf -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli scenarios install" { - rune -1 cscli scenarios install - assert_stderr --partial 'requires 
at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli scenarios install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in scenarios" - - # simple install - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli scenarios install crowdsecurity/ssh-tf - assert_stderr --partial "can't find 'crowdsecurity/ssh-tf' in scenarios, did you mean 'crowdsecurity/ssh-bf'?" - - # install multiple - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: true' - rune -0 cscli scenarios inspect crowdsecurity/telnet-bf --no-metrics - assert_output --partial 'crowdsecurity/telnet-bf' - assert_output --partial 'installed: true' -} - -@test "cscli scenarios install (file location and download-only)" { - # simple install - rune -0 cscli scenarios install crowdsecurity/ssh-bf --download-only - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" -} - -@test "cscli scenarios install --force (tainted)" { - rune -0 cscli scenarios install crowdsecurity/ssh-bf - echo "dirty" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - - rune -1 cscli scenarios install crowdsecurity/ssh-bf - assert_stderr --partial "error while installing 
'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't overwrite unless --force" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf --force - assert_stderr --partial "Enabled crowdsecurity/ssh-bf" -} - -@test "cscli scenarios install --ignore (skip on errors)" { - rune -1 cscli scenarios install foo/bar crowdsecurity/ssh-bf - assert_stderr --partial "can't find 'foo/bar' in scenarios" - refute_stderr --partial "Enabled scenarios: crowdsecurity/ssh-bf" - - rune -0 cscli scenarios install foo/bar crowdsecurity/ssh-bf --ignore - assert_stderr --partial "can't find 'foo/bar' in scenarios" - assert_stderr --partial "Enabled scenarios: crowdsecurity/ssh-bf" -} - -@test "cscli scenarios inspect" { - rune -1 cscli scenarios inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli scenarios inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - - # one item - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_line 'type: scenarios' - assert_line 'name: crowdsecurity/ssh-bf' - assert_line 'author: crowdsecurity' - assert_line 'path: scenarios/crowdsecurity/ssh-bf.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -c '[.type, .name, .author, .path, .installed]' <(output) - assert_json '["scenarios","crowdsecurity/ssh-bf","crowdsecurity","scenarios/crowdsecurity/ssh-bf.yaml",false]' - - # one item, raw - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o raw - assert_line 'type: scenarios' - assert_line 'name: crowdsecurity/ssh-bf' - assert_line 'author: crowdsecurity' - assert_line 'path: 
scenarios/crowdsecurity/ssh-bf.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'crowdsecurity/telnet-bf' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf -o json - rune -0 jq -sc '[.[] | [.type, .name, .author, .path, .installed]]' <(output) - assert_json '[["scenarios","crowdsecurity/ssh-bf","crowdsecurity","scenarios/crowdsecurity/ssh-bf.yaml",false],["scenarios","crowdsecurity/telnet-bf","crowdsecurity","scenarios/crowdsecurity/telnet-bf.yaml",false]]' - - # multiple items, raw - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf -o raw - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'crowdsecurity/telnet-bf' - run -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli scenarios remove" { - rune -1 cscli scenarios remove - assert_stderr --partial "specify at least one scenario to remove or '--all'" - rune -1 cscli scenarios remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf --download-only - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "removing crowdsecurity/ssh-bf: not installed -- no need to remove" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "Removed crowdsecurity/ssh-bf" - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge - 
assert_stderr --partial 'Removed source file [crowdsecurity/ssh-bf]' - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "removing crowdsecurity/ssh-bf: not installed -- no need to remove" - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge --debug - assert_stderr --partial 'removing crowdsecurity/ssh-bf: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/ssh-bf]' - - # install, then remove, check files - rune -0 cscli scenarios install crowdsecurity/ssh-bf - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - # delete is an alias for remove - rune -0 cscli scenarios install crowdsecurity/ssh-bf - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios delete crowdsecurity/ssh-bf - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - # purge - assert_file_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge - assert_file_not_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - # --all - rune -0 cscli scenarios list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli scenarios remove --all - - rune -0 cscli scenarios list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli scenarios remove --force" { - # remove a scenario that belongs to a collection - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "crowdsecurity/ssh-bf belongs to collections: [crowdsecurity/sshd]" - assert_stderr --partial "Run 'sudo cscli scenarios remove crowdsecurity/ssh-bf --force' if you want to force remove 
this scenario" -} - -@test "cscli scenarios upgrade" { - rune -1 cscli scenarios upgrade - assert_stderr --partial "specify at least one scenario to upgrade or '--all'" - rune -1 cscli scenarios upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - rune -0 cscli scenarios remove crowdsecurity/vsftpd-bf --purge - rune -1 cscli scenarios upgrade crowdsecurity/vsftpd-bf - assert_stderr --partial "can't upgrade crowdsecurity/vsftpd-bf: not installed" - rune -0 cscli scenarios install crowdsecurity/vsftpd-bf --download-only - rune -1 cscli scenarios upgrade crowdsecurity/vsftpd-bf - assert_stderr --partial "can't upgrade crowdsecurity/vsftpd-bf: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all scenarios - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.scenarios |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - - echo "v0.0" > "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - # XXX: should return error - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf - assert_stderr --partial "crowdsecurity/ssh-bf is tainted, --force to overwrite" - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf --force - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e 
'.local_version==.version' <(output) - - # multiple items - rune -0 cscli scenarios install crowdsecurity/telnet-bf - echo "v0.0" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - echo "v0.0" >"$CONFIG_DIR/scenarios/telnet-bf.yaml" - rune -0 cscli scenarios list -o json - rune -0 jq -e '[.scenarios[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 cscli scenarios list -o json - rune -0 jq -e 'any(.scenarios[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - echo "v0.0" >"$CONFIG_DIR/scenarios/telnet-bf.yaml" - rune -0 cscli scenarios list -o json - rune -0 jq -e '[.scenarios[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli scenarios upgrade --all - rune -0 cscli scenarios list -o json - rune -0 jq -e 'any(.scenarios[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/cscli-hubtype-inspect.bats b/test/bats/cscli-hubtype-inspect.bats new file mode 100644 index 00000000000..9c96aadb3ad --- /dev/null +++ b/test/bats/cscli-hubtype-inspect.bats @@ -0,0 +1,93 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli inspect". +# +# Behavior that is specific to a hubtype should be tested in a separate file. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers inspect" { + rune -1 cscli parsers inspect + assert_stderr --partial 'requires at least 1 arg(s), only received 0' + # required for metrics + ./instance-crowdsec start + + rune -1 cscli parsers inspect blahblah/blahblah + assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" + + # one item + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics + assert_line 'type: parsers' + assert_line 'name: crowdsecurity/sshd-logs' + assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' + assert_line 'installed: false' + refute_line --partial 'Current metrics:' + + # one item, with metrics + rune -0 cscli parsers inspect crowdsecurity/sshd-logs + assert_line --partial 'Current metrics:' + + # one item, json + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -c '[.type, .name, .path, .installed]' <(output) + assert_json '["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false]' + + # one item, raw + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o raw + assert_line 'type: parsers' + assert_line 'name: crowdsecurity/sshd-logs' + assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' + assert_line 'installed: false' + refute_line --partial 'Current metrics:' + + # multiple items + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists --no-metrics + assert_output --partial 'crowdsecurity/sshd-logs' + 
assert_output --partial 'crowdsecurity/whitelists' + rune -1 grep -c 'Current metrics:' <(output) + assert_output "0" + + # multiple items, with metrics + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists + rune -0 grep -c 'Current metrics:' <(output) + assert_output "2" + + # multiple items, json + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o json + rune -0 jq -sc '[.[] | [.type, .name, .path, .installed]]' <(output) + assert_json '[["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false],["parsers","crowdsecurity/whitelists","parsers/s02-enrich/crowdsecurity/whitelists.yaml",false]]' + + # multiple items, raw + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o raw + assert_output --partial 'crowdsecurity/sshd-logs' + assert_output --partial 'crowdsecurity/whitelists' + rune -1 grep -c 'Current metrics:' <(output) + assert_output "0" +} diff --git a/test/bats/cscli-hubtype-install.bats b/test/bats/cscli-hubtype-install.bats new file mode 100644 index 00000000000..2304e5a72cc --- /dev/null +++ b/test/bats/cscli-hubtype-install.bats @@ -0,0 +1,269 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli install". +# +# Behavior that is specific to a hubtype should be tested in a separate file. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR +# INDEX_PATH=$(config_get '.config_paths.index_path') +# export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +#---------- + +@test "cscli install (no argument)" { + rune -1 cscli parsers install + refute_output + assert_stderr --partial 'requires at least 1 arg(s), only received 0' +} + +@test "cscli install (aliased)" { + rune -1 cscli parser install + refute_output + assert_stderr --partial 'requires at least 1 arg(s), only received 0' +} + +@test "install an item (non-existent)" { + rune -1 cscli parsers install foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "install an item (dry run)" { + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output - --regexp <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists \([0-9]+.[0-9]+\) + ✅ enable + parsers: crowdsecurity/whitelists + + Dry run, no action taken. + EOT + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "install an item (dry-run, de-duplicate commands)" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/whitelists --dry-run --output raw + assert_output - --regexp <<-EOT + Action plan: + 📥 download parsers:crowdsecurity/whitelists \([0-9]+.[0-9]+\) + ✅ enable parsers:crowdsecurity/whitelists + + Dry run, no action taken. 
+ EOT + refute_stderr +} + +@test "install an item" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + enabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "install an item (autocorrect)" { + rune -1 cscli parsers install crowdsecurity/whatelists + assert_stderr --partial "can't find 'crowdsecurity/whatelists' in parsers, did you mean 'crowdsecurity/whitelists'?" + refute_output +} + +@test "install an item (download only)" { + assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "install an item (already installed)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr +} + +@test "install an item (force is no-op if not tainted)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers install crowdsecurity/whitelists --force + assert_output "Nothing to do." 
+ refute_stderr +} + +@test "install an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output - --stderr <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + # XXX should this fail with status 1 instead? + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + rune -0 cscli parsers install crowdsecurity/whitelists --force + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "install multiple items" { + rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs + rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + rune -0 cscli parsers inspect crowdsecurity/postfix-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "install multiple items (some already installed)" { + rune -0 cscli parsers install crowdsecurity/pgsql-logs + rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs --dry-run + assert_output - --regexp <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/postfix-logs \([0-9]+.[0-9]+\) + ✅ enable + parsers: crowdsecurity/postfix-logs + + Dry run, no action taken. + EOT + refute_stderr +} + +@test "install one or multiple items (ignore errors)" { + rune -0 cscli parsers install foo/bar --ignore + assert_stderr --partial "can't find 'foo/bar' in parsers" + assert_output "Nothing to do." 
+ + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + # XXX: this is not testing '--ignore' anymore; TODO find a better error to ignore + # and maybe re-evaluate the --ignore flag + rune -0 cscli parsers install crowdsecurity/whitelists --ignore + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + # error on one item, should still install the others + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/pgsql-logs --ignore + refute_stderr + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + downloading parsers:crowdsecurity/pgsql-logs + enabling parsers:crowdsecurity/pgsql-logs + + $RELOAD_MESSAGE + EOT + rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "override part of a collection with local items" { + # A collection will use a local item to fulfil a dependency provided it has + # the correct name field. 
+ + mkdir -p "$CONFIG_DIR/parsers/s01-parse" + echo "name: crowdsecurity/sshd-logs" > "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from hub + rune -0 cscli parsers install crowdsecurity/sshd-logs + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from a collection + rune -0 cscli collections install crowdsecurity/sshd + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # verify it installed the rest of the collection + assert_line 'enabling contexts:crowdsecurity/bf_base' + assert_line 'enabling collections:crowdsecurity/sshd' + + # remove them + rune -0 cscli collections delete crowdsecurity/sshd --force --purge + rune -0 rm "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + + # do the same with a different file name + echo "name: crowdsecurity/sshd-logs" > "$CONFIG_DIR/parsers/s01-parse/something.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from hub + rune -0 cscli parsers install crowdsecurity/sshd-logs + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # attempt to install from a collection + rune -0 cscli collections install crowdsecurity/sshd + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # verify it installed the rest of the collection + assert_line 'enabling contexts:crowdsecurity/bf_base' + assert_line 'enabling collections:crowdsecurity/sshd' +} diff --git a/test/bats/cscli-hubtype-list.bats b/test/bats/cscli-hubtype-list.bats new file mode 100644 
index 00000000000..14113650c74 --- /dev/null +++ b/test/bats/cscli-hubtype-list.bats @@ -0,0 +1,130 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers list" { + hub_purge_all + + # no items + rune -0 cscli parsers list + assert_output --partial "PARSERS" + rune -0 cscli parsers list -o json + assert_json '{parsers:[]}' + rune -0 cscli parsers list -o raw + assert_output 'name,status,version,description' + + # some items + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + rune -0 cscli parsers list + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 grep -c enabled <(output) + assert_output "2" + + rune -0 cscli parsers list -o json + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 jq '.parsers | length' <(output) + assert_output "2" + + rune -0 cscli parsers list -o raw + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "2" +} + +@test "cscli parsers list -a" { + expected=$(jq <"$INDEX_PATH" -r '.parsers | length') + + rune -0 cscli parsers list -a + rune -0 grep -c disabled <(output) + assert_output "$expected" + + rune -0 cscli parsers list -o json -a + rune -0 jq '.parsers | length' <(output) + assert_output "$expected" + + rune -0 cscli parsers list -o raw -a + rune -0 
grep -vc 'name,status,version,description' <(output) + assert_output "$expected" + + # the list should be the same in all formats, and sorted (not case sensitive) + + list_raw=$(cscli parsers list -o raw -a | tail -n +2 | cut -d, -f1) + list_human=$(cscli parsers list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) + list_json=$(cscli parsers list -o json -a | jq -r '.parsers[].name') + + # use python to sort because it handles "_" like go + rune -0 python3 -c 'import sys; print("".join(sorted(sys.stdin.readlines(), key=str.casefold)), end="")' <<<"$list_raw" + assert_output "$list_raw" + + assert_equal "$list_raw" "$list_json" + assert_equal "$list_raw" "$list_human" +} + +@test "cscli parsers list [parser]..." { + # non-existent + rune -1 cscli parsers install foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" + + # not installed + rune -0 cscli parsers list crowdsecurity/whitelists + assert_output --regexp 'crowdsecurity/whitelists.*disabled' + + # install two items + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + # list an installed item + rune -0 cscli parsers list crowdsecurity/whitelists + assert_output --regexp "crowdsecurity/whitelists.*enabled" + refute_output --partial "crowdsecurity/windows-auth" + + # list multiple installed and non installed items + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs + assert_output --partial "crowdsecurity/whitelists" + assert_output --partial "crowdsecurity/windows-auth" + assert_output --partial "crowdsecurity/traefik-logs" + + rune -0 cscli parsers list crowdsecurity/whitelists -o json + rune -0 jq '.parsers | length' <(output) + assert_output "1" + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o json + rune -0 jq '.parsers | length' <(output) + assert_output "3" + + rune -0 cscli parsers list crowdsecurity/whitelists -o raw + rune -0 grep 
-vc 'name,status,version,description' <(output) + assert_output "1" + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o raw + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "3" +} diff --git a/test/bats/cscli-hubtype-remove.bats b/test/bats/cscli-hubtype-remove.bats new file mode 100644 index 00000000000..32db8efe788 --- /dev/null +++ b/test/bats/cscli-hubtype-remove.bats @@ -0,0 +1,245 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli remove". +# +# Behavior that is specific to a hubtype should be tested in a separate file. + + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR +# INDEX_PATH=$(config_get '.config_paths.index_path') +# export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +#---------- + +@test "cscli remove (no argument)" { + rune -1 cscli parsers remove + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "cscli remove (aliased)" { + rune -1 cscli parser remove + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "cscli delete (alias of remove)" { + rune -1 cscli parsers delete + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "remove an item (non-existent)" { + rune -1 cscli parsers remove foo/bar + refute_output + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "remove an item (not downloaded)" { + rune -0 cscli parsers 
inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.downloaded==false' <(output) + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output "Nothing to do." + refute_stderr +} + +@test "remove an item (not installed)" { + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output --partial "purging parsers:crowdsecurity/whitelists" +} + +@test "remove an item (dry run)" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists + + Dry run, no action taken. 
+ EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "remove an item" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove an item (purge)" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + purging parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.downloaded==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove multiple items" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + rune -0 cscli parsers remove crowdsecurity/whitelists crowdsecurity/windows-auth --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists, crowdsecurity/windows-auth + + Dry run, no action taken. 
+ EOT + refute_stderr + + rune -0 cscli parsers remove crowdsecurity/whitelists crowdsecurity/windows-auth + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/windows-auth --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) +} + +@test "remove all items of a same type" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + rune -1 cscli parsers remove crowdsecurity/whitelists --all + assert_stderr "Error: can't specify items and '--all' at the same time" + + rune -0 cscli parsers remove --all --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists, crowdsecurity/windows-auth + + Dry run, no action taken. + EOT + refute_stderr + + rune -0 cscli parsers remove --all + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/windows-auth --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) +} + +@test "remove an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -1 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_stderr --partial "crowdsecurity/whitelists is tainted, use '--force' to remove" + refute_output + + rune -1 cscli parsers remove crowdsecurity/whitelists + assert_stderr --partial "crowdsecurity/whitelists is tainted, use '--force' to remove" + refute_output + + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists 
"$CONFIG_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove an item that belongs to a collection (requires --force)" { + rune -0 cscli collections install crowdsecurity/sshd + # XXX: should exit with 1? + rune -0 cscli parsers remove crowdsecurity/sshd-logs + assert_output "Nothing to do." + assert_stderr --partial "crowdsecurity/sshd-logs belongs to collections: [crowdsecurity/sshd]" + assert_stderr --partial "Run 'sudo cscli parsers remove crowdsecurity/sshd-logs --force' if you want to force remove this parser" + assert_file_exists "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + + rune -0 cscli parsers remove crowdsecurity/sshd-logs --force + assert_output - <<-EOT + disabling parsers:crowdsecurity/sshd-logs + + $RELOAD_MESSAGE + EOT + refute_stderr + assert_file_not_exists "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" +} + +@test "remove an item (autocomplete)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli __complete parsers remove crowd + assert_stderr --partial '[Debug] parsers: [crowdsecurity/whitelists]' + assert_output --partial 'crowdsecurity/whitelists' +} diff --git a/test/bats/cscli-hubtype-upgrade.bats b/test/bats/cscli-hubtype-upgrade.bats new file mode 100644 index 00000000000..4244e611cf6 --- /dev/null +++ b/test/bats/cscli-hubtype-upgrade.bats @@ -0,0 +1,253 @@ +#!/usr/bin/env bats + +# Generic tests for the upgrade of hub items and data files. +# +# Commands under test: +# cscli upgrade +# +# This file should test behavior that can be applied to all types. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +hub_inject_v0() { + # add a version 0.0 to all parsers + + # hash of the string "v0.0" + sha256_0_0="daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0" + + new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.parsers |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') + echo "$new_hub" >"$INDEX_PATH" +} + +install_v0() { + local hubtype=$1 + shift + local item_name=$1 + shift + + cscli "$hubtype" install "$item_name" + printf "%s" "v0.0" > "$(jq -r '.local_path' <(cscli "$hubtype" inspect "$item_name" --no-metrics -o json))" +} + +#---------- + +@test "cscli upgrade (no argument)" { + rune -1 cscli parsers upgrade + refute_output + assert_stderr --partial "specify at least one parser to upgrade or '--all'" +} + +@test "cscli upgrade (aliased)" { + rune -1 cscli parser upgrade + refute_output + assert_stderr --partial "specify at least one parser to upgrade or '--all'" +} + +@test "upgrade an item (non-existent)" { + rune -1 cscli parsers upgrade foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "upgrade an item (non installed)" { + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + rune -0 
cscli parsers upgrade crowdsecurity/whitelists + assert_output 'Nothing to do.' + refute_stderr +} + +@test "upgrade an item (up-to-date)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output 'Nothing to do.' + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output 'Nothing to do.' +} + +@test "upgrade an item (dry run)" { + hub_inject_v0 + install_v0 parsers crowdsecurity/whitelists + latest=$(get_latest_version parsers crowdsecurity/whitelists) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists (0.0 -> $latest) + + Dry run, no action taken. + EOT + refute_stderr +} + +get_latest_version() { + local hubtype=$1 + shift + local item_name=$1 + shift + + cscli "$hubtype" inspect "$item_name" -o json | jq -r '.version' +} + +@test "upgrade an item" { + hub_inject_v0 + install_v0 parsers crowdsecurity/whitelists + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + + # the version is now the latest + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="?"' <(output) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. 
+ EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --force + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade multiple items" { + hub_inject_v0 + + install_v0 parsers crowdsecurity/whitelists + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) + + install_v0 parsers crowdsecurity/sshd-logs + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + latest_sshd=$(get_latest_version parsers crowdsecurity/sshd-logs) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/sshd-logs --dry-run + assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/sshd-logs (0.0 -> $latest_sshd), crowdsecurity/whitelists (0.0 -> $latest_whitelists) + + Dry run, no action taken. 
+ EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/sshd-logs + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + downloading parsers:crowdsecurity/sshd-logs + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade all items of the same type" { + hub_inject_v0 + + install_v0 parsers crowdsecurity/whitelists + install_v0 parsers crowdsecurity/sshd-logs + install_v0 parsers crowdsecurity/windows-auth + + rune -0 cscli parsers upgrade --all + assert_output - <<-EOT + downloading parsers:crowdsecurity/sshd-logs + downloading parsers:crowdsecurity/whitelists + downloading parsers:crowdsecurity/windows-auth + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/windows-auth -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade an item (autocomplete)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli __complete parsers upgrade crowd + assert_stderr --partial '[Debug] parsers: [crowdsecurity/whitelists]' + assert_output --partial 'crowdsecurity/whitelists' +} + diff --git a/test/bats/cscli-parsers.bats b/test/bats/cscli-parsers.bats new file mode 100644 index 00000000000..6ff138e9fd8 --- /dev/null +++ b/test/bats/cscli-parsers.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +# Tests for the "cscli parsers" behavior that is not covered by cscli-hubtype-*.bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + 
HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers inspect (includes the stage attribute)" { + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o human + assert_line 'stage: s01-parse' + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o raw + assert_line 'stage: s01-parse' + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o json + rune -0 jq -r '.stage' <(output) + assert_output 's01-parse' +} diff --git a/test/bats/cscli-postoverflows.bats b/test/bats/cscli-postoverflows.bats new file mode 100644 index 00000000000..979ee81defb --- /dev/null +++ b/test/bats/cscli-postoverflows.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +# Tests for the "cscli postoverflows" behavior that is not covered by cscli-hubtype-*.bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli postoverflows inspect (includes the stage attribute)" { + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o human + assert_line 'stage: s00-enrich' + + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o raw + assert_line 'stage: 
s00-enrich' + + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o json + rune -0 jq -r '.stage' <(output) + assert_output 's00-enrich' +} diff --git a/test/bats/hub-index.bats b/test/bats/hub-index.bats new file mode 100644 index 00000000000..76759991e4a --- /dev/null +++ b/test/bats/hub-index.bats @@ -0,0 +1,357 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "malformed index - null item" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + EOF + + rune -1 cscli hub list + assert_stderr --partial "failed to read hub index: parsers:author/pars1 has no index metadata." +} + +@test "malformed index - no download path" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + version: "0.0" + versions: + 0.0: + digest: daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0 + EOF + + rune -1 cscli hub list + assert_stderr --partial "failed to read hub index: parsers:author/pars1 has no download path." +} + +@test "malformed parser - no stage" { + # Installing a parser requires a stage directory + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -1 cscli hub list -o raw + assert_stderr --partial "failed to read hub index: parsers:author/pars1 has no stage." 
+} + +@test "malformed parser - short path" { + # Installing a parser requires a stage directory + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli parsers install author/pars1 + rune -0 cscli hub list + # XXX here the item is installed but won't work, we only have a warning + assert_stderr --partial 'Ignoring file' + assert_stderr --partial 'path is too short' +} + +@test "malformed item - not yaml" { + # Installing an item requires reading the list of data files + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0 + content: "v0.0" + EOF + + rune -0 cscli hub list -o raw + rune -1 cscli parsers install author/pars1 + assert_stderr --partial 'unmarshal errors' +} + +@test "malformed item - hash mismatch" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: "0000000000000000000000000000000000000000000000000000000000000000" + content: "v0.0" + EOF + + rune -0 cscli hub list -o raw + rune -1 cscli parsers install author/pars1 + assert_stderr --partial 'parsers:author/pars1: hash mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0.' 
+} + +@test "install minimal item" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli parsers install author/pars1 + assert_line "downloading parsers:author/pars1" + assert_line "enabling parsers:author/pars1" + rune -0 cscli hub list +} + +@test "replace an item in a collection update" { + # A new version of coll1 will uninstall pars1 and install pars2. + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.0" + versions: + 0.0: + digest: 801e11865f8fdf82a348e70fe3f568af190715c40a176e058da2ad21ff5e20be + content: "{'parsers': ['author/pars1']}" + parsers: + - author/pars1 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list + rune -0 cscli collections install author/coll1 + + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.1" + versions: + 0.0: + digest: 801e11865f8fdf82a348e70fe3f568af190715c40a176e058da2ad21ff5e20be + 0.1: + digest: f3c535c2d01abec5aadbb5ce03c357a478d91b116410c9fee288e073cd34c0dd + content: "{'parsers': ['author/pars2']}" + parsers: + - author/pars2 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + 
author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli collections upgrade author/coll1 + assert_output - <<-EOT + downloading parsers:author/pars2 + enabling parsers:author/pars2 + disabling parsers:author/pars1 + downloading collections:author/coll1 + + $RELOAD_MESSAGE + EOT + + rune -0 cscli hub list -o raw + assert_output - <<-EOT + name,status,version,description,type + author/pars2,enabled,0.0,,parsers + author/coll1,enabled,0.1,,collections + EOT +} + +@test "replace an outdated item only if it's not used elsewhere" { + # XXX + skip "not implemented" + # A new version of coll1 will uninstall pars1 and install pars2. + # Pars3 will not be uninstalled because it's still required by coll2. + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.0" + versions: + 0.0: + digest: 0c397c7b3e19d730578932fdc260c53f39bd2488fad87207ab6b7e4dc315b067 + content: "{'parsers': ['author/pars1', 'author/pars3']}" + parsers: + - author/pars1 + - author/pars3 + author/coll2: + path: collections/author/coll2.yaml + version: "0.0" + versions: + 0.0: + digest: 96df483ff697d4d214792b135a3ba5ddaca0ebfd856e7da89215926394ac4001 + content: "{'parsers': ['author/pars3']}" + parsers: + - author/pars3 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars3: + path: parsers/s01-parse/author/pars3.yaml + stage: s01-parse + version: "0.0" + versions: + 
0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list + rune -0 cscli collections install author/coll1 author/coll2 + + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.1" + versions: + 0.0: + digest: 0c397c7b3e19d730578932fdc260c53f39bd2488fad87207ab6b7e4dc315b067 + 0.1: + digest: f3c535c2d01abec5aadbb5ce03c357a478d91b116410c9fee288e073cd34c0dd + content: "{'parsers': ['author/pars2']}" + parsers: + - author/pars2 + author/coll2: + path: collections/author/coll2.yaml + version: "0.0" + versions: + 0.0: + digest: 96df483ff697d4d214792b135a3ba5ddaca0ebfd856e7da89215926394ac4001 + content: "{'parsers': ['author/pars3']}" + parsers: + - author/pars3 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars3: + path: parsers/s01-parse/author/pars3.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli collections upgrade author/coll1 + assert_output - <<-EOT + downloading parsers:author/pars2 + enabling parsers:author/pars2 + disabling parsers:author/pars1 + downloading collections:author/coll1 + + $RELOAD_MESSAGE + EOT + + rune -0 cscli hub list -o raw + assert_output - <<-EOT + name,status,version,description,type + author/pars2,enabled,0.0,,parsers + author/pars3,enabled,0.0,,parsers + author/coll1,enabled,0.1,,collections + EOT +} diff --git a/test/bin/remove-all-hub-items 
b/test/bin/remove-all-hub-items index 981602b775a..b5d611782ff 100755 --- a/test/bin/remove-all-hub-items +++ b/test/bin/remove-all-hub-items @@ -14,7 +14,7 @@ echo "Pre-downloading Hub content..." types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - "$CSCLI" "$itemtype" remove --all --force + "$CSCLI" "$itemtype" remove --all --force --purge --yes done echo " done." diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 3e3c806b616..4f3ec7cc2ae 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -117,7 +117,7 @@ make_init_data() { "$CSCLI" --warning hub update --with-content # preload some content and data files - "$CSCLI" collections install crowdsecurity/linux --download-only + "$CSCLI" collections install crowdsecurity/linux --download-only --yes # sub-items did not respect --download-only ./bin/remove-all-hub-items diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 39a084596e2..902edc5de82 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -260,16 +260,6 @@ hub_purge_all() { } export -f hub_purge_all -# remove unused data from the index, to make sure we don't rely on it in any way -hub_strip_index() { - local INDEX - INDEX=$(config_get .config_paths.index_path) - local hub_min - hub_min=$(jq <"$INDEX" 'del(..|.long_description?) | del(..|.deprecated?) | del (..|.labels?)') - echo "$hub_min" >"$INDEX" -} -export -f hub_strip_index - # remove color and style sequences from stdin plaintext() { sed -E 's/\x1B\[[0-9;]*[JKmsu]//g' @@ -340,3 +330,17 @@ lp-get-token() { echo "$resp" | yq -r '.token' } export -f lp-get-token + +case $(uname) in + "Linux") + # shellcheck disable=SC2089 + RELOAD_MESSAGE="Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." + ;; + *) + # shellcheck disable=SC2089 + RELOAD_MESSAGE="Run 'sudo service crowdsec reload' for the new configuration to be effective." 
+ ;; +esac + +# shellcheck disable=SC2090 +export RELOAD_MESSAGE From fc17c0c61341bff22a18d6e0bfeb567b3cad672b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 27 Dec 2024 10:40:50 +0100 Subject: [PATCH 374/581] lint: replace type assertions and type switch on errors (#3376) * errorlint: replace type assertions on errors * errorlint: replace type switch on errors * lint --- .golangci.yml | 10 +- cmd/crowdsec/win_service.go | 2 +- pkg/acquisition/acquisition.go | 3 +- pkg/acquisition/modules/kinesis/kinesis.go | 128 ++++++++++++++++----- pkg/apiclient/client_http.go | 9 +- pkg/apiserver/apiserver.go | 42 ++++--- pkg/database/alerts.go | 3 +- 7 files changed, 132 insertions(+), 65 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index b51f17df489..deb073c2eea 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -118,7 +118,7 @@ linters-settings: arguments: [6] - name: function-length # lower this after refactoring - arguments: [110, 237] + arguments: [111, 238] - name: get-return disabled: true - name: increment-decrement @@ -333,14 +333,6 @@ issues: - errorlint text: "non-wrapping format verb for fmt.Errorf. Use `%w` to format errors" - - linters: - - errorlint - text: "type assertion on error will fail on wrapped errors. Use errors.As to check for specific errors" - - - linters: - - errorlint - text: "type switch on error will fail on wrapped errors. 
Use errors.As to check for specific errors" - - linters: - nosprintfhostport text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf" diff --git a/cmd/crowdsec/win_service.go b/cmd/crowdsec/win_service.go index 6aa363ca3a7..ae48e77447c 100644 --- a/cmd/crowdsec/win_service.go +++ b/cmd/crowdsec/win_service.go @@ -67,7 +67,7 @@ func runService(name string) error { // All the calls to logging before the logger is configured are pretty much useless, but we keep them for clarity err := eventlog.InstallAsEventCreate("CrowdSec", eventlog.Error|eventlog.Warning|eventlog.Info) if err != nil { - if errno, ok := err.(syscall.Errno); ok { + if errno, ok := err.(syscall.Errno); ok { //nolint:errorlint if errno == windows.ERROR_ACCESS_DENIED { log.Warnf("Access denied when installing event source, running as non-admin ?") } else { diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 4e233aad616..291bc369c3e 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -328,7 +328,8 @@ func GetMetrics(sources []DataSource, aggregated bool) error { for _, metric := range metrics { if err := prometheus.Register(metric); err != nil { - if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { + var alreadyRegisteredErr prometheus.AlreadyRegisteredError + if !errors.As(err, &alreadyRegisteredErr) { return fmt.Errorf("could not register metrics for datasource %s: %w", sources[i].GetName(), err) } // ignore the error diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 3744e43f38d..b166a706ca9 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -99,17 +99,22 @@ func (k *KinesisSource) newClient() error { if sess == nil { return errors.New("failed to create aws session") } + config := aws.NewConfig() + if k.Config.AwsRegion != "" { config = config.WithRegion(k.Config.AwsRegion) } 
+ if k.Config.AwsEndpoint != "" { config = config.WithEndpoint(k.Config.AwsEndpoint) } + k.kClient = kinesis.New(sess, config) if k.kClient == nil { return errors.New("failed to create kinesis client") } + return nil } @@ -136,15 +141,19 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { if k.Config.StreamName == "" && !k.Config.UseEnhancedFanOut { return errors.New("stream_name is mandatory when use_enhanced_fanout is false") } + if k.Config.StreamARN == "" && k.Config.UseEnhancedFanOut { return errors.New("stream_arn is mandatory when use_enhanced_fanout is true") } + if k.Config.ConsumerName == "" && k.Config.UseEnhancedFanOut { return errors.New("consumer_name is mandatory when use_enhanced_fanout is true") } + if k.Config.StreamARN != "" && k.Config.StreamName != "" { return errors.New("stream_arn and stream_name are mutually exclusive") } + if k.Config.MaxRetries <= 0 { k.Config.MaxRetries = 10 } @@ -167,6 +176,7 @@ func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsL } k.shardReaderTomb = &tomb.Tomb{} + return nil } @@ -188,22 +198,27 @@ func (k *KinesisSource) OneShotAcquisition(_ context.Context, _ chan types.Event func (k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { b := bytes.NewBuffer(record) + r, err := gzip.NewReader(b) if err != nil { k.logger.Error(err) return nil, err } + decompressed, err := io.ReadAll(r) if err != nil { k.logger.Error(err) return nil, err } + var subscriptionRecord CloudWatchSubscriptionRecord + err = json.Unmarshal(decompressed, &subscriptionRecord) if err != nil { k.logger.Error(err) return nil, err } + return subscriptionRecord.LogEvents, nil } @@ -214,17 +229,20 @@ func (k *KinesisSource) WaitForConsumerDeregistration(consumerName string, strea ConsumerName: aws.String(consumerName), StreamARN: aws.String(streamARN), }) + + var resourceNotFoundErr *kinesis.ResourceNotFoundException + if errors.As(err, &resourceNotFoundErr) { + 
return nil + } + if err != nil { - switch err.(type) { - case *kinesis.ResourceNotFoundException: - return nil - default: - k.logger.Errorf("Error while waiting for consumer deregistration: %s", err) - return fmt.Errorf("cannot describe stream consumer: %w", err) - } + k.logger.Errorf("Error while waiting for consumer deregistration: %s", err) + return fmt.Errorf("cannot describe stream consumer: %w", err) } + time.Sleep(time.Millisecond * 200 * time.Duration(i+1)) } + return fmt.Errorf("consumer %s is not deregistered after %d tries", consumerName, maxTries) } @@ -234,17 +252,21 @@ func (k *KinesisSource) DeregisterConsumer() error { ConsumerName: aws.String(k.Config.ConsumerName), StreamARN: aws.String(k.Config.StreamARN), }) + + var resourceNotFoundErr *kinesis.ResourceNotFoundException + if errors.As(err, &resourceNotFoundErr) { + return nil + } + if err != nil { - switch err.(type) { - case *kinesis.ResourceNotFoundException: - default: - return fmt.Errorf("cannot deregister stream consumer: %w", err) - } + return fmt.Errorf("cannot deregister stream consumer: %w", err) } + err = k.WaitForConsumerDeregistration(k.Config.ConsumerName, k.Config.StreamARN) if err != nil { return fmt.Errorf("cannot wait for consumer deregistration: %w", err) } + return nil } @@ -257,18 +279,22 @@ func (k *KinesisSource) WaitForConsumerRegistration(consumerARN string) error { if err != nil { return fmt.Errorf("cannot describe stream consumer: %w", err) } + if *describeOutput.ConsumerDescription.ConsumerStatus == "ACTIVE" { k.logger.Debugf("Consumer %s is active", consumerARN) return nil } + time.Sleep(time.Millisecond * 200 * time.Duration(i+1)) k.logger.Debugf("Waiting for consumer registration %d", i) } + return fmt.Errorf("consumer %s is not active after %d tries", consumerARN, maxTries) } func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutput, error) { k.logger.Debugf("Registering consumer %s", k.Config.ConsumerName) + streamConsumer, err := 
k.kClient.RegisterStreamConsumer(&kinesis.RegisterStreamConsumerInput{ ConsumerName: aws.String(k.Config.ConsumerName), StreamARN: aws.String(k.Config.StreamARN), @@ -276,10 +302,12 @@ func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutpu if err != nil { return nil, fmt.Errorf("cannot register stream consumer: %w", err) } + err = k.WaitForConsumerRegistration(*streamConsumer.Consumer.ConsumerARN) if err != nil { return nil, fmt.Errorf("timeout while waiting for consumer to be active: %w", err) } + return streamConsumer, nil } @@ -296,8 +324,12 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan linesRead.With(prometheus.Labels{"stream": k.Config.StreamName}).Inc() } } - var data []CloudwatchSubscriptionLogEvent - var err error + + var ( + data []CloudwatchSubscriptionLogEvent + err error + ) + if k.Config.FromSubscription { // The AWS docs says that the data is base64 encoded // but apparently GetRecords decodes it for us ? @@ -309,19 +341,22 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan } else { data = []CloudwatchSubscriptionLogEvent{{Message: string(record.Data)}} } + for _, event := range data { logger.Tracef("got record %s", event.Message) + l := types.Line{} l.Raw = event.Message l.Labels = k.Config.Labels l.Time = time.Now().UTC() l.Process = true l.Module = k.GetName() - if k.Config.StreamARN != "" { - l.Src = k.Config.StreamARN - } else { + + l.Src = k.Config.StreamARN + if l.Src == "" { l.Src = k.Config.StreamName } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) evt.Line = l out <- evt @@ -335,20 +370,23 @@ func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEven // and we won't be able to start a new one if this is the first one started by the tomb // TODO: look into parent shards to see if a shard is closed before starting to read it ? 
time.Sleep(time.Second) + for { select { case <-k.shardReaderTomb.Dying(): logger.Infof("Subscribed shard reader is dying") - err := reader.Close() - if err != nil { + + if err := reader.Close(); err != nil { return fmt.Errorf("cannot close kinesis subscribed shard reader: %w", err) } + return nil case event, ok := <-reader.Events(): if !ok { logger.Infof("Event chan has been closed") return nil } + switch event := event.(type) { case *kinesis.SubscribeToShardEvent: k.ParseAndPushRecords(event.Records, out, logger, shardId) @@ -369,6 +407,7 @@ func (k *KinesisSource) SubscribeToShards(arn arn.ARN, streamConsumer *kinesis.R for _, shard := range shards.Shards { shardId := *shard.ShardId + r, err := k.kClient.SubscribeToShard(&kinesis.SubscribeToShardInput{ ShardId: aws.String(shardId), StartingPosition: &kinesis.StartingPosition{Type: aws.String(kinesis.ShardIteratorTypeLatest)}, @@ -377,10 +416,12 @@ func (k *KinesisSource) SubscribeToShards(arn arn.ARN, streamConsumer *kinesis.R if err != nil { return fmt.Errorf("cannot subscribe to shard: %w", err) } + k.shardReaderTomb.Go(func() error { return k.ReadFromSubscription(r.GetEventStream().Reader, out, shardId, arn.Resource[7:]) }) } + return nil } @@ -389,12 +430,14 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { if err != nil { return fmt.Errorf("cannot parse stream ARN: %w", err) } + if !strings.HasPrefix(parsedARN.Resource, "stream/") { return fmt.Errorf("resource part of stream ARN %s does not start with stream/", k.Config.StreamARN) } k.logger = k.logger.WithField("stream", parsedARN.Resource[7:]) k.logger.Info("starting kinesis acquisition with enhanced fan-out") + err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) @@ -417,18 +460,22 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { k.logger.Infof("Kinesis source is dying") k.shardReaderTomb.Kill(nil) _ = k.shardReaderTomb.Wait() // we don't 
care about the error as we kill the tomb ourselves + err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) } + return nil case <-k.shardReaderTomb.Dying(): k.logger.Debugf("Kinesis subscribed shard reader is dying") + if k.shardReaderTomb.Err() != nil { return k.shardReaderTomb.Err() } // All goroutines have exited without error, so a resharding event, start again k.logger.Debugf("All reader goroutines have exited, resharding event or periodic resubscribe") + continue } } @@ -437,6 +484,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { logger := k.logger.WithField("shard", shardId) logger.Debugf("Starting to read shard") + sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ ShardId: aws.String(shardId), StreamName: &k.Config.StreamName, @@ -446,28 +494,35 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro logger.Errorf("Cannot get shard iterator: %s", err) return fmt.Errorf("cannot get shard iterator: %w", err) } + it := sharIt.ShardIterator // AWS recommends to wait for a second between calls to GetRecords for a given shard ticker := time.NewTicker(time.Second) + for { select { case <-ticker.C: records, err := k.kClient.GetRecords(&kinesis.GetRecordsInput{ShardIterator: it}) it = records.NextShardIterator + + var throughputErr *kinesis.ProvisionedThroughputExceededException + if errors.As(err, &throughputErr) { + logger.Warn("Provisioned throughput exceeded") + // TODO: implement exponential backoff + continue + } + + var expiredIteratorErr *kinesis.ExpiredIteratorException + if errors.As(err, &expiredIteratorErr) { + logger.Warn("Expired iterator") + continue + } + if err != nil { - switch err.(type) { - case *kinesis.ProvisionedThroughputExceededException: - logger.Warn("Provisioned throughput exceeded") - // TODO: implement exponential 
backoff - continue - case *kinesis.ExpiredIteratorException: - logger.Warn("Expired iterator") - continue - default: - logger.Error("Cannot get records") - return fmt.Errorf("cannot get records: %w", err) - } + logger.Error("Cannot get records") + return fmt.Errorf("cannot get records: %w", err) } + k.ParseAndPushRecords(records.Records, out, logger, shardId) if it == nil { @@ -477,6 +532,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro case <-k.shardReaderTomb.Dying(): logger.Infof("shardReaderTomb is dying, exiting ReadFromShard") ticker.Stop() + return nil } } @@ -485,6 +541,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error { k.logger = k.logger.WithField("stream", k.Config.StreamName) k.logger.Info("starting kinesis acquisition from shards") + for { shards, err := k.kClient.ListShards(&kinesis.ListShardsInput{ StreamName: aws.String(k.Config.StreamName), @@ -492,9 +549,12 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error if err != nil { return fmt.Errorf("cannot list shards: %w", err) } + k.shardReaderTomb = &tomb.Tomb{} + for _, shard := range shards.Shards { shardId := *shard.ShardId + k.shardReaderTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming/shard") return k.ReadFromShard(out, shardId) @@ -505,6 +565,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error k.logger.Info("kinesis source is dying") k.shardReaderTomb.Kill(nil) _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves + return nil case <-k.shardReaderTomb.Dying(): reason := k.shardReaderTomb.Err() @@ -512,7 +573,9 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error k.logger.Errorf("Unexpected error from shard reader : %s", reason) return reason } + k.logger.Infof("All shards have 
been closed, probably a resharding event, restarting acquisition") + continue } } @@ -521,11 +584,14 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error func (k *KinesisSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming") + if k.Config.UseEnhancedFanOut { return k.EnhancedRead(out, t) } + return k.ReadFromStream(out, t) }) + return nil } diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go index eeca929ea6e..c64404dc7ee 100644 --- a/pkg/apiclient/client_http.go +++ b/pkg/apiclient/client_http.go @@ -78,10 +78,11 @@ func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (* } // If the error type is *url.Error, sanitize its URL before returning. - if e, ok := err.(*url.Error); ok { - if url, err := url.Parse(e.URL); err == nil { - e.URL = url.String() - return newResponse(resp), e + var urlErr *url.Error + if errors.As(err, &urlErr) { + if parsedURL, parseErr := url.Parse(urlErr.URL); parseErr == nil { + urlErr.URL = parsedURL.String() + return newResponse(resp), urlErr } return newResponse(resp), err diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index e1d9ce95349..88f1bd21dc4 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -46,10 +46,18 @@ type APIServer struct { consoleConfig *csconfig.ConsoleConfig } -func isBrokenConnection(err any) bool { - if ne, ok := err.(*net.OpError); ok { - if se, ok := ne.Err.(*os.SyscallError); ok { - if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { +func isBrokenConnection(maybeError any) bool { + err, ok := maybeError.(error) + if !ok { + return false + } + + var netOpError *net.OpError + if errors.As(err, &netOpError) { + var syscallError *os.SyscallError + if errors.As(netOpError.Err, &syscallError) 
{ + if strings.Contains(strings.ToLower(syscallError.Error()), "broken pipe") || + strings.Contains(strings.ToLower(syscallError.Error()), "connection reset by peer") { return true } } @@ -57,21 +65,19 @@ func isBrokenConnection(err any) bool { // because of https://github.com/golang/net/blob/39120d07d75e76f0079fe5d27480bcb965a21e4c/http2/server.go // and because it seems gin doesn't handle those neither, we need to "hand define" some errors to properly catch them - if strErr, ok := err.(error); ok { - // stolen from http2/server.go in x/net - var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") - ) + // stolen from http2/server.go in x/net + var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") + ) - if errors.Is(strErr, errClientDisconnected) || - errors.Is(strErr, errClosedBody) || - errors.Is(strErr, errHandlerComplete) || - errors.Is(strErr, errStreamClosed) { - return true - } + if errors.Is(err, errClientDisconnected) || + errors.Is(err, errClosedBody) || + errors.Is(err, errHandlerComplete) || + errors.Is(err, errStreamClosed) { + return true } return false diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 4e3f209b012..107abcbb1d0 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -642,7 +642,8 @@ func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner * break } - if sqliteErr, ok := err.(sqlite3.Error); ok { + var sqliteErr sqlite3.Error + if errors.As(err, &sqliteErr) { if sqliteErr.Code == sqlite3.ErrBusy { // sqlite3.Error{ // Code: 5, From 78f4b85311462e8f25aa79c32aa26310d38dfe00 Mon 
Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 27 Dec 2024 11:49:14 +0100 Subject: [PATCH 375/581] pkg/cwhub - refact Downloader (#3382) * pkg/cwhub - refact Downloader * single method interfaces * lint --- cmd/crowdsec-cli/clicapi/capi.go | 2 +- cmd/crowdsec-cli/cliconsole/console.go | 2 +- cmd/crowdsec-cli/clihub/hub.go | 20 +-- cmd/crowdsec-cli/cliitem/item.go | 53 ++++---- cmd/crowdsec-cli/cliitem/suggest.go | 4 +- cmd/crowdsec-cli/clilapi/context.go | 6 +- cmd/crowdsec-cli/clilapi/status.go | 2 +- cmd/crowdsec-cli/clisetup/setup.go | 8 +- cmd/crowdsec-cli/clisimulation/simulation.go | 2 +- cmd/crowdsec-cli/clisupport/support.go | 4 +- cmd/crowdsec-cli/config_backup.go | 2 +- cmd/crowdsec-cli/config_restore.go | 6 +- cmd/crowdsec-cli/require/require.go | 9 +- cmd/crowdsec/serve.go | 4 +- go.mod | 2 +- go.sum | 6 +- pkg/cwhub/cwhub_test.go | 16 +-- pkg/cwhub/doc.go | 11 +- pkg/cwhub/download.go | 124 +++++++++++++++++++ pkg/cwhub/download_test.go | 50 ++++++++ pkg/cwhub/errors.go | 19 --- pkg/cwhub/fetch.go | 44 +------ pkg/cwhub/hub.go | 11 +- pkg/cwhub/hub_test.go | 35 +++--- pkg/cwhub/itemupgrade_test.go | 8 +- pkg/cwhub/remote.go | 87 ------------- pkg/hubops/download.go | 16 +-- pkg/hubtest/hubtest.go | 8 +- pkg/hubtest/hubtest_item.go | 2 +- pkg/leakybucket/buckets_test.go | 3 +- pkg/setup/install.go | 14 ++- 31 files changed, 307 insertions(+), 273 deletions(-) create mode 100644 pkg/cwhub/download.go create mode 100644 pkg/cwhub/download_test.go delete mode 100644 pkg/cwhub/errors.go delete mode 100644 pkg/cwhub/remote.go diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 2cce01c7d3e..120acca8b59 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -261,7 +261,7 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - hub, err := 
require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 448ddcee7fa..dbbe2487cd4 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -114,7 +114,7 @@ func (cli *cliConsole) enroll(ctx context.Context, key string, name string, over } } - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 49ccd761285..9571b3d866d 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -93,7 +93,7 @@ func (cli *cliHub) newListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), log.StandardLogger()) if err != nil { return err } @@ -110,15 +110,15 @@ func (cli *cliHub) newListCmd() *cobra.Command { func (cli *cliHub) update(ctx context.Context, withContent bool) error { local := cli.cfg().Hub - remote := require.RemoteHub(ctx, cli.cfg()) - // don't use require.Hub because if there is no index file, it would fail - hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) + hub, err := cwhub.NewHub(local, log.StandardLogger()) if err != nil { return err } - if err := hub.Update(ctx, withContent); err != nil { + indexProvider := require.HubDownloader(ctx, cli.cfg()) + + if err := hub.Update(ctx, indexProvider, withContent); err != nil { return fmt.Errorf("failed to update hub: %w", err) } @@ -166,16 +166,18 @@ cscli hub update --with-content`, func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, 
log.StandardLogger()) if err != nil { return err } plan := hubops.NewActionPlan(hub) + contentProvider := require.HubDownloader(ctx, cfg) + for _, itemType := range cwhub.ItemTypes { for _, item := range hub.GetInstalledByType(itemType, true) { - plan.AddCommand(hubops.NewDownloadCommand(item, force)) + plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)) } } @@ -196,9 +198,9 @@ func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force boo func (cli *cliHub) newUpgradeCmd() *cobra.Command { var ( - yes bool + yes bool dryRun bool - force bool + force bool ) cmd := &cobra.Command{ diff --git a/cmd/crowdsec-cli/cliitem/item.go b/cmd/crowdsec-cli/cliitem/item.go index 05e52d18dd3..637bd3023cf 100644 --- a/cmd/crowdsec-cli/cliitem/item.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -71,13 +71,15 @@ func (cli cliItem) NewCommand() *cobra.Command { func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, log.StandardLogger()) if err != nil { return err } plan := hubops.NewActionPlan(hub) + contentProvider := require.HubDownloader(ctx, cfg) + for _, name := range args { item := hub.GetItem(cli.name, name) if item == nil { @@ -91,7 +93,7 @@ func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun continue } - if err = plan.AddCommand(hubops.NewDownloadCommand(item, force)); err != nil { + if err = plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { return err } @@ -121,7 +123,7 @@ func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun func (cli cliItem) newInstallCmd() *cobra.Command { var ( - yes bool + yes bool dryRun bool downloadOnly bool force bool @@ -180,6 +182,7 @@ func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge 
bool, force b if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { return nil, err } + if purge { if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { return nil, err @@ -211,12 +214,11 @@ func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force b if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { return nil, err - } + if purge { if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { return nil, err - } } } @@ -224,11 +226,10 @@ func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force b return plan, nil } - func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun bool, purge bool, force bool, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), log.StandardLogger()) if err != nil { return err } @@ -253,7 +254,7 @@ func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun b func (cli cliItem) newRemoveCmd() *cobra.Command { var ( - yes bool + yes bool dryRun bool purge bool force bool @@ -290,12 +291,12 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { return cmd } -func (cli cliItem) upgradePlan(hub *cwhub.Hub, args []string, force bool, all bool) (*hubops.ActionPlan, error) { +func (cli cliItem) upgradePlan(hub *cwhub.Hub, contentProvider cwhub.ContentProvider, args []string, force bool, all bool) (*hubops.ActionPlan, error) { plan := hubops.NewActionPlan(hub) if all { for _, item := range hub.GetInstalledByType(cli.name, true) { - if err := plan.AddCommand(hubops.NewDownloadCommand(item, force)); err != nil { + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { return nil, err } } @@ -313,7 +314,7 @@ func (cli cliItem) upgradePlan(hub *cwhub.Hub, args []string, force bool, all bo return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) } 
- if err := plan.AddCommand(hubops.NewDownloadCommand(item, force)); err != nil { + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { return nil, err } } @@ -324,12 +325,14 @@ func (cli cliItem) upgradePlan(hub *cwhub.Hub, args []string, force bool, all bo func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun bool, force bool, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, log.StandardLogger()) if err != nil { return err } - plan, err := cli.upgradePlan(hub, args, force, all) + contentProvider := require.HubDownloader(ctx, cfg) + + plan, err := cli.upgradePlan(hub, contentProvider, args, force, all) if err != nil { return err } @@ -349,10 +352,10 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun func (cli cliItem) newUpgradeCmd() *cobra.Command { var ( - yes bool + yes bool dryRun bool - all bool - force bool + all bool + force bool ) cmd := &cobra.Command{ @@ -390,13 +393,13 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff cfg.Cscli.PrometheusUrl = url } - remote := (*cwhub.RemoteHubCfg)(nil) + var contentProvider cwhub.ContentProvider if diff { - remote = require.RemoteHub(ctx, cfg) + contentProvider = require.HubDownloader(ctx, cfg) } - hub, err := require.Hub(cfg, remote, log.StandardLogger()) + hub, err := require.Hub(cfg, log.StandardLogger()) if err != nil { return err } @@ -408,7 +411,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff } if diff { - fmt.Println(cli.whyTainted(ctx, hub, item, rev)) + fmt.Println(cli.whyTainted(ctx, hub, contentProvider, item, rev)) continue } @@ -462,7 +465,7 @@ func (cli cliItem) newInspectCmd() *cobra.Command { func (cli cliItem) list(args []string, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + hub, 
err := require.Hub(cli.cfg(), log.StandardLogger()) if err != nil { return err } @@ -498,7 +501,7 @@ func (cli cliItem) newListCmd() *cobra.Command { } // return the diff between the installed version and the latest version -func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, reverse bool) (string, error) { +func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { if !item.State.Installed { return "", fmt.Errorf("'%s' is not installed", item.FQName()) } @@ -509,7 +512,7 @@ func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, reverse bool) } defer os.Remove(dest.Name()) - _, remoteURL, err := item.FetchContentTo(ctx, dest.Name()) + _, remoteURL, err := item.FetchContentTo(ctx, contentProvider, dest.Name()) if err != nil { return "", err } @@ -540,7 +543,7 @@ func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, reverse bool) return fmt.Sprintf("%s", diff), nil } -func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, reverse bool) string { +func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, item *cwhub.Item, reverse bool) string { if !item.State.Installed { return fmt.Sprintf("# %s is not installed", item.FQName()) } @@ -565,7 +568,7 @@ func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, item *cwhub.I ret = append(ret, err.Error()) } - diff, err := cli.itemDiff(ctx, sub, reverse) + diff, err := cli.itemDiff(ctx, sub, contentProvider, reverse) if err != nil { ret = append(ret, err.Error()) } diff --git a/cmd/crowdsec-cli/cliitem/suggest.go b/cmd/crowdsec-cli/cliitem/suggest.go index 5b080722af9..b0f19b6993c 100644 --- a/cmd/crowdsec-cli/cliitem/suggest.go +++ b/cmd/crowdsec-cli/cliitem/suggest.go @@ -37,7 +37,7 @@ func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) str } func compAllItems(itemType string, args []string, 
toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(cfg(), nil, nil) + hub, err := require.Hub(cfg(), nil) if err != nil { return nil, cobra.ShellCompDirectiveDefault } @@ -56,7 +56,7 @@ func compAllItems(itemType string, args []string, toComplete string, cfg configG } func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(cfg(), nil, nil) + hub, err := require.Hub(cfg(), nil) if err != nil { return nil, cobra.ShellCompDirectiveDefault } diff --git a/cmd/crowdsec-cli/clilapi/context.go b/cmd/crowdsec-cli/clilapi/context.go index 20ceb2b9596..0730ba2b2a9 100644 --- a/cmd/crowdsec-cli/clilapi/context.go +++ b/cmd/crowdsec-cli/clilapi/context.go @@ -59,7 +59,7 @@ cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user `, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } @@ -101,7 +101,7 @@ func (cli *cliLapi) newContextStatusCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } @@ -153,7 +153,7 @@ cscli lapi context detect crowdsecurity/sshd-logs return fmt.Errorf("failed to init expr helpers: %w", err) } - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clilapi/status.go b/cmd/crowdsec-cli/clilapi/status.go index 6ff88834602..039c75e585d 100644 --- a/cmd/crowdsec-cli/clilapi/status.go +++ b/cmd/crowdsec-cli/clilapi/status.go @@ -102,7 +102,7 @@ func (cli *cliLapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - hub, 
err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go index 4cb423e484c..77c357e7251 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -95,7 +95,7 @@ func (cli *cliSetup) newDetectCmd() *cobra.Command { func (cli *cliSetup) newInstallHubCmd() *cobra.Command { var ( - yes bool + yes bool dryRun bool ) @@ -289,14 +289,16 @@ func (cli *cliSetup) install(ctx context.Context, yes bool, dryRun bool, fromFil cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, log.StandardLogger()) if err != nil { return err } verbose := (cfg.Cscli.Output == "raw") - return setup.InstallHubItems(ctx, hub, input, yes, dryRun, verbose) + contentProvider := require.HubDownloader(ctx, cfg) + + return setup.InstallHubItems(ctx, hub, contentProvider, input, yes, dryRun, verbose) } func (cli *cliSetup) validate(fromFile string) error { diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index 8136aa213c3..c06db56f200 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -71,7 +71,7 @@ func (cli *cliSimulation) newEnableCmd() *cobra.Command { Example: `cscli simulation enable`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index a21d00e3b20..5f6032a17bd 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -491,9 +491,9 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { skipAgent = true } - hub, err := require.Hub(cfg, 
nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { - log.Warn("Could not init hub, running on LAPI ? Hub related information will not be collected") + log.Warn("Could not init hub, running on LAPI? Hub related information will not be collected") // XXX: lapi status check requires scenarios, will return an error } diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index d23aff80a78..faac786ebdc 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ b/cmd/crowdsec-cli/config_backup.go @@ -15,7 +15,7 @@ import ( ) func (cli *cliConfig) backupHub(dirPath string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index 8884fa448d2..b5fbf36b2b4 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -18,11 +18,13 @@ import ( func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } + contentProvider := require.HubDownloader(ctx, cfg) + for _, itype := range cwhub.ItemTypes { itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itype) if _, err = os.Stat(itemDirectory); err != nil { @@ -53,7 +55,7 @@ func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { plan := hubops.NewActionPlan(hub) - if err = plan.AddCommand(hubops.NewDownloadCommand(item, false)); err != nil { + if err = plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, false)); err != nil { return err } diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 7b3410021c1..a44e76ae47d 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -82,12 +82,11 @@ func Notifications(c *csconfig.Config) error { return nil } 
-// RemoteHub returns the configuration required to download hub index and items: url, branch, etc. -func RemoteHub(ctx context.Context, c *csconfig.Config) *cwhub.RemoteHubCfg { +func HubDownloader(ctx context.Context, c *csconfig.Config) *cwhub.Downloader { // set branch in config, and log if necessary branch := HubBranch(ctx, c) urlTemplate := HubURLTemplate(c) - remote := &cwhub.RemoteHubCfg{ + remote := &cwhub.Downloader{ Branch: branch, URLTemplate: urlTemplate, IndexPath: ".index.json", @@ -98,7 +97,7 @@ func RemoteHub(ctx context.Context, c *csconfig.Config) *cwhub.RemoteHubCfg { // Hub initializes the hub. If a remote configuration is provided, it can be used to download the index and items. // If no remote parameter is provided, the hub can only be used for local operations. -func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) (*cwhub.Hub, error) { +func Hub(c *csconfig.Config, logger *logrus.Logger) (*cwhub.Hub, error) { local := c.Hub if local == nil { @@ -110,7 +109,7 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) logger.SetOutput(io.Discard) } - hub, err := cwhub.NewHub(local, remote, logger) + hub, err := cwhub.NewHub(local, logger) if err != nil { return nil, err } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 14602c425fe..62b721befdb 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -85,7 +85,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, log.StandardLogger()) if err != nil { return nil, err } @@ -387,7 +387,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, log.StandardLogger()) if err != nil { return err } diff --git a/go.mod b/go.mod index 
e437bbd688a..aa723b38409 100644 --- a/go.mod +++ b/go.mod @@ -29,7 +29,7 @@ require ( github.com/creack/pty v1.1.21 // indirect github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241219154300-555e14e3988f + github.com/crowdsecurity/go-cs-lib v0.0.16 github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index d092956d0a8..e7f181d7d24 100644 --- a/go.sum +++ b/go.sum @@ -107,10 +107,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241203101722-e557f9809413 h1:VIedap4s3mXM4+tM2NMm7R3E/kn79ayLZaLHDqPYVCc= -github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241203101722-e557f9809413/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= -github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241219154300-555e14e3988f h1:Pd+O4UK78uQtTqbvYX+nHvqZ7TffD51uC4q0RE/podk= -github.com/crowdsecurity/go-cs-lib v0.0.16-0.20241219154300-555e14e3988f/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= +github.com/crowdsecurity/go-cs-lib v0.0.16 h1:2/htodjwc/sfsv4deX8F/2Fzg1bOI8w3O1/BPSvvsB0= +github.com/crowdsecurity/go-cs-lib v0.0.16/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 
h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 1b5dee34dd3..94a1d6ef6fd 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -57,18 +57,18 @@ func testHub(t *testing.T, update bool) *Hub { os.RemoveAll(tmpDir) }) - remote := &RemoteHubCfg{ - Branch: "master", - URLTemplate: mockURLTemplate, - IndexPath: ".index.json", - } - - hub, err := NewHub(local, remote, log.StandardLogger()) + hub, err := NewHub(local, log.StandardLogger()) require.NoError(t, err) if update { + indexProvider := &Downloader{ + Branch: "master", + URLTemplate: mockURLTemplate, + IndexPath: ".index.json", + } + ctx := context.Background() - err := hub.Update(ctx, false) + err := hub.Update(ctx, indexProvider, false) require.NoError(t, err) } diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index a1ee9d37ee7..b85d7634da4 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -85,25 +85,20 @@ // return fmt.Errorf("collection not found") // } // -// To provide the remote hub configuration, use the second parameter of NewHub(): +// Some commands require an object to provide the hub index, or contents: // -// remoteHub := cwhub.RemoteHubCfg{ +// indexProvider := cwhub.Downloader{ // URLTemplate: "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s", // Branch: "master", // IndexPath: ".index.json", // } // -// hub, err := cwhub.NewHub(localHub, remoteHub, logger) -// if err != nil { -// return fmt.Errorf("unable to initialize hub: %w", err) -// } -// // The URLTemplate is a string that will be used to build the URL of the remote hub. It must contain two // placeholders: the branch and the file path (it will be an index or an item). 
// // Before calling hub.Load(), you can update the index file by calling the Update() method: // -// err := hub.Update(context.Background()) +// err := hub.Update(context.Background(), indexProvider) // if err != nil { // return fmt.Errorf("unable to update hub index: %w", err) // } diff --git a/pkg/cwhub/download.go b/pkg/cwhub/download.go new file mode 100644 index 00000000000..48cb2382668 --- /dev/null +++ b/pkg/cwhub/download.go @@ -0,0 +1,124 @@ +package cwhub + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/downloader" +) + +// Downloader is used to retrieve index and items from a remote hub, with cache control. +type Downloader struct { + Branch string + URLTemplate string + IndexPath string +} + +// IndexProvider retrieves and writes .index.json +type IndexProvider interface { + FetchIndex(ctx context.Context, indexFile string, withContent bool, logger *logrus.Logger) (bool, error) +} + +// ContentProvider retrieves and writes the YAML files with the item content. +type ContentProvider interface { + FetchContent(ctx context.Context, remotePath, destPath, wantHash string, logger *logrus.Logger) (bool, string, error) +} + +// urlTo builds the URL to download a file from the remote hub. +func (d *Downloader) urlTo(remotePath string) (string, error) { + // the template must contain two string placeholders + if fmt.Sprintf(d.URLTemplate, "%s", "%s") != d.URLTemplate { + return "", fmt.Errorf("invalid URL template '%s'", d.URLTemplate) + } + + return fmt.Sprintf(d.URLTemplate, d.Branch, remotePath), nil +} + +// addURLParam adds a parameter with a value (ex. "with_content=true") to the URL if it's not already present. 
+func addURLParam(rawURL string, param string, value string) (string, error) { + parsedURL, err := url.Parse(rawURL) + if err != nil { + return "", fmt.Errorf("failed to parse URL: %w", err) + } + + query := parsedURL.Query() + + if _, exists := query[param]; !exists { + query.Add(param, value) + } + + parsedURL.RawQuery = query.Encode() + + return parsedURL.String(), nil +} + +// FetchIndex downloads the index from the hub and writes it to the filesystem. +// It uses a temporary file to avoid partial downloads, and won't overwrite the original +// if it has not changed. +func (d *Downloader) FetchIndex(ctx context.Context, destPath string, withContent bool, logger *logrus.Logger) (bool, error) { + url, err := d.urlTo(d.IndexPath) + if err != nil { + return false, fmt.Errorf("failed to build hub index request: %w", err) + } + + if withContent { + url, err = addURLParam(url, "with_content", "true") + if err != nil { + return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) + } + } + + downloaded, err := downloader. + New(). + WithHTTPClient(HubClient). + ToFile(destPath). + WithETagFn(downloader.SHA256). + CompareContent(). + WithLogger(logger.WithField("url", url)). + BeforeRequest(func(_ *http.Request) { + fmt.Println("Downloading " + destPath) + }). + Download(ctx, url) + if err != nil { + return false, err + } + + return downloaded, nil +} + +// FetchContent downloads the content to the specified path, through a temporary file +// to avoid partial downloads. +// If the hash does not match, it will not overwrite and log a warning. +func (d *Downloader) FetchContent(ctx context.Context, remotePath, destPath, wantHash string, logger *logrus.Logger) (bool, string, error) { + url, err := d.urlTo(remotePath) + if err != nil { + return false, "", fmt.Errorf("failed to build request: %w", err) + } + + downloaded, err := downloader. + New(). + WithHTTPClient(HubClient). + ToFile(destPath). + WithETagFn(downloader.SHA256). + WithMakeDirs(true). 
+ WithLogger(logger.WithField("url", url)). + CompareContent(). + VerifyHash("sha256", wantHash). + Download(ctx, url) + + var hasherr downloader.HashMismatchError + + switch { + case errors.As(err, &hasherr): + logger.Warnf("%s. The index file is outdated, please run 'cscli hub update' and try again", err.Error()) + case err != nil: + return false, "", err + } + + return downloaded, url, nil +} diff --git a/pkg/cwhub/download_test.go b/pkg/cwhub/download_test.go new file mode 100644 index 00000000000..ec07862abcf --- /dev/null +++ b/pkg/cwhub/download_test.go @@ -0,0 +1,50 @@ +package cwhub + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFetchIndex(t *testing.T) { + ctx := context.Background() + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Query().Get("with_content") == "true" { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`Hi I'm an index with content`)) + } else { + w.WriteHeader(http.StatusOK) + w.Write([]byte(`Hi I'm a regular index`)) + } + })) + defer mockServer.Close() + + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", + IndexPath: "index.txt", + } + + logger := logrus.New() + logger.Out = io.Discard + + destPath := filepath.Join(t.TempDir(), "index.txt") + withContent := true + + downloaded, err := downloader.FetchIndex(ctx, destPath, withContent, logger) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err := os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, "Hi I'm an index with content", string(content)) +} diff --git a/pkg/cwhub/errors.go b/pkg/cwhub/errors.go deleted file mode 100644 index b0be444fcba..00000000000 --- a/pkg/cwhub/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package cwhub - -import ( - "errors" - "fmt" -) - -// 
ErrNilRemoteHub is returned when trying to download with a local-only configuration. -var ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") - -// IndexNotFoundError is returned when the remote hub index is not found. -type IndexNotFoundError struct { - URL string - Branch string -} - -func (e IndexNotFoundError) Error() string { - return fmt.Sprintf("index not found at %s, branch '%s'. Please check the .cscli.hub_branch value if you specified it in config.yaml, or use 'master' if not sure", e.URL, e.Branch) -} diff --git a/pkg/cwhub/fetch.go b/pkg/cwhub/fetch.go index 92198e63ef1..dd1a520d7e2 100644 --- a/pkg/cwhub/fetch.go +++ b/pkg/cwhub/fetch.go @@ -1,24 +1,15 @@ package cwhub -// Install, upgrade and remove items from the hub to the local configuration - import ( "context" "crypto" "encoding/base64" "encoding/hex" - "errors" "fmt" "os" "path/filepath" - - "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/downloader" - ) - // writeEmbeddedContentTo writes the embedded content to the specified path and checks the hash. // If the content is base64 encoded, it will be decoded before writing. Check for item.Content // before calling this method. @@ -56,40 +47,9 @@ func (i *Item) writeEmbeddedContentTo(destPath, wantHash string) error { return nil } -// writeRemoteContentTo downloads the content to the specified path and checks the hash. -func (i *Item) writeRemoteContentTo(ctx context.Context, destPath, wantHash string) (bool, string, error) { - url, err := i.hub.remote.urlTo(i.RemotePath) - if err != nil { - return false, "", fmt.Errorf("failed to build request: %w", err) - } - - d := downloader. - New(). - WithHTTPClient(HubClient). - ToFile(destPath). - WithETagFn(downloader.SHA256). - WithMakeDirs(true). - WithLogger(logrus.WithField("url", url)). - CompareContent(). 
- VerifyHash("sha256", wantHash) - - hasherr := downloader.HashMismatchError{} - - downloaded, err := d.Download(ctx, url) - - switch { - case errors.As(err, &hasherr): - i.hub.logger.Warnf("%s. The index file is outdated, please run 'cscli hub update' and try again", err.Error()) - case err != nil: - return false, "", err - } - - return downloaded, url, nil -} - // FetchContentTo writes the last version of the item's YAML file to the specified path. // Returns whether the file was downloaded, and the remote url for feedback purposes. -func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { +func (i *Item) FetchContentTo(ctx context.Context, contentProvider ContentProvider, destPath string) (bool, string, error) { wantHash := i.latestHash() if wantHash == "" { return false, "", fmt.Errorf("%s: latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again", i.FQName()) @@ -104,5 +64,5 @@ func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, strin return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil } - return i.writeRemoteContentTo(ctx, destPath, wantHash) + return contentProvider.FetchContent(ctx, i.RemotePath, destPath, wantHash, i.hub.logger) } diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 55469fed711..3722ceaafcd 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -22,7 +22,6 @@ type Hub struct { items HubItems // Items read from HubDir and InstallDir pathIndex map[string]*Item local *csconfig.LocalHubCfg - remote *RemoteHubCfg logger *logrus.Logger Warnings []string // Warnings encountered during sync } @@ -35,8 +34,7 @@ func (h *Hub) GetDataDir() string { // NewHub returns a new Hub instance with local and (optionally) remote configuration. // The hub is not synced automatically. Load() must be called to read the index, sync the local state, // and check for unmanaged items. 
-// All download operations (including updateIndex) return ErrNilRemoteHub if the remote configuration is not set. -func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, logger *logrus.Logger) (*Hub, error) { +func NewHub(local *csconfig.LocalHubCfg, logger *logrus.Logger) (*Hub, error) { if local == nil { return nil, errors.New("no hub configuration found") } @@ -48,7 +46,6 @@ func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, logger *logrus.Lo hub := &Hub{ local: local, - remote: remote, logger: logger, pathIndex: make(map[string]*Item, 0), } @@ -158,13 +155,13 @@ func (h *Hub) ItemStats() []string { // Update downloads the latest version of the index and writes it to disk if it changed. // It cannot be called after Load() unless the hub is completely empty. -func (h *Hub) Update(ctx context.Context, withContent bool) error { +func (h *Hub) Update(ctx context.Context, indexProvider IndexProvider, withContent bool) error { if len(h.pathIndex) > 0 { // if this happens, it's a bug. 
return errors.New("cannot update hub after items have been loaded") } - downloaded, err := h.remote.fetchIndex(ctx, h.local.HubIndexFile, withContent) + downloaded, err := indexProvider.FetchIndex(ctx, h.local.HubIndexFile, withContent, h.logger) if err != nil { return err } @@ -238,6 +235,7 @@ func (h *Hub) GetItemsByType(itemType string, sorted bool) []*Item { } idx := 0 + for _, item := range items { ret[idx] = item idx += 1 @@ -269,6 +267,7 @@ func (h *Hub) GetInstalledListForAPI() []string { ret := make([]string, len(scenarios)+len(appsecRules)) idx := 0 + for _, item := range scenarios { ret[idx] = item.Name idx += 1 diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 727b9a18fdf..c2b949b7cdf 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -13,18 +13,19 @@ import ( func TestInitHubUpdate(t *testing.T) { hub := envSetup(t) - remote := &RemoteHubCfg{ - URLTemplate: mockURLTemplate, - Branch: "master", - IndexPath: ".index.json", - } - _, err := NewHub(hub.local, remote, nil) + _, err := NewHub(hub.local, nil) require.NoError(t, err) ctx := context.Background() - err = hub.Update(ctx, false) + indexProvider := &Downloader{ + URLTemplate: mockURLTemplate, + Branch: "master", + IndexPath: ".index.json", + } + + err = hub.Update(ctx, indexProvider, false) require.NoError(t, err) err = hub.Load() @@ -48,29 +49,29 @@ func TestUpdateIndex(t *testing.T) { hub := envSetup(t) - hub.remote = &RemoteHubCfg{ + hub.local.HubIndexFile = tmpIndex.Name() + + ctx := context.Background() + + indexProvider := &Downloader{ URLTemplate: "x", Branch: "", IndexPath: "", } - hub.local.HubIndexFile = tmpIndex.Name() - - ctx := context.Background() - - err = hub.Update(ctx, false) + err = hub.Update(ctx, indexProvider, false) cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") // bad domain fmt.Println("Test 'bad domain'") - hub.remote = &RemoteHubCfg{ + indexProvider = &Downloader{ URLTemplate: 
"https://baddomain/crowdsecurity/%s/%s", Branch: "master", IndexPath: ".index.json", } - err = hub.Update(ctx, false) + err = hub.Update(ctx, indexProvider, false) require.NoError(t, err) // XXX: this is not failing // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") @@ -78,7 +79,7 @@ func TestUpdateIndex(t *testing.T) { // bad target path fmt.Println("Test 'bad target path'") - hub.remote = &RemoteHubCfg{ + indexProvider = &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", IndexPath: ".index.json", @@ -86,6 +87,6 @@ func TestUpdateIndex(t *testing.T) { hub.local.HubIndexFile = "/does/not/exist/index.json" - err = hub.Update(ctx, false) + err = hub.Update(ctx, indexProvider, false) cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") } diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index e523a222d69..da02837e972 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -38,7 +38,7 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { // collection receives an update. It now adds new scenario "crowdsecurity/barfoo_scenario" pushUpdateToCollectionInHub() - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", IndexPath: ".index.json", @@ -98,7 +98,7 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { require.NoError(t, err) require.True(t, didRemove) - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", IndexPath: ".index.json", @@ -132,7 +132,7 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { } // getHubOrFail refreshes the hub state (load index, sync) and returns the singleton, or fails the test. 
-func getHubOrFail(t *testing.T, local *csconfig.LocalHubCfg, remote *RemoteHubCfg) *Hub { +func getHubOrFail(t *testing.T, local *csconfig.LocalHubCfg, remote *Downloader) *Hub { hub, err := NewHub(local, remote, nil) require.NoError(t, err) @@ -170,7 +170,7 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te require.NoError(t, err) require.True(t, didRemove) - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", IndexPath: ".index.json", diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go deleted file mode 100644 index c96471b390c..00000000000 --- a/pkg/cwhub/remote.go +++ /dev/null @@ -1,87 +0,0 @@ -package cwhub - -import ( - "context" - "fmt" - "net/http" - "net/url" - - "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/downloader" -) - -// RemoteHubCfg is used to retrieve index and items from the remote hub. -type RemoteHubCfg struct { - Branch string - URLTemplate string - IndexPath string -} - -// urlTo builds the URL to download a file from the remote hub. -func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { - if r == nil { - return "", ErrNilRemoteHub - } - - // the template must contain two string placeholders - if fmt.Sprintf(r.URLTemplate, "%s", "%s") != r.URLTemplate { - return "", fmt.Errorf("invalid URL template '%s'", r.URLTemplate) - } - - return fmt.Sprintf(r.URLTemplate, r.Branch, remotePath), nil -} - -// addURLParam adds a parameter with a value (ex. "with_content=true") to the URL if it's not already present. 
-func addURLParam(rawURL string, param string, value string) (string, error) { - parsedURL, err := url.Parse(rawURL) - if err != nil { - return "", fmt.Errorf("failed to parse URL: %w", err) - } - - query := parsedURL.Query() - - if _, exists := query[param]; !exists { - query.Add(param, value) - } - - parsedURL.RawQuery = query.Encode() - - return parsedURL.String(), nil -} - -// fetchIndex downloads the index from the hub and returns the content. -func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string, withContent bool) (bool, error) { - if r == nil { - return false, ErrNilRemoteHub - } - - url, err := r.urlTo(r.IndexPath) - if err != nil { - return false, fmt.Errorf("failed to build hub index request: %w", err) - } - - if withContent { - url, err = addURLParam(url, "with_content", "true") - if err != nil { - return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) - } - } - - downloaded, err := downloader. - New(). - WithHTTPClient(HubClient). - ToFile(destPath). - WithETagFn(downloader.SHA256). - CompareContent(). - WithLogger(logrus.WithField("url", url)). - BeforeRequest(func(_ *http.Request) { - fmt.Println("Downloading "+destPath) - }). - Download(ctx, url) - if err != nil { - return false, err - } - - return downloaded, nil -} diff --git a/pkg/hubops/download.go b/pkg/hubops/download.go index 4a722efdb77..72aed542115 100644 --- a/pkg/hubops/download.go +++ b/pkg/hubops/download.go @@ -9,9 +9,9 @@ import ( "os" "time" + "github.com/fatih/color" "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" - "github.com/fatih/color" "github.com/crowdsecurity/go-cs-lib/downloader" @@ -19,19 +19,19 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) - // DownloadCommand handles the downloading of hub items. // It ensures that items are fetched from the hub (or from the index file if it also has content) // managing dependencies and verifying the integrity of downloaded content. 
// This is used by "cscli install" and "cscli upgrade". // Tainted items require the force parameter, local items are skipped. type DownloadCommand struct { - Item *cwhub.Item - Force bool + Item *cwhub.Item + Force bool + contentProvider cwhub.ContentProvider } -func NewDownloadCommand(item *cwhub.Item, force bool) *DownloadCommand { - return &DownloadCommand{Item: item, Force: force} +func NewDownloadCommand(item *cwhub.Item, contentProvider cwhub.ContentProvider, force bool) *DownloadCommand { + return &DownloadCommand{Item: item, Force: force, contentProvider: contentProvider} } func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { @@ -60,7 +60,7 @@ func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { } for sub := range i.LatestDependencies().SubItems(plan.hub) { - if err := plan.AddCommand(NewDownloadCommand(sub, c.Force)); err != nil { + if err := plan.AddCommand(NewDownloadCommand(sub, c.contentProvider, c.Force)); err != nil { return false, err } @@ -158,7 +158,7 @@ func (c *DownloadCommand) Run(ctx context.Context, plan *ActionPlan) error { return err } - downloaded, _, err := i.FetchContentTo(ctx, finalPath) + downloaded, _, err := i.FetchContentTo(ctx, c.contentProvider, finalPath) if err != nil { return fmt.Errorf("%s: %w", i.FQName(), err) } diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index 93f5abaa879..ce9efcec601 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -14,8 +14,8 @@ type HubTest struct { CrowdSecPath string CscliPath string HubPath string - HubTestPath string //generic parser/scenario tests .tests - HubAppsecTestPath string //dir specific to appsec tests .appsec-tests + HubTestPath string // generic parser/scenario tests .tests + HubAppsecTestPath string // dir specific to appsec tests .appsec-tests HubIndexFile string TemplateConfigPath string TemplateProfilePath string @@ -93,7 +93,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT 
InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, nil) + hub, err := cwhub.NewHub(local, nil) if err != nil { return HubTest{}, err } @@ -130,7 +130,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, nil) + hub, err := cwhub.NewHub(local, nil) if err != nil { return HubTest{}, err } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index d999d15ba6e..e8bc56f650a 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -212,7 +212,7 @@ func (t *HubTestItem) InstallHub() error { } // load installed hub - hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, nil) + hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil) if err != nil { return err } diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 8bb7a3d4c47..90a751160cb 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -46,7 +46,7 @@ func TestBucket(t *testing.T) { InstallDataDir: testdata, } - hub, err := cwhub.NewHub(hubCfg, nil, nil) + hub, err := cwhub.NewHub(hubCfg, nil) require.NoError(t, err) err = hub.Load() @@ -140,6 +140,7 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb *tomb.Tomb) er } scenarios := []*cwhub.Item{} + for _, x := range stages { // XXX: LoadBuckets should take an interface, BucketProvider ScenarioProvider or w/e item := &cwhub.Item{ diff --git a/pkg/setup/install.go b/pkg/setup/install.go index dcefe744a76..42634672912 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -48,7 +48,7 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { } // InstallHubItems installs the objects recommended in a setup file. 
-func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, yes, dryRun, verbose bool) error { +func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, input []byte, yes, dryRun, verbose bool) error { setupEnvelope, err := decodeSetup(input, false) if err != nil { return err @@ -71,7 +71,8 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, yes, dry return fmt.Errorf("collection %s not found", collection) } - plan.AddCommand(hubops.NewDownloadCommand(item, forceAction)) + plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)) + if !downloadOnly { plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) } @@ -83,7 +84,8 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, yes, dry return fmt.Errorf("parser %s not found", parser) } - plan.AddCommand(hubops.NewDownloadCommand(item, forceAction)) + plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)) + if !downloadOnly { plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) } @@ -95,7 +97,8 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, yes, dry return fmt.Errorf("scenario %s not found", scenario) } - plan.AddCommand(hubops.NewDownloadCommand(item, forceAction)) + plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)) + if !downloadOnly { plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) } @@ -107,7 +110,8 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, yes, dry return fmt.Errorf("postoverflow %s not found", postoverflow) } - plan.AddCommand(hubops.NewDownloadCommand(item, forceAction)) + plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)) + if !downloadOnly { plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) } From 5c0c4f9fa60ff60f361a4af9232e53ab2ec0b03f Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 27 Dec 2024 15:20:23 +0000 
Subject: [PATCH 376/581] enhancement: add logdata to appsec AccumlateTxToEvent (#3383) --- pkg/acquisition/modules/appsec/utils.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 8995b305680..65bb4601c21 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -296,6 +296,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR "hash": hash, "version": version, "matched_zones": matchedZones, + "logdata": rule.Data(), } evt.Appsec.MatchedRules = append(evt.Appsec.MatchedRules, corazaRule) } From 4e6e6dec65deea66c4c355fe1ce99782a5471dd9 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 Jan 2025 12:33:54 +0100 Subject: [PATCH 377/581] lint: explicit error checks (#3388) * errcheck: tests * fflag errcheck * http_test errcheck (avoid duplicate metric registration) --- cmd/crowdsec-cli/clihub/hub.go | 8 +- pkg/acquisition/modules/file/file_test.go | 18 ++-- pkg/acquisition/modules/http/http_test.go | 89 +++++++++++-------- .../modules/journalctl/journalctl_test.go | 4 +- pkg/acquisition/modules/kafka/kafka_test.go | 6 +- .../modules/kinesis/kinesis_test.go | 12 ++- .../modules/kubernetesaudit/k8s_audit_test.go | 6 +- pkg/acquisition/modules/syslog/syslog_test.go | 4 +- pkg/apiclient/alerts_service_test.go | 18 ++-- pkg/apiclient/auth_key_test.go | 6 +- pkg/apiclient/client_http_test.go | 4 +- pkg/apiclient/client_test.go | 24 +++-- pkg/apiclient/decisions_service_test.go | 31 ++++--- pkg/apiserver/alerts_test.go | 3 +- pkg/csprofiles/csprofiles_test.go | 3 +- pkg/cwhub/download_test.go | 6 +- pkg/fflag/features_test.go | 6 +- pkg/parser/whitelist_test.go | 4 +- pkg/setup/install.go | 32 +++++-- 19 files changed, 185 insertions(+), 99 deletions(-) diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 9571b3d866d..36e851d1b74 100644 --- 
a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -177,11 +177,15 @@ func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force boo for _, itemType := range cwhub.ItemTypes { for _, item := range hub.GetInstalledByType(itemType, true) { - plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return err + } } } - plan.AddCommand(hubops.NewDataRefreshCommand(force)) + if err := plan.AddCommand(hubops.NewDataRefreshCommand(force)); err != nil { + return err + } verbose := (cfg.Cscli.Output == "raw") diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index a26e44cc9c7..b9c6e65d8ce 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -333,14 +333,19 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "GlobInotifyChmod", afterConfigure: func() { - f, _ := os.Create("test_files/a.log") - f.Close() + f, err := os.Create("test_files/a.log") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) time.Sleep(1 * time.Second) - os.Chmod("test_files/a.log", 0o000) + err = os.Chmod("test_files/a.log", 0o000) + require.NoError(t, err) }, teardown: func() { - os.Chmod("test_files/a.log", 0o644) - os.Remove("test_files/a.log") + err := os.Chmod("test_files/a.log", 0o644) + require.NoError(t, err) + err = os.Remove("test_files/a.log") + require.NoError(t, err) }, }, { @@ -353,7 +358,8 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "InotifyMkDir", afterConfigure: func() { - os.Mkdir("test_files/pouet/", 0o700) + err := os.Mkdir("test_files/pouet/", 0o700) + require.NoError(t, err) }, teardown: func() { os.Remove("test_files/pouet/") diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go index 4d99134419f..c76f311d669 100644 --- 
a/pkg/acquisition/modules/http/http_test.go +++ b/pkg/acquisition/modules/http/http_test.go @@ -218,7 +218,7 @@ func TestGetName(t *testing.T) { assert.Equal(t, "http", h.GetName()) } -func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *tomb.Tomb) { +func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *prometheus.Registry, *tomb.Tomb) { ctx := context.Background() subLogger := log.WithFields(log.Fields{ "type": "http", @@ -230,16 +230,18 @@ func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLev err = h.StreamingAcquisition(ctx, out, &tomb) require.NoError(t, err) + testRegistry := prometheus.NewPedanticRegistry() for _, metric := range h.GetMetrics() { - prometheus.Register(metric) + err = testRegistry.Register(metric) + require.NoError(t, err) } - return out, &tomb + return out, testRegistry, &tomb } func TestStreamingAcquisitionWrongHTTPMethod(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb:= SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -256,12 +258,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionUnknownPath(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -278,12 +281,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionBasicAuth(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -310,12 +314,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = 
tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionBadHeaders(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -337,12 +342,13 @@ headers: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionMaxBodySize(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -365,12 +371,13 @@ max_body_size: 5`), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionSuccess(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -396,16 +403,17 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 1) + assertMetrics(t, reg, h.GetMetrics(), 1) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionCustomStatusCodeAndCustomHeaders(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -435,11 +443,12 @@ custom_headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 1) + assertMetrics(t, reg, h.GetMetrics(), 1) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } type slowReader struct { @@ -495,7 +504,7 @@ func assertEvents(out chan types.Event, expected []string, errChan chan error) { func TestStreamingAcquisitionTimeout(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, 
[]byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -525,12 +534,13 @@ timeout: 1s`), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionTLSHTTPRequest(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 auth_type: mtls @@ -549,12 +559,13 @@ tls: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionTLSWithHeadersAuthSuccess(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -600,16 +611,17 @@ tls: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 0) + assertMetrics(t, reg, h.GetMetrics(), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionMTLS(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -657,16 +669,17 @@ tls: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 0) + assertMetrics(t, reg, h.GetMetrics(), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionGzipData(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -709,16 +722,17 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 2) + assertMetrics(t, reg, h.GetMetrics(), 2) h.Server.Close() tomb.Kill(nil) - 
tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionNDJson(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -747,15 +761,16 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 2) + assertMetrics(t, reg, h.GetMetrics(), 2) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } -func assertMetrics(t *testing.T, metrics []prometheus.Collector, expected int) { - promMetrics, err := prometheus.DefaultGatherer.Gather() +func assertMetrics(t *testing.T, reg *prometheus.Registry, metrics []prometheus.Collector, expected int) { + promMetrics, err := reg.Gather() require.NoError(t, err) isExist := false diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index fedbed6b707..48b034f41c6 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -268,7 +269,8 @@ journalctl_filter: } tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) output, _ := exec.Command("pgrep", "-x", "journalctl").CombinedOutput() if len(output) != 0 { diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index d796166a6ca..2f3361c4f6b 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -194,7 +194,8 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } require.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - 
tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } @@ -271,7 +272,8 @@ tls: } require.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 778dda4a681..3f6d780b192 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -63,7 +63,8 @@ func GenSubObject(t *testing.T, i int) []byte { var b bytes.Buffer gz := gzip.NewWriter(&b) - gz.Write(body) + _, err = gz.Write(body) + require.NoError(t, err) gz.Close() // AWS actually base64 encodes the data, but it looks like kinesis automatically decodes it at some point // localstack does not do it, so let's just write a raw gzipped stream @@ -198,7 +199,8 @@ stream_name: stream-1-shard`, } tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } @@ -246,7 +248,8 @@ stream_name: stream-2-shards`, } assert.Equal(t, test.count, c) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } @@ -290,7 +293,8 @@ from_subscription: true`, assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) } tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index a086a756e4a..bf8a8cea02c 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -85,7 +85,8 @@ webhook_path: /k8s-audit`, err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) - f.StreamingAcquisition(ctx, out, tb) + err = f.StreamingAcquisition(ctx, out, tb) + require.NoError(t, err) time.Sleep(1 * time.Second) tb.Kill(nil) @@ -260,7 +261,8 @@ webhook_path: /k8s-audit`, req := httptest.NewRequest(test.method, 
"/k8s-audit", strings.NewReader(test.body)) w := httptest.NewRecorder() - f.StreamingAcquisition(ctx, out, tb) + err = f.StreamingAcquisition(ctx, out, tb) + require.NoError(t, err) f.webhookHandler(w, req) diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 57fa3e8747b..3008ba5507b 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -168,7 +169,8 @@ listen_addr: 127.0.0.1`, } assert.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 0d1ff41685f..9df633fa8be 100644 --- a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -23,7 +23,8 @@ func TestAlertsListAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -202,7 +203,8 @@ func TestAlertsGetAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -368,13 +370,15 @@ func TestAlertsCreateAsMachine(t 
*testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`["3"]`)) + _, err := w.Write([]byte(`["3"]`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -408,14 +412,16 @@ func TestAlertsDeleteAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"message":"0 deleted alerts"}`)) + _, err := w.Write([]byte(`{"message":"0 deleted alerts"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) diff --git a/pkg/apiclient/auth_key_test.go b/pkg/apiclient/auth_key_test.go index f686de6227a..b7cce3e15c9 100644 --- a/pkg/apiclient/auth_key_test.go +++ b/pkg/apiclient/auth_key_test.go @@ -24,10 +24,12 @@ func TestApiAuth(t *testing.T) { if r.Header.Get("X-Api-Key") == "ixu" { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`null`)) + _, err := w.Write([]byte(`null`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusForbidden) - w.Write([]byte(`{"message":"access forbidden"}`)) + _, err := w.Write([]byte(`{"message":"access forbidden"}`)) + 
assert.NoError(t, err) } }) diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index 45cd8410a8e..0d6cf3d993e 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -31,7 +32,8 @@ func TestNewRequestInvalid(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index 327bf8fbd9f..c172849c21e 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -101,7 +101,8 @@ func TestNewClientOk(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -138,7 +139,8 @@ func TestNewClientOk_UnixSocket(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -174,7 +176,8 @@ func 
TestNewClientKo(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -200,7 +203,8 @@ func TestNewDefaultClient(t *testing.T) { mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + assert.NoError(t, err) }) _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) @@ -228,7 +232,8 @@ func TestNewDefaultClient_UnixSocket(t *testing.T) { mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + assert.NoError(t, err) }) _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) @@ -266,7 +271,8 @@ func TestNewClientRegisterOK(t *testing.T) { mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx + "/") @@ -298,7 +304,8 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": 
"2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx) @@ -331,7 +338,8 @@ func TestNewClientBadAnswer(t *testing.T) { mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`bad`)) + _, err := w.Write([]byte(`bad`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx + "/") diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index 942d14689ff..b8bc327a7d7 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -31,11 +31,12 @@ func TestDecisionsList(t *testing.T) { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) w.WriteHeader(http.StatusOK) - w.Write([]byte(`[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]`)) + _, err := w.Write([]byte(`[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`null`)) - // no results + _, err := w.Write([]byte(`null`)) + assert.NoError(t, err) } }) @@ -90,10 +91,12 @@ func TestDecisionsStream(t *testing.T) { if r.Method == http.MethodGet { if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) + _, err := w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from 
'82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":null}`)) + _, err := w.Write([]byte(`{"deleted":null,"new":null}`)) + assert.NoError(t, err) } } }) @@ -163,10 +166,12 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { if r.Method == http.MethodGet { if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}],"new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}]}`)) + _, err := w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}],"new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}]}`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":null}`)) + _, err := w.Write([]byte(`{"deleted":null,"new":null}`)) + assert.NoError(t, err) } } }) @@ -227,9 +232,10 @@ func TestDecisionsStreamV3(t *testing.T) { if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], + _, err := w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], "new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}], "links": {"blocklists":[{"name":"blocklist1","url":"/v3/blocklist","scope":"ip","remediation":"ban","duration":"24h"}]}}`)) + assert.NoError(t, err) } }) @@ -303,7 +309,8 @@ func TestDecisionsFromBlocklist(t *testing.T) { if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) - w.Write([]byte("1.2.3.4\r\n1.2.3.5")) + _, err := 
w.Write([]byte("1.2.3.4\r\n1.2.3.5")) + assert.NoError(t, err) } }) @@ -388,14 +395,16 @@ func TestDeleteDecisions(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"nbDeleted":"1"}`)) + _, err := w.Write([]byte(`{"nbDeleted":"1"}`)) + assert.NoError(t, err) // w.Write([]byte(`{"message":"0 deleted alerts"}`)) }) diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 4c5c6ef129c..c4edb42d475 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -142,7 +142,8 @@ func TestCreateAlertChannels(t *testing.T) { ctx := context.Background() apiServer, config := NewAPIServer(t, ctx) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) - apiServer.InitController() + err := apiServer.InitController() + require.NoError(t, err) loginResp := LoginToTestAPI(t, ctx, apiServer.router, config) lapi := LAPI{router: apiServer.router, loginResp: loginResp} diff --git a/pkg/csprofiles/csprofiles_test.go b/pkg/csprofiles/csprofiles_test.go index d09bf25d95b..dc3239fe5c1 100644 --- a/pkg/csprofiles/csprofiles_test.go +++ b/pkg/csprofiles/csprofiles_test.go @@ -119,7 +119,8 @@ func TestEvaluateProfile(t *testing.T) { Alert *models.Alert } - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { name string diff --git a/pkg/cwhub/download_test.go b/pkg/cwhub/download_test.go index ec07862abcf..fc0b257a284 100644 --- a/pkg/cwhub/download_test.go +++ 
b/pkg/cwhub/download_test.go @@ -20,10 +20,12 @@ func TestFetchIndex(t *testing.T) { mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Query().Get("with_content") == "true" { w.WriteHeader(http.StatusOK) - w.Write([]byte(`Hi I'm an index with content`)) + _, err := w.Write([]byte(`Hi I'm an index with content`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`Hi I'm a regular index`)) + _, err := w.Write([]byte(`Hi I'm a regular index`)) + assert.NoError(t, err) } })) defer mockServer.Close() diff --git a/pkg/fflag/features_test.go b/pkg/fflag/features_test.go index 481e86573e8..144e7049362 100644 --- a/pkg/fflag/features_test.go +++ b/pkg/fflag/features_test.go @@ -376,11 +376,13 @@ func TestGetEnabledFeatures(t *testing.T) { feat1, err := fr.GetFeature("new_standard") require.NoError(t, err) - feat1.Set(true) + err = feat1.Set(true) + require.Error(t, err, "the flag is deprecated") feat2, err := fr.GetFeature("experimental1") require.NoError(t, err) - feat2.Set(true) + err = feat2.Set(true) + require.NoError(t, err) expected := []string{ "experimental1", diff --git a/pkg/parser/whitelist_test.go b/pkg/parser/whitelist_test.go index 02846f17fc1..a3b95b2fa3f 100644 --- a/pkg/parser/whitelist_test.go +++ b/pkg/parser/whitelist_test.go @@ -284,9 +284,9 @@ func TestWhitelistCheck(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var err error node.Whitelist = tt.whitelist - node.CompileWLs() + _, err := node.CompileWLs() + require.NoError(t, err) isWhitelisted := node.CheckIPsWL(tt.event) if !isWhitelisted { isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event}, tt.event) diff --git a/pkg/setup/install.go b/pkg/setup/install.go index 42634672912..3d1540f23be 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -71,10 +71,14 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub. 
return fmt.Errorf("collection %s not found", collection) } - plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err + } if !downloadOnly { - plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } } @@ -84,10 +88,14 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub. return fmt.Errorf("parser %s not found", parser) } - plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err + } if !downloadOnly { - plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } } @@ -97,10 +105,14 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub. return fmt.Errorf("scenario %s not found", scenario) } - plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err + } if !downloadOnly { - plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } } @@ -110,10 +122,14 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub. 
return fmt.Errorf("postoverflow %s not found", postoverflow) } - plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err + } if !downloadOnly { - plan.AddCommand(hubops.NewEnableCommand(item, forceAction)) + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } } } From f938b0c602ca524f5897ef0f381778c4e9bb2dcc Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Thu, 2 Jan 2025 14:21:03 +0000 Subject: [PATCH 378/581] enhancement: Hubtest respect patterndir option set via config.yaml (#3386) * enhancement: Hubtest respect patterndir option set via config.yaml * pass patternDir to Run() --------- Co-authored-by: marco --- cmd/crowdsec-cli/clihubtest/explain.go | 7 +++++-- cmd/crowdsec-cli/clihubtest/run.go | 4 +++- cmd/crowdsec-cli/main.go | 1 - pkg/hubtest/hubtest.go | 4 ++-- pkg/hubtest/hubtest_item.go | 18 +++++++----------- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index dbe10fa7ec0..877aec98a37 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -14,9 +14,12 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error return fmt.Errorf("can't load test: %+v", err) } + cfg := cli.cfg() + patternDir := cfg.ConfigPaths.PatternDir + err = test.ParserAssert.LoadTest(test.ParserResultFile) if err != nil { - if err = test.Run(); err != nil { + if err = test.Run(patternDir); err != nil { return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) } @@ -27,7 +30,7 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) if err != nil { - if err = test.Run(); err != nil { + if err = 
test.Run(patternDir); err != nil { return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) } diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 31cceb81884..94a3b0c10f3 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -42,12 +42,14 @@ func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost stri // set timezone to avoid DST issues os.Setenv("TZ", "UTC") + patternDir := cfg.ConfigPaths.PatternDir + for _, test := range hubPtr.Tests { if cfg.Cscli.Output == "human" { log.Infof("Running test '%s'", test.Name) } - err := test.Run() + err := test.Run(patternDir) if err != nil { log.Errorf("running test '%s' failed: %+v", test.Name, err) } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 87e9d82fea2..936211be7ff 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -91,7 +91,6 @@ func loadConfigFor(command string) (*csconfig.Config, string, error) { "help", "completion", "version", - "hubtest", } if !slices.Contains(noNeedConfig, command) { diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index ce9efcec601..6e5a11fff10 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -25,8 +25,8 @@ type HubTest struct { NucleiTargetHost string AppSecHost string - HubIndex *cwhub.Hub - Tests []*HubTestItem + HubIndex *cwhub.Hub + Tests []*HubTestItem } const ( diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index e8bc56f650a..bf029b0e835 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -382,7 +382,7 @@ func createDirs(dirs []string) error { return nil } -func (t *HubTestItem) RunWithLogFile() error { +func (t *HubTestItem) RunWithLogFile(patternDir string) error { testPath := filepath.Join(t.HubTestPath, t.Name) if _, err := os.Stat(testPath); os.IsNotExist(err) { return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) @@ 
-417,11 +417,9 @@ func (t *HubTestItem) RunWithLogFile() error { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) } - crowdsecPatternsFolder := csconfig.DefaultConfigPath("patterns") - // copy template patterns folder to runtime folder - if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + if err = CopyDir(patternDir, t.RuntimePatternsPath); err != nil { + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", patternDir, t.RuntimePatternsPath, err) } // install the hub in the runtime folder @@ -566,7 +564,7 @@ func (t *HubTestItem) RunWithLogFile() error { return nil } -func (t *HubTestItem) Run() error { +func (t *HubTestItem) Run(patternDir string) error { var err error t.Success = false @@ -596,11 +594,9 @@ func (t *HubTestItem) Run() error { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) } - crowdsecPatternsFolder := csconfig.DefaultConfigPath("patterns") - // copy template patterns folder to runtime folder - if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + if err = CopyDir(patternDir, t.RuntimePatternsPath); err != nil { + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", patternDir, t.RuntimePatternsPath, err) } // create the appsec-configs dir @@ -634,7 +630,7 @@ func (t *HubTestItem) Run() error { } if t.Config.LogFile != "" { - return t.RunWithLogFile() + return t.RunWithLogFile(patternDir) } else if t.Config.NucleiTemplate != "" { return t.RunWithNucleiTemplate() } From 90f7c56aabe86ea25f2bcf4b2c4109609800c97d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: 
Thu, 2 Jan 2025 16:41:11 +0100 Subject: [PATCH 379/581] wizard: unattended install w/ new hubops (#3392) --- wizard.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/wizard.sh b/wizard.sh index 6e215365f6c..57311d40cdb 100755 --- a/wizard.sh +++ b/wizard.sh @@ -262,20 +262,26 @@ install_collection() { fi done + local YES="" + if [[ ${SILENT} == "false" ]]; then COLLECTION_TO_INSTALL=($(whiptail --separate-output --ok-button Continue --title "Crowdsec collections" --checklist "Available collections in crowdsec, try to pick one that fits your profile. Collections contains parsers and scenarios to protect your system." 20 120 10 "${HMENU[@]}" 3>&1 1>&2 2>&3)) if [ $? -eq 1 ]; then log_err "user bailed out at collection selection" exit 1; fi; + else + YES="--yes" fi; for collection in "${COLLECTION_TO_INSTALL[@]}"; do log_info "Installing collection '${collection}'" - ${CSCLI_BIN_INSTALLED} collections install "${collection}" --error + # shellcheck disable=SC2248 + ${CSCLI_BIN_INSTALLED} collections install "${collection}" --error ${YES} done - ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" --error + # shellcheck disable=SC2248 + ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" --error ${YES} if [[ ${SILENT} == "false" ]]; then whiptail --msgbox "Out of safety, I installed a parser called 'crowdsecurity/whitelists'. This one will prevent private IP addresses from being banned, feel free to remove it any time." 
20 50 fi From 5c7b957a34fd4e743f5127e85974c75a51f8df02 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 2 Jan 2025 16:58:03 +0100 Subject: [PATCH 380/581] cscli: restyle table titles; autocomplete "cscli metrics show" (#3391) * cscli: restyle table titles; autocomplete "cscli metrics show" * lint --- cmd/crowdsec-cli/clialert/table.go | 2 +- cmd/crowdsec-cli/clibouncer/inspect.go | 2 +- cmd/crowdsec-cli/clibouncer/list.go | 3 +- cmd/crowdsec-cli/clihub/items.go | 2 +- cmd/crowdsec-cli/clihub/utils_table.go | 4 +- cmd/crowdsec-cli/cliitem/inspect.go | 2 +- cmd/crowdsec-cli/cliitem/metrics_table.go | 13 +++-- cmd/crowdsec-cli/climachine/inspect.go | 4 +- cmd/crowdsec-cli/climachine/list.go | 3 +- cmd/crowdsec-cli/climetrics/list.go | 3 +- cmd/crowdsec-cli/climetrics/show.go | 15 ++++++ cmd/crowdsec-cli/climetrics/statacquis.go | 6 +-- cmd/crowdsec-cli/climetrics/statalert.go | 6 +-- .../climetrics/statappsecengine.go | 6 +-- cmd/crowdsec-cli/climetrics/statappsecrule.go | 5 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 13 ++--- cmd/crowdsec-cli/climetrics/statbucket.go | 6 +-- cmd/crowdsec-cli/climetrics/statdecision.go | 6 +-- cmd/crowdsec-cli/climetrics/statlapi.go | 6 +-- .../climetrics/statlapibouncer.go | 6 +-- .../climetrics/statlapidecision.go | 6 +-- .../climetrics/statlapimachine.go | 6 +-- cmd/crowdsec-cli/climetrics/statparser.go | 6 +-- cmd/crowdsec-cli/climetrics/statstash.go | 6 +-- cmd/crowdsec-cli/climetrics/statwhitelist.go | 6 +-- cmd/crowdsec-cli/climetrics/store.go | 3 +- .../modules/appsec/appsec_runner_test.go | 15 +++--- pkg/acquisition/modules/http/http.go | 27 ++++++++-- pkg/acquisition/modules/http/http_test.go | 8 +-- pkg/alertcontext/alertcontext_test.go | 10 ++-- pkg/cwhub/item.go | 54 +++++++++---------- pkg/hubops/plan.go | 8 +-- pkg/hubtest/hubtest_item.go | 9 ++-- test/bats/04_nocapi.bats | 2 +- test/bats/08_metrics.bats | 4 +- test/bats/08_metrics_bouncer.bats | 28 +++++++--- 
test/bats/80_alerts.bats | 2 +- test/bats/cscli-hubtype-list.bats | 2 +- 38 files changed, 185 insertions(+), 130 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/table.go b/cmd/crowdsec-cli/clialert/table.go index 1416e1e435c..4fe7c4b99c6 100644 --- a/cmd/crowdsec-cli/clialert/table.go +++ b/cmd/crowdsec-cli/clialert/table.go @@ -86,7 +86,7 @@ func alertDecisionsTable(out io.Writer, wantColor string, alert *models.Alert) { } if foundActive { - fmt.Printf(" - Active Decisions :\n") + t.Writer.SetTitle("Active Decisions") t.Render() // Send output } } diff --git a/cmd/crowdsec-cli/clibouncer/inspect.go b/cmd/crowdsec-cli/clibouncer/inspect.go index b62344baa9b..9f1d56124d8 100644 --- a/cmd/crowdsec-cli/clibouncer/inspect.go +++ b/cmd/crowdsec-cli/clibouncer/inspect.go @@ -47,7 +47,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprint(out, t.Render()) } func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { diff --git a/cmd/crowdsec-cli/clibouncer/list.go b/cmd/crowdsec-cli/clibouncer/list.go index a13ca994e1e..4ed22ce752f 100644 --- a/cmd/crowdsec-cli/clibouncer/list.go +++ b/cmd/crowdsec-cli/clibouncer/list.go @@ -37,7 +37,7 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { @@ -71,7 +71,6 @@ func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { func (cli *cliBouncers) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - bouncers, err := db.ListBouncers(ctx) if err != nil { return 
fmt.Errorf("unable to list bouncers: %w", err) diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index f63dc4bedd7..730d2208be0 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -105,7 +105,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st return fmt.Errorf("failed to parse: %w", err) } - out.Write(x) + fmt.Fprint(out, string(x)) case "raw": csvwriter := csv.NewWriter(out) diff --git a/cmd/crowdsec-cli/clihub/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go index 4693161005b..b89f8447896 100644 --- a/cmd/crowdsec-cli/clihub/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -20,6 +20,6 @@ func listHubItemTable(out io.Writer, wantColor string, title string, items []*cw t.AppendRow(table.Row{item.Name, status, item.State.LocalVersion, item.State.LocalPath}) } - io.WriteString(out, title+"\n") - io.WriteString(out, t.Render()+"\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } diff --git a/cmd/crowdsec-cli/cliitem/inspect.go b/cmd/crowdsec-cli/cliitem/inspect.go index 596674aa788..9939de1810e 100644 --- a/cmd/crowdsec-cli/cliitem/inspect.go +++ b/cmd/crowdsec-cli/cliitem/inspect.go @@ -1,8 +1,8 @@ package cliitem import ( - "fmt" "encoding/json" + "fmt" "os" "path/filepath" diff --git a/cmd/crowdsec-cli/cliitem/metrics_table.go b/cmd/crowdsec-cli/cliitem/metrics_table.go index 378394bad85..a41ea0fad39 100644 --- a/cmd/crowdsec-cli/cliitem/metrics_table.go +++ b/cmd/crowdsec-cli/cliitem/metrics_table.go @@ -10,7 +10,6 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) - func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { t := cstable.NewLight(out, wantColor).Writer t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) @@ -20,8 +19,8 @@ func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metric strconv.Itoa(metrics["outband_hits"]), }) - 
io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") + t.SetTitle("(AppSec) " + itemName) + fmt.Fprintln(out, t.Render()) } func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { @@ -40,8 +39,8 @@ func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metr strconv.Itoa(metrics["underflow"]), }) - io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") + t.SetTitle("(Scenario) " + itemName) + fmt.Fprintln(out, t.Render()) } func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { @@ -65,7 +64,7 @@ func parserMetricsTable(out io.Writer, wantColor string, itemName string, metric } if showTable { - io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") + t.SetTitle("(Parser) " + itemName) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climachine/inspect.go b/cmd/crowdsec-cli/climachine/inspect.go index b08f2f62794..e973d07e96b 100644 --- a/cmd/crowdsec-cli/climachine/inspect.go +++ b/cmd/crowdsec-cli/climachine/inspect.go @@ -44,7 +44,7 @@ func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } } @@ -80,7 +80,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Collections", coll.Name}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliMachines) inspect(machine *ent.Machine) error { diff --git a/cmd/crowdsec-cli/climachine/list.go b/cmd/crowdsec-cli/climachine/list.go index 6bedb2ad807..6fb45166aa2 100644 --- a/cmd/crowdsec-cli/climachine/list.go +++ b/cmd/crowdsec-cli/climachine/list.go @@ -55,7 
+55,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { @@ -90,7 +90,6 @@ func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { func (cli *cliMachines) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - machines, err := db.ListMachines(ctx) if err != nil { return fmt.Errorf("unable to list machines: %w", err) diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index 27fa99710c8..32e2f8e0a80 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -3,7 +3,6 @@ package climetrics import ( "encoding/json" "fmt" - "io" "github.com/fatih/color" "github.com/jedib0t/go-pretty/v6/table" @@ -64,7 +63,7 @@ func (cli *cliMetrics) list() error { t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/climetrics/show.go b/cmd/crowdsec-cli/climetrics/show.go index 045959048f6..172d3799435 100644 --- a/cmd/crowdsec-cli/climetrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -4,11 +4,15 @@ import ( "context" "errors" "fmt" + "slices" + "strings" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) @@ -99,6 +103,17 @@ cscli metrics list; cscli metrics list -o json 
cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ms := NewMetricStore() + ret := []string{} + for _, section := range maptools.SortedKeys(ms) { + if !slices.Contains(args, section) && strings.Contains(section, toComplete) { + ret = append(ret, section) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp + }, RunE: func(cmd *cobra.Command, args []string) error { args = expandAlias(args) return cli.show(cmd.Context(), args, url, noUnit) diff --git a/cmd/crowdsec-cli/climetrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go index 0af2e796f40..da17b1d9480 100644 --- a/cmd/crowdsec-cli/climetrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -37,8 +38,7 @@ func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go index 942eceaa75c..416b78f0508 100644 --- a/cmd/crowdsec-cli/climetrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -38,8 +39,7 @@ func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git 
a/cmd/crowdsec-cli/climetrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go index d924375247f..93cc1283c96 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -34,8 +35,7 @@ func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, sh log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go index e06a7c2e2b3..8e243aba642 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -40,9 +40,8 @@ func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, show if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } else if numRows > 0 || showEmpty { - io.WriteString(out, fmt.Sprintf("Appsec '%s' Rules Metrics:\n", appsecEngine)) - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(fmt.Sprintf("Appsec '%s' Rules Metrics", appsecEngine)) + fmt.Fprintln(out, t.Render()) } } } diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index bc0da152d6d..ac79074d506 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -176,17 +176,20 @@ func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricIte if item.Name == nil { logWarningOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + // no continue - keep checking the 
rest valid = false } if item.Unit == nil { logWarningOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + valid = false } if item.Value == nil { logWarningOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + valid = false } @@ -439,11 +442,8 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } - // don't use SetTitle() because it draws the title inside table box - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - // empty line between tables - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } // Table displays a table of metrics for each bouncer @@ -452,10 +452,11 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm for _, bouncerName := range maptools.SortedKeys(s.aggOverOrigin) { s.bouncerTable(out, bouncerName, wantColor, noUnit) + found = true } if !found && showEmpty { - io.WriteString(out, "No bouncer metrics found.\n\n") + fmt.Fprintln(out, "No bouncer metrics found.") } } diff --git a/cmd/crowdsec-cli/climetrics/statbucket.go b/cmd/crowdsec-cli/climetrics/statbucket.go index 1882fe21df1..4cddfeb3731 100644 --- a/cmd/crowdsec-cli/climetrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go index b862f49ff12..2f27410f56f 100644 
--- a/cmd/crowdsec-cli/climetrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -53,8 +54,7 @@ func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go index 9559eacf0f4..2f460ca5a71 100644 --- a/cmd/crowdsec-cli/climetrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -49,8 +50,7 @@ func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go index 5e5f63a79d3..2ea6b67cd0a 100644 --- a/cmd/crowdsec-cli/climetrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go index 44f0e8f4b87..3371cb0e8ff 100644 --- a/cmd/crowdsec-cli/climetrics/statlapidecision.go +++ 
b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -57,8 +58,7 @@ func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, sh if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go index 0e6693bea82..04fbb98ae8e 100644 --- a/cmd/crowdsec-cli/climetrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go index 520e68f9adf..bdc9caa8597 100644 --- a/cmd/crowdsec-cli/climetrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -36,8 +37,7 @@ func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go index 2729de931a1..496deaf0535 100644 --- a/cmd/crowdsec-cli/climetrics/statstash.go +++ 
b/cmd/crowdsec-cli/climetrics/statstash.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -52,8 +53,7 @@ func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go index 7f533b45b4b..a42f653d50d 100644 --- a/cmd/crowdsec-cli/climetrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -36,8 +37,7 @@ func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showE log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/store.go b/cmd/crowdsec-cli/climetrics/store.go index 55fab5dbd7f..6c402447901 100644 --- a/cmd/crowdsec-cli/climetrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -262,7 +262,8 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, if err != nil { return fmt.Errorf("failed to serialize metrics: %w", err) } - out.Write(x) + + fmt.Fprint(out, string(x)) default: return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } diff --git a/pkg/acquisition/modules/appsec/appsec_runner_test.go b/pkg/acquisition/modules/appsec/appsec_runner_test.go index d07fb153186..38549a9106c 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner_test.go +++ b/pkg/acquisition/modules/appsec/appsec_runner_test.go @@ -3,13 +3,15 @@ package appsecacquisition 
import ( "testing" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" ) func TestAppsecRuleLoad(t *testing.T) { log.SetLevel(log.TraceLevel) + tests := []appsecRuleTest{ { name: "simple rule load", @@ -105,21 +107,22 @@ func TestAppsecRuleLoad(t *testing.T) { Or: []appsec_rule.CustomRule{ { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "toto"}, }, { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "tutu"}, }, { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "tata"}, - }, { - //Name: "rule1", + }, + { + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "titi"}, }, diff --git a/pkg/acquisition/modules/http/http.go b/pkg/acquisition/modules/http/http.go index 6bb8228f32c..3e4f26915fd 100644 --- a/pkg/acquisition/modules/http/http.go +++ b/pkg/acquisition/modules/http/http.go @@ -16,7 +16,6 @@ import ( "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "gopkg.in/yaml.v3" @@ -36,8 +35,8 @@ var linesRead = prometheus.NewCounterVec( []string{"path", "src"}) type HttpConfiguration struct { - //IPFilter []string `yaml:"ip_filter"` - //ChunkSize *int64 `yaml:"chunk_size"` + // IPFilter []string `yaml:"ip_filter"` + // ChunkSize *int64 `yaml:"chunk_size"` ListenAddr string `yaml:"listen_addr"` Path string `yaml:"path"` AuthType string `yaml:"auth_type"` @@ -76,6 +75,7 @@ func (h *HTTPSource) GetUuid() string { func (h *HTTPSource) UnmarshalConfig(yamlConfig []byte) error { h.Config = HttpConfiguration{} + err := yaml.Unmarshal(yamlConfig, &h.Config) if err != nil { return fmt.Errorf("cannot parse %s datasource configuration: %w", dataSourceName, err) @@ 
-96,6 +96,7 @@ func (hc *HttpConfiguration) Validate() error { if hc.Path == "" { hc.Path = "/" } + if hc.Path[0] != '/' { return errors.New("path must start with /") } @@ -106,9 +107,11 @@ func (hc *HttpConfiguration) Validate() error { if hc.BasicAuth == nil { return errors.New(baseErr + " basic_auth is not provided") } + if hc.BasicAuth.Username == "" { return errors.New(baseErr + " username is not provided") } + if hc.BasicAuth.Password == "" { return errors.New(baseErr + " password is not provided") } @@ -128,6 +131,7 @@ func (hc *HttpConfiguration) Validate() error { if hc.TLS.ServerCert == "" { return errors.New("server_cert is required") } + if hc.TLS.ServerKey == "" { return errors.New("server_key is required") } @@ -156,6 +160,7 @@ func (hc *HttpConfiguration) Validate() error { func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { h.logger = logger h.metricsLevel = MetricsLevel + err := h.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -210,6 +215,7 @@ func (hc *HttpConfiguration) NewTLSConfig() (*tls.Config, error) { if err != nil { return nil, fmt.Errorf("failed to load server cert/key: %w", err) } + tlsConfig.Certificates = []tls.Certificate{cert} } @@ -227,6 +233,7 @@ func (hc *HttpConfiguration) NewTLSConfig() (*tls.Config, error) { if caCertPool == nil { caCertPool = x509.NewCertPool() } + caCertPool.AppendCertsFromPEM(caCert) tlsConfig.ClientCAs = caCertPool tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert @@ -241,10 +248,12 @@ func authorizeRequest(r *http.Request, hc *HttpConfiguration) error { if !ok { return errors.New("missing basic auth") } + if username != hc.BasicAuth.Username || password != hc.BasicAuth.Password { return errors.New("invalid basic auth") } } + if hc.AuthType == "headers" { for key, value := range *hc.Headers { if r.Header.Get(key) != value { @@ -252,6 +261,7 @@ func authorizeRequest(r *http.Request, hc *HttpConfiguration) error { } } } + return nil } @@ -280,6 +290,7 
@@ func (h *HTTPSource) processRequest(w http.ResponseWriter, r *http.Request, hc * } decoder := json.NewDecoder(reader) + for { var message json.RawMessage @@ -287,7 +298,9 @@ func (h *HTTPSource) processRequest(w http.ResponseWriter, r *http.Request, hc * if err == io.EOF { break } + w.WriteHeader(http.StatusBadRequest) + return fmt.Errorf("failed to decode: %w", err) } @@ -328,11 +341,13 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) return } + if err := authorizeRequest(r, &h.Config); err != nil { h.logger.Errorf("failed to authorize request from '%s': %s", r.RemoteAddr, err) http.Error(w, "Unauthorized", http.StatusUnauthorized) return } + err := h.processRequest(w, r, &h.Config, out) if err != nil { h.logger.Errorf("failed to process request from '%s': %s", r.RemoteAddr, err) @@ -344,6 +359,7 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { w.Header().Set(key, value) } } + if h.Config.CustomStatusCode != nil { w.WriteHeader(*h.Config.CustomStatusCode) } else { @@ -367,25 +383,30 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { if err != nil { return fmt.Errorf("failed to create tls config: %w", err) } + h.logger.Tracef("tls config: %+v", tlsConfig) h.Server.TLSConfig = tlsConfig } t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/http/server") + if h.Config.TLS != nil { h.logger.Infof("start https server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServeTLS(h.Config.TLS.ServerCert, h.Config.TLS.ServerKey) if err != nil && err != http.ErrServerClosed { return fmt.Errorf("https server failed: %w", err) } } else { h.logger.Infof("start http server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServe() if err != nil && err != http.ErrServerClosed { return fmt.Errorf("http server failed: %w", err) } } + return nil }) diff --git a/pkg/acquisition/modules/http/http_test.go 
b/pkg/acquisition/modules/http/http_test.go index c76f311d669..b05979c5adf 100644 --- a/pkg/acquisition/modules/http/http_test.go +++ b/pkg/acquisition/modules/http/http_test.go @@ -14,13 +14,15 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/types" ) const ( @@ -241,7 +243,7 @@ func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLev func TestStreamingAcquisitionWrongHTTPMethod(t *testing.T) { h := &HTTPSource{} - _, _, tomb:= SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index 284ff451bc2..b1572edd76b 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -8,9 +8,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestNewAlertContext(t *testing.T) { @@ -229,6 +230,7 @@ func TestValidateContextExpr(t *testing.T) { } for _, test := range tests { fmt.Printf("Running test '%s'\n", test.name) + err := ValidateContextExpr(test.key, test.exprs) if test.expectedErr == nil { require.NoError(t, err) @@ -348,13 +350,13 @@ func TestAppsecEventToContext(t *testing.T) { } for _, test := range tests { - //reset cache + // reset cache alertContext = Context{} - //compile + // compile if err := NewAlertContext(test.contextToSend, 100); err != nil { t.Fatalf("failed to 
compile %s: %s", test.name, err) } - //run + // run metas, errors := AppsecEventToContext(test.match, test.req) assert.Len(t, errors, test.expectedErrLen) diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 8cdb88a18ed..74b1cfa3ebe 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -114,37 +114,38 @@ type Dependencies struct { // a group of items of the same type type itemgroup struct { - typeName string - itemNames []string + typeName string + itemNames []string } func (d Dependencies) byType() []itemgroup { - return []itemgroup{ - {PARSERS, d.Parsers}, - {POSTOVERFLOWS, d.PostOverflows}, - {SCENARIOS, d.Scenarios}, - {CONTEXTS, d.Contexts}, - {APPSEC_CONFIGS, d.AppsecConfigs}, - {APPSEC_RULES, d.AppsecRules}, - {COLLECTIONS, d.Collections}, - } + return []itemgroup{ + {PARSERS, d.Parsers}, + {POSTOVERFLOWS, d.PostOverflows}, + {SCENARIOS, d.Scenarios}, + {CONTEXTS, d.Contexts}, + {APPSEC_CONFIGS, d.AppsecConfigs}, + {APPSEC_RULES, d.AppsecRules}, + {COLLECTIONS, d.Collections}, + } } // SubItems iterates over the sub-items in the struct, excluding the ones that were not found in the hub. func (d Dependencies) SubItems(hub *Hub) func(func(*Item) bool) { - return func(yield func(*Item) bool) { - for _, typeGroup := range d.byType() { - for _, name := range typeGroup.itemNames { - s := hub.GetItem(typeGroup.typeName, name) - if s == nil { - continue - } - if !yield(s) { - return - } - } - } - } + return func(yield func(*Item) bool) { + for _, typeGroup := range d.byType() { + for _, name := range typeGroup.itemNames { + s := hub.GetItem(typeGroup.typeName, name) + if s == nil { + continue + } + + if !yield(s) { + return + } + } + } + } } // Item is created from an index file and enriched with local info. 
@@ -272,6 +273,7 @@ func (i *Item) CurrentDependencies() Dependencies { if errors.Is(err, fs.ErrNotExist) { return i.Dependencies } + if err != nil { // a file might be corrupted, or in development i.hub.logger.Warningf("can't read dependencies for %s, using index", i.FQName()) @@ -285,11 +287,10 @@ func (i *Item) CurrentDependencies() Dependencies { i.hub.logger.Warningf("can't parse dependencies for %s, using index", i.FQName()) return i.Dependencies } - + return d } - func (i *Item) logMissingSubItems() { if !i.HasSubItems() { return @@ -405,7 +406,6 @@ func (i *Item) SafeToRemoveDeps() ([]*Item, error) { return ret, nil } - // descendants returns a list of all (direct or indirect) dependencies of the item's current version. func (i *Item) descendants() ([]*Item, error) { var collectSubItems func(item *Item, visited map[*Item]bool, result *[]*Item) error diff --git a/pkg/hubops/plan.go b/pkg/hubops/plan.go index 1535bc41c64..eb99056fab3 100644 --- a/pkg/hubops/plan.go +++ b/pkg/hubops/plan.go @@ -8,8 +8,8 @@ import ( "strings" "github.com/AlecAivazis/survey/v2" - isatty "github.com/mattn/go-isatty" "github.com/fatih/color" + isatty "github.com/mattn/go-isatty" "github.com/crowdsecurity/go-cs-lib/slicetools" @@ -59,13 +59,13 @@ func UniqueKey(c Command) string { type ActionPlan struct { // hold the list of Commands to be executed as part of the action plan. // If a command is skipped (i.e. calling Prepare() returned false), it won't be included in the slice. - commands []Command + commands []Command // Tracks unique commands commandsTracker map[string]struct{} // A reference to the Hub instance, required for dependency lookup. - hub *cwhub.Hub + hub *cwhub.Hub // Indicates whether a reload of the CrowdSec service is required after executing the action plan. 
ReloadNeeded bool @@ -73,7 +73,7 @@ type ActionPlan struct { func NewActionPlan(hub *cwhub.Hub) *ActionPlan { return &ActionPlan{ - hub: hub, + hub: hub, commandsTracker: make(map[string]struct{}), } } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index bf029b0e835..75895dc729b 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -300,7 +300,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { crowdsecDaemon.Start() // wait for the appsec port to be available - if _, err := IsAlive(t.AppSecHost); err != nil { + if _, err = IsAlive(t.AppSecHost); err != nil { crowdsecLog, err2 := os.ReadFile(crowdsecLogFile) if err2 != nil { log.Errorf("unable to read crowdsec log file '%s': %s", crowdsecLogFile, err) @@ -319,7 +319,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } nucleiTargetHost := nucleiTargetParsedURL.Host - if _, err := IsAlive(nucleiTargetHost); err != nil { + if _, err = IsAlive(nucleiTargetHost); err != nil { return fmt.Errorf("target is down: %w", err) } @@ -631,8 +631,11 @@ func (t *HubTestItem) Run(patternDir string) error { if t.Config.LogFile != "" { return t.RunWithLogFile(patternDir) - } else if t.Config.NucleiTemplate != "" { + } + + if t.Config.NucleiTemplate != "" { return t.RunWithNucleiTemplate() } + return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name) } diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index d22a6f0a953..8d0018a9a4a 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -76,5 +76,5 @@ teardown() { rune -0 cscli metrics assert_output --partial "Route" assert_output --partial '/v1/watchers/login' - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" } diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index e260e667524..f3be9c60a95 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -66,7 +66,7 @@ teardown() { rune -0 cscli metrics 
assert_output --partial "Route" assert_output --partial '/v1/watchers/login' - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" rune -0 cscli metrics -o json rune -0 jq 'keys' <(output) @@ -93,7 +93,7 @@ teardown() { assert_stderr --partial "unknown metrics type: foobar" rune -0 cscli metrics show lapi - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" assert_output --regexp "Route.*Method.*Hits" assert_output --regexp "/v1/watchers/login.*POST" diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index c4dfebbab1d..5fb2c543bda 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -136,7 +136,10 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +--------------------------+ + | Bouncer Metrics (testbou | + | ncer) since 2024-02-08 1 | + | 3:35:16 +0000 UTC | +--------+-----------------+ | Origin | foo | | | dogyear | pound | @@ -226,7 +229,8 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +-------------------------------------------------------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC | +----------------------------------+------------------+-------------------+-----------------+ | Origin | active_decisions | dropped | foo | | | IPs | bytes | packets | dogyear | pound | @@ -309,7 +313,8 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +-------------------------------------------------------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC | 
+----------------------------------+------------------+-------------------+-----------------+ | Origin | active_decisions | dropped | foo | | | IPs | bytes | packets | dogyear | pound | @@ -365,7 +370,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +-----------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-0 | + | 9 03:40:00 +0000 UTC | +--------------------------+--------+-----------+ | Origin | ima | notagauge | | | second | inch | @@ -417,7 +424,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +---------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02 | + | -09 03:40:00 +0000 UTC | +--------------------------+------------------+ | Origin | active_decisions | | | IPs | @@ -502,7 +511,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: + +--------------------------------------------------------------+ + | Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 U | + | TC | +----------------------------+---------+-----------------------+ | Origin | dropped | processed | | | bytes | bytes | packets | @@ -512,8 +523,9 @@ teardown() { +----------------------------+---------+-----------+-----------+ | Total | 1.80k | 12.34k | 100 | +----------------------------+---------+-----------+-----------+ - - Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: + +------------------------------------------------+ + | Bouncer Metrics (bouncer2) since 2024-02-08 10 | + | :48:36 +0000 UTC | +----------------------------+-------------------+ | Origin | dropped | | | bytes | packets | diff --git a/test/bats/80_alerts.bats b/test/bats/80_alerts.bats index 6d84c1a1fce..f01e918925c 100644 --- 
a/test/bats/80_alerts.bats +++ b/test/bats/80_alerts.bats @@ -89,7 +89,7 @@ teardown() { assert_line --regexp "^ - AS *: *$" assert_line --regexp "^ - Begin *: .*$" assert_line --regexp "^ - End *: .*$" - assert_line --regexp "^ - Active Decisions *:$" + assert_line --regexp "^\| Active Decisions *\|$" assert_line --regexp "^.* ID .* scope:value .* action .* expiration .* created_at .*$" assert_line --regexp "^.* Ip:10.20.30.40 .* ban .*$" diff --git a/test/bats/cscli-hubtype-list.bats b/test/bats/cscli-hubtype-list.bats index 14113650c74..b3db4743eb9 100644 --- a/test/bats/cscli-hubtype-list.bats +++ b/test/bats/cscli-hubtype-list.bats @@ -80,7 +80,7 @@ teardown() { # the list should be the same in all formats, and sorted (not case sensitive) list_raw=$(cscli parsers list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli parsers list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) + list_human=$(cscli parsers list -o human -a | tail -n +7 | head -n -1 | cut -d' ' -f2) list_json=$(cscli parsers list -o json -a | jq -r '.parsers[].name') # use python to sort because it handles "_" like go From 57d7247a43d2e45f9eb9bdfb73dfe6de5834e821 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 6 Jan 2025 23:55:39 +0100 Subject: [PATCH 381/581] cscli hub: refact/split files; add some doc/examples (#3394) --- cmd/crowdsec-cli/clihub/hub.go | 6 +- cmd/crowdsec-cli/cliitem/appsec.go | 123 ----- cmd/crowdsec-cli/cliitem/cmdinspect.go | 236 ++++++++++ cmd/crowdsec-cli/cliitem/cmdinstall.go | 150 ++++++ cmd/crowdsec-cli/cliitem/cmdremove.go | 151 ++++++ cmd/crowdsec-cli/cliitem/cmdupgrade.go | 106 +++++ cmd/crowdsec-cli/cliitem/collection.go | 41 -- cmd/crowdsec-cli/cliitem/context.go | 41 -- cmd/crowdsec-cli/cliitem/hubappsec.go | 255 ++++++++++ cmd/crowdsec-cli/cliitem/hubcollection.go | 105 +++++ cmd/crowdsec-cli/cliitem/hubcontext.go | 102 ++++ cmd/crowdsec-cli/cliitem/hubparser.go | 105 +++++ 
cmd/crowdsec-cli/cliitem/hubpostoverflow.go | 102 ++++ cmd/crowdsec-cli/cliitem/hubscenario.go | 78 +++- cmd/crowdsec-cli/cliitem/inspect.go | 57 --- cmd/crowdsec-cli/cliitem/item.go | 492 +------------------- cmd/crowdsec-cli/cliitem/parser.go | 41 -- cmd/crowdsec-cli/cliitem/postoverflow.go | 41 -- cmd/crowdsec-cli/cliitem/suggest.go | 77 --- 19 files changed, 1399 insertions(+), 910 deletions(-) delete mode 100644 cmd/crowdsec-cli/cliitem/appsec.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdinspect.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdinstall.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdremove.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdupgrade.go delete mode 100644 cmd/crowdsec-cli/cliitem/collection.go delete mode 100644 cmd/crowdsec-cli/cliitem/context.go create mode 100644 cmd/crowdsec-cli/cliitem/hubappsec.go create mode 100644 cmd/crowdsec-cli/cliitem/hubcollection.go create mode 100644 cmd/crowdsec-cli/cliitem/hubcontext.go create mode 100644 cmd/crowdsec-cli/cliitem/hubparser.go create mode 100644 cmd/crowdsec-cli/cliitem/hubpostoverflow.go delete mode 100644 cmd/crowdsec-cli/cliitem/inspect.go delete mode 100644 cmd/crowdsec-cli/cliitem/parser.go delete mode 100644 cmd/crowdsec-cli/cliitem/postoverflow.go delete mode 100644 cmd/crowdsec-cli/cliitem/suggest.go diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 36e851d1b74..0d1e625f715 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -213,7 +213,11 @@ func (cli *cliHub) newUpgradeCmd() *cobra.Command { Long: ` Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if you want the latest versions available. `, - // TODO: Example + Example: `# Upgrade all the collections, scenarios etc. to the latest version in the downloaded index. Update data files too. +cscli hub upgrade + +# Upgrade tainted items as well; force re-download of data files. 
+cscli hub upgrade --force`, Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/crowdsec-cli/cliitem/appsec.go b/cmd/crowdsec-cli/cliitem/appsec.go deleted file mode 100644 index 44afa2133bd..00000000000 --- a/cmd/crowdsec-cli/cliitem/appsec.go +++ /dev/null @@ -1,123 +0,0 @@ -package cliitem - -import ( - "fmt" - "os" - - "golang.org/x/text/cases" - "golang.org/x/text/language" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewAppsecConfig(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.APPSEC_CONFIGS, - singular: "appsec-config", - oneOrMore: "appsec-config(s)", - help: cliHelp{ - example: `cscli appsec-configs list -a -cscli appsec-configs install crowdsecurity/vpatch -cscli appsec-configs inspect crowdsecurity/vpatch -cscli appsec-configs upgrade crowdsecurity/vpatch -cscli appsec-configs remove crowdsecurity/vpatch -`, - }, - installHelp: cliHelp{ - example: `cscli appsec-configs install crowdsecurity/vpatch`, - }, - removeHelp: cliHelp{ - example: `cscli appsec-configs remove crowdsecurity/vpatch`, - }, - upgradeHelp: cliHelp{ - example: `cscli appsec-configs upgrade crowdsecurity/vpatch`, - }, - inspectHelp: cliHelp{ - example: `cscli appsec-configs inspect crowdsecurity/vpatch`, - }, - listHelp: cliHelp{ - example: `cscli appsec-configs list -cscli appsec-configs list -a -cscli appsec-configs list crowdsecurity/vpatch`, - }, - } -} - -func NewAppsecRule(cfg configGetter) *cliItem { - inspectDetail := func(item *cwhub.Item) error { - // Only show the converted rules in human mode - if cfg().Cscli.Output != "human" { - return nil - } - - appsecRule := appsec.AppsecCollectionConfig{} - - yamlContent, err := os.ReadFile(item.State.LocalPath) - if err != nil { - return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, 
err) - } - - if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { - return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) - } - - for _, ruleType := range appsec_rule.SupportedTypes() { - fmt.Printf("\n%s format:\n", cases.Title(language.Und, cases.NoLower).String(ruleType)) - - for _, rule := range appsecRule.Rules { - convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) - if err != nil { - return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) - } - - fmt.Println(convertedRule) - } - - switch ruleType { //nolint:gocritic - case appsec_rule.ModsecurityRuleType: - for _, rule := range appsecRule.SecLangRules { - fmt.Println(rule) - } - } - } - - return nil - } - - return &cliItem{ - cfg: cfg, - name: "appsec-rules", - singular: "appsec-rule", - oneOrMore: "appsec-rule(s)", - help: cliHelp{ - example: `cscli appsec-rules list -a -cscli appsec-rules install crowdsecurity/crs -cscli appsec-rules inspect crowdsecurity/crs -cscli appsec-rules upgrade crowdsecurity/crs -cscli appsec-rules remove crowdsecurity/crs -`, - }, - installHelp: cliHelp{ - example: `cscli appsec-rules install crowdsecurity/crs`, - }, - removeHelp: cliHelp{ - example: `cscli appsec-rules remove crowdsecurity/crs`, - }, - upgradeHelp: cliHelp{ - example: `cscli appsec-rules upgrade crowdsecurity/crs`, - }, - inspectHelp: cliHelp{ - example: `cscli appsec-rules inspect crowdsecurity/crs`, - }, - inspectDetail: inspectDetail, - listHelp: cliHelp{ - example: `cscli appsec-rules list -cscli appsec-rules list -a -cscli appsec-rules list crowdsecurity/crs`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/cmdinspect.go b/cmd/crowdsec-cli/cliitem/cmdinspect.go new file mode 100644 index 00000000000..b5ee0816d72 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdinspect.go @@ -0,0 +1,236 @@ +package cliitem + +import ( + "cmp" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hexops/gotextdiff" 
+ "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { + cfg := cli.cfg() + + if rev && !diff { + return errors.New("--rev can only be used with --diff") + } + + if url != "" { + cfg.Cscli.PrometheusUrl = url + } + + var contentProvider cwhub.ContentProvider + + if diff { + contentProvider = require.HubDownloader(ctx, cfg) + } + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + for _, name := range args { + item := hub.GetItem(cli.name, name) + if item == nil { + return fmt.Errorf("can't find '%s' in %s", name, cli.name) + } + + if diff { + fmt.Println(cli.whyTainted(ctx, hub, contentProvider, item, rev)) + + continue + } + + if err = inspectItem(hub, item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { + return err + } + + if cli.inspectDetail != nil { + if err = cli.inspectDetail(item); err != nil { + return err + } + } + } + + return nil +} + +// return the diff between the installed version and the latest version +func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { + if !item.State.Installed { + return "", fmt.Errorf("'%s' is not installed", item.FQName()) + } + + dest, err := os.CreateTemp("", "cscli-diff-*") + if err != nil { + return "", fmt.Errorf("while creating temporary file: %w", err) + } + defer os.Remove(dest.Name()) + + _, remoteURL, err := item.FetchContentTo(ctx, contentProvider, dest.Name()) + if err != nil { + return "", err + } + + latestContent, err := os.ReadFile(dest.Name()) + if err != nil { + return "", fmt.Errorf("while reading %s: %w", 
dest.Name(), err) + } + + localContent, err := os.ReadFile(item.State.LocalPath) + if err != nil { + return "", fmt.Errorf("while reading %s: %w", item.State.LocalPath, err) + } + + file1 := item.State.LocalPath + file2 := remoteURL + content1 := string(localContent) + content2 := string(latestContent) + + if reverse { + file1, file2 = file2, file1 + content1, content2 = content2, content1 + } + + edits := myers.ComputeEdits(span.URIFromPath(file1), content1, content2) + diff := gotextdiff.ToUnified(file1, file2, content1, edits) + + return fmt.Sprintf("%s", diff), nil +} + +func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, item *cwhub.Item, reverse bool) string { + if !item.State.Installed { + return fmt.Sprintf("# %s is not installed", item.FQName()) + } + + if !item.State.Tainted { + return fmt.Sprintf("# %s is not tainted", item.FQName()) + } + + if len(item.State.TaintedBy) == 0 { + return fmt.Sprintf("# %s is tainted but we don't know why. 
please report this as a bug", item.FQName()) + } + + ret := []string{ + fmt.Sprintf("# Let's see why %s is tainted.", item.FQName()), + } + + for _, fqsub := range item.State.TaintedBy { + ret = append(ret, fmt.Sprintf("\n-> %s\n", fqsub)) + + sub, err := hub.GetItemFQ(fqsub) + if err != nil { + ret = append(ret, err.Error()) + } + + diff, err := cli.itemDiff(ctx, sub, contentProvider, reverse) + if err != nil { + ret = append(ret, err.Error()) + } + + if diff != "" { + ret = append(ret, diff) + } else if len(sub.State.TaintedBy) > 0 { + taintList := strings.Join(sub.State.TaintedBy, ", ") + if sub.FQName() == taintList { + // hack: avoid message "item is tainted by itself" + continue + } + + ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList)) + } + } + + return strings.Join(ret, "\n") +} + +func (cli cliItem) newInspectCmd() *cobra.Command { + var ( + url string + diff bool + rev bool + noMetrics bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.inspectHelp.use, "inspect [item]..."), + Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), + Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), + Example: cli.inspectHelp.example, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.inspect(cmd.Context(), args, url, diff, rev, noMetrics) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Prometheus url") + flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)") + flags.BoolVar(&rev, "rev", false, "Reverse diff output") + flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)") + + return cmd +} + +func inspectItem(hub *cwhub.Hub, item 
*cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { + // This is dirty... + // We want to show current dependencies (from content), not latest (from index). + // The item is modifed but after this function the whole hub should be thrown away. + // A cleaner way would be to copy the struct first. + item.Dependencies = item.CurrentDependencies() + + switch output { + case "human", "raw": + enc := yaml.NewEncoder(os.Stdout) + enc.SetIndent(2) + + if err := enc.Encode(item); err != nil { + return fmt.Errorf("unable to encode item: %w", err) + } + case "json": + b, err := json.MarshalIndent(*item, "", " ") + if err != nil { + return fmt.Errorf("unable to serialize item: %w", err) + } + + fmt.Print(string(b)) + } + + if output != "human" { + return nil + } + + if item.State.Tainted { + fmt.Println() + fmt.Printf(`This item is tainted. Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) + fmt.Println() + } + + if wantMetrics { + fmt.Printf("\nCurrent metrics: \n") + + if err := showMetrics(prometheusURL, hub, item, wantColor); err != nil { + return err + } + } + + return nil +} diff --git a/cmd/crowdsec-cli/cliitem/cmdinstall.go b/cmd/crowdsec-cli/cliitem/cmdinstall.go new file mode 100644 index 00000000000..daddbe84a4b --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdinstall.go @@ -0,0 +1,150 @@ +package cliitem + +import ( + "cmp" + "context" + "errors" + "fmt" + "slices" + "strings" + + "github.com/agext/levenshtein" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +// suggestNearestMessage returns a message with the most similar item name, if one is found +func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) string { + const 
maxDistance = 7 + + score := 100 + nearest := "" + + for _, item := range hub.GetItemsByType(itemType, false) { + d := levenshtein.Distance(itemName, item.Name, nil) + if d < score { + score = d + nearest = item.Name + } + } + + msg := fmt.Sprintf("can't find '%s' in %s", itemName, itemType) + + if score < maxDistance { + msg += fmt.Sprintf(", did you mean '%s'?", nearest) + } + + return msg +} + +func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + plan := hubops.NewActionPlan(hub) + + contentProvider := require.HubDownloader(ctx, cfg) + + for _, name := range args { + item := hub.GetItem(cli.name, name) + if item == nil { + msg := suggestNearestMessage(hub, cli.name, name) + if !ignoreError { + return errors.New(msg) + } + + log.Error(msg) + + continue + } + + if err = plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return err + } + + if !downloadOnly { + if err = plan.AddCommand(hubops.NewEnableCommand(item, force)); err != nil { + return err + } + } + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + if !ignoreError { + return err + } + + log.Error(err) + } + + if plan.ReloadNeeded { + fmt.Println("\n" + reload.Message) + } + + return nil +} + +func compAllItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil) + if err != nil { + return nil, cobra.ShellCompDirectiveDefault + } + + comp := make([]string, 0) + + for _, item := range hub.GetItemsByType(itemType, false) { + if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) + } + } + + cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) + + 
return comp, cobra.ShellCompDirectiveNoFileComp +} + +func (cli cliItem) newInstallCmd() *cobra.Command { + var ( + yes bool + dryRun bool + downloadOnly bool + force bool + ignoreError bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.installHelp.use, "install [item]..."), + Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), + Long: cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), + Example: cli.installHelp.example, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compAllItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(cmd.Context(), args, yes, dryRun, downloadOnly, force, ignoreError) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") + flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/cmdremove.go b/cmd/crowdsec-cli/cliitem/cmdremove.go new file mode 100644 index 00000000000..ac9410c047d --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdremove.go @@ -0,0 +1,151 @@ +package cliitem + +import ( + "cmp" + "context" + "errors" + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + 
"github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force bool, all bool) (*hubops.ActionPlan, error) { + plan := hubops.NewActionPlan(hub) + + if all { + itemGetter := hub.GetInstalledByType + if purge { + itemGetter = hub.GetItemsByType + } + + for _, item := range itemGetter(cli.name, true) { + if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { + return nil, err + } + + if purge { + if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { + return nil, err + } + } + } + + return plan, nil + } + + if len(args) == 0 { + return nil, fmt.Errorf("specify at least one %s to remove or '--all'", cli.singular) + } + + for _, itemName := range args { + item := hub.GetItem(cli.name, itemName) + if item == nil { + return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) + } + + parents := installedParentNames(item) + + if !force && len(parents) > 0 { + log.Warningf("%s belongs to collections: %s", item.Name, parents) + log.Warningf("Run 'sudo cscli %s remove %s --force' if you want to force remove this %s", item.Type, item.Name, cli.singular) + + continue + } + + if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { + return nil, err + } + + if purge { + if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { + return nil, err + } + } + } + + return plan, nil +} + +// return the names of the installed parents of an item, used to check if we can remove it +func installedParentNames(item *cwhub.Item) []string { + ret := make([]string, 0) + + for _, parent := range item.Ancestors() { + if parent.State.Installed { + ret = append(ret, parent.Name) + } + } + + return ret +} + +func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun bool, purge bool, force bool, all bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cli.cfg(), log.StandardLogger()) + if err != nil { + 
return err + } + + plan, err := cli.removePlan(hub, args, purge, force, all) + if err != nil { + return err + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if plan.ReloadNeeded { + fmt.Println("\n" + reload.Message) + } + + return nil +} + +func (cli cliItem) newRemoveCmd() *cobra.Command { + var ( + yes bool + dryRun bool + purge bool + force bool + all bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.removeHelp.use, "remove [item]..."), + Short: cmp.Or(cli.removeHelp.short, "Remove given "+cli.oneOrMore), + Long: cmp.Or(cli.removeHelp.long, "Remove one or more "+cli.name), + Example: cli.removeHelp.example, + Aliases: []string{"delete"}, + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 && all { + return errors.New("can't specify items and '--all' at the same time") + } + + return cli.remove(cmd.Context(), args, yes, dryRun, purge, force, all) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVar(&purge, "purge", false, "Delete source file too") + flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") + flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/cmdupgrade.go b/cmd/crowdsec-cli/cliitem/cmdupgrade.go new file mode 100644 index 00000000000..1ddd07485d4 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdupgrade.go @@ -0,0 +1,106 @@ +package cliitem + +import ( + "cmp" + "context" + "fmt" + + log 
"github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +func (cli cliItem) upgradePlan(hub *cwhub.Hub, contentProvider cwhub.ContentProvider, args []string, force bool, all bool) (*hubops.ActionPlan, error) { + plan := hubops.NewActionPlan(hub) + + if all { + for _, item := range hub.GetInstalledByType(cli.name, true) { + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return nil, err + } + } + + return plan, nil + } + + if len(args) == 0 { + return nil, fmt.Errorf("specify at least one %s to upgrade or '--all'", cli.singular) + } + + for _, itemName := range args { + item := hub.GetItem(cli.name, itemName) + if item == nil { + return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) + } + + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return nil, err + } + } + + return plan, nil +} + +func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun bool, force bool, all bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + contentProvider := require.HubDownloader(ctx, cfg) + + plan, err := cli.upgradePlan(hub, contentProvider, args, force, all) + if err != nil { + return err + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if plan.ReloadNeeded { + fmt.Println("\n" + reload.Message) + } + + return nil +} + +func (cli cliItem) newUpgradeCmd() *cobra.Command { + var ( + yes bool + dryRun bool + all bool + force bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.upgradeHelp.use, "upgrade [item]..."), + Short: cmp.Or(cli.upgradeHelp.short, "Upgrade given 
"+cli.oneOrMore), + Long: cmp.Or(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), + Example: cli.upgradeHelp.example, + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.upgrade(cmd.Context(), args, yes, dryRun, force, all) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/collection.go b/cmd/crowdsec-cli/cliitem/collection.go deleted file mode 100644 index ea91c1e537a..00000000000 --- a/cmd/crowdsec-cli/cliitem/collection.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewCollection(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.COLLECTIONS, - singular: "collection", - oneOrMore: "collection(s)", - help: cliHelp{ - example: `cscli collections list -a -cscli collections install crowdsecurity/http-cve crowdsecurity/iptables -cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables -cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables -cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables -`, - }, - installHelp: cliHelp{ - example: `cscli collections install crowdsecurity/http-cve crowdsecurity/iptables`, - }, - removeHelp: cliHelp{ - example: `cscli collections remove crowdsecurity/http-cve 
crowdsecurity/iptables`, - }, - upgradeHelp: cliHelp{ - example: `cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables`, - }, - inspectHelp: cliHelp{ - example: `cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables`, - }, - listHelp: cliHelp{ - example: `cscli collections list -cscli collections list -a -cscli collections list crowdsecurity/http-cve crowdsecurity/iptables - -List only enabled collections unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/context.go b/cmd/crowdsec-cli/cliitem/context.go deleted file mode 100644 index 7d110b8203d..00000000000 --- a/cmd/crowdsec-cli/cliitem/context.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewContext(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.CONTEXTS, - singular: "context", - oneOrMore: "context(s)", - help: cliHelp{ - example: `cscli contexts list -a -cscli contexts install crowdsecurity/yyy crowdsecurity/zzz -cscli contexts inspect crowdsecurity/yyy crowdsecurity/zzz -cscli contexts upgrade crowdsecurity/yyy crowdsecurity/zzz -cscli contexts remove crowdsecurity/yyy crowdsecurity/zzz -`, - }, - installHelp: cliHelp{ - example: `cscli contexts install crowdsecurity/yyy crowdsecurity/zzz`, - }, - removeHelp: cliHelp{ - example: `cscli contexts remove crowdsecurity/yyy crowdsecurity/zzz`, - }, - upgradeHelp: cliHelp{ - example: `cscli contexts upgrade crowdsecurity/yyy crowdsecurity/zzz`, - }, - inspectHelp: cliHelp{ - example: `cscli contexts inspect crowdsecurity/yyy crowdsecurity/zzz`, - }, - listHelp: cliHelp{ - example: `cscli contexts list -cscli contexts list -a -cscli contexts list crowdsecurity/yyy crowdsecurity/zzz - -List only enabled contexts unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/hubappsec.go b/cmd/crowdsec-cli/cliitem/hubappsec.go new file mode 100644 index 
00000000000..7f9143d35b8 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubappsec.go @@ -0,0 +1,255 @@ +package cliitem + +import ( + "fmt" + "os" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewAppsecConfig(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.APPSEC_CONFIGS, + singular: "appsec-config", + oneOrMore: "appsec-config(s)", + help: cliHelp{ + example: `cscli appsec-configs list -a +cscli appsec-configs install crowdsecurity/virtual-patching +cscli appsec-configs inspect crowdsecurity/virtual-patching +cscli appsec-configs upgrade crowdsecurity/virtual-patching +cscli appsec-configs remove crowdsecurity/virtual-patching +`, + }, + installHelp: cliHelp{ + example: `# Install some appsec-configs. +cscli appsec-configs install crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs install crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs install crowdsecurity/virtual-patching --dry-run -o raw + +# Download only, to be installed later. +cscli appsec-configs install crowdsecurity/virtual-patching --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-configs install crowdsecurity/virtual-patching --force + +# Proceed without prompting. +cscli appsec-configs install crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some appsec-configs. 
+cscli appsec-configs remove crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs remove crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs remove crowdsecurity/virtual-patching --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli appsec-configs remove crowdsecurity/virtual-patching --purge + +# Remove tainted items. +cscli appsec-configs remove crowdsecurity/virtual-patching --force + +# Proceed without prompting. +cscli appsec-configs remove crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some appsec-configs. If they are not currently installed, they are downloaded but not installed. +cscli appsec-configs upgrade crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --force + +# Proceed without prompting. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and ancestor collections of appsec-configs (installed or not). 
+cscli appsec-configs inspect crowdsecurity/virtual-patching + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli appsec-configs inspect crowdsecurity/virtual-patching --no-metrics + +# Display difference between a tainted item and the latest one. +cscli appsec-configs inspect crowdsecurity/virtual-patching --diff + +# Reverse the above diff +cscli appsec-configs inspect crowdsecurity/virtual-patching --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) appsec-configs. +cscli appsec-configs list + +# List all available appsec-configs (installed or not). +cscli appsec-configs list -a + +# List specific appsec-configs (installed or not). +cscli appsec-configs list crowdsecurity/virtual-patching crowdsecurity/generic-rules`, + }, + } +} + +func NewAppsecRule(cfg configGetter) *cliItem { + inspectDetail := func(item *cwhub.Item) error { + // Only show the converted rules in human mode + if cfg().Cscli.Output != "human" { + return nil + } + + appsecRule := appsec.AppsecCollectionConfig{} + + yamlContent, err := os.ReadFile(item.State.LocalPath) + if err != nil { + return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err) + } + + if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { + return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) + } + + for _, ruleType := range appsec_rule.SupportedTypes() { + fmt.Printf("\n%s format:\n", cases.Title(language.Und, cases.NoLower).String(ruleType)) + + for _, rule := range appsecRule.Rules { + convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) + if err != nil { + return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) + } + + fmt.Println(convertedRule) + } + + switch ruleType { //nolint:gocritic + case appsec_rule.ModsecurityRuleType: + for _, rule := range appsecRule.SecLangRules { + fmt.Println(rule) + } + } + } + + return nil + } + + return &cliItem{ + cfg: cfg, + name: "appsec-rules", + singular: 
"appsec-rule", + oneOrMore: "appsec-rule(s)", + help: cliHelp{ + example: `cscli appsec-rules list -a +cscli appsec-rules install crowdsecurity/crs +cscli appsec-rules inspect crowdsecurity/crs +cscli appsec-rules upgrade crowdsecurity/crs +cscli appsec-rules remove crowdsecurity/crs +`, + }, + installHelp: cliHelp{ + example: `# Install some appsec-rules. +cscli appsec-rules install crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules install crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-rules install crowdsecurity/crs --dry-run -o raw + +# Download only, to be installed later. +cscli appsec-rules install crowdsecurity/crs --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-rules install crowdsecurity/crs --force + +# Proceed without prompting. +cscli appsec-rules install crowdsecurity/crs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some appsec-rules. +cscli appsec-rules remove crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules remove crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-rules remove crowdsecurity/crs --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli appsec-rules remove crowdsecurity/crs --purge + +# Remove tainted items. +cscli appsec-rules remove crowdsecurity/crs --force + +# Proceed without prompting. 
+cscli appsec-rules remove crowdsecurity/crs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some appsec-rules. If they are not currently installed, they are downloaded but not installed. +cscli appsec-rules upgrade crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules upgrade crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-rules upgrade crowdsecurity/crs --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-rules upgrade crowdsecurity/crs --force + +# Proceed without prompting. +cscli appsec-rules upgrade crowdsecurity/crs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and ancestor collections of appsec-rules (installed or not). +cscli appsec-rules inspect crowdsecurity/crs + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli appsec-rules inspect crowdsecurity/crs --no-metrics + +# Display difference between a tainted item and the latest one. +cscli appsec-rules inspect crowdsecurity/crs --diff + +# Reverse the above diff +cscli appsec-rules inspect crowdsecurity/crs --diff --rev`, + }, + inspectDetail: inspectDetail, + listHelp: cliHelp{ + example: `# List enabled (installed) appsec-rules. +cscli appsec-rules list + +# List all available appsec-rules (installed or not). +cscli appsec-rules list -a + +# List specific appsec-rules (installed or not). 
+cscli appsec-rules list crowdsecurity/crs crowdsecurity/vpatch-git-config`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubcollection.go b/cmd/crowdsec-cli/cliitem/hubcollection.go new file mode 100644 index 00000000000..b45f956e0ac --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubcollection.go @@ -0,0 +1,105 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewCollection(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.COLLECTIONS, + singular: "collection", + oneOrMore: "collection(s)", + help: cliHelp{ + example: `cscli collections list -a +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables +`, + }, + installHelp: cliHelp{ + example: `# Install some collections. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Download only, to be installed later. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. 
+cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some collections. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --purge + +# Remove tainted items. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some collections. If they are not currently installed, they are downloaded but not installed. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. 
+cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and dependencies of collections (installed or not). +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables --no-metrics + +# Display difference between a tainted item and the latest one, or the reason for the taint if it's a dependency. +cscli collections inspect crowdsecurity/http-cve --diff + +# Reverse the above diff +cscli collections inspect crowdsecurity/http-cve --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) collections. +cscli collections list + +# List all available collections (installed or not). +cscli collections list -a + +# List specific collections (installed or not). +cscli collections list crowdsecurity/http-cve crowdsecurity/iptables`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubcontext.go b/cmd/crowdsec-cli/cliitem/hubcontext.go new file mode 100644 index 00000000000..3a94687843d --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubcontext.go @@ -0,0 +1,102 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewContext(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.CONTEXTS, + singular: "context", + oneOrMore: "context(s)", + help: cliHelp{ + example: `cscli contexts list -a +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts inspect crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet +`, + }, + installHelp: cliHelp{ + example: `# Install some contexts. 
+cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Download only, to be installed later. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some contexts. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --purge + +# Remove tainted items. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some contexts. 
If they are not currently installed, they are downloaded but not installed. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state and ancestor collections of contexts (installed or not). +cscli contexts inspect crowdsecurity/bf_base crowdsecurity/fortinet + +# Display difference between a tainted item and the latest one. +cscli contexts inspect crowdsecurity/bf_base --diff + +# Reverse the above diff +cscli contexts inspect crowdsecurity/bf_base --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) contexts. +cscli contexts list + +# List all available contexts (installed or not). +cscli contexts list -a + +# List specific contexts (installed or not). 
+cscli contexts list crowdsecurity/bf_base crowdsecurity/fortinet`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubparser.go b/cmd/crowdsec-cli/cliitem/hubparser.go new file mode 100644 index 00000000000..440cb61204f --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubparser.go @@ -0,0 +1,105 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewParser(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.PARSERS, + singular: "parser", + oneOrMore: "parser(s)", + help: cliHelp{ + example: `cscli parsers list -a +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers inspect crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs +`, + }, + installHelp: cliHelp{ + example: `# Install some parsers. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Download only, to be installed later. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some parsers. 
+cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --purge + +# Remove tainted items. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some parsers. If they are not currently installed, they are downloaded but not installed. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. 
+cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and ancestor collections of parsers (installed or not). +cscli parsers inspect crowdsecurity/httpd-logs crowdsecurity/sshd-logs + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli parsers inspect crowdsecurity/httpd-logs --no-metrics + +# Display difference between a tainted item and the latest one. +cscli parsers inspect crowdsecurity/httpd-logs --diff + +# Reverse the above diff +cscli parsers inspect crowdsecurity/httpd-logs --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) parsers. +cscli parsers list + +# List all available parsers (installed or not). +cscli parsers list -a + +# List specific parsers (installed or not). +cscli parsers list crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubpostoverflow.go b/cmd/crowdsec-cli/cliitem/hubpostoverflow.go new file mode 100644 index 00000000000..cfd5f7c95aa --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubpostoverflow.go @@ -0,0 +1,102 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewPostOverflow(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.POSTOVERFLOWS, + singular: "postoverflow", + oneOrMore: "postoverflow(s)", + help: cliHelp{ + example: `cscli postoverflows list -a +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns +`, + }, + installHelp: cliHelp{ + example: `# Install some postoverflows. 
+cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run -o raw + +# Download only, to be installed later. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --force + +# Proceed without prompting. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some postoverflows. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --purge + +# Remove tainted items. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --force + +# Proceed without prompting. 
+cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some postoverflows. If they are not currently installed, they are downloaded but not installed. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns --force + +# Proceed without prompting. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state and ancestor collections of postoverflows (installed or not). +cscli postoverflows inspect crowdsecurity/cdn-whitelist + +# Display difference between a tainted item and the latest one. +cscli postoverflows inspect crowdsecurity/cdn-whitelist --diff + +# Reverse the above diff +cscli postoverflows inspect crowdsecurity/cdn-whitelist --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) postoverflows. +cscli postoverflows list + +# List all available postoverflows (installed or not). +cscli postoverflows list -a + +# List specific postoverflows (installed or not). 
+cscli postoverflows list crowdsecurity/cdn-whitelists crowdsecurity/rdns`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubscenario.go b/cmd/crowdsec-cli/cliitem/hubscenario.go index a5e854b3c82..5dee3323f6f 100644 --- a/cmd/crowdsec-cli/cliitem/hubscenario.go +++ b/cmd/crowdsec-cli/cliitem/hubscenario.go @@ -19,23 +19,87 @@ cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing `, }, installHelp: cliHelp{ - example: `cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Install some scenarios. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Download only, to be installed later. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, removeHelp: cliHelp{ - example: `cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Uninstall some scenarios. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. 
+cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --purge + +# Remove tainted items. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, upgradeHelp: cliHelp{ - example: `cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Upgrade some scenarios. If they are not currently installed, they are downloaded but not installed. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. 
+cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, inspectHelp: cliHelp{ - example: `cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Display metadata, state, metrics and ancestor collections of scenarios (installed or not). +cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics + +# Display difference between a tainted item and the latest one. +cscli scenarios inspect crowdsecurity/ssh-bf --diff + +# Reverse the above diff +cscli scenarios inspect crowdsecurity/ssh-bf --diff --rev`, }, listHelp: cliHelp{ - example: `cscli scenarios list + example: `# List enabled (installed) scenarios. +cscli scenarios list + +# List all available scenarios (installed or not). cscli scenarios list -a -cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/http-probing -List only enabled scenarios unless "-a" or names are specified.`, +# List specific scenarios (installed or not). +cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/http-probing`, }, } } diff --git a/cmd/crowdsec-cli/cliitem/inspect.go b/cmd/crowdsec-cli/cliitem/inspect.go deleted file mode 100644 index 9939de1810e..00000000000 --- a/cmd/crowdsec-cli/cliitem/inspect.go +++ /dev/null @@ -1,57 +0,0 @@ -package cliitem - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func inspectItem(hub *cwhub.Hub, item *cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { - // This is dirty... - // We want to show current dependencies (from content), not latest (from index). - // The item is modifed but after this function the whole hub should be thrown away. 
- // A cleaner way would be to copy the struct first. - item.Dependencies = item.CurrentDependencies() - - switch output { - case "human", "raw": - enc := yaml.NewEncoder(os.Stdout) - enc.SetIndent(2) - - if err := enc.Encode(item); err != nil { - return fmt.Errorf("unable to encode item: %w", err) - } - case "json": - b, err := json.MarshalIndent(*item, "", " ") - if err != nil { - return fmt.Errorf("unable to serialize item: %w", err) - } - - fmt.Print(string(b)) - } - - if output != "human" { - return nil - } - - if item.State.Tainted { - fmt.Println() - fmt.Printf(`This item is tainted. Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) - fmt.Println() - } - - if wantMetrics { - fmt.Printf("\nCurrent metrics: \n") - - if err := showMetrics(prometheusURL, hub, item, wantColor); err != nil { - return err - } - } - - return nil -} diff --git a/cmd/crowdsec-cli/cliitem/item.go b/cmd/crowdsec-cli/cliitem/item.go index 637bd3023cf..3dcc0665a89 100644 --- a/cmd/crowdsec-cli/cliitem/item.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -2,25 +2,17 @@ package cliitem import ( "cmp" - "context" - "errors" "fmt" - "os" "strings" "github.com/fatih/color" - "github.com/hexops/gotextdiff" - "github.com/hexops/gotextdiff/myers" - "github.com/hexops/gotextdiff/span" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/hubops" ) type cliHelp struct { @@ -68,400 +60,6 @@ func (cli cliItem) NewCommand() *cobra.Command { return cmd } -func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, 
log.StandardLogger()) - if err != nil { - return err - } - - plan := hubops.NewActionPlan(hub) - - contentProvider := require.HubDownloader(ctx, cfg) - - for _, name := range args { - item := hub.GetItem(cli.name, name) - if item == nil { - msg := suggestNearestMessage(hub, cli.name, name) - if !ignoreError { - return errors.New(msg) - } - - log.Error(msg) - - continue - } - - if err = plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { - return err - } - - if !downloadOnly { - if err = plan.AddCommand(hubops.NewEnableCommand(item, force)); err != nil { - return err - } - } - } - - verbose := (cfg.Cscli.Output == "raw") - - if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { - if !ignoreError { - return err - } - - log.Error(err) - } - - if plan.ReloadNeeded { - fmt.Println("\n" + reload.Message) - } - - return nil -} - -func (cli cliItem) newInstallCmd() *cobra.Command { - var ( - yes bool - dryRun bool - downloadOnly bool - force bool - ignoreError bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.installHelp.use, "install [item]..."), - Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), - Long: cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), - Example: cli.installHelp.example, - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compAllItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), args, yes, dryRun, downloadOnly, force, ignoreError) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") - flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") - flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download 
packages, don't enable") - flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") - flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) - cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") - - return cmd -} - -// return the names of the installed parents of an item, used to check if we can remove it -func installedParentNames(item *cwhub.Item) []string { - ret := make([]string, 0) - - for _, parent := range item.Ancestors() { - if parent.State.Installed { - ret = append(ret, parent.Name) - } - } - - return ret -} - -func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force bool, all bool) (*hubops.ActionPlan, error) { - plan := hubops.NewActionPlan(hub) - - if all { - itemGetter := hub.GetInstalledByType - if purge { - itemGetter = hub.GetItemsByType - } - - for _, item := range itemGetter(cli.name, true) { - if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { - return nil, err - } - - if purge { - if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { - return nil, err - } - } - } - - return plan, nil - } - - if len(args) == 0 { - return nil, fmt.Errorf("specify at least one %s to remove or '--all'", cli.singular) - } - - for _, itemName := range args { - item := hub.GetItem(cli.name, itemName) - if item == nil { - return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) - } - - parents := installedParentNames(item) - - if !force && len(parents) > 0 { - log.Warningf("%s belongs to collections: %s", item.Name, parents) - log.Warningf("Run 'sudo cscli %s remove %s --force' if you want to force remove this %s", item.Type, item.Name, cli.singular) - - continue - } - - if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { - return nil, err - } - - if purge { - if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { - return nil, err - } - } - } - - return plan, nil 
-} - -func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun bool, purge bool, force bool, all bool) error { - cfg := cli.cfg() - - hub, err := require.Hub(cli.cfg(), log.StandardLogger()) - if err != nil { - return err - } - - plan, err := cli.removePlan(hub, args, purge, force, all) - if err != nil { - return err - } - - verbose := (cfg.Cscli.Output == "raw") - - if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { - return err - } - - if plan.ReloadNeeded { - fmt.Println("\n" + reload.Message) - } - - return nil -} - -func (cli cliItem) newRemoveCmd() *cobra.Command { - var ( - yes bool - dryRun bool - purge bool - force bool - all bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.removeHelp.use, "remove [item]..."), - Short: cmp.Or(cli.removeHelp.short, "Remove given "+cli.oneOrMore), - Long: cmp.Or(cli.removeHelp.long, "Remove one or more "+cli.name), - Example: cli.removeHelp.example, - Aliases: []string{"delete"}, - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 && all { - return errors.New("can't specify items and '--all' at the same time") - } - - return cli.remove(cmd.Context(), args, yes, dryRun, purge, force, all) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") - flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") - flags.BoolVar(&purge, "purge", false, "Delete source file too") - flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") - flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) - cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") - - return cmd -} - -func (cli cliItem) upgradePlan(hub *cwhub.Hub, contentProvider 
cwhub.ContentProvider, args []string, force bool, all bool) (*hubops.ActionPlan, error) { - plan := hubops.NewActionPlan(hub) - - if all { - for _, item := range hub.GetInstalledByType(cli.name, true) { - if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { - return nil, err - } - } - - return plan, nil - } - - if len(args) == 0 { - return nil, fmt.Errorf("specify at least one %s to upgrade or '--all'", cli.singular) - } - - for _, itemName := range args { - item := hub.GetItem(cli.name, itemName) - if item == nil { - return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) - } - - if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { - return nil, err - } - } - - return plan, nil -} - -func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun bool, force bool, all bool) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, log.StandardLogger()) - if err != nil { - return err - } - - contentProvider := require.HubDownloader(ctx, cfg) - - plan, err := cli.upgradePlan(hub, contentProvider, args, force, all) - if err != nil { - return err - } - - verbose := (cfg.Cscli.Output == "raw") - - if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { - return err - } - - if plan.ReloadNeeded { - fmt.Println("\n" + reload.Message) - } - - return nil -} - -func (cli cliItem) newUpgradeCmd() *cobra.Command { - var ( - yes bool - dryRun bool - all bool - force bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.upgradeHelp.use, "upgrade [item]..."), - Short: cmp.Or(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), - Long: cmp.Or(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), - Example: cli.upgradeHelp.example, - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, 
args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.upgrade(cmd.Context(), args, yes, dryRun, force, all) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") - flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") - flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) - flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") - cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") - - return cmd -} - -func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { - cfg := cli.cfg() - - if rev && !diff { - return errors.New("--rev can only be used with --diff") - } - - if url != "" { - cfg.Cscli.PrometheusUrl = url - } - - var contentProvider cwhub.ContentProvider - - if diff { - contentProvider = require.HubDownloader(ctx, cfg) - } - - hub, err := require.Hub(cfg, log.StandardLogger()) - if err != nil { - return err - } - - for _, name := range args { - item := hub.GetItem(cli.name, name) - if item == nil { - return fmt.Errorf("can't find '%s' in %s", name, cli.name) - } - - if diff { - fmt.Println(cli.whyTainted(ctx, hub, contentProvider, item, rev)) - - continue - } - - if err = inspectItem(hub, item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { - return err - } - - if cli.inspectDetail != nil { - if err = cli.inspectDetail(item); err != nil { - return err - } - } - } - - return nil -} - -func (cli cliItem) newInspectCmd() *cobra.Command { - var ( - url string - diff bool - rev bool - noMetrics bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.inspectHelp.use, "inspect [item]..."), - Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), - Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), - Example: 
cli.inspectHelp.example, - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.inspect(cmd.Context(), args, url, diff, rev, noMetrics) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&url, "url", "u", "", "Prometheus url") - flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)") - flags.BoolVar(&rev, "rev", false, "Reverse diff output") - flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)") - - return cmd -} - func (cli cliItem) list(args []string, all bool) error { cfg := cli.cfg() @@ -500,91 +98,23 @@ func (cli cliItem) newListCmd() *cobra.Command { return cmd } -// return the diff between the installed version and the latest version -func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { - if !item.State.Installed { - return "", fmt.Errorf("'%s' is not installed", item.FQName()) - } - - dest, err := os.CreateTemp("", "cscli-diff-*") +func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil) if err != nil { - return "", fmt.Errorf("while creating temporary file: %w", err) + return nil, cobra.ShellCompDirectiveDefault } - defer os.Remove(dest.Name()) - _, remoteURL, err := item.FetchContentTo(ctx, contentProvider, dest.Name()) - if err != nil { - return "", err - } + items := hub.GetInstalledByType(itemType, true) - latestContent, err := os.ReadFile(dest.Name()) - if err != nil { - return "", fmt.Errorf("while reading %s: %w", dest.Name(), err) - } - - localContent, err := os.ReadFile(item.State.LocalPath) - if err != nil { - return "", 
fmt.Errorf("while reading %s: %w", item.State.LocalPath, err) - } - - file1 := item.State.LocalPath - file2 := remoteURL - content1 := string(localContent) - content2 := string(latestContent) - - if reverse { - file1, file2 = file2, file1 - content1, content2 = content2, content1 - } - - edits := myers.ComputeEdits(span.URIFromPath(file1), content1, content2) - diff := gotextdiff.ToUnified(file1, file2, content1, edits) - - return fmt.Sprintf("%s", diff), nil -} - -func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, item *cwhub.Item, reverse bool) string { - if !item.State.Installed { - return fmt.Sprintf("# %s is not installed", item.FQName()) - } + comp := make([]string, 0) - if !item.State.Tainted { - return fmt.Sprintf("# %s is not tainted", item.FQName()) - } - - if len(item.State.TaintedBy) == 0 { - return fmt.Sprintf("# %s is tainted but we don't know why. please report this as a bug", item.FQName()) - } - - ret := []string{ - fmt.Sprintf("# Let's see why %s is tainted.", item.FQName()), - } - - for _, fqsub := range item.State.TaintedBy { - ret = append(ret, fmt.Sprintf("\n-> %s\n", fqsub)) - - sub, err := hub.GetItemFQ(fqsub) - if err != nil { - ret = append(ret, err.Error()) - } - - diff, err := cli.itemDiff(ctx, sub, contentProvider, reverse) - if err != nil { - ret = append(ret, err.Error()) - } - - if diff != "" { - ret = append(ret, diff) - } else if len(sub.State.TaintedBy) > 0 { - taintList := strings.Join(sub.State.TaintedBy, ", ") - if sub.FQName() == taintList { - // hack: avoid message "item is tainted by itself" - continue - } - - ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList)) + for _, item := range items { + if strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) } } - return strings.Join(ret, "\n") + cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) + + return comp, cobra.ShellCompDirectiveNoFileComp } diff --git 
a/cmd/crowdsec-cli/cliitem/parser.go b/cmd/crowdsec-cli/cliitem/parser.go deleted file mode 100644 index bc1d96bdaf0..00000000000 --- a/cmd/crowdsec-cli/cliitem/parser.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewParser(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.PARSERS, - singular: "parser", - oneOrMore: "parser(s)", - help: cliHelp{ - example: `cscli parsers list -a -cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers inspect crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs -`, - }, - installHelp: cliHelp{ - example: `cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - removeHelp: cliHelp{ - example: `cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - upgradeHelp: cliHelp{ - example: `cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - inspectHelp: cliHelp{ - example: `cscli parsers inspect crowdsecurity/httpd-logs crowdsecurity/sshd-logs`, - }, - listHelp: cliHelp{ - example: `cscli parsers list -cscli parsers list -a -cscli parsers list crowdsecurity/caddy-logs crowdsecurity/sshd-logs - -List only enabled parsers unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/postoverflow.go b/cmd/crowdsec-cli/cliitem/postoverflow.go deleted file mode 100644 index ea53aef327d..00000000000 --- a/cmd/crowdsec-cli/cliitem/postoverflow.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewPostOverflow(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.POSTOVERFLOWS, - singular: "postoverflow", - oneOrMore: "postoverflow(s)", - help: cliHelp{ - example: `cscli postoverflows list -a 
-cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns -`, - }, - installHelp: cliHelp{ - example: `cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - removeHelp: cliHelp{ - example: `cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - upgradeHelp: cliHelp{ - example: `cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - inspectHelp: cliHelp{ - example: `cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - listHelp: cliHelp{ - example: `cscli postoverflows list -cscli postoverflows list -a -cscli postoverflows list crowdsecurity/cdn-whitelist crowdsecurity/rdns - -List only enabled postoverflows unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/suggest.go b/cmd/crowdsec-cli/cliitem/suggest.go deleted file mode 100644 index b0f19b6993c..00000000000 --- a/cmd/crowdsec-cli/cliitem/suggest.go +++ /dev/null @@ -1,77 +0,0 @@ -package cliitem - -import ( - "fmt" - "slices" - "strings" - - "github.com/agext/levenshtein" - "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -// suggestNearestMessage returns a message with the most similar item name, if one is found -func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) string { - const maxDistance = 7 - - score := 100 - nearest := "" - - for _, item := range hub.GetItemsByType(itemType, false) { - d := levenshtein.Distance(itemName, item.Name, nil) - if d < score { - score = d - nearest = item.Name - } - } - - msg := fmt.Sprintf("can't find '%s' in %s", itemName, itemType) - - if score < maxDistance { - msg 
+= fmt.Sprintf(", did you mean '%s'?", nearest) - } - - return msg -} - -func compAllItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(cfg(), nil) - if err != nil { - return nil, cobra.ShellCompDirectiveDefault - } - - comp := make([]string, 0) - - for _, item := range hub.GetItemsByType(itemType, false) { - if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { - comp = append(comp, item.Name) - } - } - - cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) - - return comp, cobra.ShellCompDirectiveNoFileComp -} - -func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(cfg(), nil) - if err != nil { - return nil, cobra.ShellCompDirectiveDefault - } - - items := hub.GetInstalledByType(itemType, true) - - comp := make([]string, 0) - - for _, item := range items { - if strings.Contains(item.Name, toComplete) { - comp = append(comp, item.Name) - } - } - - cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) - - return comp, cobra.ShellCompDirectiveNoFileComp -} From 9dfa74bb52d7905c5b711ff9bebf8d077bc391ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 10:24:58 +0100 Subject: [PATCH 382/581] build(deps): bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 (#3380) Bumps [github.com/golang-jwt/jwt/v4](https://github.com/golang-jwt/jwt) from 4.5.0 to 4.5.1. - [Release notes](https://github.com/golang-jwt/jwt/releases) - [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md) - [Commits](https://github.com/golang-jwt/jwt/compare/v4.5.0...v4.5.1) --- updated-dependencies: - dependency-name: github.com/golang-jwt/jwt/v4 dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index aa723b38409..20f8de63218 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/goccy/go-yaml v1.11.0 github.com/gofrs/uuid v4.0.0+incompatible github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang-jwt/jwt/v4 v4.5.1 github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 diff --git a/go.sum b/go.sum index e7f181d7d24..950f901839c 100644 --- a/go.sum +++ b/go.sum @@ -301,8 +301,8 @@ github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= From aebe9724f8053aa8f336f479b31b030bef5d171f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 11:09:13 +0100 Subject: [PATCH 383/581] build(deps): bump golang.org/x/crypto from 0.26.0 to 0.31.0 (#3381) Bumps 
[golang.org/x/crypto](https://github.com/golang/crypto) from 0.26.0 to 0.31.0. - [Commits](https://github.com/golang/crypto/compare/v0.26.0...v0.31.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 20f8de63218..d002aa43d9b 100644 --- a/go.mod +++ b/go.mod @@ -105,12 +105,12 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect - golang.org/x/crypto v0.26.0 + golang.org/x/crypto v0.31.0 golang.org/x/mod v0.20.0 golang.org/x/net v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 - golang.org/x/text v0.17.0 + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 + golang.org/x/text v0.21.0 golang.org/x/time v0.6.0 // indirect google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.34.2 @@ -218,7 +218,7 @@ require ( go.opentelemetry.io/otel/metric v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/term v0.27.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect diff --git a/go.sum b/go.sum index 950f901839c..63b6394ef1c 100644 --- a/go.sum +++ b/go.sum @@ -794,8 +794,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod 
h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -839,8 +839,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -876,8 +876,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -885,8 +885,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -899,8 +899,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod 
h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 866b0ad60c97c515c069991d5e31a320a047525e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 7 Jan 2025 16:39:32 +0100 Subject: [PATCH 384/581] Hubops tests (#3393) --- cmd/crowdsec-cli/require/require.go | 3 +- pkg/cwhub/cwhub_test.go | 18 +- pkg/cwhub/doc.go | 1 - pkg/cwhub/download.go | 6 +- pkg/cwhub/download_test.go | 150 ++++++++++++++-- pkg/cwhub/fetch.go | 8 +- pkg/cwhub/hub.go | 18 +- pkg/cwhub/hub_test.go | 260 +++++++++++++++++++++++----- pkg/cwhub/item.go | 106 +----------- pkg/cwhub/item_test.go | 25 +-- pkg/cwhub/itemupgrade_test.go | 3 - pkg/cwhub/state.go | 61 +++++++ pkg/cwhub/state_test.go | 76 ++++++++ pkg/cwhub/sync.go | 11 +- pkg/hubops/doc.go | 45 +++++ test/bats/20_hub.bats | 4 +- test/bats/20_hub_items.bats | 4 +- test/bats/hub-index.bats | 6 +- 18 files changed, 580 insertions(+), 225 deletions(-) create mode 100644 pkg/cwhub/state.go create mode 100644 pkg/cwhub/state_test.go create mode 100644 pkg/hubops/doc.go diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index a44e76ae47d..dd98cd092cb 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -89,7 +89,6 @@ func HubDownloader(ctx context.Context, c *csconfig.Config) *cwhub.Downloader { remote := &cwhub.Downloader{ Branch: branch, URLTemplate: urlTemplate, - 
IndexPath: ".index.json", } return remote @@ -115,7 +114,7 @@ func Hub(c *csconfig.Config, logger *logrus.Logger) (*cwhub.Hub, error) { } if err := hub.Load(); err != nil { - return nil, fmt.Errorf("failed to read hub index: %w. Run 'sudo cscli hub update' to download the index again", err) + return nil, err } return hub, nil diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 94a1d6ef6fd..befd279ff65 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -29,10 +29,9 @@ const mockURLTemplate = "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s" var responseByPath map[string]string -// testHub initializes a temporary hub with an empty json file, optionally updating it. -func testHub(t *testing.T, update bool) *Hub { - tmpDir, err := os.MkdirTemp("", "testhub") - require.NoError(t, err) +// testHubOld initializes a temporary hub with an empty json file, optionally updating it. +func testHubOld(t *testing.T, update bool) *Hub { + tmpDir := t.TempDir() local := &csconfig.LocalHubCfg{ HubDir: filepath.Join(tmpDir, "crowdsec", "hub"), @@ -41,7 +40,7 @@ func testHub(t *testing.T, update bool) *Hub { InstallDataDir: filepath.Join(tmpDir, "installed-data"), } - err = os.MkdirAll(local.HubDir, 0o700) + err := os.MkdirAll(local.HubDir, 0o700) require.NoError(t, err) err = os.MkdirAll(local.InstallDir, 0o700) @@ -53,10 +52,6 @@ func testHub(t *testing.T, update bool) *Hub { err = os.WriteFile(local.HubIndexFile, []byte("{}"), 0o644) require.NoError(t, err) - t.Cleanup(func() { - os.RemoveAll(tmpDir) - }) - hub, err := NewHub(local, log.StandardLogger()) require.NoError(t, err) @@ -64,11 +59,10 @@ func testHub(t *testing.T, update bool) *Hub { indexProvider := &Downloader{ Branch: "master", URLTemplate: mockURLTemplate, - IndexPath: ".index.json", } ctx := context.Background() - err := hub.Update(ctx, indexProvider, false) + err = hub.Update(ctx, indexProvider, false) require.NoError(t, err) } @@ -92,7 +86,7 @@ func envSetup(t *testing.T) *Hub { 
// Mock the http client HubClient.Transport = newMockTransport() - hub := testHub(t, true) + hub := testHubOld(t, true) return hub } diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index b85d7634da4..fb7209b77ae 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -90,7 +90,6 @@ // indexProvider := cwhub.Downloader{ // URLTemplate: "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s", // Branch: "master", -// IndexPath: ".index.json", // } // // The URLTemplate is a string that will be used to build the URL of the remote hub. It must contain two diff --git a/pkg/cwhub/download.go b/pkg/cwhub/download.go index 48cb2382668..fa92e9960de 100644 --- a/pkg/cwhub/download.go +++ b/pkg/cwhub/download.go @@ -12,11 +12,13 @@ import ( "github.com/crowdsecurity/go-cs-lib/downloader" ) +// no need to import the lib package to use this +type NotFoundError = downloader.NotFoundError + // Downloader is used to retrieve index and items from a remote hub, with cache control. type Downloader struct { Branch string URLTemplate string - IndexPath string } // IndexProvider retrieves and writes .index.json @@ -61,7 +63,7 @@ func addURLParam(rawURL string, param string, value string) (string, error) { // It uses a temporary file to avoid partial downloads, and won't overwrite the original // if it has not changed. 
func (d *Downloader) FetchIndex(ctx context.Context, destPath string, withContent bool, logger *logrus.Logger) (bool, error) { - url, err := d.urlTo(d.IndexPath) + url, err := d.urlTo(".index.json") if err != nil { return false, fmt.Errorf("failed to build hub index request: %w", err) } diff --git a/pkg/cwhub/download_test.go b/pkg/cwhub/download_test.go index fc0b257a284..7b0b99c28d8 100644 --- a/pkg/cwhub/download_test.go +++ b/pkg/cwhub/download_test.go @@ -10,43 +10,173 @@ import ( "testing" "github.com/sirupsen/logrus" + logtest "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/cstest" ) func TestFetchIndex(t *testing.T) { ctx := context.Background() mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + if r.URL.Query().Get("with_content") == "true" { - w.WriteHeader(http.StatusOK) _, err := w.Write([]byte(`Hi I'm an index with content`)) assert.NoError(t, err) } else { - w.WriteHeader(http.StatusOK) - _, err := w.Write([]byte(`Hi I'm a regular index`)) + _, err := w.Write([]byte(`Hi I'm a minified index`)) assert.NoError(t, err) } })) defer mockServer.Close() + discard := logrus.New() + discard.Out = io.Discard + downloader := &Downloader{ - Branch: "main", URLTemplate: mockServer.URL + "/%s/%s", - IndexPath: "index.txt", } - logger := logrus.New() - logger.Out = io.Discard - - destPath := filepath.Join(t.TempDir(), "index.txt") + destPath := filepath.Join(t.TempDir(), "index-here") withContent := true - downloaded, err := downloader.FetchIndex(ctx, destPath, withContent, logger) + var notFoundError NotFoundError + + // bad branch + + downloader.Branch = "dev" + + downloaded, err := downloader.FetchIndex(ctx, destPath, withContent, discard) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // ok + + 
downloader.Branch = "main" + + downloaded, err = downloader.FetchIndex(ctx, destPath, withContent, discard) require.NoError(t, err) assert.True(t, downloaded) content, err := os.ReadFile(destPath) require.NoError(t, err) assert.Equal(t, "Hi I'm an index with content", string(content)) + + // not "downloading" a second time + // since we don't have cache control in the mockServer, + // the file is downloaded to a temporary location but not replaced + + downloaded, err = downloader.FetchIndex(ctx, destPath, withContent, discard) + require.NoError(t, err) + assert.False(t, downloaded) + + // download without item content + + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err = os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, "Hi I'm a minified index", string(content)) + + // bad domain name + + downloader.URLTemplate = "x/%s/%s" + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + cstest.AssertErrorContains(t, err, `Get "x/main/.index.json": unsupported protocol scheme ""`) + assert.False(t, downloaded) + + downloader.URLTemplate = "http://x/%s/%s" + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + // can be no such host, server misbehaving, etc + cstest.AssertErrorContains(t, err, `Get "http://x/main/.index.json": dial tcp: lookup x`) + assert.False(t, downloaded) +} + +func TestFetchContent(t *testing.T) { + ctx := context.Background() + + wantContent := "{'description':'linux'}" + wantHash := "e557cb9e1cb051bc3b6a695e4396c5f8e0eff4b7b0d2cc09f7684e1d52ea2224" + remotePath := "collections/crowdsecurity/linux.yaml" + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/"+remotePath { + w.WriteHeader(http.StatusNotFound) + } + + _, err := w.Write([]byte(wantContent)) + assert.NoError(t, err) + })) + defer mockServer.Close() + 
+ wantURL := mockServer.URL + "/main/collections/crowdsecurity/linux.yaml" + + // bad branch + + hubDownloader := &Downloader{ + URLTemplate: mockServer.URL + "/%s/%s", + } + + discard := logrus.New() + discard.Out = io.Discard + + destPath := filepath.Join(t.TempDir(), "content-here") + + var notFoundError NotFoundError + + // bad branch + + hubDownloader.Branch = "dev" + + downloaded, url, err := hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Empty(t, url) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // bad path + + hubDownloader.Branch = "main" + + downloaded, url, err = hubDownloader.FetchContent(ctx, "collections/linux.yaml", destPath, wantHash, discard) + assert.Empty(t, url) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // hash mismatch: the file is not reported as downloaded because it's not replaced + + capture, hook := logtest.NewNullLogger() + capture.SetLevel(logrus.WarnLevel) + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, "1234", capture) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.False(t, downloaded) + cstest.RequireLogContains(t, hook, "hash mismatch: expected 1234, got "+wantHash) + + // ok + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err := os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, wantContent, string(content)) + + // not "downloading" a second time + // since we don't have cache control in the mockServer, + // the file is downloaded to a temporary location but not replaced + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.False(t, downloaded) } diff --git a/pkg/cwhub/fetch.go b/pkg/cwhub/fetch.go index 
dd1a520d7e2..e8dacad4a6d 100644 --- a/pkg/cwhub/fetch.go +++ b/pkg/cwhub/fetch.go @@ -11,8 +11,8 @@ import ( ) // writeEmbeddedContentTo writes the embedded content to the specified path and checks the hash. -// If the content is base64 encoded, it will be decoded before writing. Check for item.Content -// before calling this method. +// If the content is base64 encoded, it will be decoded before writing. Call this method only +// if item.Content if not empty. func (i *Item) writeEmbeddedContentTo(destPath, wantHash string) error { if i.Content == "" { return fmt.Errorf("no embedded content for %s", i.Name) @@ -48,7 +48,9 @@ func (i *Item) writeEmbeddedContentTo(destPath, wantHash string) error { } // FetchContentTo writes the last version of the item's YAML file to the specified path. -// Returns whether the file was downloaded, and the remote url for feedback purposes. +// If the file is embedded in the index file, it will be written directly without downloads. +// Returns whether the file was downloaded (to inform if the security engine needs reloading) +// and the remote url for feedback purposes. func (i *Item) FetchContentTo(ctx context.Context, contentProvider ContentProvider, destPath string) (bool, string, error) { wantHash := i.latestHash() if wantHash == "" { diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 3722ceaafcd..998a4032359 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -36,7 +36,7 @@ func (h *Hub) GetDataDir() string { // and check for unmanaged items. 
func NewHub(local *csconfig.LocalHubCfg, logger *logrus.Logger) (*Hub, error) { if local == nil { - return nil, errors.New("no hub configuration found") + return nil, errors.New("no hub configuration provided") } if logger == nil { @@ -58,14 +58,10 @@ func (h *Hub) Load() error { h.logger.Debugf("loading hub idx %s", h.local.HubIndexFile) if err := h.parseIndex(); err != nil { - return err - } - - if err := h.localSync(); err != nil { - return fmt.Errorf("failed to sync hub items: %w", err) + return fmt.Errorf("invalid hub index: %w. Run 'sudo cscli hub update' to download the index again", err) } - return nil + return h.localSync() } // parseIndex takes the content of an index file and fills the map of associated parsers/scenarios/collections. @@ -153,12 +149,14 @@ func (h *Hub) ItemStats() []string { return ret } +var ErrUpdateAfterSync = errors.New("cannot update hub index after load/sync") + // Update downloads the latest version of the index and writes it to disk if it changed. -// It cannot be called after Load() unless the hub is completely empty. +// It cannot be called after Load() unless the index was completely empty. func (h *Hub) Update(ctx context.Context, indexProvider IndexProvider, withContent bool) error { - if len(h.pathIndex) > 0 { + if len(h.items) > 0 { // if this happens, it's a bug. 
- return errors.New("cannot update hub after items have been loaded") + return ErrUpdateAfterSync } downloaded, err := indexProvider.FetchIndex(ctx, h.local.HubIndexFile, withContent, h.logger) diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index c2b949b7cdf..461b59de78b 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -2,91 +2,261 @@ package cwhub import ( "context" - "fmt" + "net/http" + "net/http/httptest" "os" + "path/filepath" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -func TestInitHubUpdate(t *testing.T) { - hub := envSetup(t) +// testHubCfg creates an empty hub structure in a temporary directory +// and returns its configuration object. +// +// This allow the reuse of the hub content for multiple instances +// of the Hub object. +func testHubCfg(t *testing.T) *csconfig.LocalHubCfg { + tempDir := t.TempDir() + + local := csconfig.LocalHubCfg{ + HubDir: filepath.Join(tempDir, "crowdsec", "hub"), + HubIndexFile: filepath.Join(tempDir, "crowdsec", "hub", ".index.json"), + InstallDir: filepath.Join(tempDir, "crowdsec"), + InstallDataDir: filepath.Join(tempDir, "installed-data"), + } + + err := os.MkdirAll(local.HubDir, 0o755) + require.NoError(t, err) - _, err := NewHub(hub.local, nil) + err = os.MkdirAll(local.InstallDir, 0o755) require.NoError(t, err) - ctx := context.Background() + err = os.MkdirAll(local.InstallDataDir, 0o755) + require.NoError(t, err) + + return &local +} - indexProvider := &Downloader{ - URLTemplate: mockURLTemplate, - Branch: "master", - IndexPath: ".index.json", +func testHub(t *testing.T, localCfg *csconfig.LocalHubCfg, indexJson string) (*Hub, error) { + if localCfg == nil { + localCfg = testHubCfg(t) } - err = hub.Update(ctx, indexProvider, false) + err := os.WriteFile(localCfg.HubIndexFile, []byte(indexJson), 0o644) require.NoError(t, err) + hub, err := 
NewHub(localCfg, nil) + require.NoError(t, err) err = hub.Load() + + return hub, err +} + +func TestIndexEmpty(t *testing.T) { + // an empty hub is valid, and should not have warnings + hub, err := testHub(t, nil, "{}") require.NoError(t, err) + assert.Empty(t, hub.Warnings) } -func TestUpdateIndex(t *testing.T) { - // bad url template - fmt.Println("Test 'bad URL'") +func TestIndexJSON(t *testing.T) { + // but it can't be an empty string + hub, err := testHub(t, nil, "") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: unexpected end of JSON input") + assert.Empty(t, hub.Warnings) + + // it must be valid json + hub, err = testHub(t, nil, "def not json") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: invalid character 'd' looking for beginning of value. Run 'sudo cscli hub update' to download the index again") + assert.Empty(t, hub.Warnings) + + hub, err = testHub(t, nil, "{") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: unexpected end of JSON input") + assert.Empty(t, hub.Warnings) - tmpIndex, err := os.CreateTemp("", "index.json") + // and by json we mean an object + hub, err = testHub(t, nil, "[]") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: json: cannot unmarshal array into Go value of type cwhub.HubItems") + assert.Empty(t, hub.Warnings) +} + +func TestIndexUnknownItemType(t *testing.T) { + // Allow unknown fields in the top level object, likely new item types + hub, err := testHub(t, nil, `{"goodies": {}}`) require.NoError(t, err) + assert.Empty(t, hub.Warnings) +} - // close the file to avoid preventing the rename on windows - err = tmpIndex.Close() +func TestHubUpdate(t *testing.T) { + // update an empty hub with a index containing a parser. 
+ hub, err := testHub(t, nil, "{}") require.NoError(t, err) - t.Cleanup(func() { - os.Remove(tmpIndex.Name()) - }) + index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` - hub := envSetup(t) + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } - hub.local.HubIndexFile = tmpIndex.Name() + _, err = w.Write([]byte(index1)) + assert.NoError(t, err) + })) + defer mockServer.Close() ctx := context.Background() - indexProvider := &Downloader{ - URLTemplate: "x", - Branch: "", - IndexPath: "", + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", } - err = hub.Update(ctx, indexProvider, false) - cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") + err = hub.Update(ctx, downloader, true) + require.NoError(t, err) + + err = hub.Load() + require.NoError(t, err) + + item := hub.GetItem("parsers", "author/pars1") + assert.NotEmpty(t, item) + assert.Equal(t, "author/pars1", item.Name) +} + +func TestHubUpdateInvalidTemplate(t *testing.T) { + hub, err := testHub(t, nil, "{}") + require.NoError(t, err) - // bad domain - fmt.Println("Test 'bad domain'") + ctx := context.Background() - indexProvider = &Downloader{ - URLTemplate: "https://baddomain/crowdsecurity/%s/%s", - Branch: "master", - IndexPath: ".index.json", + downloader := &Downloader{ + Branch: "main", + URLTemplate: "x", } - err = hub.Update(ctx, indexProvider, false) + err = hub.Update(ctx, downloader, true) + cstest.RequireErrorMessage(t, err, "failed to build hub index request: invalid URL template 'x'") +} + +func TestHubUpdateCannotWrite(t *testing.T) { + hub, err := testHub(t, nil, 
"{}") require.NoError(t, err) - // XXX: this is not failing - // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") - // bad target path - fmt.Println("Test 'bad target path'") + index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + + _, err = w.Write([]byte(index1)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + ctx := context.Background() - indexProvider = &Downloader{ - URLTemplate: mockURLTemplate, - Branch: "master", - IndexPath: ".index.json", + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", } - hub.local.HubIndexFile = "/does/not/exist/index.json" + hub.local.HubIndexFile = "/proc/foo/bar/baz/.index.json" + + err = hub.Update(ctx, downloader, true) + cstest.RequireErrorContains(t, err, "failed to create temporary download file for /proc/foo/bar/baz/.index.json") +} + +func TestHubUpdateAfterLoad(t *testing.T) { + // Update() can't be called after Load() if the hub is not completely empty. 
+ index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + hub, err := testHub(t, nil, index1) + require.NoError(t, err) + + index2 := ` +{ + "parsers": { + "author/pars2": { + "path": "parsers/s01-parse/pars2.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + + _, err = w.Write([]byte(index2)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + ctx := context.Background() + + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", + } - err = hub.Update(ctx, indexProvider, false) - cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") + err = hub.Update(ctx, downloader, true) + require.ErrorIs(t, err, ErrUpdateAfterSync) } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 74b1cfa3ebe..38385d9399d 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -11,8 +11,6 @@ import ( "github.com/Masterminds/semver/v3" yaml "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/crowdsec/pkg/emoji" ) const ( @@ -46,62 +44,6 @@ type ItemVersion struct { Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` } -// ItemState is used to keep the local state (i.e. at runtime) of an item. -// This data is not stored in the index, but is displayed with "cscli ... inspect". 
-type ItemState struct { - LocalPath string `json:"local_path,omitempty" yaml:"local_path,omitempty"` - LocalVersion string `json:"local_version,omitempty" yaml:"local_version,omitempty"` - LocalHash string `json:"local_hash,omitempty" yaml:"local_hash,omitempty"` - Installed bool `json:"installed"` - Downloaded bool `json:"downloaded"` - UpToDate bool `json:"up_to_date"` - Tainted bool `json:"tainted"` - TaintedBy []string `json:"tainted_by,omitempty" yaml:"tainted_by,omitempty"` - BelongsToCollections []string `json:"belongs_to_collections,omitempty" yaml:"belongs_to_collections,omitempty"` -} - -// IsLocal returns true if the item has been create by a user (not downloaded from the hub). -func (s *ItemState) IsLocal() bool { - return s.Installed && !s.Downloaded -} - -// Text returns the status of the item as a string (eg. "enabled,update-available"). -func (s *ItemState) Text() string { - ret := "disabled" - - if s.Installed { - ret = "enabled" - } - - if s.IsLocal() { - ret += ",local" - } - - if s.Tainted { - ret += ",tainted" - } else if !s.UpToDate && !s.IsLocal() { - ret += ",update-available" - } - - return ret -} - -// Emoji returns the status of the item as an emoji (eg. emoji.Warning). 
-func (s *ItemState) Emoji() string { - switch { - case s.IsLocal(): - return emoji.House - case !s.Installed: - return emoji.Prohibited - case s.Tainted || (!s.UpToDate && !s.IsLocal()): - return emoji.Warning - case s.Installed: - return emoji.CheckMark - default: - return emoji.QuestionMark - } -} - type Dependencies struct { Parsers []string `json:"parsers,omitempty" yaml:"parsers,omitempty"` PostOverflows []string `json:"postoverflows,omitempty" yaml:"postoverflows,omitempty"` @@ -292,49 +234,11 @@ func (i *Item) CurrentDependencies() Dependencies { } func (i *Item) logMissingSubItems() { - if !i.HasSubItems() { - return - } - - for _, subName := range i.Parsers { - if i.hub.GetItem(PARSERS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, PARSERS, i.Name) - } - } - - for _, subName := range i.Scenarios { - if i.hub.GetItem(SCENARIOS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, SCENARIOS, i.Name) - } - } - - for _, subName := range i.PostOverflows { - if i.hub.GetItem(POSTOVERFLOWS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, POSTOVERFLOWS, i.Name) - } - } - - for _, subName := range i.Contexts { - if i.hub.GetItem(CONTEXTS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, CONTEXTS, i.Name) - } - } - - for _, subName := range i.AppsecConfigs { - if i.hub.GetItem(APPSEC_CONFIGS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, APPSEC_CONFIGS, i.Name) - } - } - - for _, subName := range i.AppsecRules { - if i.hub.GetItem(APPSEC_RULES, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, APPSEC_RULES, i.Name) - } - } - - for _, subName := range i.Collections { - if i.hub.GetItem(COLLECTIONS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, COLLECTIONS, i.Name) + for _, sub := range 
i.CurrentDependencies().byType() { + for _, subName := range sub.itemNames { + if i.hub.GetItem(sub.typeName, subName) == nil { + i.hub.logger.Errorf("can't find %s:%s, required by %s", sub.typeName, subName, i.Name) + } } } } diff --git a/pkg/cwhub/item_test.go b/pkg/cwhub/item_test.go index 703bbb5cb90..350861ff85e 100644 --- a/pkg/cwhub/item_test.go +++ b/pkg/cwhub/item_test.go @@ -6,39 +6,16 @@ import ( "github.com/stretchr/testify/require" ) -func TestItemStatus(t *testing.T) { +func TestItemStats(t *testing.T) { hub := envSetup(t) // get existing map x := hub.GetItemMap(COLLECTIONS) require.NotEmpty(t, x) - // Get item: good and bad - for k := range x { - item := hub.GetItem(COLLECTIONS, k) - require.NotNil(t, item) - - item.State.Installed = true - item.State.UpToDate = false - item.State.Tainted = false - item.State.Downloaded = true - - txt := item.State.Text() - require.Equal(t, "enabled,update-available", txt) - - item.State.Installed = true - item.State.UpToDate = false - item.State.Tainted = false - item.State.Downloaded = false - - txt = item.State.Text() - require.Equal(t, "enabled,local", txt) - } - stats := hub.ItemStats() require.Equal(t, []string{ "Loaded: 2 parsers, 1 scenarios, 3 collections", - "Unmanaged items: 3 local, 0 tainted", }, stats) } diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index da02837e972..3225d2f013b 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -41,7 +41,6 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub, err := NewHub(hub.local, remote, nil) @@ -101,7 +100,6 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub = getHubOrFail(t, hub.local, remote) @@ -173,7 +171,6 @@ func 
TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub = getHubOrFail(t, hub.local, remote) diff --git a/pkg/cwhub/state.go b/pkg/cwhub/state.go new file mode 100644 index 00000000000..518185aff1c --- /dev/null +++ b/pkg/cwhub/state.go @@ -0,0 +1,61 @@ +package cwhub + +import ( + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +// ItemState is used to keep the local state (i.e. at runtime) of an item. +// This data is not stored in the index, but is displayed with "cscli ... inspect". +type ItemState struct { + LocalPath string `json:"local_path,omitempty" yaml:"local_path,omitempty"` + LocalVersion string `json:"local_version,omitempty" yaml:"local_version,omitempty"` + LocalHash string `json:"local_hash,omitempty" yaml:"local_hash,omitempty"` + Installed bool `json:"installed"` + Downloaded bool `json:"downloaded"` + UpToDate bool `json:"up_to_date"` + Tainted bool `json:"tainted"` + TaintedBy []string `json:"tainted_by,omitempty" yaml:"tainted_by,omitempty"` + BelongsToCollections []string `json:"belongs_to_collections,omitempty" yaml:"belongs_to_collections,omitempty"` +} + +// IsLocal returns true if the item has been create by a user (not downloaded from the hub). +func (s *ItemState) IsLocal() bool { + return s.Installed && !s.Downloaded +} + +// Text returns the status of the item as a string (eg. "enabled,update-available"). +func (s *ItemState) Text() string { + ret := "disabled" + + if s.Installed { + ret = "enabled" + } + + if s.IsLocal() { + ret += ",local" + } + + if s.Tainted { + ret += ",tainted" + } else if !s.UpToDate && !s.IsLocal() { + ret += ",update-available" + } + + return ret +} + +// Emoji returns the status of the item as an emoji (eg. emoji.Warning). 
+func (s *ItemState) Emoji() string { + switch { + case s.IsLocal(): + return emoji.House + case !s.Installed: + return emoji.Prohibited + case s.Tainted || (!s.UpToDate && !s.IsLocal()): + return emoji.Warning + case s.Installed: + return emoji.CheckMark + default: + return emoji.QuestionMark + } +} diff --git a/pkg/cwhub/state_test.go b/pkg/cwhub/state_test.go new file mode 100644 index 00000000000..3ed3de16fcc --- /dev/null +++ b/pkg/cwhub/state_test.go @@ -0,0 +1,76 @@ +package cwhub + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +func TestItemStateText(t *testing.T) { + // Test the text representation of an item state. + type test struct { + state ItemState + want string + wantIcon string + } + + tests := []test{ + { + ItemState{ + Installed: true, + UpToDate: false, + Tainted: false, + Downloaded: true, + }, + "enabled,update-available", + emoji.Warning, + }, { + ItemState{ + Installed: true, + UpToDate: true, + Tainted: false, + Downloaded: true, + }, + "enabled", + emoji.CheckMark, + }, { + ItemState{ + Installed: true, + UpToDate: false, + Tainted: false, + Downloaded: false, + }, + "enabled,local", + emoji.House, + }, { + ItemState{ + Installed: false, + UpToDate: false, + Tainted: false, + Downloaded: true, + }, + "disabled,update-available", + emoji.Prohibited, + }, { + ItemState{ + Installed: true, + UpToDate: false, + Tainted: true, + Downloaded: true, + }, + "enabled,tainted", + emoji.Warning, + }, + } + + for idx, tc := range tests { + t.Run("Test "+strconv.Itoa(idx), func(t *testing.T) { + got := tc.state.Text() + assert.Equal(t, tc.want, got) + assert.Equal(t, tc.wantIcon, tc.state.Emoji()) + }) + } +} diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index d2b59df35d6..ee8e49f2bf0 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -108,6 +108,7 @@ func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo if len(subsHub) < 4 { return 
nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) } + stage = subsHub[1] fauthor = subsHub[2] fname = subsHub[3] @@ -167,6 +168,7 @@ func sortedVersions(raw []string) ([]string, error) { for idx, r := range raw { v, err := semver.NewVersion(r) if err != nil { + // TODO: should catch this during index parsing return nil, fmt.Errorf("%s: %w", r, err) } @@ -461,13 +463,12 @@ func removeDuplicates(sl []string) []string { // localSync updates the hub state with downloaded, installed and local items. func (h *Hub) localSync() error { - err := h.syncDir(h.local.InstallDir) - if err != nil { - return fmt.Errorf("failed to scan %s: %w", h.local.InstallDir, err) + if err := h.syncDir(h.local.InstallDir); err != nil { + return fmt.Errorf("failed to sync %s: %w", h.local.InstallDir, err) } - if err = h.syncDir(h.local.HubDir); err != nil { - return fmt.Errorf("failed to scan %s: %w", h.local.HubDir, err) + if err := h.syncDir(h.local.HubDir); err != nil { + return fmt.Errorf("failed to sync %s: %w", h.local.HubDir, err) } warnings := make([]string, 0) diff --git a/pkg/hubops/doc.go b/pkg/hubops/doc.go new file mode 100644 index 00000000000..b87a42653bc --- /dev/null +++ b/pkg/hubops/doc.go @@ -0,0 +1,45 @@ +/* +Package hubops is responsible for managing the local hub (items and data files) for CrowdSec. + +The index file itself (.index.json) is still managed by pkg/cwhub, which also provides the Hub +and Item structs. + +The hubops package is mostly used by cscli for the "cscli install/remove/upgrade ..." commands. + +It adopts a command-based pattern: a Plan contains a sequence of Commands. Both Plan and Command +have separate preparation and execution methods. + + - Command Interface: + The Command interface defines the contract for all operations that can be + performed on hub items. Each operation implements the Prepare and Run + methods, allowing for pre-execution setup and actual execution logic. 
+ + - ActionPlan: + ActionPlan serves as a container for a sequence of Commands. It manages the + addition of commands, handles dependencies between them, and orchestrates their + execution. ActionPlan also provides a mechanism for interactive confirmation and dry-run. + +To perform operations on hub items, create an ActionPlan and add the desired +Commands to it. Once all commands are added, execute the ActionPlan to perform +the operations in the correct order, handling dependencies and user confirmations. + +Example: + + hub := cwhub.NewHub(...) + plan := hubops.NewActionPlan(hub) + + downloadCmd := hubops.NewDownloadCommand(item, force) + if err := plan.AddCommand(downloadCmd); err != nil { + logrus.Fatalf("Failed to add download command: %v", err) + } + + enableCmd := hubops.NewEnableCommand(item, force) + if err := plan.AddCommand(enableCmd); err != nil { + logrus.Fatalf("Failed to add enable command: %v", err) + } + + if err := plan.Execute(ctx, confirm, dryRun, verbose); err != nil { + logrus.Fatalf("Failed to execute action plan: %v", err) + } +*/ +package hubops diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index 03723ecc82b..b03b58732fa 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -82,8 +82,8 @@ teardown() { new_hub=$(jq <"$INDEX_PATH" 'del(.parsers."crowdsecurity/smb-logs") | del (.scenarios."crowdsecurity/mysql-bf")') echo "$new_hub" >"$INDEX_PATH" rune -0 cscli hub list --error - assert_stderr --partial "can't find crowdsecurity/smb-logs in parsers, required by crowdsecurity/smb" - assert_stderr --partial "can't find crowdsecurity/mysql-bf in scenarios, required by crowdsecurity/mysql" + assert_stderr --partial "can't find parsers:crowdsecurity/smb-logs, required by crowdsecurity/smb" + assert_stderr --partial "can't find scenarios:crowdsecurity/mysql-bf, required by crowdsecurity/mysql" } @test "loading hub reports tainted items (subitem is tainted)" { diff --git a/test/bats/20_hub_items.bats 
b/test/bats/20_hub_items.bats index 2f1c952848b..8ebe505c6e1 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -80,8 +80,8 @@ teardown() { rune -0 cscli collections install crowdsecurity/sshd rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics - # XXX: we are on the verbose side here... - assert_stderr "Error: failed to read hub index: failed to sync hub items: failed to scan $CONFIG_DIR: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. Run 'sudo cscli hub update' to download the index again" + # XXX: this must be triggered during parse, not sync + assert_stderr "Error: failed to sync $CONFIG_DIR: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version" } @test "removing or purging an item already removed by hand" { diff --git a/test/bats/hub-index.bats b/test/bats/hub-index.bats index 76759991e4a..a609974d67a 100644 --- a/test/bats/hub-index.bats +++ b/test/bats/hub-index.bats @@ -32,7 +32,7 @@ teardown() { EOF rune -1 cscli hub list - assert_stderr --partial "failed to read hub index: parsers:author/pars1 has no index metadata." + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no index metadata." } @test "malformed index - no download path" { @@ -46,7 +46,7 @@ teardown() { EOF rune -1 cscli hub list - assert_stderr --partial "failed to read hub index: parsers:author/pars1 has no download path." + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no download path." } @test "malformed parser - no stage" { @@ -63,7 +63,7 @@ teardown() { EOF rune -1 cscli hub list -o raw - assert_stderr --partial "failed to read hub index: parsers:author/pars1 has no stage." + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no stage." 
} @test "malformed parser - short path" { From 34c0e6a4bd2094d77f023d6e5b8a8c55d93a2ce8 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 10 Jan 2025 14:57:05 +0100 Subject: [PATCH 385/581] expand env var when loading acquis (#3375) --- pkg/acquisition/acquisition.go | 12 +++++++++++- pkg/acquisition/acquisition_test.go | 15 +++++++++++++++ pkg/acquisition/test_files/env.yaml | 6 ++++++ 3 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 pkg/acquisition/test_files/env.yaml diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 291bc369c3e..06a4918592b 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -16,6 +16,7 @@ import ( tomb "gopkg.in/tomb.v2" "gopkg.in/yaml.v2" + "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" @@ -232,7 +233,16 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig return nil, err } - dec := yaml.NewDecoder(yamlFile) + defer yamlFile.Close() + + acquisContent, err := io.ReadAll(yamlFile) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", acquisFile, err) + } + + expandedAcquis := csstring.StrictExpand(string(acquisContent), os.LookupEnv) + + dec := yaml.NewDecoder(strings.NewReader(expandedAcquis)) dec.SetStrict(true) idx := -1 diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index cfe1e74c612..1ea8f11c22a 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -217,6 +217,7 @@ wowo: ajsajasjas func TestLoadAcquisitionFromFile(t *testing.T) { appendMockSource() + t.Setenv("TEST_ENV", "test_value2") tests := []struct { TestName string @@ -282,6 +283,13 @@ func TestLoadAcquisitionFromFile(t *testing.T) { }, ExpectedError: "while configuring datasource of type file from test_files/bad_filetype.yaml", }, + { + TestName: "from_env", + Config: 
csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/env.yaml"}, + }, + ExpectedLen: 1, + }, } for _, tc := range tests { t.Run(tc.TestName, func(t *testing.T) { @@ -293,6 +301,13 @@ func TestLoadAcquisitionFromFile(t *testing.T) { } assert.Len(t, dss, tc.ExpectedLen) + if tc.TestName == "from_env" { + mock := dss[0].Dump().(*MockSource) + assert.Equal(t, "test_value2", mock.Toto) + assert.Equal(t, "foobar", mock.Labels["test"]) + assert.Equal(t, "${NON_EXISTING}", mock.Labels["non_existing"]) + assert.Equal(t, log.InfoLevel, mock.logger.Logger.Level) + } }) } } diff --git a/pkg/acquisition/test_files/env.yaml b/pkg/acquisition/test_files/env.yaml new file mode 100644 index 00000000000..8abd4b16ca5 --- /dev/null +++ b/pkg/acquisition/test_files/env.yaml @@ -0,0 +1,6 @@ +labels: + test: foobar + non_existing: ${NON_EXISTING} +log_level: info +source: mock +toto: ${TEST_ENV} \ No newline at end of file From 303ce8e42ccb1c69b15d01ad871701c447b631b6 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 10 Jan 2025 16:00:24 +0100 Subject: [PATCH 386/581] gin: do not use gin context after returning response (#3398) --- pkg/apiserver/controllers/v1/decisions.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index ffefffc226b..6a316d8a2e4 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -394,8 +394,6 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en func (c *Controller) StreamDecision(gctx *gin.Context) { var err error - ctx := gctx.Request.Context() - streamStartTime := time.Now().UTC() bouncerInfo, err := getBouncerFromContext(gctx) @@ -426,7 +424,8 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { if err == nil { // Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions - if err := 
c.DBClient.UpdateBouncerLastPull(ctx, streamStartTime, bouncerInfo.ID); err != nil { + // Do not reuse the context provided by gin because we already have sent the response to the client, so there's a chance for it to already be canceled + if err := c.DBClient.UpdateBouncerLastPull(context.Background(), streamStartTime, bouncerInfo.ID); err != nil { log.Errorf("unable to update bouncer '%s' pull: %v", bouncerInfo.Name, err) } } From e9c1ed05d97bdbabb1fafbcb09a7a2388e63d9d9 Mon Sep 17 00:00:00 2001 From: Nathanael Demacon <7271496+quantumsheep@users.noreply.github.com> Date: Mon, 13 Jan 2025 09:17:18 +0100 Subject: [PATCH 387/581] feat(cscli): add env variable flag for dashboard setup (#3110) Signed-off-by: Nathanael DEMACON Co-authored-by: Nathanael DEMACON Co-authored-by: Laurence Jones --- cmd/crowdsec-cli/dashboard.go | 12 ++++++---- pkg/metabase/container.go | 44 ++++++++++++++++++----------------- pkg/metabase/metabase.go | 43 +++++++++++++++++----------------- 3 files changed, 52 insertions(+), 47 deletions(-) diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index a653fcb3a47..e138b8285c9 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -36,10 +36,11 @@ var ( metabaseConfigFile = "metabase.yaml" metabaseImage = "metabase/metabase:v0.46.6.1" /**/ - metabaseListenAddress = "127.0.0.1" - metabaseListenPort = "3000" - metabaseContainerID = "crowdsec-metabase" - crowdsecGroup = "crowdsec" + metabaseListenAddress = "127.0.0.1" + metabaseListenPort = "3000" + metabaseContainerID = "crowdsec-metabase" + metabaseContainerEnvironmentVariables []string + crowdsecGroup = "crowdsec" forceYes bool @@ -166,7 +167,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if err = cli.chownDatabase(dockerGroup.Gid); err != nil { return err } - mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, 
metabaseContainerID, metabaseImage) + mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, metabaseContainerID, metabaseImage, metabaseContainerEnvironmentVariables) if err != nil { return err } @@ -193,6 +194,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") // flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") flags.StringVar(&metabasePassword, "password", "", "metabase password") + flags.StringSliceVarP(&metabaseContainerEnvironmentVariables, "env", "e", nil, "Additional environment variables to pass to the metabase container") return cmd } diff --git a/pkg/metabase/container.go b/pkg/metabase/container.go index 73e4596fcde..9787e535e86 100644 --- a/pkg/metabase/container.go +++ b/pkg/metabase/container.go @@ -16,31 +16,33 @@ import ( ) type Container struct { - ListenAddr string - ListenPort string - SharedFolder string - Image string - Name string - ID string - CLI *client.Client - MBDBUri string - DockerGroupID string + ListenAddr string + ListenPort string + SharedFolder string + Image string + Name string + ID string + CLI *client.Client + MBDBUri string + DockerGroupID string + EnvironmentVariables []string } -func NewContainer(listenAddr string, listenPort string, sharedFolder string, containerName string, image string, mbDBURI string, dockerGroupID string) (*Container, error) { +func NewContainer(listenAddr string, listenPort string, sharedFolder string, containerName string, image string, mbDBURI string, dockerGroupID string, environmentVariables []string) (*Container, error) { cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { return nil, fmt.Errorf("failed to create docker client : %s", err) } return &Container{ - ListenAddr: listenAddr, - ListenPort: listenPort, - SharedFolder: 
sharedFolder, - Image: image, - Name: containerName, - CLI: cli, - MBDBUri: mbDBURI, - DockerGroupID: dockerGroupID, + ListenAddr: listenAddr, + ListenPort: listenPort, + SharedFolder: sharedFolder, + Image: image, + Name: containerName, + CLI: cli, + MBDBUri: mbDBURI, + DockerGroupID: dockerGroupID, + EnvironmentVariables: environmentVariables, }, nil } @@ -79,9 +81,9 @@ func (c *Container) Create() error { }, } - env := []string{ - fmt.Sprintf("MB_DB_FILE=%s/metabase.db", containerSharedFolder), - } + env := c.EnvironmentVariables + + env = append(env, fmt.Sprintf("MB_DB_FILE=%s/metabase.db", containerSharedFolder)) if c.MBDBUri != "" { env = append(env, c.MBDBUri) } diff --git a/pkg/metabase/metabase.go b/pkg/metabase/metabase.go index 324a05666a1..0ebb219d211 100644 --- a/pkg/metabase/metabase.go +++ b/pkg/metabase/metabase.go @@ -30,15 +30,16 @@ type Metabase struct { } type Config struct { - Database *csconfig.DatabaseCfg `yaml:"database"` - ListenAddr string `yaml:"listen_addr"` - ListenPort string `yaml:"listen_port"` - ListenURL string `yaml:"listen_url"` - Username string `yaml:"username"` - Password string `yaml:"password"` - DBPath string `yaml:"metabase_db_path"` - DockerGroupID string `yaml:"-"` - Image string `yaml:"image"` + Database *csconfig.DatabaseCfg `yaml:"database"` + ListenAddr string `yaml:"listen_addr"` + ListenPort string `yaml:"listen_port"` + ListenURL string `yaml:"listen_url"` + Username string `yaml:"username"` + Password string `yaml:"password"` + DBPath string `yaml:"metabase_db_path"` + DockerGroupID string `yaml:"-"` + Image string `yaml:"image"` + EnvironmentVariables []string `yaml:"environment_variables"` } var ( @@ -88,7 +89,7 @@ func (m *Metabase) Init(containerName string, image string) error { if err != nil { return err } - m.Container, err = NewContainer(m.Config.ListenAddr, m.Config.ListenPort, m.Config.DBPath, containerName, image, DBConnectionURI, m.Config.DockerGroupID) + m.Container, err = 
NewContainer(m.Config.ListenAddr, m.Config.ListenPort, m.Config.DBPath, containerName, image, DBConnectionURI, m.Config.DockerGroupID, m.Config.EnvironmentVariables) if err != nil { return fmt.Errorf("container init: %w", err) } @@ -137,21 +138,21 @@ func (m *Metabase) LoadConfig(configPath string) error { m.Config = config return nil - } -func SetupMetabase(dbConfig *csconfig.DatabaseCfg, listenAddr string, listenPort string, username string, password string, mbDBPath string, dockerGroupID string, containerName string, image string) (*Metabase, error) { +func SetupMetabase(dbConfig *csconfig.DatabaseCfg, listenAddr string, listenPort string, username string, password string, mbDBPath string, dockerGroupID string, containerName string, image string, environmentVariables []string) (*Metabase, error) { metabase := &Metabase{ Config: &Config{ - Database: dbConfig, - ListenAddr: listenAddr, - ListenPort: listenPort, - Username: username, - Password: password, - ListenURL: fmt.Sprintf("http://%s:%s", listenAddr, listenPort), - DBPath: mbDBPath, - DockerGroupID: dockerGroupID, - Image: image, + Database: dbConfig, + ListenAddr: listenAddr, + ListenPort: listenPort, + Username: username, + Password: password, + ListenURL: fmt.Sprintf("http://%s:%s", listenAddr, listenPort), + DBPath: mbDBPath, + DockerGroupID: dockerGroupID, + Image: image, + EnvironmentVariables: environmentVariables, }, } if err := metabase.Init(containerName, image); err != nil { From 5df56844d93902e5b4d1ad7ce259e0fede001caa Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 13 Jan 2025 13:28:48 +0100 Subject: [PATCH 388/581] log warning if local items have conflicting names (#3399) --- cmd/crowdsec-cli/clihub/items.go | 2 +- cmd/crowdsec-cli/dashboard.go | 4 +- pkg/cwhub/hub.go | 1 + pkg/cwhub/state.go | 11 +- pkg/cwhub/state_test.go | 1 + pkg/cwhub/sync.go | 390 +++++++++++++++++---------- test/bats/20_hub_items.bats | 21 +- test/bats/cscli-hubtype-install.bats 
| 32 +++ test/bats/cscli-hubtype-list.bats | 2 +- 9 files changed, 306 insertions(+), 158 deletions(-) diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index 730d2208be0..87cb10b1f93 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -63,7 +63,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st continue } - listHubItemTable(out, wantColor, "\n"+strings.ToUpper(itemType), items[itemType]) + listHubItemTable(out, wantColor, strings.ToUpper(itemType), items[itemType]) nothingToDisplay = false } diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index e138b8285c9..c3c974eb9b8 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -167,7 +167,9 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if err = cli.chownDatabase(dockerGroup.Gid); err != nil { return err } - mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, metabaseContainerID, metabaseImage, metabaseContainerEnvironmentVariables) + mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, + metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, + metabaseContainerID, metabaseImage, metabaseContainerEnvironmentVariables) if err != nil { return err } diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 998a4032359..aeccb3268f7 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -171,6 +171,7 @@ func (h *Hub) Update(ctx context.Context, indexProvider IndexProvider, withConte return nil } +// addItem adds an item to the hub. It silently replaces an existing item with the same type and name. 
func (h *Hub) addItem(item *Item) { if h.items[item.Type] == nil { h.items[item.Type] = make(map[string]*Item) diff --git a/pkg/cwhub/state.go b/pkg/cwhub/state.go index 518185aff1c..63a433151cd 100644 --- a/pkg/cwhub/state.go +++ b/pkg/cwhub/state.go @@ -7,10 +7,11 @@ import ( // ItemState is used to keep the local state (i.e. at runtime) of an item. // This data is not stored in the index, but is displayed with "cscli ... inspect". type ItemState struct { - LocalPath string `json:"local_path,omitempty" yaml:"local_path,omitempty"` - LocalVersion string `json:"local_version,omitempty" yaml:"local_version,omitempty"` - LocalHash string `json:"local_hash,omitempty" yaml:"local_hash,omitempty"` - Installed bool `json:"installed"` + LocalPath string `json:"local_path,omitempty" yaml:"local_path,omitempty"` + LocalVersion string `json:"local_version,omitempty" yaml:"local_version,omitempty"` + LocalHash string `json:"local_hash,omitempty" yaml:"local_hash,omitempty"` + Installed bool `json:"installed"` + local bool Downloaded bool `json:"downloaded"` UpToDate bool `json:"up_to_date"` Tainted bool `json:"tainted"` @@ -20,7 +21,7 @@ type ItemState struct { // IsLocal returns true if the item has been create by a user (not downloaded from the hub). func (s *ItemState) IsLocal() bool { - return s.Installed && !s.Downloaded + return s.local } // Text returns the status of the item as a string (eg. "enabled,update-available"). 
diff --git a/pkg/cwhub/state_test.go b/pkg/cwhub/state_test.go index 3ed3de16fcc..20741809ae2 100644 --- a/pkg/cwhub/state_test.go +++ b/pkg/cwhub/state_test.go @@ -40,6 +40,7 @@ func TestItemStateText(t *testing.T) { ItemState{ Installed: true, UpToDate: false, + local: true, Tainted: false, Downloaded: false, }, diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index ee8e49f2bf0..59c1383d7c2 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -50,9 +50,8 @@ func resolveSymlink(path string) (string, error) { } // isPathInside checks if a path is inside the given directory -// it can return false negatives if the filesystem is case insensitive func isPathInside(path, dir string) (bool, error) { - absFilePath, err := filepath.Abs(path) + absFile, err := filepath.Abs(path) if err != nil { return false, err } @@ -62,103 +61,145 @@ func isPathInside(path, dir string) (bool, error) { return false, err } - return strings.HasPrefix(absFilePath, absDir), nil -} + rel, err := filepath.Rel(absDir, absFile) + if err != nil { + return false, err + } -// information used to create a new Item, from a file path. -type itemFileInfo struct { - fname string - stage string - ftype string - fauthor string - inhub bool + return !strings.HasPrefix(rel, ".."), nil } -func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo, error) { - var ret *itemFileInfo +// itemSpec contains some information needed to complete the items +// after they have been parsed from the index. itemSpecs are created by +// scanning the hub (/etc/crowdsec/hub/*) and install (/etc/crowdsec/*) directories. +// Only directories for the known types are scanned. 
+type itemSpec struct { + path string // full path to the file (or link) + fname string // name of the item: + // for local item, taken from the file content or defaults to the filename (including extension) + // for non-local items, always {author}/{name} + stage string // stage for parsers and overflows + ftype string // type, plural (collections, contexts etc.) + fauthor string // author - empty for local items + inhub bool // true if the spec comes from the hub dir + target string // the target of path if it's a link, otherwise == path + local bool // is this a spec for a local item? +} - hubDir := h.local.HubDir - installDir := h.local.InstallDir +func newHubItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpec, error) { + // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml + // .../hub/scenarios/crowdsecurity/ssh_bf.yaml + // .../hub/profiles/crowdsecurity/linux.yaml + if len(subs) < 3 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + } - subsHub := relativePathComponents(path, hubDir) - subsInstall := relativePathComponents(path, installDir) + ftype := subs[0] + if !slices.Contains(ItemTypes, ftype) { + // this doesn't really happen anymore, because we only scan the {hubtype} directories + return nil, fmt.Errorf("unknown configuration type '%s'", ftype) + } - switch { - case len(subsHub) > 0: - logger.Tracef("in hub dir") + stage := "" + fauthor := subs[1] + fname := subs[2] - // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml - // .../hub/scenarios/crowdsecurity/ssh_bf.yaml - // .../hub/profiles/crowdsecurity/linux.yaml - if len(subsHub) < 3 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) + if ftype == PARSERS || ftype == POSTOVERFLOWS { + if len(subs) < 4 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) } - ftype := subsHub[0] - if !slices.Contains(ItemTypes, ftype) { - // this doesn't really happen anymore, because we only scan the {hubtype} 
directories - return nil, fmt.Errorf("unknown configuration type '%s'", ftype) - } + stage = subs[1] + fauthor = subs[2] + fname = subs[3] + } - stage := "" - fauthor := subsHub[1] - fname := subsHub[2] + spec := itemSpec{ + path: path, + inhub: true, + ftype: ftype, + stage: stage, + fauthor: fauthor, + fname: fname, + } - if ftype == PARSERS || ftype == POSTOVERFLOWS { - if len(subsHub) < 4 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) - } + return &spec, nil +} - stage = subsHub[1] - fauthor = subsHub[2] - fname = subsHub[3] - } +func newInstallItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpec, error) { + logger.Tracef("%s in install dir", path) - ret = &itemFileInfo{ - inhub: true, - ftype: ftype, - stage: stage, - fauthor: fauthor, - fname: fname, - } + // .../config/parser/stage/file.yaml + // .../config/postoverflow/stage/file.yaml + // .../config/scenarios/scenar.yaml + // .../config/collections/linux.yaml //file is empty + + if len(subs) < 2 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + } - case len(subsInstall) > 0: - logger.Tracef("in install dir") + // this can be in any number of subdirs, we join them to compose the item name - // .../config/parser/stage/file.yaml - // .../config/postoverflow/stage/file.yaml - // .../config/scenarios/scenar.yaml - // .../config/collections/linux.yaml //file is empty + ftype := subs[0] + stage := "" + fname := strings.Join(subs[1:], "/") - if len(subsInstall) < 2 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsInstall)) - } + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subs[1] + fname = strings.Join(subs[2:], "/") + } - // this can be in any number of subdirs, we join them to compose the item name + spec := itemSpec{ + path: path, + inhub: false, + ftype: ftype, + stage: stage, + fauthor: "", + fname: fname, + } - ftype := subsInstall[0] - stage := "" - fname := strings.Join(subsInstall[1:], "/") + 
return &spec, nil +} - if ftype == PARSERS || ftype == POSTOVERFLOWS { - stage = subsInstall[1] - fname = strings.Join(subsInstall[2:], "/") - } +func newItemSpec(path, hubDir, installDir string, logger *logrus.Logger) (*itemSpec, error) { + var ( + spec *itemSpec + err error + ) - ret = &itemFileInfo{ - inhub: false, - ftype: ftype, - stage: stage, - fauthor: "", - fname: fname, + if subs := relativePathComponents(path, hubDir); len(subs) > 0 { + spec, err = newHubItemSpec(path, subs, logger) + if err != nil { + return nil, err } - default: + } else if subs := relativePathComponents(path, installDir); len(subs) > 0 { + spec, err = newInstallItemSpec(path, subs, logger) + if err != nil { + return nil, err + } + } + + if spec == nil { return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) } - logger.Tracef("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", ret.fname, ret.fauthor, ret.stage, ret.ftype) + // follow the link to see if it falls in the hub directory + // if it's not a link, target == path + spec.target, err = resolveSymlink(spec.path) + if err != nil { + // target does not exist, the user might have removed the file + // or switched to a hub branch without it; or symlink loop + return nil, err + } - return ret, nil + targetInHub, err := isPathInside(spec.target, hubDir) + if err != nil { + return nil, ErrSkipPath + } + + spec.local = !targetInHub + + return spec, nil } // sortedVersions returns the input data, sorted in reverse order (new, old) by semver. 
@@ -185,7 +226,7 @@ func sortedVersions(raw []string) ([]string, error) { return ret, nil } -func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { +func newLocalItem(h *Hub, path string, spec *itemSpec) (*Item, error) { type localItemName struct { Name string `yaml:"name"` } @@ -194,12 +235,13 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { item := &Item{ hub: h, - Name: info.fname, - Stage: info.stage, - Type: info.ftype, + Name: spec.fname, + Stage: spec.stage, + Type: spec.ftype, FileName: fileName, State: ItemState{ LocalPath: path, + local: true, Installed: true, UpToDate: true, }, @@ -225,22 +267,25 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { return item, nil } -func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { +// A sentinel to skip regular files because "nil, nil" is ambiguous. Returning SkipDir with files would skip the rest of the directory. +var ErrSkipPath = errors.New("sentinel") + +func (h *Hub) itemVisit(path string, f os.DirEntry, err error) (*itemSpec, error) { if err != nil { h.logger.Debugf("while syncing hub dir: %s", err) // there is a path error, we ignore the file - return nil + return nil, ErrSkipPath + } + + // permission errors, files removed while reading, etc. + if f == nil { + return nil, ErrSkipPath } // only happens if the current working directory was removed (!) path, err = filepath.Abs(path) if err != nil { - return err - } - - // permission errors, files removed while reading, etc. 
- if f == nil { - return nil + return nil, err } if f.IsDir() { @@ -249,101 +294,125 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { // - double dot prefix is used by k8s to mount config maps if strings.HasPrefix(f.Name(), ".") { h.logger.Tracef("skipping hidden directory %s", path) - return filepath.SkipDir + return nil, filepath.SkipDir } // keep traversing - return nil + return nil, nil } // we only care about YAML files if !isYAMLFileName(f.Name()) { - return nil + return nil, ErrSkipPath } - info, err := h.getItemFileInfo(path, h.logger) + spec, err := newItemSpec(path, h.local.HubDir, h.local.InstallDir, h.logger) if err != nil { h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil + return nil, ErrSkipPath } - // follow the link to see if it falls in the hub directory - // if it's not a link, target == path - target, err := resolveSymlink(path) - if err != nil { - // target does not exist, the user might have removed the file - // or switched to a hub branch without it; or symlink loop - h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil - } - - targetInHub, err := isPathInside(target, h.local.HubDir) - if err != nil { - h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil - } - - // local (custom) item if the file or link target is not inside the hub dir - if !targetInHub { - h.logger.Tracef("%s is a local file, skip", path) - - item, err := newLocalItem(h, path, info) - if err != nil { - return err - } - - h.addItem(item) + return spec, nil +} - return nil +func updateNonLocalItem(h *Hub, path string, spec *itemSpec, symlinkTarget string) (*Item, error) { + // look for the matching index entry + tot := 0 + for range h.GetItemMap(spec.ftype) { + tot++ } - hubpath := target - - // try to find which configuration item it is - h.logger.Tracef("check [%s] of %s", info.fname, info.ftype) - - for _, item := range h.GetItemMap(info.ftype) { - if info.fname != item.FileName { + for _, item := range 
h.GetItemMap(spec.ftype) { + if spec.fname != item.FileName { continue } - if item.Stage != info.stage { + if item.Stage != spec.stage { continue } - // if we are walking hub dir, just mark present files as downloaded - if info.inhub { + // Downloaded item, in the hub dir. + if spec.inhub { // not the item we're looking for - if !item.validPath(info.fauthor, info.fname) { + if !item.validPath(spec.fauthor, spec.fname) { continue } src, err := item.DownloadPath() if err != nil { - return err + return nil, err } - if path == src { + if spec.path == src { h.logger.Tracef("marking %s as downloaded", item.Name) item.State.Downloaded = true } - } else if !hasPathSuffix(hubpath, item.RemotePath) { + } else if !hasPathSuffix(symlinkTarget, item.RemotePath) { // wrong file // ///.yaml continue } - err := item.setVersionState(path, info.inhub) + err := item.setVersionState(spec.path, spec.inhub) + if err != nil { + return nil, err + } + + return item, nil + } + + return nil, nil +} + +// addItemFromSpec adds an item to the hub based on the spec, or updates it if already present. +// +// When the item is: +// +// Local - an itemSpec instance is created while scanning the install directory +// and an Item instance will be added to the hub.items map. +// +// Not downloaded, not installed - an Item instance is already on hub.items (decoded from index) and left untouched. +// +// Downloaded, not installed - an Item instance is on hub.items (decoded from index) and an itemSpec instance is created +// to complete it (i.e. set version and state flags). +// +// Downloaded, installed - an Item instance is on hub.items and is complemented with two itemSpecs: one from the file +// on the hub directory, one from the link in the install directory. +func (h *Hub) addItemFromSpec(spec *itemSpec) error { + var ( + item *Item + err error + ) + + // Local item: links outside the hub directory. + // We add it, or overwrite the existing one if it happened to have the same name. 
+ if spec.local { + item, err = newLocalItem(h, spec.path, spec) if err != nil { return err } - h.pathIndex[path] = item + // we now have the name declared in the file (for local), + // see if there's another installed item of the same name + theOtherItem := h.GetItem(spec.ftype, item.Name) + if theOtherItem != nil { + if theOtherItem.State.Installed { + h.logger.Warnf("multiple %s named %s: ignoring %s", spec.ftype, item.Name, theOtherItem.State.LocalPath) + } + } + } else { + item, err = updateNonLocalItem(h, spec.path, spec, spec.target) + if err != nil { + return err + } + } + if item == nil { + h.logger.Infof("Ignoring file %s of type %s", spec.path, spec.ftype) return nil } - h.logger.Infof("Ignoring file %s of type %s", path, info.ftype) + h.addItem(item) return nil } @@ -411,6 +480,8 @@ func (i *Item) checkSubItemVersions() []string { // syncDir scans a directory for items, and updates the Hub state accordingly. func (h *Hub) syncDir(dir string) error { + specs := []*itemSpec{} + // For each, scan PARSERS, POSTOVERFLOWS... and COLLECTIONS last for _, scan := range ItemTypes { // cpath: top-level item directory, either downloaded or installed items. 
@@ -423,11 +494,46 @@ func (h *Hub) syncDir(dir string) error { // explicit check for non existing directory, avoid spamming log.Debug if _, err = os.Stat(cpath); os.IsNotExist(err) { - h.logger.Tracef("directory %s doesn't exist, skipping", cpath) continue } - if err = filepath.WalkDir(cpath, h.itemVisit); err != nil { + // wrap itemVisit to collect spec results + specCollector := func(path string, f os.DirEntry, err error) error { + spec, err := h.itemVisit(path, f, err) + if err == nil && spec != nil { + specs = append(specs, spec) + } + + if errors.Is(err, ErrSkipPath) { + return nil + } + + return err + } + + if err = filepath.WalkDir(cpath, specCollector); err != nil { + return err + } + } + + // add non-local items first, so they can find the place in the index + // before it's overridden by local items in case of name collision + for _, spec := range specs { + if spec.local { + continue + } + + if err := h.addItemFromSpec(spec); err != nil { + return err + } + } + + for _, spec := range specs { + if !spec.local { + continue + } + + if err := h.addItemFromSpec(spec); err != nil { return err } } @@ -463,14 +569,16 @@ func removeDuplicates(sl []string) []string { // localSync updates the hub state with downloaded, installed and local items. 
func (h *Hub) localSync() error { - if err := h.syncDir(h.local.InstallDir); err != nil { - return fmt.Errorf("failed to sync %s: %w", h.local.InstallDir, err) - } - + // add downloaded files first, so they can find the place in the index + // before it's overridden by local items in case of name collision if err := h.syncDir(h.local.HubDir); err != nil { return fmt.Errorf("failed to sync %s: %w", h.local.HubDir, err) } + if err := h.syncDir(h.local.InstallDir); err != nil { + return fmt.Errorf("failed to sync %s: %w", h.local.InstallDir, err) + } + warnings := make([]string, 0) for _, item := range h.GetItemMap(COLLECTIONS) { diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 8ebe505c6e1..4ddaf387488 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -80,8 +80,8 @@ teardown() { rune -0 cscli collections install crowdsecurity/sshd rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics - # XXX: this must be triggered during parse, not sync - assert_stderr "Error: failed to sync $CONFIG_DIR: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version" + # XXX: it would be better to trigger this during parse, not sync + assert_stderr "Error: failed to sync $HUB_DIR: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version" } @test "removing or purging an item already removed by hand" { @@ -99,13 +99,6 @@ teardown() { rune -0 cscli parsers remove crowdsecurity/syslog-logs --purge assert_output "Nothing to do." - - rune -0 cscli parsers remove --all --error --purge --force - assert_output "Nothing to do." - refute_stderr - rune -0 cscli collections remove --all --error --purge --force - assert_output "Nothing to do." 
- refute_stderr } @test "a local item is not tainted" { @@ -199,6 +192,16 @@ teardown() { assert_json '[]' } +@test "replacing a symlink with a regular file makes a local item" { + rune -0 cscli parsers install crowdsecurity/caddy-logs + rune -0 rm "$CONFIG_DIR/parsers/s01-parse/caddy-logs.yaml" + rune -0 cp "$HUB_DIR/parsers/s01-parse/crowdsecurity/caddy-logs.yaml" "$CONFIG_DIR/parsers/s01-parse/caddy-logs.yaml" + rune -0 cscli hub list + rune -0 cscli parsers inspect crowdsecurity/caddy-logs -o json + rune -0 jq -e '[.tainted,.local,.local_version==false,true,"?"]' <(output) + refute_stderr +} + @test "tainted hub file, not enabled, install --force should repair" { rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json diff --git a/test/bats/cscli-hubtype-install.bats b/test/bats/cscli-hubtype-install.bats index 2304e5a72cc..58c16dd968d 100644 --- a/test/bats/cscli-hubtype-install.bats +++ b/test/bats/cscli-hubtype-install.bats @@ -267,3 +267,35 @@ teardown() { assert_line 'enabling contexts:crowdsecurity/bf_base' assert_line 'enabling collections:crowdsecurity/sshd' } + +@test "a local item can override an official one, if it's not installed" { + mkdir -p "$CONFIG_DIR/parsers/s02-enrich" + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/hi.yaml" + # no warning + rune -0 cscli parsers list + refute_stderr + rune -0 cscli parsers list -o json + rune -0 jq -e '.installed,.local==true,true' <(output) +} + +@test "conflicting item names: local and non local - the local one has priority" { + mkdir -p "$CONFIG_DIR/parsers/s02-enrich" + rune -0 cscli parsers install crowdsecurity/whitelists + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/hi.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -e '.installed,.local==true,true' <(output) + rune -0 cscli parsers list + assert_stderr 
--partial "multiple parsers named crowdsecurity/whitelists: ignoring $CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "conflicting item names: both local, the last one wins" { + mkdir -p "$CONFIG_DIR/parsers/s02-enrich" + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/one.yaml" + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/two.yaml" + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -r '.local_path' <(output) + assert_output --partial "/parsers/s02-enrich/two.yaml" + rune -0 cscli parsers list + assert_stderr --partial "multiple parsers named crowdsecurity/whitelists: ignoring $CONFIG_DIR/parsers/s02-enrich/one.yaml" +} diff --git a/test/bats/cscli-hubtype-list.bats b/test/bats/cscli-hubtype-list.bats index b3db4743eb9..14113650c74 100644 --- a/test/bats/cscli-hubtype-list.bats +++ b/test/bats/cscli-hubtype-list.bats @@ -80,7 +80,7 @@ teardown() { # the list should be the same in all formats, and sorted (not case sensitive) list_raw=$(cscli parsers list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli parsers list -o human -a | tail -n +7 | head -n -1 | cut -d' ' -f2) + list_human=$(cscli parsers list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) list_json=$(cscli parsers list -o json -a | jq -r '.parsers[].name') # use python to sort because it handles "_" like go From cc1196c3ad0741e3a8626987ee00081d098b3c57 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 14 Jan 2025 10:48:05 +0000 Subject: [PATCH 389/581] fix: #2790 (#3378) --- cmd/crowdsec/pour.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 2fc7d7e42c9..4c83b65bd48 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -46,7 +46,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea // here we can bucketify with parsed poured, err := leaky.PourItemToHolders(parsed, holders, buckets) 
if err != nil { - log.Errorf("bucketify failed for: %v", parsed) + log.Errorf("bucketify failed for: %v with %s", parsed, err) continue } From 9ef5f58f885eb35d0759f73cf0d5caba73ea3dd2 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 15 Jan 2025 12:13:54 +0100 Subject: [PATCH 390/581] test pkg/exprhelpers: explicit message if the tag "expr_debug" is missing (#3400) * test pkg/exprhelpers: explicit message if the tag "expr_debug" is missing * typo * lint: use build tag expr_debug while linting * lint --- .github/workflows/go-tests.yml | 4 +- .golangci.yml | 4 + pkg/exprhelpers/debugger.go | 112 +++++++++++++++++++-------- pkg/exprhelpers/debugger_test.go | 1 + pkg/exprhelpers/debuggerstub_test.go | 10 +++ 5 files changed, 95 insertions(+), 36 deletions(-) create mode 100644 pkg/exprhelpers/debuggerstub_test.go diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3a194e1084a..d882f88580e 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -143,11 +143,11 @@ jobs: go generate ./... protoc --version if [[ $(git status --porcelain) ]]; then - echo "Error: Uncommitted changes found after running 'make generate'. Please commit all generated code." + echo "Error: Uncommitted changes found after running 'go generate'. Please commit all generated code." git diff exit 1 else - echo "No changes detected after running 'make generate'." + echo "No changes detected after running 'go generate'." 
fi - name: Create localstack streams diff --git a/.golangci.yml b/.golangci.yml index deb073c2eea..5995f14c512 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,9 @@ # https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +run: + build-tags: + - expr_debug + linters-settings: gci: sections: diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 2e47af6d1de..d44b8fc97e1 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -21,35 +21,35 @@ var IndentStep = 4 // we use this struct to store the output of the expr runtime type OpOutput struct { - Code string //relevant code part + Code string // relevant code part - CodeDepth int //level of nesting + CodeDepth int // level of nesting BlockStart bool BlockEnd bool - Func bool //true if it's a function call + Func bool // true if it's a function call FuncName string Args []string FuncResults []string // - Comparison bool //true if it's a comparison + Comparison bool // true if it's a comparison Negated bool Left string Right string // - JumpIf bool //true if it's conditional jump + JumpIf bool // true if it's conditional jump IfTrue bool IfFalse bool // - Condition bool //true if it's a condition + Condition bool // true if it's a condition ConditionIn bool ConditionContains bool - //used for comparisons, conditional jumps and conditions + // used for comparisons, conditional jumps and conditions StrConditionResult string - ConditionResult *bool //should always be present for conditions + ConditionResult *bool // should always be present for conditions // - Finalized bool //used when a node is finalized, we already fetched result from next OP + Finalized bool // used when a node is finalized, we already fetched result from next OP } func (o *OpOutput) String() string { @@ -57,6 +57,7 @@ func (o *OpOutput) String() string { if o.Code != "" { ret += fmt.Sprintf("[%s]", o.Code) } + ret += " " switch { @@ -68,19 +69,24 @@ func (o *OpOutput) 
String() string { if indent < 0 { indent = 0 } + ret = fmt.Sprintf("%*cBLOCK_END [%s]", indent, ' ', o.Code) + if o.StrConditionResult != "" { ret += fmt.Sprintf(" -> %s", o.StrConditionResult) } + return ret - //A block end can carry a value, for example if it's a count, any, all etc. XXX + // A block end can carry a value, for example if it's a count, any, all etc. XXX case o.Func: return ret + fmt.Sprintf("%s(%s) = %s", o.FuncName, strings.Join(o.Args, ", "), strings.Join(o.FuncResults, ", ")) case o.Comparison: if o.Negated { ret += "NOT " } + ret += fmt.Sprintf("%s == %s -> %s", o.Left, o.Right, o.StrConditionResult) + return ret case o.ConditionIn: return ret + fmt.Sprintf("%s in %s -> %s", o.Args[0], o.Args[1], o.StrConditionResult) @@ -91,18 +97,23 @@ func (o *OpOutput) String() string { if *o.ConditionResult { return ret + "OR -> false" } + return ret + "OR -> true" } + return ret + "OR(?)" case o.JumpIf && o.IfFalse: if o.ConditionResult != nil { if *o.ConditionResult { return ret + "AND -> true" } + return ret + "AND -> false" } + return ret + "AND(?)" } + return ret + "" } @@ -135,7 +146,7 @@ func (erp ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string { func autoQuote(v any) string { switch x := v.(type) { case string: - //let's avoid printing long strings. it can happen ie. when we are debugging expr with `File()` or similar helpers + // let's avoid printing long strings. it can happen ie. 
when we are debugging expr with `File()` or similar helpers if len(x) > 40 { return fmt.Sprintf("%q", x[:40]+"...") } else { @@ -147,35 +158,40 @@ func autoQuote(v any) string { } func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, parts []string, outputs []OpOutput) ([]OpOutput, error) { - IdxOut := len(outputs) prevIdxOut := 0 currentDepth := 0 - //when there is a function call or comparison, we need to wait for the next instruction to get the result and "finalize" the previous one + // when there is a function call or comparison, we need to wait for the next instruction to get the result and "finalize" the previous one if IdxOut > 0 { prevIdxOut = IdxOut - 1 currentDepth = outputs[prevIdxOut].CodeDepth + if outputs[prevIdxOut].Func && !outputs[prevIdxOut].Finalized { stack := vm.Stack num_items := 1 + for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { outputs[prevIdxOut].FuncResults = append(outputs[prevIdxOut].FuncResults, autoQuote(stack[i])) num_items-- } + outputs[prevIdxOut].Finalized = true } else if (outputs[prevIdxOut].Comparison || outputs[prevIdxOut].Condition) && !outputs[prevIdxOut].Finalized { stack := vm.Stack outputs[prevIdxOut].StrConditionResult = fmt.Sprintf("%+v", stack) + if val, ok := stack[0].(bool); ok { outputs[prevIdxOut].ConditionResult = new(bool) *outputs[prevIdxOut].ConditionResult = val } + outputs[prevIdxOut].Finalized = true } } erp.Logger.Tracef("[STEP %d:%s] (stack:%+v) (parts:%+v) {depth:%d}", ip, parts[1], vm.Stack, parts, currentDepth) + out := OpOutput{} out.CodeDepth = currentDepth out.Code = erp.extractCode(ip, program) @@ -188,27 +204,28 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpEnd": out.CodeDepth -= IndentStep out.BlockEnd = true - //OpEnd can carry value, if it's any/all/count etc. + // OpEnd can carry value, if it's any/all/count etc. 
if len(vm.Stack) > 0 { out.StrConditionResult = fmt.Sprintf("%v", vm.Stack) } + outputs = append(outputs, out) case "OpNot": - //negate the previous condition + // negate the previous condition outputs[prevIdxOut].Negated = true - case "OpTrue": //generated when possible ? (1 == 1) + case "OpTrue": // generated when possible ? (1 == 1) out.Condition = true out.ConditionResult = new(bool) *out.ConditionResult = true out.StrConditionResult = "true" outputs = append(outputs, out) - case "OpFalse": //generated when possible ? (1 != 1) + case "OpFalse": // generated when possible ? (1 != 1) out.Condition = true out.ConditionResult = new(bool) *out.ConditionResult = false out.StrConditionResult = "false" outputs = append(outputs, out) - case "OpJumpIfTrue": //OR + case "OpJumpIfTrue": // OR stack := vm.Stack out.JumpIf = true out.IfTrue = true @@ -218,78 +235,88 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.ConditionResult = new(bool) *out.ConditionResult = val } + outputs = append(outputs, out) - case "OpJumpIfFalse": //AND + case "OpJumpIfFalse": // AND stack := vm.Stack out.JumpIf = true out.IfFalse = true out.StrConditionResult = fmt.Sprintf("%v", stack[0]) + if val, ok := stack[0].(bool); ok { out.ConditionResult = new(bool) *out.ConditionResult = val } + outputs = append(outputs, out) - case "OpCall1": //Op for function calls + case "OpCall1": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 1 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - case "OpCall2": //Op for function calls + case "OpCall2": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 2 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - case "OpCall3": //Op for 
function calls + case "OpCall3": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 3 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - //double check OpCallFast and OpCallTyped + // double check OpCallFast and OpCallTyped case "OpCallFast", "OpCallTyped": // - case "OpCallN": //Op for function calls with more than 3 args + case "OpCallN": // Op for function calls with more than 3 args out.Func = true out.FuncName = parts[1] stack := vm.Stack - //for OpCallN, we get the number of args + // for OpCallN, we get the number of args if len(program.Arguments) >= ip { nb_args := program.Arguments[ip] if nb_args > 0 { - //we need to skip the top item on stack + // we need to skip the top item on stack for i := len(stack) - 2; i >= 0 && nb_args > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) nb_args-- } } - } else { //let's blindly take the items on stack + } else { // let's blindly take the items on stack for _, val := range vm.Stack { out.Args = append(out.Args, autoQuote(val)) } } + outputs = append(outputs, out) - case "OpEqualString", "OpEqual", "OpEqualInt": //comparisons + case "OpEqualString", "OpEqual", "OpEqualInt": // comparisons stack := vm.Stack out.Comparison = true out.Left = autoQuote(stack[0]) out.Right = autoQuote(stack[1]) outputs = append(outputs, out) - case "OpIn": //in operator + case "OpIn": // in operator stack := vm.Stack out.Condition = true out.ConditionIn = true @@ -299,7 +326,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.Args = append(out.Args, autoQuote(stack[0])) out.Args = append(out.Args, autoQuote(stack[1])) outputs = append(outputs, out) - case "OpContains": //kind OpIn , but reverse + case "OpContains": // kind OpIn , but reverse stack := vm.Stack out.Condition = true out.ConditionContains = true @@ -310,6 +337,7 @@ func (erp 
ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.Args = append(out.Args, autoQuote(stack[1])) outputs = append(outputs, out) } + return outputs, nil } @@ -319,10 +347,12 @@ func (erp ExprRuntimeDebug) ipSeek(ip int) []string { if len(parts) == 0 { continue } + if parts[0] == strconv.Itoa(ip) { return parts } } + return nil } @@ -330,19 +360,23 @@ func Run(program *vm.Program, env interface{}, logger *log.Entry, debug bool) (a if debug { dbgInfo, ret, err := RunWithDebug(program, env, logger) DisplayExprDebug(program, dbgInfo, logger, ret) + return ret, err } + return expr.Run(program, env) } func cleanTextForDebug(text string) string { text = strings.Join(strings.Fields(text), " ") text = strings.Trim(text, " \t\n") + return text } func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry, ret any) { logger.Debugf("dbg(result=%v): %s", ret, cleanTextForDebug(string(program.Source()))) + for _, output := range outputs { logger.Debugf("%s", output.String()) } @@ -360,46 +394,55 @@ func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]Op erp.Lines = lines go func() { - //We must never return until the execution of the program is done + // We must never return until the execution of the program is done var err error + erp.Logger.Tracef("[START] ip 0") + ops := erp.ipSeek(0) if ops == nil { log.Warningf("error while debugging expr: failed getting ops for ip 0") } + if outputs, err = erp.ipDebug(0, vm, program, ops, outputs); err != nil { log.Warningf("error while debugging expr: error while debugging at ip 0") } + vm.Step() + for ip := range vm.Position() { ops := erp.ipSeek(ip) if ops == nil { erp.Logger.Tracef("[DONE] ip %d", ip) break } + if outputs, err = erp.ipDebug(ip, vm, program, ops, outputs); err != nil { log.Warningf("error while debugging expr: error while debugging at ip %d", ip) } + vm.Step() } }() var return_error error + ret, err := vm.Run(program, env) - //if the expr runtime 
failed, we don't need to wait for the debug to finish + // if the expr runtime failed, we don't need to wait for the debug to finish if err != nil { return_error = err } - //the overall result of expression is the result of last op ? + // the overall result of expression is the result of last op ? if len(outputs) > 0 { lastOutIdx := len(outputs) if lastOutIdx > 0 { lastOutIdx -= 1 } + switch val := ret.(type) { case bool: log.Tracef("completing with bool %t", ret) - //if outputs[lastOutIdx].Comparison { + // if outputs[lastOutIdx].Comparison { outputs[lastOutIdx].StrConditionResult = fmt.Sprintf("%v", ret) outputs[lastOutIdx].ConditionResult = new(bool) *outputs[lastOutIdx].ConditionResult = val @@ -412,5 +455,6 @@ func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]Op } else { log.Tracef("no output from expr runtime") } + return outputs, ret, return_error } diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 32144454084..0852d7ab2de 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -1,3 +1,4 @@ +//go:build expr_debug package exprhelpers import ( diff --git a/pkg/exprhelpers/debuggerstub_test.go b/pkg/exprhelpers/debuggerstub_test.go new file mode 100644 index 00000000000..cc41c793b47 --- /dev/null +++ b/pkg/exprhelpers/debuggerstub_test.go @@ -0,0 +1,10 @@ +//go:build !expr_debug +package exprhelpers + +import ( + "testing" +) + +func TestFailWithoutExprDebug(t *testing.T) { + t.Fatal("To test pkg/exprhelpers, you need the expr_debug build tag") +} From 65292157757730f00705f319e5d41de2edb31cdf Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 16 Jan 2025 13:22:08 +0100 Subject: [PATCH 391/581] CI: golangci-lint 1.63 (#3396) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 11 +++++++++++ pkg/csplugin/listfiles_test.go | 19 +++++++++++-------- pkg/database/database.go | 2 +- 
pkg/exprhelpers/exprlib_test.go | 6 +----- pkg/fflag/features_test.go | 4 +--- pkg/setup/detect_test.go | 16 ++++++---------- 8 files changed, 33 insertions(+), 29 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 3276dbb1bfd..44abbbe24a3 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -61,6 +61,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.62 + version: v1.63 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index d882f88580e..649c47ebd26 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -189,6 +189,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.62 + version: v1.63 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.golangci.yml b/.golangci.yml index 5995f14c512..12d35ed8737 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -460,3 +460,14 @@ issues: - revive path: "pkg/types/utils.go" text: "argument-limit: .*" + + # need some cleanup first: to create db in memory and share the client, not the config + - linters: + - usetesting + path: "pkg/apiserver/(.+)_test.go" + text: "os.MkdirTemp.* could be replaced by t.TempDir.*" + + - linters: + - usetesting + path: "pkg/apiserver/(.+)_test.go" + text: "os.CreateTemp.* could be replaced by os.CreateTemp.*" diff --git a/pkg/csplugin/listfiles_test.go b/pkg/csplugin/listfiles_test.go index c476d7a4e4a..32269f3f5f1 100644 --- a/pkg/csplugin/listfiles_test.go +++ b/pkg/csplugin/listfiles_test.go @@ -12,19 +12,22 @@ import ( ) func TestListFilesAtPath(t *testing.T) { - dir, err := os.MkdirTemp("", "test-listfiles") - require.NoError(t, err) - t.Cleanup(func() { - os.RemoveAll(dir) - }) - _, err = os.Create(filepath.Join(dir, "notification-gitter")) + dir := t.TempDir() + + f, 
err := os.Create(filepath.Join(dir, "notification-gitter")) require.NoError(t, err) - _, err = os.Create(filepath.Join(dir, "slack")) + require.NoError(t, f.Close()) + + f, err = os.Create(filepath.Join(dir, "slack")) require.NoError(t, err) + require.NoError(t, f.Close()) + err = os.Mkdir(filepath.Join(dir, "somedir"), 0o755) require.NoError(t, err) - _, err = os.Create(filepath.Join(dir, "somedir", "inner")) + + f, err = os.Create(filepath.Join(dir, "somedir", "inner")) require.NoError(t, err) + require.NoError(t, f.Close()) tests := []struct { name string diff --git a/pkg/database/database.go b/pkg/database/database.go index bb41dd3b645..80479710751 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -68,7 +68,7 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro return nil, err // unsupported database caught here } - if config.Type == "sqlite" { + if config.Type == "sqlite" && config.DbPath != ":memory:" { /*if it's the first startup, we want to touch and chmod file*/ if _, err = os.Stat(config.DbPath); os.IsNotExist(err) { f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0o600) diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index f2eb208ebfa..932db4b7da4 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -3,7 +3,6 @@ package exprhelpers import ( "context" "errors" - "os" "testing" "time" @@ -26,15 +25,12 @@ const TestFolder = "tests" func getDBClient(t *testing.T) *database.Client { t.Helper() - dbPath, err := os.CreateTemp("", "*sqlite") - require.NoError(t, err) - ctx := context.Background() testDBClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ Type: "sqlite", DbName: "crowdsec", - DbPath: dbPath.Name(), + DbPath: ":memory:", }) require.NoError(t, err) diff --git a/pkg/fflag/features_test.go b/pkg/fflag/features_test.go index 144e7049362..bf8ddeca8fd 100644 --- a/pkg/fflag/features_test.go +++ 
b/pkg/fflag/features_test.go @@ -351,11 +351,9 @@ func TestSetFromYaml(t *testing.T) { } func TestSetFromYamlFile(t *testing.T) { - tmpfile, err := os.CreateTemp("", "test") + tmpfile, err := os.CreateTemp(t.TempDir(), "test") require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - // write the config file _, err = tmpfile.WriteString("- experimental1") require.NoError(t, err) diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 475f3af0928..72356bc1924 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -60,9 +60,14 @@ func TestSetupHelperProcess(t *testing.T) { func tempYAML(t *testing.T, content string) os.File { t.Helper() require := require.New(t) - file, err := os.CreateTemp("", "") + file, err := os.CreateTemp(t.TempDir(), "") require.NoError(err) + t.Cleanup(func() { + require.NoError(file.Close()) + require.NoError(os.Remove(file.Name())) + }) + _, err = file.WriteString(dedent.Dedent(content)) require.NoError(err) @@ -249,7 +254,6 @@ func TestListSupported(t *testing.T) { t.Parallel() f := tempYAML(t, tc.yml) - defer os.Remove(f.Name()) supported, err := setup.ListSupported(&f) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -375,7 +379,6 @@ func TestDetectSimpleRule(t *testing.T) { - false ugly: `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) require.NoError(err) @@ -421,7 +424,6 @@ detect: for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -514,7 +516,6 @@ detect: for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -542,7 +543,6 @@ func TestDetectForcedUnit(t *testing.T) { journalctl_filter: - 
_SYSTEMD_UNIT=crowdsec-setup-forced.service `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedUnits: []string{"crowdsec-setup-forced.service"}}) require.NoError(err) @@ -580,7 +580,6 @@ func TestDetectForcedProcess(t *testing.T) { when: - ProcessRunning("foobar") `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedProcesses: []string{"foobar"}}) require.NoError(err) @@ -610,7 +609,6 @@ func TestDetectSkipService(t *testing.T) { when: - ProcessRunning("foobar") `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedProcesses: []string{"foobar"}, SkipServices: []string{"wizard"}}) require.NoError(err) @@ -825,7 +823,6 @@ func TestDetectForcedOS(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedOS: tc.forced}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -1009,7 +1006,6 @@ func TestDetectDatasourceValidation(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) require.Equal(tc.expected, detected) From b582730d061c10fbafc055f36998b3b5b6d1e1ae Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 16 Jan 2025 14:02:39 +0100 Subject: [PATCH 392/581] remove commands "cscli config backup/restore" (#3158) * remove "cscli config backup/restore" * remove backup/restore calls from deb,rpm pre/postinst and wizard * remove from prerm too --- cmd/crowdsec-cli/config_backup.go | 224 +---------------------- cmd/crowdsec-cli/config_restore.go | 275 +---------------------------- cmd/crowdsec-cli/copyfile.go | 82 --------- debian/postinst | 16 -- debian/preinst | 35 ---- debian/prerm | 3 +- 
rpm/SPECS/crowdsec.spec | 24 +-- test/bats/01_cscli.bats | 36 +--- test/bats/02_nolapi.bats | 12 -- test/bats/03_noagent.bats | 12 -- test/bats/04_nocapi.bats | 11 -- wizard.sh | 66 ++----- 12 files changed, 31 insertions(+), 765 deletions(-) delete mode 100644 cmd/crowdsec-cli/copyfile.go diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go index faac786ebdc..0a58a8c1ab3 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ b/cmd/crowdsec-cli/config_backup.go @@ -1,234 +1,18 @@ package main import ( - "encoding/json" - "errors" "fmt" - "os" - "path/filepath" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (cli *cliConfig) backupHub(dirPath string) error { - hub, err := require.Hub(cli.cfg(), nil) - if err != nil { - return err - } - - for _, itemType := range cwhub.ItemTypes { - clog := log.WithField("type", itemType) - - itemMap := hub.GetItemMap(itemType) - if itemMap == nil { - clog.Infof("No %s to backup.", itemType) - continue - } - - itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itemType) - if err = os.MkdirAll(itemDirectory, os.ModePerm); err != nil { - return fmt.Errorf("error while creating %s: %w", itemDirectory, err) - } - - upstreamParsers := []string{} - - for k, v := range itemMap { - clog = clog.WithField("file", v.Name) - if !v.State.Installed { // only backup installed ones - clog.Debugf("[%s]: not installed", k) - continue - } - - // for the local/tainted ones, we back up the full file - if v.State.Tainted || v.State.IsLocal() || !v.State.UpToDate { - // we need to backup stages for parsers - if itemType == cwhub.PARSERS || itemType == cwhub.POSTOVERFLOWS { - fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage) - if err = os.MkdirAll(fstagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage dir %s: %w", fstagedir, err) - } - } - - clog.Debugf("[%s]: 
backing up file (tainted:%t local:%t up-to-date:%t)", k, v.State.Tainted, v.State.IsLocal(), v.State.UpToDate) - - tfile := fmt.Sprintf("%s%s/%s", itemDirectory, v.Stage, v.FileName) - if err = CopyFile(v.State.LocalPath, tfile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itemType, v.State.LocalPath, tfile, err) - } - - clog.Infof("local/tainted saved %s to %s", v.State.LocalPath, tfile) - - continue - } - - clog.Debugf("[%s]: from hub, just backup name (up-to-date:%t)", k, v.State.UpToDate) - clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.State.UpToDate) - upstreamParsers = append(upstreamParsers, v.Name) - } - // write the upstream items - upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType) - - upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") - if err != nil { - return fmt.Errorf("failed to serialize upstream parsers: %w", err) - } - - err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0o644) - if err != nil { - return fmt.Errorf("unable to write to %s %s: %w", itemType, upstreamParsersFname, err) - } - - clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname) - } - - return nil -} - -/* - Backup crowdsec configurations to directory : - -- Main config (config.yaml) -- Profiles config (profiles.yaml) -- Simulation config (simulation.yaml) -- Backup of API credentials (local API and online API) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Acquisition files (acquis.yaml, acquis.d/*.yaml) -*/ -func (cli *cliConfig) backup(dirPath string) error { - var err error - - cfg := cli.cfg() - - if dirPath == "" { - return errors.New("directory path can't be empty") - } - - log.Infof("Starting configuration backup") - - /*if parent directory doesn't exist, bail out. 
create final dir with Mkdir*/ - parentDir := filepath.Dir(dirPath) - if _, err = os.Stat(parentDir); err != nil { - return fmt.Errorf("while checking parent directory %s existence: %w", parentDir, err) - } - - if err = os.Mkdir(dirPath, 0o700); err != nil { - return fmt.Errorf("while creating %s: %w", dirPath, err) - } - - if cfg.ConfigPaths.SimulationFilePath != "" { - backupSimulation := filepath.Join(dirPath, "simulation.yaml") - if err = CopyFile(cfg.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.ConfigPaths.SimulationFilePath, backupSimulation, err) - } - - log.Infof("Saved simulation to %s", backupSimulation) - } - - /* - - backup AcquisitionFilePath - - backup the other files of acquisition directory - */ - if cfg.Crowdsec != nil && cfg.Crowdsec.AcquisitionFilePath != "" { - backupAcquisition := filepath.Join(dirPath, "acquis.yaml") - if err = CopyFile(cfg.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.Crowdsec.AcquisitionFilePath, backupAcquisition, err) - } - } - - acquisBackupDir := filepath.Join(dirPath, "acquis") - if err = os.Mkdir(acquisBackupDir, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %w", acquisBackupDir, err) - } - - if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { - /*if it was the default one, it was already backup'ed*/ - if cfg.Crowdsec.AcquisitionFilePath == acquisFile { - continue - } - - targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile))) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, acquisBackupDir, err) - } - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - - log.Infof("Saved acquis %s to %s", acquisFile, targetFname) - } - } - - if 
ConfigFilePath != "" { - backupMain := fmt.Sprintf("%s/config.yaml", dirPath) - if err = CopyFile(ConfigFilePath, backupMain); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", ConfigFilePath, backupMain, err) - } - - log.Infof("Saved default yaml to %s", backupMain) - } - - if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.OnlineClient != nil && cfg.API.Server.OnlineClient.CredentialsFilePath != "" { - backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if err = CopyFile(cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) - } - - log.Infof("Saved online API credentials to %s", backupCAPICreds) - } - - if cfg.API != nil && cfg.API.Client != nil && cfg.API.Client.CredentialsFilePath != "" { - backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if err = CopyFile(cfg.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Client.CredentialsFilePath, backupLAPICreds, err) - } - - log.Infof("Saved local API credentials to %s", backupLAPICreds) - } - - if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.ProfilesPath != "" { - backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if err = CopyFile(cfg.API.Server.ProfilesPath, backupProfiles); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.ProfilesPath, backupProfiles, err) - } - - log.Infof("Saved profiles to %s", backupProfiles) - } - - if err = cli.backupHub(dirPath); err != nil { - return fmt.Errorf("failed to backup hub config: %w", err) - } - - return nil -} - func (cli *cliConfig) newBackupCmd() *cobra.Command { cmd := &cobra.Command{ - Use: `backup "directory"`, - Short: "Backup current config", - Long: `Backup the current crowdsec configuration including : - -- Main config (config.yaml) -- 
Simulation config (simulation.yaml) -- Profiles config (profiles.yaml) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Backup of API credentials (local API and online API)`, - Example: `cscli config backup ./my-backup`, - Args: cobra.ExactArgs(1), + Use: "backup", DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - if err := cli.backup(args[0]); err != nil { - return fmt.Errorf("failed to backup config: %w", err) - } - - return nil + RunE: func(_ *cobra.Command, _ []string) error { + configDir := cli.cfg().ConfigPaths.ConfigDir + return fmt.Errorf("'cscli config backup' has been removed, you can manually backup/restore %s instead", configDir) }, } diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go index b5fbf36b2b4..75373475ed9 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/config_restore.go @@ -1,285 +1,18 @@ package main import ( - "context" - "encoding/json" "fmt" - "os" - "path/filepath" - log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/hubops" ) -func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, nil) - if err != nil { - return err - } - - contentProvider := require.HubDownloader(ctx, cfg) - - for _, itype := range cwhub.ItemTypes { - itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itype) - if _, err = os.Stat(itemDirectory); err != nil { - log.Infof("no %s in backup", itype) - continue - } - /*restore the upstream items*/ - upstreamListFN := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itype) - - file, err := os.ReadFile(upstreamListFN) - if err != nil { - return fmt.Errorf("error while opening %s: %w", 
upstreamListFN, err) - } - - var upstreamList []string - - err = json.Unmarshal(file, &upstreamList) - if err != nil { - return fmt.Errorf("error parsing %s: %w", upstreamListFN, err) - } - - for _, toinstall := range upstreamList { - item := hub.GetItem(itype, toinstall) - if item == nil { - log.Errorf("Item %s/%s not found in hub", itype, toinstall) - continue - } - - plan := hubops.NewActionPlan(hub) - - if err = plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, false)); err != nil { - return err - } - - if err = plan.AddCommand(hubops.NewEnableCommand(item, false)); err != nil { - return err - } - - if err = plan.Execute(ctx, true, false, false); err != nil { - log.Errorf("Error while installing %s : %s", toinstall, err) - } - } - - /*restore the local and tainted items*/ - files, err := os.ReadDir(itemDirectory) - if err != nil { - return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory, err) - } - - for _, file := range files { - // this was the upstream data - if file.Name() == fmt.Sprintf("upstream-%s.json", itype) { - continue - } - - if itype == cwhub.PARSERS || itype == cwhub.POSTOVERFLOWS { - // we expect a stage here - if !file.IsDir() { - continue - } - - stage := file.Name() - stagedir := fmt.Sprintf("%s/%s/%s/", cfg.ConfigPaths.ConfigDir, itype, stage) - log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) - - if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage directory %s: %w", stagedir, err) - } - - // find items - ifiles, err := os.ReadDir(itemDirectory + "/" + stage + "/") - if err != nil { - return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory+"/"+stage, err) - } - - // finally copy item - for _, tfile := range ifiles { - log.Infof("Going to restore local/tainted [%s]", tfile.Name()) - sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name()) - - destinationFile := fmt.Sprintf("%s%s", stagedir, 
tfile.Name()) - if err = CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) - } - - log.Infof("restored %s to %s", sourceFile, destinationFile) - } - } else { - log.Infof("Going to restore local/tainted [%s]", file.Name()) - sourceFile := fmt.Sprintf("%s/%s", itemDirectory, file.Name()) - destinationFile := fmt.Sprintf("%s/%s/%s", cfg.ConfigPaths.ConfigDir, itype, file.Name()) - - if err = CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) - } - - log.Infof("restored %s to %s", sourceFile, destinationFile) - } - } - } - - return nil -} - -/* - Restore crowdsec configurations to directory : - -- Main config (config.yaml) -- Profiles config (profiles.yaml) -- Simulation config (simulation.yaml) -- Backup of API credentials (local API and online API) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Acquisition files (acquis.yaml, acquis.d/*.yaml) -*/ -func (cli *cliConfig) restore(ctx context.Context, dirPath string) error { - var err error - - cfg := cli.cfg() - - backupMain := fmt.Sprintf("%s/config.yaml", dirPath) - if _, err = os.Stat(backupMain); err == nil { - if cfg.ConfigPaths != nil && cfg.ConfigPaths.ConfigDir != "" { - if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir)); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupMain, cfg.ConfigPaths.ConfigDir, err) - } - } - } - - // Now we have config.yaml, we should regenerate config struct to have rights paths etc - ConfigFilePath = fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir) - - log.Debug("Reloading configuration") - - csConfig, _, err = loadConfigFor("config") - if err != nil { - return fmt.Errorf("failed to reload configuration: %w", err) - } - - cfg 
= cli.cfg() - - backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupCAPICreds); err == nil { - if err = CopyFile(backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath, err) - } - } - - backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupLAPICreds); err == nil { - if err = CopyFile(backupLAPICreds, cfg.API.Client.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, cfg.API.Client.CredentialsFilePath, err) - } - } - - backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if _, err = os.Stat(backupProfiles); err == nil { - if err = CopyFile(backupProfiles, cfg.API.Server.ProfilesPath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, cfg.API.Server.ProfilesPath, err) - } - } - - backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) - if _, err = os.Stat(backupSimulation); err == nil { - if err = CopyFile(backupSimulation, cfg.ConfigPaths.SimulationFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, cfg.ConfigPaths.SimulationFilePath, err) - } - } - - /*if there is a acquisition dir, restore its content*/ - if cfg.Crowdsec.AcquisitionDirPath != "" { - if err = os.MkdirAll(cfg.Crowdsec.AcquisitionDirPath, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %w", cfg.Crowdsec.AcquisitionDirPath, err) - } - } - - // if there was a single one - backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath) - if _, err = os.Stat(backupAcquisition); err == nil { - log.Debugf("restoring backup'ed %s", backupAcquisition) - - if err = CopyFile(backupAcquisition, cfg.Crowdsec.AcquisitionFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, 
cfg.Crowdsec.AcquisitionFilePath, err) - } - } - - // if there are files in the acquis backup dir, restore them - acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml") - if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil { - for _, acquisFile := range acquisFiles { - targetFname, err := filepath.Abs(cfg.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile)) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, targetFname, err) - } - - log.Debugf("restoring %s to %s", acquisFile, targetFname) - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - } - } - - if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { - log.Infof("backup filepath from dir -> %s", acquisFile) - - // if it was the default one, it has already been backed up - if cfg.Crowdsec.AcquisitionFilePath == acquisFile { - log.Infof("skip this one") - continue - } - - targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile))) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, acquisBackupDir, err) - } - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - - log.Infof("Saved acquis %s to %s", acquisFile, targetFname) - } - } - - if err = cli.restoreHub(ctx, dirPath); err != nil { - return fmt.Errorf("failed to restore hub config: %w", err) - } - - return nil -} - func (cli *cliConfig) newRestoreCmd() *cobra.Command { cmd := &cobra.Command{ - Use: `restore "directory"`, - Short: `Restore config in backup "directory"`, - Long: `Restore the crowdsec configuration from specified backup "directory" including: - -- Main config (config.yaml) -- Simulation config (simulation.yaml) -- Profiles config (profiles.yaml) -- List of scenarios, parsers, 
postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Backup of API credentials (local API and online API)`, - Args: cobra.ExactArgs(1), + Use: "restore", DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - dirPath := args[0] - - if err := cli.restore(cmd.Context(), dirPath); err != nil { - return fmt.Errorf("failed to restore config from %s: %w", dirPath, err) - } - - return nil + RunE: func(cmd *cobra.Command, _ []string) error { + configDir := cli.cfg().ConfigPaths.ConfigDir + return fmt.Errorf("'cscli config restore' has been removed, you can manually backup/restore %s instead", configDir) }, } diff --git a/cmd/crowdsec-cli/copyfile.go b/cmd/crowdsec-cli/copyfile.go deleted file mode 100644 index 272fb3f7851..00000000000 --- a/cmd/crowdsec-cli/copyfile.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" -) - -/*help to copy the file, ioutil doesn't offer the feature*/ - -func copyFileContents(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - - defer func() { - cerr := out.Close() - if err == nil { - err = cerr - } - }() - - if _, err = io.Copy(out, in); err != nil { - return - } - - err = out.Sync() - - return -} - -/*copy the file, ioutile doesn't offer the feature*/ -func CopyFile(sourceSymLink, destinationFile string) error { - sourceFile, err := filepath.EvalSymlinks(sourceSymLink) - if err != nil { - log.Infof("Not a symlink : %s", err) - - sourceFile = sourceSymLink - } - - sourceFileStat, err := os.Stat(sourceFile) - if err != nil { - return err - } - - if !sourceFileStat.Mode().IsRegular() { - // cannot copy non-regular files (e.g., directories, - // symlinks, devices, etc.) 
- return fmt.Errorf("copyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String()) - } - - destinationFileStat, err := os.Stat(destinationFile) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - if !(destinationFileStat.Mode().IsRegular()) { - return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) - } - - if os.SameFile(sourceFileStat, destinationFileStat) { - return err - } - } - - if err = os.Link(sourceFile, destinationFile); err != nil { - err = copyFileContents(sourceFile, destinationFile) - } - - return err -} diff --git a/debian/postinst b/debian/postinst index 77f2511f556..d50a7c0bfe2 100644 --- a/debian/postinst +++ b/debian/postinst @@ -11,14 +11,6 @@ if [ "$1" = configure ]; then mkdir -p /var/lib/crowdsec/data fi - if [[ -d /var/lib/crowdsec/backup ]]; then - cscli config restore /var/lib/crowdsec/backup/backup.config - rm -rf /var/lib/crowdsec/backup - /usr/bin/cscli hub update - /usr/bin/cscli hub upgrade - systemctl start crowdsec - fi - . /usr/share/crowdsec/wizard.sh -n if ! 
[[ -f /etc/crowdsec/acquis.yaml ]]; then echo Creating /etc/crowdsec/acquis.yaml @@ -82,12 +74,6 @@ if [ "$1" = configure ]; then set -e fi - - if [[ -f /var/lib/crowdsec/data/crowdsec.db.backup ]]; then - cp /var/lib/crowdsec/data/crowdsec.db.backup /var/lib/crowdsec/data/crowdsec.db - rm -f /var/lib/crowdsec/data/crowdsec.db.backup - fi - systemctl --quiet is-enabled crowdsec || systemctl unmask crowdsec && systemctl enable crowdsec API=$(cscli config show --key "Config.API.Server") @@ -107,8 +93,6 @@ if [ "$1" = configure ]; then echo " * Detailed guides are available in our documentation: https://docs.crowdsec.net" echo " * Configuration items created by the community can be found at the Hub: https://hub.crowdsec.net" echo " * Gain insights into your use of CrowdSec with the help of the console https://app.crowdsec.net" - - fi echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'" diff --git a/debian/preinst b/debian/preinst index 217b836caa6..df5b56cef3f 100644 --- a/debian/preinst +++ b/debian/preinst @@ -5,39 +5,4 @@ set -e # Source debconf library. . /usr/share/debconf/confmodule - -OLD_MAJOR_VERSION=$(echo $2 | cut -d'.' -f1) -OLD_MINOR_VERSION=$(echo $2 | cut -d'.' -f2) -OLD_PATCH_VERSION=$(echo $2 | cut -d'.' -f3|cut -d'-' -f1) - -NEW_MAJOR_VERSION=$(echo $3 | cut -d'.' -f1) -NEW_MINOR_VERSION=$(echo $3 | cut -d'.' -f2) -NEW_PATCH_VERSION=$(echo $3 | cut -d'.' -f3|cut -d'-' -f1) - - - -if [ "$1" = upgrade ]; then - - OLD_MAJOR_VERSION=$(echo $2 | cut -d'.' -f1) - OLD_MINOR_VERSION=$(echo $2 | cut -d'.' -f2) - OLD_PATCH_VERSION=$(echo $2 | cut -d'.' -f3|cut -d'-' -f1) - - NEW_MAJOR_VERSION=$(echo $3 | cut -d'.' -f1) - NEW_MINOR_VERSION=$(echo $3 | cut -d'.' -f2) - NEW_PATCH_VERSION=$(echo $3 | cut -d'.' 
-f3|cut -d'-' -f1) - - - if [[ $OLD_MAJOR_VERSION -eq "1" ]] && [[ $OLD_MINOR_VERSION -eq "0" ]] && [[ $OLD_PATCH_VERSION -lt "9" ]]; then - if [[ -f /var/lib/crowdsec/data/crowdsec.db ]]; then - cp /var/lib/crowdsec/data/crowdsec.db /var/lib/crowdsec/data/crowdsec.db.backup - fi - fi - - if [[ $NEW_MAJOR_VERSION -gt $OLD_MAJOR_VERSION ]]; then - echo "Stopping crowdsec" - systemctl stop crowdsec || true - cscli config backup /var/lib/crowdsec/backup - fi -fi - echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'" diff --git a/debian/prerm b/debian/prerm index a463a6a1c80..10afcf1906d 100644 --- a/debian/prerm +++ b/debian/prerm @@ -1,9 +1,8 @@ if [ "$1" = "remove" ]; then - cscli dashboard remove -f -y --error || echo "Ignore the above error if you never installed the local dashboard." systemctl stop crowdsec systemctl disable crowdsec fi if [ "$1" = "upgrade" ]; then systemctl stop crowdsec -fi \ No newline at end of file +fi diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index ac438ad0c14..c24b3f2ac0d 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -143,18 +143,15 @@ rm -rf %{buildroot} #systemctl stop crowdsec || true -if [ $1 == 2 ];then - if [[ ! -d /var/lib/crowdsec/backup ]]; then - cscli config backup /var/lib/crowdsec/backup - fi -fi +#if [ $1 == 2 ]; then +# upgrade pre-install here +#fi %post -p /bin/bash #install if [ $1 == 1 ]; then - if [ ! 
-f "/var/lib/crowdsec/data/crowdsec.db" ] ; then touch /var/lib/crowdsec/data/crowdsec.db fi @@ -185,21 +182,6 @@ if [ $1 == 1 ]; then echo " * Detailed guides are available in our documentation: https://docs.crowdsec.net" echo " * Configuration items created by the community can be found at the Hub: https://hub.crowdsec.net" echo " * Gain insights into your use of CrowdSec with the help of the console https://app.crowdsec.net" - -#upgrade -elif [ $1 == 2 ] && [ -d /var/lib/crowdsec/backup ]; then - cscli config restore /var/lib/crowdsec/backup - if [ $? == 0 ]; then - rm -rf /var/lib/crowdsec/backup - fi - - if [[ -f %{_sysconfdir}/crowdsec/online_api_credentials.yaml ]] ; then - chmod 600 %{_sysconfdir}/crowdsec/online_api_credentials.yaml - fi - - if [[ -f %{_sysconfdir}/crowdsec/local_api_credentials.yaml ]] ; then - chmod 600 %{_sysconfdir}/crowdsec/local_api_credentials.yaml - fi fi %systemd_post %{name}.service diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 63c204a9e86..9af3c841759 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -172,41 +172,13 @@ teardown() { } @test "cscli config backup / restore" { - # test that we need a valid path - # disabled because in CI, the empty string is not passed as a parameter - #rune -1 cscli config backup "" - #assert_stderr --partial "failed to backup config: directory path can't be empty" + CONFIG_DIR=$(config_get '.config_paths.config_dir') rune -1 cscli config backup "/dev/null/blah" - assert_stderr --partial "failed to backup config: while creating /dev/null/blah: mkdir /dev/null/blah: not a directory" + assert_stderr --partial "'cscli config backup' has been removed, you can manually backup/restore $CONFIG_DIR instead" - # pick a dirpath - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - - # succeed the first time - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - - # don't overwrite an existing backup - rune -1 cscli 
config backup "$backupdir" - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" - - SIMULATION_YAML="$(config_get '.config_paths.simulation_path')" - - # restore - rm "$SIMULATION_YAML" - rune -0 cscli config restore "$backupdir" - assert_file_exists "$SIMULATION_YAML" - - # cleanup - rm -rf -- "${backupdir:?}" - - # backup: detect missing files - rm "$SIMULATION_YAML" - rune -1 cscli config backup "$backupdir" - assert_stderr --regexp "failed to backup config: failed copy .* to .*: stat .*: no such file or directory" - rm -rf -- "${backupdir:?}" + rune -1 cscli config restore "/dev/null/blah" + assert_stderr --partial "'cscli config restore' has been removed, you can manually backup/restore $CONFIG_DIR instead" } @test "'cscli completion' with or without configuration file" { diff --git a/test/bats/02_nolapi.bats b/test/bats/02_nolapi.bats index cefa6d798b4..70495a0ed91 100644 --- a/test/bats/02_nolapi.bats +++ b/test/bats/02_nolapi.bats @@ -66,18 +66,6 @@ teardown() { refute_output --partial "Local API Server" } -@test "cscli config backup" { - config_disable_lapi - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "$backupdir" - rm -rf -- "${backupdir:?}" - - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" -} - @test "lapi status shouldn't be ok without api.server" { config_disable_lapi rune -1 cscli machines list diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index 6be5101cee2..972b84977ad 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats @@ -60,18 +60,6 @@ teardown() { refute_output --partial "Crowdsec" } -@test "no agent: cscli config backup" { - config_disable_agent - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration 
backup" - rune -1 cscli config backup "$backupdir" - - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" - rm -rf -- "${backupdir:?}" -} - @test "no agent: lapi status should be ok" { config_disable_agent ./instance-crowdsec start diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index 8d0018a9a4a..eaeb0939112 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -51,17 +51,6 @@ teardown() { assert_output --regexp "Global:.*Crowdsec.*cscli:.*Local API Server:" } -@test "no agent: cscli config backup" { - config_disable_capi - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "$backupdir" - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" - rm -rf -- "${backupdir:?}" -} - @test "without capi: cscli lapi status -> success" { config_disable_capi ./instance-crowdsec start diff --git a/wizard.sh b/wizard.sh index 57311d40cdb..4da970cd695 100755 --- a/wizard.sh +++ b/wizard.sh @@ -21,11 +21,8 @@ DOCKER_MODE="false" CROWDSEC_LIB_DIR="/var/lib/crowdsec" CROWDSEC_USR_DIR="/usr/local/lib/crowdsec" CROWDSEC_DATA_DIR="${CROWDSEC_LIB_DIR}/data" -CROWDSEC_DB_PATH="${CROWDSEC_DATA_DIR}/crowdsec.db" CROWDSEC_PATH="/etc/crowdsec" CROWDSEC_CONFIG_PATH="${CROWDSEC_PATH}" -CROWDSEC_LOG_FILE="/var/log/crowdsec.log" -LAPI_LOG_FILE="/var/log/crowdsec_api.log" CROWDSEC_PLUGIN_DIR="${CROWDSEC_USR_DIR}/plugins" CROWDSEC_CONSOLE_DIR="${CROWDSEC_PATH}/console" @@ -35,8 +32,6 @@ CSCLI_BIN="./cmd/crowdsec-cli/cscli" CLIENT_SECRETS="local_api_credentials.yaml" LAPI_SECRETS="online_api_credentials.yaml" -CONSOLE_FILE="console.yaml" - BIN_INSTALL_PATH="/usr/local/bin" CROWDSEC_BIN_INSTALLED="${BIN_INSTALL_PATH}/crowdsec" @@ -91,9 +86,6 @@ SENTINEL_PLUGIN_CONFIG="./cmd/notification-sentinel/sentinel.yaml" 
FILE_PLUGIN_CONFIG="./cmd/notification-file/file.yaml" -BACKUP_DIR=$(mktemp -d) -rm -rf -- "$BACKUP_DIR" - log_info() { msg=$1 date=$(date "+%Y-%m-%d %H:%M:%S") @@ -426,27 +418,20 @@ install_crowdsec() { mkdir -p "${CROWDSEC_CONFIG_PATH}/contexts" || exit mkdir -p "${CROWDSEC_CONSOLE_DIR}" || exit - # tmp - mkdir -p /tmp/data mkdir -p /etc/crowdsec/hub/ - install -v -m 600 -D "./config/${CLIENT_SECRETS}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 600 -D "./config/${LAPI_SECRETS}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - - ## end tmp - install -v -m 600 -D ./config/config.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/user.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/"${CONSOLE_FILE}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/context.yaml "${CROWDSEC_CONSOLE_DIR}" 1> /dev/null || exit + # Don't overwrite existing files + [[ ! -f "${CROWDSEC_CONFIG_PATH}/${CLIENT_SECRETS}" ]] && install -v -m 600 -D "./config/${CLIENT_SECRETS}" "${CROWDSEC_CONFIG_PATH}" >/dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/${LAPI_SECRETS}" ]] && install -v -m 600 -D "./config/${LAPI_SECRETS}" "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/config.yaml" ]] && install -v -m 600 -D ./config/config.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/dev.yaml" ]] && install -v -m 644 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! 
-f "${CROWDSEC_CONFIG_PATH}/user.yaml" ]] && install -v -m 644 -D ./config/user.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/acquis.yaml" ]] && install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/profiles.yaml" ]] && install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/simulation.yaml" ]] && install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/console.yaml" ]] && install -v -m 644 -D ./config/console.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/context.yaml" ]] && install -v -m 644 -D ./config/context.yaml "${CROWDSEC_CONSOLE_DIR}" > /dev/null || exit - DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $DATA' < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml" || log_fatal "unable to generate user configuration file" - if [[ ${DOCKER_MODE} == "false" ]]; then - CFG=${CROWDSEC_CONFIG_PATH} BIN=${CROWDSEC_BIN_INSTALLED} envsubst '$CFG $BIN' < ./config/crowdsec.service > "${SYSTEMD_PATH_FILE}" || log_fatal "unable to crowdsec systemd file" - fi install_bins if [[ ${DOCKER_MODE} == "false" ]]; then @@ -471,23 +456,12 @@ update_full() { log_err "Cscli binary '$CSCLI_BIN' not found. 
Please build it with 'make build'" && exit fi - log_info "Backing up existing configuration" - ${CSCLI_BIN_INSTALLED} config backup ${BACKUP_DIR} - log_info "Saving default database content if exist" - if [[ -f "/var/lib/crowdsec/data/crowdsec.db" ]]; then - cp /var/lib/crowdsec/data/crowdsec.db ${BACKUP_DIR}/crowdsec.db - fi - log_info "Cleanup existing crowdsec configuration" + log_info "Removing old binaries" uninstall_crowdsec log_info "Installing crowdsec" install_crowdsec - log_info "Restoring configuration" + log_info "Updating hub" ${CSCLI_BIN_INSTALLED} hub update - ${CSCLI_BIN_INSTALLED} config restore ${BACKUP_DIR} - log_info "Restoring saved database if exist" - if [[ -f "${BACKUP_DIR}/crowdsec.db" ]]; then - cp ${BACKUP_DIR}/crowdsec.db /var/lib/crowdsec/data/crowdsec.db - fi log_info "Finished, restarting" systemctl restart crowdsec || log_fatal "Failed to restart crowdsec" } @@ -565,15 +539,6 @@ uninstall_crowdsec() { ${CSCLI_BIN} dashboard remove -f -y >/dev/null delete_bins - # tmp - rm -rf /tmp/data/ - ## end tmp - - find /etc/crowdsec -maxdepth 1 -mindepth 1 | grep -v "bouncer" | xargs rm -rf || echo "" - rm -f ${CROWDSEC_LOG_FILE} || echo "" - rm -f ${LAPI_LOG_FILE} || echo "" - rm -f ${CROWDSEC_DB_PATH} || echo "" - rm -rf ${CROWDSEC_LIB_DIR} || echo "" rm -rf ${CROWDSEC_USR_DIR} || echo "" rm -f ${SYSTEMD_PATH_FILE} || echo "" log_info "crowdsec successfully uninstalled" @@ -765,12 +730,11 @@ usage() { echo " ./wizard.sh --unattended Install in unattended mode, no question will be asked and defaults will be followed" echo " ./wizard.sh --docker-mode Will install crowdsec without systemd and generate random machine-id" echo " ./wizard.sh -n|--noop Do nothing" - - exit 0 } if [[ $# -eq 0 ]]; then -usage + usage + exit 0 fi while [[ $# -gt 0 ]] From fe931af5cae0e7a5739e2073e5ae7f2f4ccbdebd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 16 Jan 2025 14:03:53 +0100 Subject: [PATCH 393/581] lint: 
gocritic/captLocal (don't capitalize local variables) (#3402) * lint: gocritic/captLocal (don't capitalize local variables) * lint (whitespace) --- .golangci.yml | 1 - cmd/crowdsec-cli/clidecision/decisions.go | 4 +- pkg/acquisition/acquisition.go | 20 ++++---- pkg/acquisition/modules/appsec/appsec.go | 22 +++++++-- .../modules/cloudwatch/cloudwatch.go | 39 +++++++++++++++- pkg/acquisition/modules/docker/docker.go | 4 +- pkg/acquisition/modules/file/file.go | 4 +- pkg/acquisition/modules/http/http.go | 6 ++- .../modules/journalctl/journalctl.go | 4 +- pkg/acquisition/modules/kafka/kafka.go | 7 ++- pkg/acquisition/modules/kinesis/kinesis.go | 4 +- .../modules/kubernetesaudit/k8s_audit.go | 4 +- pkg/acquisition/modules/loki/loki.go | 4 +- pkg/acquisition/modules/syslog/syslog.go | 4 +- .../wineventlog/wineventlog_windows.go | 4 +- pkg/apiclient/client.go | 4 +- pkg/csprofiles/csprofiles.go | 44 +++++++++--------- pkg/database/flush.go | 16 +++---- pkg/exprhelpers/crowdsec_cti.go | 24 +++++----- pkg/exprhelpers/helpers.go | 46 +++++++++++++------ pkg/leakybucket/bayesian.go | 4 +- pkg/leakybucket/overflow_filter.go | 4 +- 22 files changed, 172 insertions(+), 101 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 12d35ed8737..7df08cf717c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -192,7 +192,6 @@ linters-settings: - unnamedResult - sloppyReassign - appendCombine - - captLocal - typeUnparen - commentFormatting - deferInLoop # diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index 307cabffe51..b5865bab6e0 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -170,7 +170,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { return cmd } -func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { +func (cli *cliDecisions) list(ctx context.Context, filter 
apiclient.AlertsListOpts, noSimu *bool, contained *bool, printMachine bool) error { var err error *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, *filter.IPEquals, *filter.RangeEquals) @@ -181,7 +181,7 @@ func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOp filter.ActiveDecisionEquals = new(bool) *filter.ActiveDecisionEquals = true - if NoSimu != nil && *NoSimu { + if noSimu != nil && *noSimu { filter.IncludeSimulated = new(bool) } /* nullify the empty entries to avoid bad filter */ diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index 06a4918592b..d3928270598 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -365,13 +365,13 @@ func copyEvent(evt types.Event, line string) types.Event { return evtCopy } -func transform(transformChan chan types.Event, output chan types.Event, AcquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { +func transform(transformChan chan types.Event, output chan types.Event, acquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { defer trace.CatchPanic("crowdsec/acquis") logger.Infof("transformer started") for { select { - case <-AcquisTomb.Dying(): + case <-acquisTomb.Dying(): logger.Debugf("transformer is dying") return case evt := <-transformChan: @@ -420,7 +420,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo } } -func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { +func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, acquisTomb *tomb.Tomb) error { // Don't wait if we have no sources, as it will hang forever if len(sources) == 0 { return nil @@ -430,7 +430,7 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ subsrc := sources[i] // ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), 
subsrc) - AcquisTomb.Go(func() error { + acquisTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis") var err error @@ -449,21 +449,21 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ "datasource": subsrc.GetName(), }) - AcquisTomb.Go(func() error { - transform(outChan, output, AcquisTomb, transformRuntime, transformLogger) + acquisTomb.Go(func() error { + transform(outChan, output, acquisTomb, transformRuntime, transformLogger) return nil }) } if subsrc.GetMode() == configuration.TAIL_MODE { - err = subsrc.StreamingAcquisition(ctx, outChan, AcquisTomb) + err = subsrc.StreamingAcquisition(ctx, outChan, acquisTomb) } else { - err = subsrc.OneShotAcquisition(ctx, outChan, AcquisTomb) + err = subsrc.OneShotAcquisition(ctx, outChan, acquisTomb) } if err != nil { // if one of the acqusition returns an error, we kill the others to properly shutdown - AcquisTomb.Kill(err) + acquisTomb.Kill(err) } return nil @@ -471,7 +471,7 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ } /*return only when acquisition is over (cat) or never (tail)*/ - err := AcquisTomb.Wait() + err := acquisTomb.Wait() return err } diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 2f7861b32ff..86dbfe38b71 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -155,14 +155,14 @@ func (w *AppsecSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{AppsecReqCounter, AppsecBlockCounter, AppsecRuleHits, AppsecOutbandParsingHistogram, AppsecInbandParsingHistogram, AppsecGlobalParsingHistogram} } -func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := w.UnmarshalConfig(yamlConfig) if err != nil { return fmt.Errorf("unable to parse appsec configuration: %w", 
err) } w.logger = logger - w.metricsLevel = MetricsLevel + w.metricsLevel = metricsLevel w.logger.Tracef("Appsec configuration: %+v", w.config) if w.config.AuthCacheDuration == nil { @@ -180,7 +180,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.InChan = make(chan appsec.ParsedRequest) appsecCfg := appsec.AppsecConfig{Logger: w.logger.WithField("component", "appsec_config")} - //we keep the datasource name + // we keep the datasource name appsecCfg.Name = w.config.Name // let's load the associated appsec_config: @@ -275,6 +275,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. for _, runner := range w.AppsecRunners { runner.outChan = out + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live/runner") return runner.Run(t) @@ -285,16 +286,20 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. if w.config.ListenSocket != "" { w.logger.Infof("creating unix socket %s", w.config.ListenSocket) _ = os.RemoveAll(w.config.ListenSocket) + listener, err := net.Listen("unix", w.config.ListenSocket) if err != nil { return fmt.Errorf("appsec server failed: %w", err) } + defer listener.Close() + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { err = w.server.ServeTLS(listener, w.config.CertFilePath, w.config.KeyFilePath) } else { err = w.server.Serve(listener) } + if err != nil && !errors.Is(err, http.ErrServerClosed) { return fmt.Errorf("appsec server failed: %w", err) } @@ -304,8 +309,10 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. 
}) t.Go(func() error { var err error + if w.config.ListenAddr != "" { w.logger.Infof("creating TCP server on %s", w.config.ListenAddr) + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { err = w.server.ListenAndServeTLS(w.config.CertFilePath, w.config.KeyFilePath) } else { @@ -324,6 +331,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. // xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) w.server.Shutdown(ctx) + return nil }) @@ -354,11 +362,13 @@ func (w *AppsecSource) IsAuth(apiKey string) bool { } req.Header.Add("X-Api-Key", apiKey) + resp, err := client.Do(req) if err != nil { log.Errorf("Error performing request: %s", err) return false } + defer resp.Body.Close() return resp.StatusCode == http.StatusOK @@ -371,17 +381,21 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { apiKey := r.Header.Get(appsec.APIKeyHeaderName) clientIP := r.Header.Get(appsec.IPHeaderName) remoteIP := r.RemoteAddr + if apiKey == "" { w.logger.Errorf("Unauthorized request from '%s' (real IP = %s)", remoteIP, clientIP) rw.WriteHeader(http.StatusUnauthorized) + return } + expiration, exists := w.AuthCache.Get(apiKey) // if the apiKey is not in cache or has expired, just recheck the auth if !exists || time.Now().After(expiration) { if !w.IsAuth(apiKey) { rw.WriteHeader(http.StatusUnauthorized) w.logger.Errorf("Unauthorized request from '%s' (real IP = %s)", remoteIP, clientIP) + return } @@ -394,8 +408,10 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { if err != nil { w.logger.Errorf("%s", err) rw.WriteHeader(http.StatusInternalServerError) + return } + parsedRequest.AppsecEngine = w.config.Name logger := w.logger.WithFields(log.Fields{ diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index ba267c9050b..5739ebc3124 100644 --- 
a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -154,13 +154,13 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := cw.UnmarshalConfig(yamlConfig) if err != nil { return err } - cw.metricsLevel = MetricsLevel + cw.metricsLevel = metricsLevel cw.logger = logger.WithField("group", cw.Config.GroupName) @@ -330,9 +330,12 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha LastIngestionTime := time.Unix(0, *event.LastIngestionTime*int64(time.Millisecond)) if LastIngestionTime.Before(oldest) { cw.logger.Tracef("stop iteration, %s reached oldest age, stop (%s < %s)", *event.LogStreamName, LastIngestionTime, time.Now().UTC().Add(-*cw.Config.MaxStreamAge)) + hasMoreStreams = false + return false } + cw.logger.Tracef("stream %s is elligible for monitoring", *event.LogStreamName) // the stream has been updated recently, check if we should monitor it var expectMode int @@ -341,6 +344,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha } else { expectMode = types.TIMEMACHINE } + monitorStream := LogStreamTailConfig{ GroupName: cw.Config.GroupName, StreamName: *event.LogStreamName, @@ -354,16 +358,20 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha out <- monitorStream } } + if lastPage { cw.logger.Tracef("reached last page") + hasMoreStreams = false } + return true }, ) if err != nil { return fmt.Errorf("while describing group %s: %w", cw.Config.GroupName, err) } + cw.logger.Tracef("after DescribeLogStreamsPagesWithContext") } } @@ -373,12 +381,14 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha // LogStreamManager receives the potential 
streams to monitor, and starts a go routine when needed func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStreamTailConfig, outChan chan types.Event) error { cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName) + pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval) for { select { case newStream := <-in: //nolint:govet // copylocks won't matter if the tomb is not initialized shouldCreate := true + cw.logger.Tracef("received new streams to monitor : %s/%s", newStream.GroupName, newStream.StreamName) if cw.Config.StreamName != nil && newStream.StreamName != *cw.Config.StreamName { @@ -402,12 +412,16 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr if !stream.t.Alive() { cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName) cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...) + if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec() } + break } + shouldCreate = false + break } } @@ -417,19 +431,23 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() } + newStream.t = tomb.Tomb{} newStream.logger = cw.logger.WithField("stream", newStream.StreamName) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) newStream.t.Go(func() error { return cw.TailLogStream(ctx, &newStream, outChan) }) + cw.monitoredStreams = append(cw.monitoredStreams, &newStream) } case <-pollDeadStreamInterval.C: newMonitoredStreams := cw.monitoredStreams[:0] + for idx, stream := range cw.monitoredStreams { if !cw.monitoredStreams[idx].t.Alive() { cw.logger.Debugf("remove dead stream %s", stream.StreamName) + if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": 
cw.monitoredStreams[idx].GroupName}).Dec() } @@ -437,20 +455,25 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr newMonitoredStreams = append(newMonitoredStreams, stream) } } + cw.monitoredStreams = newMonitoredStreams case <-cw.t.Dying(): cw.logger.Infof("LogStreamManager for %s is dying, %d alive streams", cw.Config.GroupName, len(cw.monitoredStreams)) + for idx, stream := range cw.monitoredStreams { if cw.monitoredStreams[idx].t.Alive() { cw.logger.Debugf("killing stream %s", stream.StreamName) cw.monitoredStreams[idx].t.Kill(nil) + if err := cw.monitoredStreams[idx].t.Wait(); err != nil { cw.logger.Debugf("error while waiting for death of %s : %s", stream.StreamName, err) } } } + cw.monitoredStreams = nil cw.logger.Debugf("routine cleanup done, return") + return nil } } @@ -458,12 +481,14 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string + lastReadMessage := time.Now().UTC() ticker := time.NewTicker(cfg.PollStreamInterval) // resume at existing index if we already had streamIndexMutex.Lock() v := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] streamIndexMutex.Unlock() + if v != "" { cfg.logger.Debugf("restarting on index %s", v) startFrom = &v @@ -474,7 +499,9 @@ func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTai select { case <-ticker.C: cfg.logger.Tracef("entering loop") + hasMorePages := true + for hasMorePages { /*for the first call, we only consume the last item*/ cfg.logger.Tracef("calling GetLogEventsPagesWithContext") @@ -489,36 +516,44 @@ func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTai func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool { cfg.logger.Tracef("%d results, last:%t", len(page.Events), lastPage) startFrom = page.NextForwardToken + if 
page.NextForwardToken != nil { streamIndexMutex.Lock() cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] = *page.NextForwardToken streamIndexMutex.Unlock() } + if lastPage { /*wait another ticker to check on new log availability*/ cfg.logger.Tracef("last page") + hasMorePages = false } + if len(page.Events) > 0 { lastReadMessage = time.Now().UTC() } + for _, event := range page.Events { evt, err := cwLogToEvent(event, cfg) if err != nil { cfg.logger.Warningf("cwLogToEvent error, discarded event : %s", err) } else { cfg.logger.Debugf("pushing message : %s", evt.Line.Raw) + if cw.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc() } outChan <- evt } } + return true }, ) if err != nil { newerr := fmt.Errorf("while reading %s/%s: %w", cfg.GroupName, cfg.StreamName, err) cfg.logger.Warningf("err : %s", newerr) + return newerr } cfg.logger.Tracef("done reading GetLogEventsPagesWithContext") diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 798eba29440..582da3d53a1 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -136,9 +136,9 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { d.logger = logger - d.metricsLevel = MetricsLevel + d.metricsLevel = metricsLevel err := d.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 9f439b0c82e..697a3d35dc2 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -102,9 +102,9 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (f *FileSource) Configure(yamlConfig 
[]byte, logger *log.Entry, MetricsLevel int) error { +func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { f.logger = logger - f.metricsLevel = MetricsLevel + f.metricsLevel = metricsLevel err := f.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/http/http.go b/pkg/acquisition/modules/http/http.go index 3e4f26915fd..97e220570ff 100644 --- a/pkg/acquisition/modules/http/http.go +++ b/pkg/acquisition/modules/http/http.go @@ -157,9 +157,9 @@ func (hc *HttpConfiguration) Validate() error { return nil } -func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { h.logger = logger - h.metricsLevel = MetricsLevel + h.metricsLevel = metricsLevel err := h.UnmarshalConfig(yamlConfig) if err != nil { @@ -339,12 +339,14 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { if r.Method != http.MethodPost { h.logger.Errorf("method not allowed: %s", r.Method) http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return } if err := authorizeRequest(r, &h.Config); err != nil { h.logger.Errorf("failed to authorize request from '%s': %s", r.RemoteAddr, err) http.Error(w, "Unauthorized", http.StatusUnauthorized) + return } diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 47d90e2b3a0..f72878d9b3c 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -210,9 +210,9 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { j.logger = logger - j.metricsLevel = MetricsLevel + j.metricsLevel 
= metricsLevel err := j.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index 77fc44e310d..f213b85814c 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -85,9 +85,9 @@ func (k *KafkaSource) UnmarshalConfig(yamlConfig []byte) error { return err } -func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { k.logger = logger - k.metricsLevel = MetricsLevel + k.metricsLevel = metricsLevel k.logger.Debugf("start configuring %s source", dataSourceName) @@ -160,6 +160,7 @@ func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) err k.logger.Errorln(fmt.Errorf("while reading %s message: %w", dataSourceName, err)) continue } + k.logger.Tracef("got message: %s", string(m.Value)) l := types.Line{ Raw: string(m.Value), @@ -170,9 +171,11 @@ func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) err Module: k.GetName(), } k.logger.Tracef("line with message read from topic '%s': %+v", k.Config.Topic, l) + if k.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) evt.Line = l out <- evt diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index b166a706ca9..16c91ad06bc 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -161,9 +161,9 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { k.logger = logger - k.metricsLevel = 
MetricsLevel + k.metricsLevel = metricsLevel err := k.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index aaa83a3bbb2..f8c325b5c4b 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -97,9 +97,9 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { +func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { ka.logger = logger - ka.metricsLevel = MetricsLevel + ka.metricsLevel = metricsLevel err := ka.UnmarshalConfig(config) if err != nil { diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index c57e6a67c94..47493d8cdfe 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -120,10 +120,10 @@ func (l *LokiSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (l *LokiSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { +func (l *LokiSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { l.Config = LokiConfiguration{} l.logger = logger - l.metricsLevel = MetricsLevel + l.metricsLevel = metricsLevel err := l.UnmarshalConfig(config) if err != nil { return err diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index fb6a04600c1..df805d08cae 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -124,10 +124,10 @@ func (s *SyslogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, 
metricsLevel int) error { s.logger = logger s.logger.Infof("Starting syslog datasource configuration") - s.metricsLevel = MetricsLevel + s.metricsLevel = metricsLevel err := s.UnmarshalConfig(yamlConfig) if err != nil { return err diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 8283bcc21a2..22186ea96cb 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -287,9 +287,9 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { w.logger = logger - w.metricsLevel = MetricsLevel + w.metricsLevel = metricsLevel err := w.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 47d97a28344..ec473beca77 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -125,8 +125,8 @@ func NewClient(config *Config) (*ApiClient, error) { return c, nil } -func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { - transport, baseURL := createTransport(URL) +func NewDefaultClient(url *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { + transport, baseURL := createTransport(url) if client == nil { client = &http.Client{} diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index 52cda1ed2e1..c509fb448e3 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -96,17 +96,17 @@ func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { return profilesRuntime, nil } -func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*models.Decision, error) { 
+func (profile *Runtime) GenerateDecisionFromProfile(alert *models.Alert) ([]*models.Decision, error) { var decisions []*models.Decision - for _, refDecision := range Profile.Cfg.Decisions { + for _, refDecision := range profile.Cfg.Decisions { decision := models.Decision{} /*the reference decision from profile is in simulated mode */ if refDecision.Simulated != nil && *refDecision.Simulated { decision.Simulated = new(bool) *decision.Simulated = true /*the event is already in simulation mode */ - } else if Alert.Simulated != nil && *Alert.Simulated { + } else if alert.Simulated != nil && *alert.Simulated { decision.Simulated = new(bool) *decision.Simulated = true } @@ -116,7 +116,7 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod if refDecision.Scope != nil && *refDecision.Scope != "" { *decision.Scope = *refDecision.Scope } else { - *decision.Scope = *Alert.Source.Scope + *decision.Scope = *alert.Source.Scope } /*some fields are populated from the reference object : duration, scope, type*/ @@ -125,19 +125,19 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod *decision.Duration = *refDecision.Duration } - if Profile.Cfg.DurationExpr != "" && Profile.RuntimeDurationExpr != nil { + if profile.Cfg.DurationExpr != "" && profile.RuntimeDurationExpr != nil { profileDebug := false - if Profile.Cfg.Debug != nil && *Profile.Cfg.Debug { + if profile.Cfg.Debug != nil && *profile.Cfg.Debug { profileDebug = true } - duration, err := exprhelpers.Run(Profile.RuntimeDurationExpr, map[string]interface{}{"Alert": Alert}, Profile.Logger, profileDebug) + duration, err := exprhelpers.Run(profile.RuntimeDurationExpr, map[string]interface{}{"Alert": alert}, profile.Logger, profileDebug) if err != nil { - Profile.Logger.Warningf("Failed to run duration_expr : %v", err) + profile.Logger.Warningf("Failed to run duration_expr : %v", err) } else { durationStr := fmt.Sprint(duration) if _, err := 
time.ParseDuration(durationStr); err != nil { - Profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration) + profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration) } else { *decision.Duration = durationStr } @@ -149,7 +149,7 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod /*for the others, let's populate it from the alert and its source*/ decision.Value = new(string) - *decision.Value = *Alert.Source.Value + *decision.Value = *alert.Source.Value decision.Origin = new(string) *decision.Origin = types.CrowdSecOrigin @@ -158,7 +158,7 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod } decision.Scenario = new(string) - *decision.Scenario = *Alert.Scenario + *decision.Scenario = *alert.Scenario decisions = append(decisions, &decision) } @@ -166,21 +166,21 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod } // EvaluateProfile is going to evaluate an Alert against a profile to generate Decisions -func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision, bool, error) { +func (profile *Runtime) EvaluateProfile(alert *models.Alert) ([]*models.Decision, bool, error) { var decisions []*models.Decision matched := false - for eIdx, expression := range Profile.RuntimeFilters { + for eIdx, expression := range profile.RuntimeFilters { debugProfile := false - if Profile.Cfg.Debug != nil && *Profile.Cfg.Debug { + if profile.Cfg.Debug != nil && *profile.Cfg.Debug { debugProfile = true } - output, err := exprhelpers.Run(expression, map[string]interface{}{"Alert": Alert}, Profile.Logger, debugProfile) + output, err := exprhelpers.Run(expression, map[string]interface{}{"Alert": alert}, profile.Logger, debugProfile) if err != nil { - Profile.Logger.Warningf("failed to run profile expr for %s: %v", Profile.Cfg.Name, err) - return nil, matched, fmt.Errorf("while running expression %s: %w", Profile.Cfg.Filters[eIdx], err) 
+ profile.Logger.Warningf("failed to run profile expr for %s: %v", profile.Cfg.Name, err) + return nil, matched, fmt.Errorf("while running expression %s: %w", profile.Cfg.Filters[eIdx], err) } switch out := output.(type) { @@ -188,22 +188,22 @@ func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision if out { matched = true /*the expression matched, create the associated decision*/ - subdecisions, err := Profile.GenerateDecisionFromProfile(Alert) + subdecisions, err := profile.GenerateDecisionFromProfile(alert) if err != nil { - return nil, matched, fmt.Errorf("while generating decision from profile %s: %w", Profile.Cfg.Name, err) + return nil, matched, fmt.Errorf("while generating decision from profile %s: %w", profile.Cfg.Name, err) } decisions = append(decisions, subdecisions...) } else { - Profile.Logger.Debugf("Profile %s filter is unsuccessful", Profile.Cfg.Name) + profile.Logger.Debugf("Profile %s filter is unsuccessful", profile.Cfg.Name) - if Profile.Cfg.OnFailure == "break" { + if profile.Cfg.OnFailure == "break" { break } } default: - return nil, matched, fmt.Errorf("unexpected type %t (%v) while running '%s'", output, output, Profile.Cfg.Filters[eIdx]) + return nil, matched, fmt.Errorf("unexpected type %t (%v) while running '%s'", output, output, profile.Cfg.Filters[eIdx]) } } diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 8f646ddc961..4a3a93a406c 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -222,7 +222,7 @@ func (c *Client) FlushAgentsAndBouncers(ctx context.Context, agentsCfg *csconfig return nil } -func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) error { +func (c *Client) FlushAlerts(ctx context.Context, maxAge string, maxItems int) error { var ( deletedByAge int deletedByNbItem int @@ -247,22 +247,22 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e c.Log.Debugf("FlushAlerts (Total alerts): %d", totalAlerts) - if MaxAge != 
"" { + if maxAge != "" { filter := map[string][]string{ - "created_before": {MaxAge}, + "created_before": {maxAge}, } nbDeleted, err := c.DeleteAlertWithFilter(ctx, filter) if err != nil { c.Log.Warningf("FlushAlerts (max age): %s", err) - return fmt.Errorf("unable to flush alerts with filter until=%s: %w", MaxAge, err) + return fmt.Errorf("unable to flush alerts with filter until=%s: %w", maxAge, err) } c.Log.Debugf("FlushAlerts (deleted max age alerts): %d", nbDeleted) deletedByAge = nbDeleted } - if MaxItems > 0 { + if maxItems > 0 { // We get the highest id for the alerts // We subtract MaxItems to avoid deleting alerts that are not old enough // This gives us the oldest alert that we want to keep @@ -282,7 +282,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e } if len(lastAlert) != 0 { - maxid := lastAlert[0].ID - MaxItems + maxid := lastAlert[0].ID - maxItems c.Log.Debugf("FlushAlerts (max id): %d", maxid) @@ -299,12 +299,12 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e if deletedByNbItem > 0 { c.Log.Infof("flushed %d/%d alerts because the max number of alerts has been reached (%d max)", - deletedByNbItem, totalAlerts, MaxItems) + deletedByNbItem, totalAlerts, maxItems) } if deletedByAge > 0 { c.Log.Infof("flushed %d/%d alerts because they were created %s ago or more", - deletedByAge, totalAlerts, MaxAge) + deletedByAge, totalAlerts, maxAge) } return nil diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 9b9eac4b95c..900bd7824a8 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -29,29 +29,29 @@ var ( var ctiClient *cticlient.CrowdsecCTIClient -func InitCrowdsecCTI(Key *string, TTL *time.Duration, Size *int, LogLevel *log.Level) error { - if Key == nil || *Key == "" { +func InitCrowdsecCTI(key *string, ttl *time.Duration, size *int, logLevel *log.Level) error { + if key == nil || *key == "" { log.Warningf("CTI API key 
not set or empty, CTI will not be available") return cticlient.ErrDisabled } - CTIApiKey = *Key - if Size == nil { - Size = new(int) - *Size = 1000 + CTIApiKey = *key + if size == nil { + size = new(int) + *size = 1000 } - if TTL == nil { - TTL = new(time.Duration) - *TTL = 5 * time.Minute + if ttl == nil { + ttl = new(time.Duration) + *ttl = 5 * time.Minute } clog := log.New() if err := types.ConfigureLogger(clog); err != nil { return fmt.Errorf("while configuring datasource logger: %w", err) } - if LogLevel != nil { - clog.SetLevel(*LogLevel) + if logLevel != nil { + clog.SetLevel(*logLevel) } subLogger := clog.WithField("type", "crowdsec-cti") - CrowdsecCTIInitCache(*Size, *TTL) + CrowdsecCTIInitCache(*size, *ttl) ctiClient = cticlient.NewCrowdsecCTIClient(cticlient.WithAPIKey(CTIApiKey), cticlient.WithLogger(subLogger)) CTIApiEnabled = true return nil diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 9bc991a8f2d..96de0020ccc 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -129,32 +129,34 @@ func Init(databaseClient *database.Client) error { dataFileRegex = make(map[string][]*regexp.Regexp) dataFileRe2 = make(map[string][]*re2.Regexp) dbClient = databaseClient + XMLCacheInit() + return nil } -func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { +func RegexpCacheInit(filename string, cacheCfg types.DataSource) error { // cache is explicitly disabled - if CacheCfg.Cache != nil && !*CacheCfg.Cache { + if cacheCfg.Cache != nil && !*cacheCfg.Cache { return nil } // cache is implicitly disabled if no cache config is provided - if CacheCfg.Strategy == nil && CacheCfg.TTL == nil && CacheCfg.Size == nil { + if cacheCfg.Strategy == nil && cacheCfg.TTL == nil && cacheCfg.Size == nil { return nil } // cache is enabled - if CacheCfg.Size == nil { - CacheCfg.Size = ptr.Of(50) + if cacheCfg.Size == nil { + cacheCfg.Size = ptr.Of(50) } - gc := gcache.New(*CacheCfg.Size) + gc := gcache.New(*cacheCfg.Size) - 
if CacheCfg.Strategy == nil { - CacheCfg.Strategy = ptr.Of("LRU") + if cacheCfg.Strategy == nil { + cacheCfg.Strategy = ptr.Of("LRU") } - switch *CacheCfg.Strategy { + switch *cacheCfg.Strategy { case "LRU": gc = gc.LRU() case "LFU": @@ -162,11 +164,11 @@ func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { case "ARC": gc = gc.ARC() default: - return fmt.Errorf("unknown cache strategy '%s'", *CacheCfg.Strategy) + return fmt.Errorf("unknown cache strategy '%s'", *cacheCfg.Strategy) } - if CacheCfg.TTL != nil { - gc.Expiration(*CacheCfg.TTL) + if cacheCfg.TTL != nil { + gc.Expiration(*cacheCfg.TTL) } cache := gc.Build() @@ -240,6 +242,7 @@ func Distinct(params ...any) (any, error) { if rt := reflect.TypeOf(params[0]).Kind(); rt != reflect.Slice && rt != reflect.Array { return nil, nil } + array := params[0].([]interface{}) if array == nil { return []interface{}{}, nil @@ -254,6 +257,7 @@ func Distinct(params ...any) (any, error) { ret = append(ret, val) } } + return ret, nil } @@ -282,8 +286,10 @@ func flatten(args []interface{}, v reflect.Value) []interface{} { } func existsInFileMaps(filename string, ftype string) (bool, error) { - ok := false var err error + + ok := false + switch ftype { case "regex", "regexp": if fflag.Re2RegexpInfileSupport.IsEnabled() { @@ -296,10 +302,11 @@ func existsInFileMaps(filename string, ftype string) (bool, error) { default: err = fmt.Errorf("unknown data type '%s' for : '%s'", ftype, filename) } + return ok, err } -//Expr helpers +// Expr helpers // func Get(arr []string, index int) string { func Get(params ...any) (any, error) { @@ -315,10 +322,12 @@ func Get(params ...any) (any, error) { func Atof(params ...any) (any, error) { x := params[0].(string) log.Debugf("debug atof %s", x) + ret, err := strconv.ParseFloat(x, 64) if err != nil { log.Warningf("Atof : can't convert float '%s' : %v", x, err) } + return ret, nil } @@ -340,22 +349,28 @@ func Distance(params ...any) (any, error) { long1 := params[1].(string) 
lat2 := params[2].(string) long2 := params[3].(string) + lat1f, err := strconv.ParseFloat(lat1, 64) if err != nil { log.Warningf("lat1 is not a float : %v", err) + return 0.0, fmt.Errorf("lat1 is not a float : %v", err) } + long1f, err := strconv.ParseFloat(long1, 64) if err != nil { log.Warningf("long1 is not a float : %v", err) + return 0.0, fmt.Errorf("long1 is not a float : %v", err) } + lat2f, err := strconv.ParseFloat(lat2, 64) if err != nil { log.Warningf("lat2 is not a float : %v", err) return 0.0, fmt.Errorf("lat2 is not a float : %v", err) } + long2f, err := strconv.ParseFloat(long2, 64) if err != nil { log.Warningf("long2 is not a float : %v", err) @@ -363,7 +378,7 @@ func Distance(params ...any) (any, error) { return 0.0, fmt.Errorf("long2 is not a float : %v", err) } - //either set of coordinates is 0,0, return 0 to avoid FPs + // either set of coordinates is 0,0, return 0 to avoid FPs if (lat1f == 0.0 && long1f == 0.0) || (lat2f == 0.0 && long2f == 0.0) { log.Warningf("one of the coordinates is 0,0, returning 0") return 0.0, nil @@ -373,6 +388,7 @@ func Distance(params ...any) (any, error) { second := haversine.Coord{Lat: lat2f, Lon: long2f} _, km := haversine.Distance(first, second) + return km, nil } diff --git a/pkg/leakybucket/bayesian.go b/pkg/leakybucket/bayesian.go index 357d51f597b..30e1b396ef8 100644 --- a/pkg/leakybucket/bayesian.go +++ b/pkg/leakybucket/bayesian.go @@ -31,9 +31,9 @@ type BayesianBucket struct { DumbProcessor } -func updateProbability(prior, probGivenEvil, ProbGivenBenign float32) float32 { +func updateProbability(prior, probGivenEvil, probGivenBenign float32) float32 { numerator := probGivenEvil * prior - denominator := numerator + ProbGivenBenign*(1-prior) + denominator := numerator + probGivenBenign*(1-prior) return numerator / denominator } diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go index 01dd491ed41..b37e431fadf 100644 --- a/pkg/leakybucket/overflow_filter.go +++ 
b/pkg/leakybucket/overflow_filter.go @@ -36,10 +36,10 @@ func NewOverflowFilter(g *BucketFactory) (*OverflowFilter, error) { return &u, nil } -func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *types.Queue) (types.RuntimeAlert, *types.Queue) { +func (u *OverflowFilter) OnBucketOverflow(bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *types.Queue) (types.RuntimeAlert, *types.Queue) { return func(l *Leaky, s types.RuntimeAlert, q *types.Queue) (types.RuntimeAlert, *types.Queue) { el, err := exprhelpers.Run(u.FilterRuntime, map[string]interface{}{ - "queue": q, "signal": s, "leaky": l}, l.logger, Bucket.Debug) + "queue": q, "signal": s, "leaky": l}, l.logger, bucket.Debug) if err != nil { l.logger.Errorf("Failed running overflow filter: %s", err) return s, q From 49fb24c3b15cf04fded18489f4fd1c6794cb365c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 16 Jan 2025 16:13:10 +0100 Subject: [PATCH 394/581] lint: enable errcheck; add allowlist and explicit checks (#3403) * lint: enable errcheck with explicit allow list * add explicit error checks * windows tests * windows nolint --- .golangci.yml | 35 ++++++++++-- cmd/crowdsec-cli/climachine/add.go | 8 ++- cmd/crowdsec-cli/completion.go | 8 +-- cmd/crowdsec/serve.go | 2 +- pkg/acquisition/modules/appsec/appsec.go | 5 +- .../modules/kubernetesaudit/k8s_audit.go | 4 +- .../wineventlog/wineventlog_windows_test.go | 56 +++++++++++-------- pkg/cticlient/example/fire.go | 10 +++- pkg/exprhelpers/helpers.go | 18 +++--- pkg/leakybucket/manager_load.go | 4 +- pkg/parser/node.go | 4 +- pkg/parser/stage.go | 6 +- pkg/setup/install.go | 4 +- 13 files changed, 112 insertions(+), 52 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 7df08cf717c..fe77aec2d3c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,6 +5,37 @@ run: - expr_debug linters-settings: + errcheck: + # Report about not checking of errors in type assertions: `a 
:= b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - (*bytes.Buffer).ReadFrom # TODO: + - io.Copy # TODO: + - (net/http.ResponseWriter).Write # TODO: + - (*os/exec.Cmd).Start + - (*os/exec.Cmd).Wait + - (*os.Process).Kill + - (*text/template.Template).ExecuteTemplate + - syscall.FreeLibrary + - golang.org/x/sys/windows.CloseHandle + - golang.org/x/sys/windows.ResetEvent + - (*golang.org/x/sys/windows/svc/eventlog.Log).Info + - (*golang.org/x/sys/windows/svc/mgr.Mgr).Disconnect + + - (github.com/bluele/gcache.Cache).Set + - (github.com/gin-gonic/gin.ResponseWriter).WriteString + - (*github.com/segmentio/kafka-go.Reader).SetOffsetAt + - (*gopkg.in/tomb.v2.Tomb).Wait + + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterArgs + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterBody + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterHeaders + - (*github.com/crowdsecurity/crowdsec/pkg/longpollclient.LongPollClient).Stop + gci: sections: - standard @@ -318,10 +349,6 @@ issues: - govet text: "shadow: declaration of \"(err|ctx)\" shadows declaration" - - linters: - - errcheck - text: "Error return value of `.*` is not checked" - # Will fix, trivial - just beware of merge conflicts - linters: diff --git a/cmd/crowdsec-cli/climachine/add.go b/cmd/crowdsec-cli/climachine/add.go index 4f28119dde6..b2595583823 100644 --- a/cmd/crowdsec-cli/climachine/add.go +++ b/cmd/crowdsec-cli/climachine/add.go @@ -73,7 +73,9 @@ func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword qs := &survey.Password{ Message: "Please provide a password for the machine:", } - survey.AskOne(qs, &machinePassword) + if err := survey.AskOne(qs, &machinePassword); err != 
nil { + return err + } } password := strfmt.Password(machinePassword) @@ -147,9 +149,9 @@ cscli machines add -f- --auto > /tmp/mycreds.yaml`, flags.VarP(&password, "password", "p", "machine password to login to the API") flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API") - flags.BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password") + flags.BoolVarP(&interactive, "interactive", "i", false, "interactive mode to enter the password") flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)") - flags.BoolVar(&force, "force", false, "will force add the machine if it already exist") + flags.BoolVar(&force, "force", false, "will force add the machine if it already exists") return cmd } diff --git a/cmd/crowdsec-cli/completion.go b/cmd/crowdsec-cli/completion.go index 7b6531f5516..fb60f9afab0 100644 --- a/cmd/crowdsec-cli/completion.go +++ b/cmd/crowdsec-cli/completion.go @@ -71,13 +71,13 @@ func NewCompletionCmd() *cobra.Command { Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": - cmd.Root().GenBashCompletion(os.Stdout) + _ = cmd.Root().GenBashCompletion(os.Stdout) case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) + _ = cmd.Root().GenZshCompletion(os.Stdout) case "powershell": - cmd.Root().GenPowerShellCompletion(os.Stdout) + _ = cmd.Root().GenPowerShellCompletion(os.Stdout) case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) + _ = cmd.Root().GenFishCompletion(os.Stdout, true) } }, } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 62b721befdb..0f7a84ce5c7 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -419,7 +419,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if cConfig.Common != nil && cConfig.Common.Daemonize { - 
csdaemon.Notify(csdaemon.Ready, log.StandardLogger()) + _ = csdaemon.Notify(csdaemon.Ready, log.StandardLogger()) // wait for signals return HandleSignals(cConfig) } diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 86dbfe38b71..a4c2c5124b3 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -330,7 +330,10 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. w.logger.Info("Shutting down Appsec server") // xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) - w.server.Shutdown(ctx) + + if err := w.server.Shutdown(ctx); err != nil { + w.logger.Errorf("Error shutting down Appsec server: %s", err.Error()) + } return nil }) diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index f8c325b5c4b..b0650d3906e 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -154,7 +154,9 @@ func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out c }) <-t.Dying() ka.logger.Infof("Stopping k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) - ka.server.Shutdown(ctx) + if err := ka.server.Shutdown(ctx); err != nil { + ka.logger.Errorf("Error shutting down k8s-audit server: %s", err.Error()) + } return nil }) diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index 2f6fe15450f..b4998de76c4 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -7,18 +7,22 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - 
"github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/windows/svc/eventlog" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { config string @@ -62,7 +66,8 @@ xpath_query: test`, } func TestQueryBuilder(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { config string @@ -111,23 +116,26 @@ event_level: bla`, } subLogger := log.WithField("type", "windowseventlog") for _, test := range tests { - f := WinEventLogSource{} - f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - q, err := f.buildXpathQuery() - if test.expectedErr != "" { - if err == nil { - t.Fatalf("expected error '%s' but got none", test.expectedErr) + t.Run(test.config, func(t *testing.T) { + f := WinEventLogSource{} + + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + cstest.AssertErrorContains(t, err, test.expectedErr) + if test.expectedErr != "" { + return } - assert.Contains(t, err.Error(), test.expectedErr) - } else { + + q, err := f.buildXpathQuery() require.NoError(t, err) assert.Equal(t, test.expectedQuery, q) - } + }) } } func TestLiveAcquisition(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) + ctx := context.Background() tests := []struct { @@ -185,8 +193,13 @@ event_ids: to := &tomb.Tomb{} c := make(chan types.Event) f := WinEventLogSource{} - f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - f.StreamingAcquisition(ctx, c, to) + + err := 
f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + require.NoError(t, err) + + err = f.StreamingAcquisition(ctx, c, to) + require.NoError(t, err) + time.Sleep(time.Second) lines := test.expectedLines go func() { @@ -261,7 +274,8 @@ func TestOneShotAcquisition(t *testing.T) { }, } - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -269,15 +283,13 @@ func TestOneShotAcquisition(t *testing.T) { to := &tomb.Tomb{} c := make(chan types.Event) f := WinEventLogSource{} - err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + cstest.AssertErrorContains(t, err, test.expectedConfigureErr) if test.expectedConfigureErr != "" { - assert.Contains(t, err.Error(), test.expectedConfigureErr) return } - require.NoError(t, err) - go func() { for { select { diff --git a/pkg/cticlient/example/fire.go b/pkg/cticlient/example/fire.go index e52922571ef..598175ce02c 100644 --- a/pkg/cticlient/example/fire.go +++ b/pkg/cticlient/example/fire.go @@ -57,6 +57,12 @@ func main() { }) } } - csvWriter.Write(csvHeader) - csvWriter.WriteAll(allItems) + + if err = csvWriter.Write(csvHeader); err != nil { + panic(err) + } + + if err = csvWriter.WriteAll(allItems); err != nil { + panic(err) + } } diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 96de0020ccc..d0f6f2cfe22 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -29,8 +29,6 @@ import ( "github.com/umahmood/haversine" "github.com/wasilibs/go-re2" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cache" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -146,17 +144,19 @@ func RegexpCacheInit(filename 
string, cacheCfg types.DataSource) error { } // cache is enabled - if cacheCfg.Size == nil { - cacheCfg.Size = ptr.Of(50) + size := 50 + if cacheCfg.Size != nil { + size = *cacheCfg.Size } - gc := gcache.New(*cacheCfg.Size) + gc := gcache.New(size) - if cacheCfg.Strategy == nil { - cacheCfg.Strategy = ptr.Of("LRU") + strategy := "LRU" + if cacheCfg.Strategy != nil { + strategy = *cacheCfg.Strategy } - switch *cacheCfg.Strategy { + switch strategy { case "LRU": gc = gc.LRU() case "LFU": @@ -164,7 +164,7 @@ func RegexpCacheInit(filename string, cacheCfg types.DataSource) error { case "ARC": gc = gc.ARC() default: - return fmt.Errorf("unknown cache strategy '%s'", *cacheCfg.Strategy) + return fmt.Errorf("unknown cache strategy '%s'", strategy) } if cacheCfg.TTL != nil { diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 5e8bab8486e..1ed9c2d2980 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -458,7 +458,9 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { } if data.Type == "regexp" { // cache only makes sense for regexp - exprhelpers.RegexpCacheInit(data.DestPath, *data) + if err := exprhelpers.RegexpCacheInit(data.DestPath, *data); err != nil { + bucketFactory.logger.Error(err.Error()) + } } } diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 62a1ff6c4e2..1229a0f4470 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -353,7 +353,9 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Key) continue } - cache.SetKey(stash.Name, key, value, &stash.TTLVal) + if err = cache.SetKey(stash.Name, key, value, &stash.TTLVal); err != nil { + clog.Warningf("failed to store data in cache: %s", err.Error()) + } } } diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index b98db350254..ddc07ca7f1d 100644 --- a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ 
-114,10 +114,12 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ( for _, data := range node.Data { err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type) if err != nil { - log.Error(err) + log.Error(err.Error()) } if data.Type == "regexp" { //cache only makes sense for regexp - exprhelpers.RegexpCacheInit(data.DestPath, *data) + if err = exprhelpers.RegexpCacheInit(data.DestPath, *data); err != nil { + log.Error(err.Error()) + } } } diff --git a/pkg/setup/install.go b/pkg/setup/install.go index 3d1540f23be..556ddab4c9a 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -192,7 +192,9 @@ func marshalAcquisDocuments(ads []AcquisDocument, toDir string) (string, error) return "", fmt.Errorf("while writing to %s: %w", ad.AcquisFilename, err) } - f.Sync() + if err = f.Sync(); err != nil { + return "", fmt.Errorf("while syncing %s: %w", ad.AcquisFilename, err) + } continue } From 7d12b806cdc5508eca914c226ce848e63853090f Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Sat, 18 Jan 2025 13:13:46 +0100 Subject: [PATCH 395/581] enhance: Log appsec error on writing response to remediation (#3412) * enhance: Log appsec error on writing response to remediation * fix: note to self dont write code at midnight --- pkg/acquisition/modules/appsec/appsec.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index a4c2c5124b3..78225d5f8c3 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -446,6 +446,8 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { logger.Errorf("unable to serialize response: %s", err) rw.WriteHeader(http.StatusInternalServerError) } else { - rw.Write(body) + if _, err := rw.Write(body); err != nil { + logger.Errorf("unable to write response: %s", err) + } } } From bd7e1b50c32d3569c2f5b9d41c774fbe926f21b2 Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 20 Jan 2025 10:30:36 +0100 Subject: [PATCH 396/581] cscli: cliconfig - remove global variables and gratuitous pointer (#3414) --- .../{config_backup.go => cliconfig/backup.go} | 2 +- cmd/crowdsec-cli/{ => cliconfig}/config.go | 14 ++++++++++---- .../feature_flags.go} | 4 ++-- .../{config_restore.go => cliconfig/restore.go} | 2 +- .../{config_show.go => cliconfig/show.go} | 2 +- .../{config_showyaml.go => cliconfig/showyaml.go} | 8 ++++---- cmd/crowdsec-cli/clisupport/support.go | 2 +- cmd/crowdsec-cli/main.go | 3 ++- cmd/crowdsec-cli/require/require.go | 2 +- pkg/csconfig/config.go | 9 +++++---- 10 files changed, 28 insertions(+), 20 deletions(-) rename cmd/crowdsec-cli/{config_backup.go => cliconfig/backup.go} (95%) rename cmd/crowdsec-cli/{ => cliconfig}/config.go (58%) rename cmd/crowdsec-cli/{config_feature_flags.go => cliconfig/feature_flags.go} (96%) rename cmd/crowdsec-cli/{config_restore.go => cliconfig/restore.go} (95%) rename cmd/crowdsec-cli/{config_show.go => cliconfig/show.go} (99%) rename cmd/crowdsec-cli/{config_showyaml.go => cliconfig/showyaml.go} (62%) diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/cliconfig/backup.go similarity index 95% rename from cmd/crowdsec-cli/config_backup.go rename to cmd/crowdsec-cli/cliconfig/backup.go index 0a58a8c1ab3..5cd34fcf07f 100644 --- a/cmd/crowdsec-cli/config_backup.go +++ b/cmd/crowdsec-cli/cliconfig/backup.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "fmt" diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/cliconfig/config.go similarity index 58% rename from cmd/crowdsec-cli/config.go rename to cmd/crowdsec-cli/cliconfig/config.go index 4cf8916ad4b..22095ac7d5b 100644 --- a/cmd/crowdsec-cli/config.go +++ b/cmd/crowdsec-cli/cliconfig/config.go @@ -1,20 +1,26 @@ -package main +package cliconfig import ( "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) +type 
configGetter func() *csconfig.Config + +type mergedConfigGetter func() string + type cliConfig struct { cfg configGetter } -func NewCLIConfig(cfg configGetter) *cliConfig { +func New(cfg configGetter) *cliConfig { return &cliConfig{ cfg: cfg, } } -func (cli *cliConfig) NewCommand() *cobra.Command { +func (cli *cliConfig) NewCommand(mergedConfigGetter mergedConfigGetter) *cobra.Command { cmd := &cobra.Command{ Use: "config [command]", Short: "Allows to view current config", @@ -23,7 +29,7 @@ func (cli *cliConfig) NewCommand() *cobra.Command { } cmd.AddCommand(cli.newShowCmd()) - cmd.AddCommand(cli.newShowYAMLCmd()) + cmd.AddCommand(cli.newShowYAMLCmd(mergedConfigGetter)) cmd.AddCommand(cli.newBackupCmd()) cmd.AddCommand(cli.newRestoreCmd()) cmd.AddCommand(cli.newFeatureFlagsCmd()) diff --git a/cmd/crowdsec-cli/config_feature_flags.go b/cmd/crowdsec-cli/cliconfig/feature_flags.go similarity index 96% rename from cmd/crowdsec-cli/config_feature_flags.go rename to cmd/crowdsec-cli/cliconfig/feature_flags.go index 760e2194bb3..c03db10ccce 100644 --- a/cmd/crowdsec-cli/config_feature_flags.go +++ b/cmd/crowdsec-cli/cliconfig/feature_flags.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "fmt" @@ -86,7 +86,7 @@ func (cli *cliConfig) featureFlags(showRetired bool) error { fmt.Println("To enable a feature you can: ") fmt.Println(" - set the environment variable CROWDSEC_FEATURE_ to true") - featurePath, err := filepath.Abs(csconfig.GetFeatureFilePath(ConfigFilePath)) + featurePath, err := filepath.Abs(csconfig.GetFeatureFilePath(cli.cfg().FilePath)) if err != nil { // we already read the file, shouldn't happen return err diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/cliconfig/restore.go similarity index 95% rename from cmd/crowdsec-cli/config_restore.go rename to cmd/crowdsec-cli/cliconfig/restore.go index 75373475ed9..d368b27ea30 100644 --- a/cmd/crowdsec-cli/config_restore.go +++ b/cmd/crowdsec-cli/cliconfig/restore.go @@ -1,4 +1,4 @@ 
-package main +package cliconfig import ( "fmt" diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/cliconfig/show.go similarity index 99% rename from cmd/crowdsec-cli/config_show.go rename to cmd/crowdsec-cli/cliconfig/show.go index 3d17d264574..90c0ab71069 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/cliconfig/show.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "encoding/json" diff --git a/cmd/crowdsec-cli/config_showyaml.go b/cmd/crowdsec-cli/cliconfig/showyaml.go similarity index 62% rename from cmd/crowdsec-cli/config_showyaml.go rename to cmd/crowdsec-cli/cliconfig/showyaml.go index 10549648d09..2e46a0171ab 100644 --- a/cmd/crowdsec-cli/config_showyaml.go +++ b/cmd/crowdsec-cli/cliconfig/showyaml.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "fmt" @@ -6,19 +6,19 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliConfig) showYAML() error { +func (cli *cliConfig) showYAML(mergedConfig string) error { fmt.Println(mergedConfig) return nil } -func (cli *cliConfig) newShowYAMLCmd() *cobra.Command { +func (cli *cliConfig) newShowYAMLCmd(mergedConfigGetter mergedConfigGetter) *cobra.Command { cmd := &cobra.Command{ Use: "show-yaml", Short: "Displays merged config.yaml + config.yaml.local", Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.showYAML() + return cli.showYAML(mergedConfigGetter()) }, } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 5f6032a17bd..eb3e03df253 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -290,7 +290,7 @@ func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { cfg := cli.cfg() - config, err := os.ReadFile(*cfg.FilePath) + config, err := os.ReadFile(cfg.FilePath) if err != nil { return fmt.Errorf("could not read config file: %w", err) } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 
936211be7ff..a17bafb96d8 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconfig" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clidecision" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" @@ -256,7 +257,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) - cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) + cmd.AddCommand(cliconfig.New(cli.cfg).NewCommand(func() string { return mergedConfig })) cmd.AddCommand(clihub.New(cli.cfg).NewCommand()) cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index dd98cd092cb..beffa29f3eb 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -27,7 +27,7 @@ func LAPI(c *csconfig.Config) error { func CAPI(c *csconfig.Config) error { if c.API.Server.OnlineClient == nil { - return fmt.Errorf("no configuration for Central API (CAPI) in '%s'", *c.FilePath) + return fmt.Errorf("no configuration for Central API (CAPI) in '%s'", c.FilePath) } return nil diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index 3bbdf607187..b0784e5e6f3 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -30,7 +30,7 @@ var globalConfig = Config{} // Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags type Config struct { // just a path to ourselves :p - FilePath *string `yaml:"-"` + FilePath string `yaml:"-"` Self 
[]byte `yaml:"-"` Common *CommonCfg `yaml:"common,omitempty"` Prometheus *PrometheusCfg `yaml:"prometheus,omitempty"` @@ -45,9 +45,10 @@ type Config struct { Hub *LocalHubCfg `yaml:"-"` } -func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool) (*Config, string, error) { +// NewConfig +func NewConfig(configFile string, disableAgent bool, disableAPI bool, quiet bool) (*Config, string, error) { patcher := yamlpatch.NewPatcher(configFile, ".local") - patcher.SetQuiet(inCli) + patcher.SetQuiet(quiet) fcontent, err := patcher.MergedPatchContent() if err != nil { @@ -56,7 +57,7 @@ func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool configData := csstring.StrictExpand(string(fcontent), os.LookupEnv) cfg := Config{ - FilePath: &configFile, + FilePath: configFile, DisableAgent: disableAgent, DisableAPI: disableAPI, } From ff9b350e0e23d4addba01c7904237206a8045fec Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 20 Jan 2025 10:31:55 +0100 Subject: [PATCH 397/581] deb build: get version with dpkg-parsechangelog -S (#3405) --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 5b8d6fc51f8..ec80caff985 100755 --- a/debian/rules +++ b/debian/rules @@ -1,6 +1,6 @@ #!/usr/bin/make -f -export DEB_VERSION=$(shell dpkg-parsechangelog | grep -E '^Version:' | cut -f 2 -d ' ') +export DEB_VERSION=$(shell dpkg-parsechangelog -SVersion) export BUILD_VERSION=v${DEB_VERSION}-debian-pragmatic export GO111MODULE=on From d6b3841f13aa578c199ef25a5842f2187977e85f Mon Sep 17 00:00:00 2001 From: AlteredCoder <64792091+AlteredCoder@users.noreply.github.com> Date: Mon, 20 Jan 2025 11:21:25 +0100 Subject: [PATCH 398/581] pkg/cticlient: Add missing field in SmokeItem and FireItem (#3413) * pkg/cticlient: Add missing field in SmokeItem and FireItem --- pkg/cticlient/types.go | 32 ++++++++++++++++++++++++++++++++ pkg/cticlient/types_test.go | 12 
++++++++++-- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/pkg/cticlient/types.go b/pkg/cticlient/types.go index 954d24641b4..5ea29d6c5b0 100644 --- a/pkg/cticlient/types.go +++ b/pkg/cticlient/types.go @@ -64,6 +64,9 @@ type CTIReferences struct { type SmokeItem struct { IpRangeScore int `json:"ip_range_score"` Ip string `json:"ip"` + Reputation string `json:"reputation"` + BackgroundNoise string `json:"background_noise"` + Confidence string `json:"confidence"` IpRange *string `json:"ip_range"` AsName *string `json:"as_name"` AsNum *int `json:"as_num"` @@ -77,6 +80,7 @@ type SmokeItem struct { BackgroundNoiseScore *int `json:"background_noise_score"` Scores CTIScores `json:"scores"` References []CTIReferences `json:"references"` + CVEs []string `json:"cves"` IsOk bool `json:"-"` } @@ -120,6 +124,10 @@ type FireItem struct { BackgroundNoiseScore *int `json:"background_noise_score"` Scores CTIScores `json:"scores"` References []CTIReferences `json:"references"` + CVEs []string `json:"cves"` + Reputation string `json:"reputation"` + BackgroundNoise string `json:"background_noise"` + Confidence string `json:"confidence"` State string `json:"state"` Expiration CustomTime `json:"expiration"` } @@ -209,6 +217,18 @@ func (c *SmokeItem) GetFalsePositives() []string { return ret } +func (c *SmokeItem) GetClassifications() []string { + ret := make([]string, 0) + + if c.Classifications.Classifications != nil { + for _, b := range c.Classifications.Classifications { + ret = append(ret, b.Name) + } + } + + return ret +} + func (c *SmokeItem) IsFalsePositive() bool { if c.Classifications.FalsePositives != nil { if len(c.Classifications.FalsePositives) > 0 { @@ -282,6 +302,18 @@ func (c *FireItem) GetFalsePositives() []string { return ret } +func (c *FireItem) GetClassifications() []string { + ret := make([]string, 0) + + if c.Classifications.Classifications != nil { + for _, b := range c.Classifications.Classifications { + ret = append(ret, b.Name) + } + } + + 
return ret +} + func (c *FireItem) IsFalsePositive() bool { if c.Classifications.FalsePositives != nil { if len(c.Classifications.FalsePositives) > 0 { diff --git a/pkg/cticlient/types_test.go b/pkg/cticlient/types_test.go index a7308af35e0..9c7840de324 100644 --- a/pkg/cticlient/types_test.go +++ b/pkg/cticlient/types_test.go @@ -40,8 +40,14 @@ func getSampleSmokeItem() SmokeItem { DaysAge: 1, }, Classifications: CTIClassifications{ - FalsePositives: []CTIClassification{}, - Classifications: []CTIClassification{}, + FalsePositives: []CTIClassification{}, + Classifications: []CTIClassification{ + { + Name: "profile:likely_botnet", + Label: "Likely Botnet", + Description: "IP appears to be a botnet.", + }, + }, }, AttackDetails: []*CTIAttackDetails{ { @@ -101,6 +107,7 @@ func TestBasicSmokeItem(t *testing.T) { assert.Equal(t, 3, item.GetBackgroundNoiseScore()) assert.Equal(t, []string{}, item.GetFalsePositives()) assert.False(t, item.IsFalsePositive()) + assert.Equal(t, []string{"profile:likely_botnet"}, item.GetClassifications()) } func TestEmptySmokeItem(t *testing.T) { @@ -112,4 +119,5 @@ func TestEmptySmokeItem(t *testing.T) { assert.Equal(t, 0, item.GetBackgroundNoiseScore()) assert.Equal(t, []string{}, item.GetFalsePositives()) assert.False(t, item.IsFalsePositive()) + assert.Equal(t, []string{}, item.GetClassifications()) } From e1a1b1e565f4806efb313a6bf04a624a0354f52d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 20 Jan 2025 14:16:01 +0100 Subject: [PATCH 399/581] deb packaging: don't duplicate crowdsec.service if /lib is not linked to /usr/lib (#3415) We don't need to explicitly copy the file, but the standard location is /usr/lib, not /lib - hence we must avoid duplicating it by copying explicitly if /lib is not linked to /usr/lib. Also we don't need preinst anymore and the message is in postinst as well. 
--- debian/install | 1 - debian/preinst | 8 -------- 2 files changed, 9 deletions(-) delete mode 100644 debian/preinst diff --git a/debian/install b/debian/install index fa422cac8d9..2d4cc6e1a7f 100644 --- a/debian/install +++ b/debian/install @@ -3,7 +3,6 @@ config/profiles.yaml etc/crowdsec/ config/simulation.yaml etc/crowdsec/ config/patterns/* etc/crowdsec/patterns -config/crowdsec.service lib/systemd/system # Referenced configs: cmd/notification-slack/slack.yaml etc/crowdsec/notifications/ diff --git a/debian/preinst b/debian/preinst deleted file mode 100644 index df5b56cef3f..00000000000 --- a/debian/preinst +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -set -e - -# Source debconf library. -. /usr/share/debconf/confmodule - -echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'" From 013fd8b198f63b7f63f6d2ad302d8c1cc16f7792 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 20 Jan 2025 15:01:34 +0100 Subject: [PATCH 400/581] remove dependency from github.com/gofrs/uuid (#3406) --- go.mod | 1 - pkg/longpollclient/client.go | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/go.mod b/go.mod index d002aa43d9b..44dcd9c6753 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,6 @@ require ( github.com/go-openapi/validate v0.20.0 github.com/go-sql-driver/mysql v1.6.0 github.com/goccy/go-yaml v1.11.0 - github.com/gofrs/uuid v4.0.0+incompatible github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.1 github.com/golang/protobuf v1.5.4 // indirect diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 5c395185b20..6a668e07d84 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - "github.com/gofrs/uuid" + "github.com/google/uuid" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" ) From 29b2252b50c6acab330148725c88f1e5416226a4 Mon Sep 17 00:00:00 2001 From: 
blotus Date: Mon, 20 Jan 2025 15:57:26 +0100 Subject: [PATCH 401/581] Improve post-installation message (#3407) --- debian/postinst | 16 ++++++++++++---- rpm/SPECS/crowdsec.spec | 16 ++++++++++++---- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/debian/postinst b/debian/postinst index d50a7c0bfe2..ed537325c44 100644 --- a/debian/postinst +++ b/debian/postinst @@ -89,10 +89,18 @@ if [ "$1" = configure ]; then echo "This port is configured through /etc/crowdsec/config.yaml and /etc/crowdsec/local_api_credentials.yaml" fi - echo "Get started with CrowdSec:" - echo " * Detailed guides are available in our documentation: https://docs.crowdsec.net" - echo " * Configuration items created by the community can be found at the Hub: https://hub.crowdsec.net" - echo " * Gain insights into your use of CrowdSec with the help of the console https://app.crowdsec.net" + GREEN='\033[0;32m' + BOLD='\033[1m' + RESET='\033[0m' + + echo -e "${BOLD}Get started with CrowdSec:${RESET}" + echo -e " * Go further by following our ${BOLD}post installation steps${RESET} : ${GREEN}${BOLD}https://docs.crowdsec.net/u/getting_started/next_steps${RESET}" + echo -e "====================================================================================================================" + echo -e " * Install a ${BOLD}remediation component${RESET} to block attackers: ${GREEN}${BOLD}https://docs.crowdsec.net/u/bouncers/intro${RESET}" + echo -e "====================================================================================================================" + echo -e " * Find more ${BOLD}collections${RESET}, ${BOLD}parsers${RESET} and ${BOLD}scenarios${RESET} created by the community with the Hub: ${GREEN}${BOLD}https://hub.crowdsec.net${RESET}" + echo -e "====================================================================================================================" + echo -e " * Subscribe to ${BOLD}additional blocklists${RESET}, ${BOLD}visualize${RESET} your alerts and more with 
the console: ${GREEN}${BOLD}https://app.crowdsec.net${RESET}" fi echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'" diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index c24b3f2ac0d..eba022d9bda 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -178,10 +178,18 @@ if [ $1 == 1 ]; then cscli hub update CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection - echo "Get started with CrowdSec:" - echo " * Detailed guides are available in our documentation: https://docs.crowdsec.net" - echo " * Configuration items created by the community can be found at the Hub: https://hub.crowdsec.net" - echo " * Gain insights into your use of CrowdSec with the help of the console https://app.crowdsec.net" + GREEN='\033[0;32m' + BOLD='\033[1m' + RESET='\033[0m' + + echo -e "${BOLD}Get started with CrowdSec:${RESET}" + echo -e " * Go further by following our ${BOLD}post installation steps${RESET} : ${GREEN}${BOLD}https://docs.crowdsec.net/u/getting_started/next_steps${RESET}" + echo -e "====================================================================================================================" + echo -e " * Install a ${BOLD}remediation component${RESET} to block attackers: ${GREEN}${BOLD}https://docs.crowdsec.net/u/bouncers/intro${RESET}" + echo -e "====================================================================================================================" + echo -e " * Find more ${BOLD}collections${RESET}, ${BOLD}parsers${RESET} and ${BOLD}scenarios${RESET} created by the community with the Hub: ${GREEN}${BOLD}https://hub.crowdsec.net${RESET}" + echo -e "====================================================================================================================" + echo -e " * Subscribe to ${BOLD}additional blocklists${RESET}, ${BOLD}visualize${RESET} your alerts and more with the console: ${GREEN}${BOLD}https://app.crowdsec.net${RESET}" fi %systemd_post 
%{name}.service From 1b5514c9e47e8398956d57ca21a5a472afe706ac Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 21 Jan 2025 13:15:42 +0100 Subject: [PATCH 402/581] wizard: install systemd service file (#3418) fix previous PR --- wizard.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wizard.sh b/wizard.sh index 4da970cd695..2d3260fc22f 100755 --- a/wizard.sh +++ b/wizard.sh @@ -432,6 +432,10 @@ install_crowdsec() { [[ ! -f "${CROWDSEC_CONFIG_PATH}/console.yaml" ]] && install -v -m 644 -D ./config/console.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit [[ ! -f "${CROWDSEC_CONFIG_PATH}/context.yaml" ]] && install -v -m 644 -D ./config/context.yaml "${CROWDSEC_CONSOLE_DIR}" > /dev/null || exit + DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $DATA' < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml" || log_fatal "unable to generate user configuration file" + if [[ ${DOCKER_MODE} == "false" ]]; then + CFG=${CROWDSEC_CONFIG_PATH} BIN=${CROWDSEC_BIN_INSTALLED} envsubst '$CFG $BIN' < ./config/crowdsec.service > "${SYSTEMD_PATH_FILE}" || log_fatal "unable to crowdsec systemd file" + fi install_bins if [[ ${DOCKER_MODE} == "false" ]]; then From d0de3c9f5dda5f6be4a3e3db937f379c41ec2e70 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Tue, 21 Jan 2025 16:05:12 +0100 Subject: [PATCH 403/581] README revamp (#3408) * wip * attempt #2 * next * up * up * up * up * add more feature description * up * next iteration * up --- README.md | 146 +++++++++++------------------------------------------- 1 file changed, 28 insertions(+), 118 deletions(-) diff --git a/README.md b/README.md index 1e57d4e91c4..dc6d3ee6806 100644 --- a/README.md +++ b/README.md @@ -8,83 +8,47 @@

- - - - -Go Reference - - - - - +

+_CrowdSec is an open-source and participative security solution offering crowdsourced server detection and protection against malicious IPs. Detect and block with our Security Engine, contribute to the network, and enjoy our real-time community blocklist._ +

-:computer: Console (WebApp) -:books: Documentation -:diamond_shape_with_a_dot_inside: Configuration Hub -:speech_balloon: Discourse (Forum) -:speech_balloon: Discord (Live Chat) +CrowdSec schema

+## Features & Advantages -:dancer: This is a community-driven project, we need your feedback. - -## +### Versatile Security Engine -CrowdSec is a free, modern & collaborative behavior detection engine, coupled with a global IP reputation network. It stacks on fail2ban's philosophy but is IPV6 compatible and 60x faster (Go vs Python), it uses Grok patterns to parse logs and YAML scenarios to identify behaviors. CrowdSec is engineered for modern Cloud / Containers / VM-based infrastructures (by decoupling detection and remediation). Once detected you can remedy threats with various bouncers (firewall block, nginx http 403, Captchas, etc.) while the aggressive IP can be sent to CrowdSec for curation before being shared among all users to further improve everyone's security. See [FAQ](https://doc.crowdsec.net/docs/faq) or read below for more. +[CrowdSec Security Engine](https://doc.crowdsec.net/docs/next/intro/) is an all-in-one [IDS/IPS](https://doc.crowdsec.net/docs/next/log_processor/intro) and [WAF](https://doc.crowdsec.net/docs/next/appsec/intro). -## 2 mins install +It detects bad behaviors by analyzing log sources and HTTP requests, and allows active remedation thanks to the [Remediation Components](https://doc.crowdsec.net/u/bouncers/intro). -Installing it through the [Package system](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) of your OS is the easiest way to proceed. -Otherwise, you can install it from source. +[Detection rules are available on our hub](https://hub.crowdsec.net) under MIT license. -### From package (Debian) +### CrowdSec Community Blocklist -```sh -curl -s https://packagecloud.io/install/repositories/crowdsec/crowdsec/script.deb.sh | sudo bash -sudo apt-get update -sudo apt-get install crowdsec -``` + -### From package (rhel/centos/amazon linux) +The "Community Blocklist" is a curated list of IP addresses identified as malicious by CrowdSec. 
The Security Engine proactively block the IP addresses of this blocklist, preventing malevolent IPs from reaching your systems. -```sh -curl -s https://packagecloud.io/install/repositories/crowdsec/crowdsec/script.rpm.sh | sudo bash -sudo yum install crowdsec -``` +[![CrowdSec Community Blocklist](https://doc.crowdsec.net/assets/images/data_insights-1e7678f47cb672122cc847d068b6eadf.png)](https://doc.crowdsec.net/docs/next/central_api/community_blocklist) -### From package (FreeBSD) - -``` -sudo pkg update -sudo pkg install crowdsec -``` + -### From source +### Console - Monitoring & Automation of your security stack -```sh -wget https://github.com/crowdsecurity/crowdsec/releases/latest/download/crowdsec-release.tgz -tar xzvf crowdsec-release.tgz -cd crowdsec-v* && sudo ./wizard.sh -i -``` +[![CrowdSec Console](https://doc.crowdsec.net/assets/images/visualizer-summary-c8087e2eaef65d110bad6a7f274cf953.png)](https://doc.crowdsec.net/u/console/intro) -## :information_source: About the CrowdSec project +### Multiple Platforms support -Crowdsec is an open-source, lightweight software, detecting peers with aggressive behaviors to prevent them from accessing your systems. Its user-friendly design and assistance offer a low technical barrier of entry and nevertheless a high security gain. +[![Multiple Platforms support](https://github.com/crowdsecurity/crowdsec-docs/blob/main/crowdsec-docs/static/img/supported_platforms.png)](https://doc.crowdsec.net/) -The architecture is as follows : - -

- CrowdSec -

- -Once an unwanted behavior is detected, deal with it through a [bouncer](https://app.crowdsec.net/hub/remediation-components). The aggressive IP, scenario triggered and timestamp are sent for curation, to avoid poisoning & false positives. (This can be disabled). If verified, this IP is then redistributed to all CrowdSec users running the same scenario. ## Outnumbering hackers all together @@ -92,72 +56,18 @@ By sharing the threat they faced, all users are protecting each-others (hence th CrowdSec ships by default with scenarios (brute force, port scan, web scan, etc.) adapted for most contexts, but you can easily extend it by picking more of them from the **[HUB](https://hub.crowdsec.net)**. It is also easy to adapt an existing one or create one yourself. -## :point_right: What it is not - -CrowdSec is not a SIEM, storing your logs (neither locally nor remotely). Your data are analyzed locally and forgotten. - -Signals sent to the curation platform are limited to the very strict minimum: IP, Scenario, Timestamp. They are only used to allow the system to spot new rogue IPs, and rule out false positives or poisoning attempts. - -## :arrow_down: Install it ! - -Crowdsec is available for various platforms : - - - [Use our debian repositories](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) or the [official debian packages](https://packages.debian.org/search?keywords=crowdsec&searchon=names&suite=stable§ion=all) - - An [image](https://hub.docker.com/r/crowdsecurity/crowdsec) is available for docker - - [Prebuilt release packages](https://github.com/crowdsecurity/crowdsec/releases) are also available (suitable for `amd64`) - - You can as well [build it from source](https://doc.crowdsec.net/docs/user_guides/building) - -Or look directly at [installation documentation](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) for other methods and platforms. - -## :tada: Key benefits - -### Fast assisted installation, no technical barrier - -
- Initial configuration is automated, providing functional out-of-the-box setup - -
- -### Out of the box detection - -
- Baseline detection is effective out-of-the-box, no fine-tuning required (click to expand) - -
- -### Easy bouncer deployment - -
- It's trivial to add bouncers to enforce decisions of crowdsec (click to expand) - -
- -### Easy dashboard access - -
- It's easy to deploy a metabase interface to view your data simply with cscli (click to expand) - -
- -### Hot & Cold logs - -
- Process cold logs, for forensic, tests and chasing false positives & false negatives (click to expand) - -
- - -## 📦 About this repository - -This repository contains the code for the two main components of crowdsec : - - `crowdsec` : the daemon a-la-fail2ban that can read, parse, enrich and apply heuristics to logs. This is the component in charge of "detecting" the attacks - - `cscli` : the cli tool mainly used to interact with crowdsec : ban/unban/view current bans, enable/disable parsers and scenarios. +## Installation + -## Contributing +[Follow our documentation to install CrowdSec in a few minutes on Linux, Windows, Docker, OpnSense, Kubernetes, and more.](https://doc.crowdsec.net/) -If you wish to contribute to the core of crowdsec, you are welcome to open a PR in this repository. -If you wish to add a new parser, scenario or collection, please open a PR in the [hub repository](https://github.com/crowdsecurity/hub). +## Resources -If you wish to contribute to the documentation, please open a PR in the [documentation repository](http://github.com/crowdsecurity/crowdsec-docs). + - [Console](https://app.crowdsec.net): Supercharge your CrowdSec setup with visualization, management capabilities, extra blocklists and premium features. + - [Documentation](https://doc.crowdsec.net): Learn how to exploit your CrowdSec setup to deter more attacks. + - [Discord](https://discord.gg/crowdsec): A question or a suggestion? This is the place. + - [Hub](https://hub.crowdsec.net): Improve your stack protection, find the relevant remediation components for your infrastructure. + - [CrowdSec Academy](https://academy.crowdsec.net/): Learn and grow with our courses. + - [Corporate Website](https://crowdsec.net): For everything else. 
From 83cb3e9eadb1a8c36a5d075a61f8c1a678d3fe3c Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 21 Jan 2025 17:27:00 +0100 Subject: [PATCH 404/581] upgrade coraza to latest version (#3417) --- go.mod | 24 +++++++++++++----------- go.sum | 56 ++++++++++++++++++++++++++++++++------------------------ 2 files changed, 45 insertions(+), 35 deletions(-) diff --git a/go.mod b/go.mod index 44dcd9c6753..48564d51265 100644 --- a/go.mod +++ b/go.mod @@ -24,10 +24,10 @@ require ( github.com/c-robinson/iplib v1.0.8 github.com/cespare/xxhash/v2 v2.3.0 github.com/containerd/log v0.1.0 // indirect - github.com/corazawaf/libinjection-go v0.1.2 + github.com/corazawaf/libinjection-go v0.2.2 github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/creack/pty v1.1.21 // indirect - github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 + github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 github.com/crowdsecurity/go-cs-lib v0.0.16 github.com/crowdsecurity/grokky v0.2.2 @@ -71,7 +71,6 @@ require ( github.com/lithammer/dedent v1.1.0 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 - github.com/miekg/dns v1.1.57 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect @@ -104,15 +103,15 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect - golang.org/x/crypto v0.31.0 + golang.org/x/crypto v0.32.0 golang.org/x/mod v0.20.0 - golang.org/x/net v0.28.0 // indirect + golang.org/x/net v0.34.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 golang.org/x/text v0.21.0 golang.org/x/time v0.6.0 // indirect google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.34.2 + 
google.golang.org/protobuf v1.36.3 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 @@ -132,6 +131,7 @@ require ( github.com/bytedance/sonic v1.10.2 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect github.com/chenzhuoyu/iasm v0.9.1 // indirect + github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect @@ -165,6 +165,7 @@ require ( github.com/jackc/pgproto3/v2 v2.3.3 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgtype v1.14.0 // indirect + github.com/jcchavezs/mergefs v0.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -172,7 +173,7 @@ require ( github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a // indirect + github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect @@ -186,7 +187,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oklog/run v1.0.0 // indirect github.com/pelletier/go-toml/v2 v2.1.1 // indirect - github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e // indirect + github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat 
v0.0.0-20210106213030-5aafc221ea8c // indirect @@ -201,7 +202,7 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/cast v1.3.1 // indirect github.com/tetratelabs/wazero v1.8.0 // indirect - github.com/tidwall/gjson v1.17.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect @@ -209,6 +210,7 @@ require ( github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect + github.com/valllabh/ocsf-schema-golang v1.0.3 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect @@ -217,7 +219,7 @@ require ( go.opentelemetry.io/otel/metric v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect diff --git a/go.sum b/go.sum index 63b6394ef1c..e9873f0d46f 100644 --- a/go.sum +++ b/go.sum @@ -89,8 +89,12 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/corazawaf/libinjection-go v0.1.2 h1:oeiV9pc5rvJ+2oqOqXEAMJousPpGiup6f7Y3nZj5GoM= -github.com/corazawaf/libinjection-go v0.1.2/go.mod h1:OP4TM7xdJ2skyXqNX1AN1wN5nNZEmJNuWbNPOItn7aw= 
+github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc h1:OlJhrgI3I+FLUCTI3JJW8MoqyM78WbqJjecqMnqG+wc= +github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc/go.mod h1:7rsocqNDkTCira5T0M7buoKR2ehh7YZiPkzxRuAgvVU= +github.com/corazawaf/coraza/v3 v3.3.2 h1:eG1HPLySTR9lND6y6fPOajubwbuHRF6aXCsCtxyqKTY= +github.com/corazawaf/coraza/v3 v3.3.2/go.mod h1:4EqMZkRoil11FnResCT/2JIg61dH+6D7F48VG8SVzuA= +github.com/corazawaf/libinjection-go v0.2.2 h1:Chzodvb6+NXh6wew5/yhD0Ggioif9ACrQGR4qjTCs1g= +github.com/corazawaf/libinjection-go v0.2.2/go.mod h1:OP4TM7xdJ2skyXqNX1AN1wN5nNZEmJNuWbNPOItn7aw= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -103,8 +107,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h8clMcRL2u5ooZ3tmwnmJftmhb9Ws1MKmavvI= -github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7 h1:nIwAjapWmiQD3W/uAWYE3z+DC5Coy/zTyPBCJ379fAw= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7/go.mod h1:A+uciRXu+yhZcHMtM052bSM6vyJsMMU37NJN+tVoGqo= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod 
h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= github.com/crowdsecurity/go-cs-lib v0.0.16 h1:2/htodjwc/sfsv4deX8F/2Fzg1bOI8w3O1/BPSvvsB0= @@ -136,8 +140,8 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= -github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -418,6 +422,8 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v1.1.0 h1:F47ChZj1Y2zFsCXxNkBPwNNKnAyOATcdQibk0qEdVCE= github.com/jarcoal/httpmock v1.1.0/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jcchavezs/mergefs v0.1.0 h1:7oteO7Ocl/fnfFMkoVLJxTveCjrsd//UB0j89xmnpec= +github.com/jcchavezs/mergefs v0.1.0/go.mod h1:eRLTrsA+vFwQZ48hj8p8gki/5v9C2bFtHH5Mnn4bcGk= github.com/jedib0t/go-pretty/v6 v6.5.9 h1:ACteMBRrrmm1gMsXe9PSTOClQ63IXDUt03H5U+UV8OU= github.com/jedib0t/go-pretty/v6 v6.5.9/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= @@ -478,8 +484,8 @@ github.com/lithammer/dedent v1.1.0 
h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffkt github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a h1:tdPcGgyiH0K+SbsJBBm2oPyEIOTAvLBwD9TuUwVtZho= -github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516 h1:aAO0L0ulox6m/CLRYvJff+jWXYYCKGpEm3os7dM/Z+M= +github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -569,8 +575,8 @@ github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUr github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e h1:POJco99aNgosh92lGqmx7L1ei+kCymivB/419SD15PQ= -github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= +github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 h1:1Kw2vDBXmjop+LclnzCb/fFy+sgb3gYARwfmoUcQe6o= +github.com/petar-dambovaliev/aho-corasick 
v0.0.0-20240411101913-e07a1f0e8eb4/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -680,8 +686,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= -github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= -github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -700,6 +706,8 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 h1:UFHFmFfixpmfRBcxuu+LA9l8MdURWVdVNUHxO5n1d2w= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26/go.mod h1:IGhd0qMDsUa9acVjsbsT7bu3ktadtGOHI79+idTew/M= +github.com/valllabh/ocsf-schema-golang v1.0.3 h1:eR8k/3jP/OOqB8LRCtdJ4U+vlgd/gk5y3KMXoodrsrw= +github.com/valllabh/ocsf-schema-golang v1.0.3/go.mod h1:sZ3as9xqm1SSK5feFWIR2CuGeGRhsM7TR1MbpBctzPk= github.com/vektah/gqlparser v1.1.2/go.mod 
h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= @@ -794,8 +802,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -828,8 +836,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -876,8 +884,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -885,8 +893,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -924,8 +932,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -947,8 +955,8 @@ google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 4935dc536e9be4da2cb49f05ab7ca68d57c8ffae Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 23 Jan 2025 09:29:29 +0100 Subject: [PATCH 405/581] cscli hub: handle freebsd pre-release version numbers (#3423) --- cmd/crowdsec-cli/require/branch.go | 2 +- pkg/cwversion/version.go | 18 ++++++-- pkg/cwversion/version_test.go | 68 ++++++++++++++++++++++++++++++ 3 files changed, 83 insertions(+), 5 deletions(-) create mode 100644 pkg/cwversion/version_test.go diff --git a/cmd/crowdsec-cli/require/branch.go b/cmd/crowdsec-cli/require/branch.go index 09acc0fef8a..ab9b8e50bdc 100644 --- a/cmd/crowdsec-cli/require/branch.go +++ b/cmd/crowdsec-cli/require/branch.go @@ -69,7 +69,7 @@ func chooseBranch(ctx context.Context, cfg *csconfig.Config) string { return "master" } - csVersion := cwversion.VersionStrip() + csVersion := cwversion.BaseVersion() if csVersion == "" { log.Warning("Crowdsec version is not set, using hub branch 'master'") return "master" diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 2cb7de13e18..87d855444e7 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -2,6 +2,7 @@ package cwversion import ( "fmt" + "regexp" "strings" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -57,10 +58,19 @@ func FullString() string { return ret } -// VersionStrip remove the tag from the version string, used to match with a hub branch -func VersionStrip() string { - ret := strings.Split(version.Version, "~") - ret = strings.Split(ret[0], "-") +// StripTags removes any tag (-rc, ~foo3, .r1, etc) from a version string +func StripTags(version string) string { + reVersion := regexp.MustCompile(`^v(\d+)\.(\d+)\.(\d+)`) + ret := 
reVersion.FindStringSubmatch(version) + + if len(ret) == 0 { + return version + } return ret[0] } + +// BaseVersion returns the version number used to match a hub branch. +func BaseVersion() string { + return StripTags(version.Version) +} diff --git a/pkg/cwversion/version_test.go b/pkg/cwversion/version_test.go new file mode 100644 index 00000000000..13293d4a479 --- /dev/null +++ b/pkg/cwversion/version_test.go @@ -0,0 +1,68 @@ +package cwversion + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStripTags(t *testing.T) { + tests := []struct { + name string + input string + want string + }{ + { + name: "no tag, valid version v1.2.3", + input: "v1.2.3", + want: "v1.2.3", + }, + { + name: "tag appended with dash", + input: "v1.2.3-rc1", + want: "v1.2.3", + }, + { + name: "tag appended with tilde", + input: "v1.2.3~foo3", + want: "v1.2.3", + }, + { + name: "tag appended with dot", + input: "v1.2.3.r1", + want: "v1.2.3", + }, + { + name: "tag appended directly", + input: "v1.2.3r1", + want: "v1.2.3", + }, + { + name: "multiple digits in version", + input: "v10.20.30-rc2", + want: "v10.20.30", + }, + { + name: "invalid version (no 'v' prefix)", + input: "1.2.3-tag", + want: "1.2.3-tag", + }, + { + name: "random string", + input: "some-random-string", + want: "some-random-string", + }, + { + name: "freebsd pre-release", + input: "v1.6.5.r1", + want: "v1.6.5", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := StripTags(tt.input) + require.Equal(t, tt.want, got) + }) + } +} From 62308f535c2aabc1204f5d8691ec73c7a582341b Mon Sep 17 00:00:00 2001 From: srkoster <89455157+srkoster@users.noreply.github.com> Date: Thu, 23 Jan 2025 18:50:31 +0100 Subject: [PATCH 406/581] Removed last_heartbeat update in MachineUpdateBaseMetrics (#3425) --- pkg/database/machines.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/pkg/database/machines.go b/pkg/database/machines.go index d8c02825312..1293633ed9e 100644 --- 
a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -34,14 +34,6 @@ func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - var heartbeat time.Time - - if len(baseMetrics.Metrics) == 0 { - heartbeat = time.Now().UTC() - } else { - heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) - } - hubState := map[string][]schema.ItemState{} for itemType, items := range hubItems { hubState[itemType] = []schema.ItemState{} @@ -61,7 +53,6 @@ func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, SetOsname(*os.Name). SetOsversion(*os.Version). SetFeatureflags(features). - SetLastHeartbeat(heartbeat). SetHubstate(hubState). SetDatasources(datasources). Save(ctx) From 5b90dfb3c8983e423e579b0ce3db8565f3ad5ce7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 24 Jan 2025 14:45:45 +0100 Subject: [PATCH 407/581] use go 1.23.5 (#3419) --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 48564d51265..ed406e4aedc 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.23.3 +go 1.23.5 // Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). 
From fdd37370b31700b3ce87925ed409f00e45ba1213 Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Mon, 27 Jan 2025 10:11:50 +0100 Subject: [PATCH 408/581] appsec: do not attempt to deduplicate native modsec rules (#3347) * fix #3343 * fix #3350 * fix #3350 --------- Co-authored-by: blotus Co-authored-by: Laurence Jones --- .../modules/appsec/appsec_runner.go | 19 +++-- .../modules/appsec/appsec_runner_test.go | 72 ++++++++++++++++--- pkg/acquisition/modules/appsec/appsec_test.go | 10 +-- pkg/appsec/appsec_rules_collection.go | 5 +- 4 files changed, 83 insertions(+), 23 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index d4535d3f9a2..8bdb6405d98 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -35,19 +35,24 @@ type AppsecRunner struct { func (r *AppsecRunner) MergeDedupRules(collections []appsec.AppsecCollection, logger *log.Entry) string { var rulesArr []string dedupRules := make(map[string]struct{}) + discarded := 0 for _, collection := range collections { + // Dedup *our* rules for _, rule := range collection.Rules { - if _, ok := dedupRules[rule]; !ok { - rulesArr = append(rulesArr, rule) - dedupRules[rule] = struct{}{} - } else { - logger.Debugf("Discarding duplicate rule : %s", rule) + if _, ok := dedupRules[rule]; ok { + discarded++ + logger.Debugf("Discarding duplicate rule : %s", rule) + continue } + rulesArr = append(rulesArr, rule) + dedupRules[rule] = struct{}{} } + // Don't mess up with native modsec rules + rulesArr = append(rulesArr, collection.NativeRules...) 
} - if len(rulesArr) != len(dedupRules) { - logger.Warningf("%d rules were discarded as they were duplicates", len(rulesArr)-len(dedupRules)) + if discarded > 0 { + logger.Warningf("%d rules were discarded as they were duplicates", discarded) } return strings.Join(rulesArr, "\n") diff --git a/pkg/acquisition/modules/appsec/appsec_runner_test.go b/pkg/acquisition/modules/appsec/appsec_runner_test.go index 38549a9106c..38d8bbe431f 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner_test.go +++ b/pkg/acquisition/modules/appsec/appsec_runner_test.go @@ -9,10 +9,29 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" ) -func TestAppsecRuleLoad(t *testing.T) { +func TestAppsecConflictRuleLoad(t *testing.T) { log.SetLevel(log.TraceLevel) - tests := []appsecRuleTest{ + { + name: "simple native rule load", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "id conflict on native rule load", + expected_load_ok: false, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + }, + }, { name: "simple rule load", expected_load_ok: true, @@ -28,33 +47,66 @@ func 
TestAppsecRuleLoad(t *testing.T) { }, }, { - name: "simple native rule load", + name: "duplicate rule load", expected_load_ok: true, - inband_native_rules: []string{ - `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, }, afterload_asserts: func(runner AppsecRunner) { require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) }, }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRuleLoad(t *testing.T) { + log.SetLevel(log.TraceLevel) + + tests := []appsecRuleTest{ { - name: "simple native rule load (2)", + name: "simple rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) + }, + }, + { + name: "simple native rule load", expected_load_ok: true, inband_native_rules: []string{ `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, - `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, }, afterload_asserts: func(runner AppsecRunner) { - require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) }, }, { - name: "simple native rule load + dedup", + name: "simple native rule load 
(2)", expected_load_ok: true, inband_native_rules: []string{ `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, - `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, }, afterload_asserts: func(runner AppsecRunner) { require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index c0af1002f49..5f2b93836f6 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -41,7 +41,9 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { log.SetLevel(log.WarnLevel) } inbandRules := []string{} + nativeInbandRules := []string{} outofbandRules := []string{} + nativeOutofbandRules := []string{} InChan := make(chan appsec.ParsedRequest) OutChan := make(chan types.Event) @@ -56,8 +58,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { inbandRules = append(inbandRules, strRule) } - inbandRules = append(inbandRules, test.inband_native_rules...) - outofbandRules = append(outofbandRules, test.outofband_native_rules...) + nativeInbandRules = append(nativeInbandRules, test.inband_native_rules...) + nativeOutofbandRules = append(nativeOutofbandRules, test.outofband_native_rules...) 
for ridx, rule := range test.outofband_rules { strRule, _, err := rule.Convert(appsec_rule.ModsecurityRuleType, rule.Name) if err != nil { @@ -82,8 +84,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { if err != nil { t.Fatalf("unable to build appsec runtime : %s", err) } - AppsecRuntime.InBandRules = []appsec.AppsecCollection{{Rules: inbandRules}} - AppsecRuntime.OutOfBandRules = []appsec.AppsecCollection{{Rules: outofbandRules}} + AppsecRuntime.InBandRules = []appsec.AppsecCollection{{Rules: inbandRules, NativeRules: nativeInbandRules}} + AppsecRuntime.OutOfBandRules = []appsec.AppsecCollection{{Rules: outofbandRules, NativeRules: nativeOutofbandRules}} appsecRunnerUUID := uuid.New().String() //we copy AppsecRutime for each runner wrt := *AppsecRuntime diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index d283f95cb19..33e442e7f5b 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -15,6 +15,7 @@ import ( type AppsecCollection struct { collectionName string Rules []string + NativeRules []string } var APPSEC_RULE = "appsec-rule" @@ -88,14 +89,14 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro if strings.TrimSpace(line) == "" { continue } - appsecCol.Rules = append(appsecCol.Rules, line) + appsecCol.NativeRules = append(appsecCol.NativeRules, line) } } } if appsecRule.SecLangRules != nil { logger.Tracef("Adding inline rules %+v", appsecRule.SecLangRules) - appsecCol.Rules = append(appsecCol.Rules, appsecRule.SecLangRules...) + appsecCol.NativeRules = append(appsecCol.NativeRules, appsecRule.SecLangRules...) 
} if appsecRule.Rules != nil { From b2bcf31ed75d5bbfdb31c47047962e00a8485653 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 27 Jan 2025 12:45:39 +0100 Subject: [PATCH 409/581] use the actual bucket name when checking for simulation mode (#3416) --- pkg/leakybucket/manager_load.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 1ed9c2d2980..6e601bb2ec1 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -300,7 +300,7 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, scenarios [ bucketFactory.ret = response if cscfg.SimulationConfig != nil { - bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(item.Name) + bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(bucketFactory.Name) } bucketFactory.ScenarioVersion = item.State.LocalVersion From 5a37161df268ee4ded05b5a4c9f3adb138c8eeed Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 27 Jan 2025 13:05:26 +0100 Subject: [PATCH 410/581] fix: use CreatedAt instead of StartAt (#3427) * fix: use CreatedAt instead of StartAt fix: #3321 * not a ptr --- cmd/crowdsec-cli/clialert/alerts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 5907d4a0fa8..4ae72919a9e 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -78,7 +78,7 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), decisionsFromAlert(alertItem), - *alertItem.StartAt, + alertItem.CreatedAt, } if printMachine { row = append(row, alertItem.MachineID) From 51f762c6dbc870aea65645be02de834a887a55fd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 29 Jan 2025 10:16:29 +0100 Subject: [PATCH 411/581] CI: package docker tests with uv (#3429) --- 
.github/workflows/docker-tests.yml | 32 +- Dockerfile | 4 +- Dockerfile.debian | 4 +- docker/test/.python-version | 1 + docker/test/Pipfile | 11 - docker/test/Pipfile.lock | 604 -------------------- docker/test/README.md | 0 docker/test/pyproject.toml | 41 ++ docker/test/tests/conftest.py | 9 +- docker/test/tests/test_agent.py | 56 +- docker/test/tests/test_agent_only.py | 22 +- docker/test/tests/test_bouncer.py | 25 +- docker/test/tests/test_capi.py | 23 +- docker/test/tests/test_capi_whitelists.py | 22 +- docker/test/tests/test_cold_logs.py | 29 +- docker/test/tests/test_flavors.py | 47 +- docker/test/tests/test_hello.py | 13 +- docker/test/tests/test_hub.py | 16 +- docker/test/tests/test_hub_collections.py | 100 ++-- docker/test/tests/test_hub_parsers.py | 60 +- docker/test/tests/test_hub_postoverflows.py | 41 +- docker/test/tests/test_hub_scenarios.py | 55 +- docker/test/tests/test_local_api_url.py | 40 +- docker/test/tests/test_local_item.py | 28 +- docker/test/tests/test_metrics.py | 46 +- docker/test/tests/test_nolapi.py | 5 +- docker/test/tests/test_simple.py | 2 +- docker/test/tests/test_tls.py | 234 ++++---- docker/test/tests/test_version.py | 8 +- docker/test/tests/test_wal.py | 16 +- docker/test/uv.lock | 587 +++++++++++++++++++ 31 files changed, 1080 insertions(+), 1101 deletions(-) create mode 100644 docker/test/.python-version delete mode 100644 docker/test/Pipfile delete mode 100644 docker/test/Pipfile.lock create mode 100644 docker/test/README.md create mode 100644 docker/test/pyproject.toml create mode 100644 docker/test/uv.lock diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 918f3bcaf1d..647f3e55cdb 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -49,28 +49,30 @@ jobs: cache-from: type=gha cache-to: type=gha,mode=min - - name: "Setup Python" + - name: "Create Docker network" + run: docker network create net-test + + - name: Install uv + uses: 
astral-sh/setup-uv@v5 + with: + version: 0.5.24 + enable-cache: true + cache-dependency-glob: "uv.lock" + + - name: "Set up Python" uses: actions/setup-python@v5 with: - python-version: "3.x" - cache: 'pipenv' + python-version-file: "./docker/test/.python-version" - - name: "Install dependencies" + # running serially to reduce test flakiness + - name: Lint and run the tests run: | cd docker/test - python -m pip install --upgrade pipenv wheel - pipenv install --deploy - - - name: "Create Docker network" - run: docker network create net-test - - - name: "Run tests" + uv sync --all-extras --dev --locked + uv run ruff check + uv run pytest tests -n 1 --durations=0 --color=yes env: CROWDSEC_TEST_VERSION: test CROWDSEC_TEST_FLAVORS: ${{ matrix.flavor }} CROWDSEC_TEST_NETWORK: net-test CROWDSEC_TEST_TIMEOUT: 90 - # running serially to reduce test flakiness - run: | - cd docker/test - pipenv run pytest -n 1 --durations=0 --color=yes diff --git a/Dockerfile b/Dockerfile index 880df88dc02..ee6d54abb02 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.23-alpine3.20 AS build +FROM docker.io/golang:1.23-alpine3.20 AS build ARG BUILD_VERSION @@ -31,7 +31,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ # In case we need to remove agents here.. 
# cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM alpine:latest AS slim +FROM docker.io/alpine:latest AS slim RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata bash rsync && \ mkdir -p /staging/etc/crowdsec && \ diff --git a/Dockerfile.debian b/Dockerfile.debian index 5d47f167e99..f37ba02a7c2 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,5 @@ # vim: set ft=dockerfile: -FROM golang:1.23-bookworm AS build +FROM docker.io/golang:1.23-bookworm AS build ARG BUILD_VERSION @@ -36,7 +36,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ # In case we need to remove agents here.. # cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM debian:bookworm-slim AS slim +FROM docker.io/debian:bookworm-slim AS slim ENV DEBIAN_FRONTEND=noninteractive ENV DEBCONF_NOWARNINGS="yes" diff --git a/docker/test/.python-version b/docker/test/.python-version new file mode 100644 index 00000000000..e4fba218358 --- /dev/null +++ b/docker/test/.python-version @@ -0,0 +1 @@ +3.12 diff --git a/docker/test/Pipfile b/docker/test/Pipfile deleted file mode 100644 index c57ccb628e8..00000000000 --- a/docker/test/Pipfile +++ /dev/null @@ -1,11 +0,0 @@ -[packages] -pytest-dotenv = "0.5.2" -pytest-xdist = "3.5.0" -pytest-cs = {ref = "0.7.19", git = "https://github.com/crowdsecurity/pytest-cs.git"} - -[dev-packages] -gnureadline = "8.1.2" -ipdb = "0.13.13" - -[requires] -python_version = "*" diff --git a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock deleted file mode 100644 index 99184d9f2a2..00000000000 --- a/docker/test/Pipfile.lock +++ /dev/null @@ -1,604 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "b5d25a7199d15a900b285be1af97cf7b7083c6637d631ad777b454471c8319fe" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "*" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - 
"default": { - "certifi": { - "hashes": [ - "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", - "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" - ], - "markers": "python_version >= '3.6'", - "version": "==2024.8.30" - }, - "cffi": { - "hashes": [ - "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", - "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", - "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1", - "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", - "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", - "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", - "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", - "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", - "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", - "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", - "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc", - "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", - "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", - "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", - "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", - "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", - "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", - "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", - "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", - "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b", - "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", - "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", - 
"sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c", - "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", - "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", - "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", - "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8", - "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1", - "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", - "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", - "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", - "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", - "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", - "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", - "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", - "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", - "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", - "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", - "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", - "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", - "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", - "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", - "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", - "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964", - "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", - "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", - "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", - "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", 
- "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", - "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", - "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", - "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", - "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", - "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", - "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", - "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", - "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", - "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9", - "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", - "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", - "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", - "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", - "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", - "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", - "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", - "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", - "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b" - ], - "markers": "platform_python_implementation != 'PyPy'", - "version": "==1.17.1" - }, - "charset-normalizer": { - "hashes": [ - "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027", - "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087", - "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786", - "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", - "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09", - 
"sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185", - "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", - "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e", - "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519", - "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898", - "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269", - "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3", - "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f", - "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6", - "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8", - "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a", - "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73", - "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", - "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714", - "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2", - "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", - "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", - "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d", - "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", - "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", - "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269", - "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", - "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d", - "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a", - "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", - "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", 
- "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d", - "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0", - "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", - "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", - "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac", - "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25", - "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", - "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", - "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", - "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2", - "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", - "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", - "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5", - "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99", - "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c", - "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", - "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811", - "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", - "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", - "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03", - "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", - "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04", - "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c", - "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", - "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458", - 
"sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", - "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99", - "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985", - "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537", - "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238", - "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f", - "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d", - "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796", - "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a", - "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", - "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8", - "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c", - "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5", - "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5", - "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711", - "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4", - "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6", - "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c", - "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", - "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4", - "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", - "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", - "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12", - "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c", - "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", - "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8", 
- "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", - "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b", - "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", - "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", - "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", - "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33", - "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519", - "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561" - ], - "markers": "python_full_version >= '3.7.0'", - "version": "==3.3.2" - }, - "cryptography": { - "hashes": [ - "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494", - "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806", - "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d", - "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062", - "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2", - "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4", - "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1", - "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85", - "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84", - "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042", - "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d", - "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962", - "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2", - "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa", - "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d", - "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365", - 
"sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96", - "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47", - "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d", - "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d", - "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c", - "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb", - "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277", - "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172", - "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034", - "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a", - "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289" - ], - "markers": "python_version >= '3.7'", - "version": "==43.0.1" - }, - "docker": { - "hashes": [ - "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", - "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0" - ], - "markers": "python_version >= '3.8'", - "version": "==7.1.0" - }, - "execnet": { - "hashes": [ - "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", - "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3" - ], - "markers": "python_version >= '3.8'", - "version": "==2.1.1" - }, - "idna": { - "hashes": [ - "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", - "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" - ], - "markers": "python_version >= '3.6'", - "version": "==3.10" - }, - "iniconfig": { - "hashes": [ - "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", - "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" - ], - "markers": "python_version >= '3.7'", - "version": "==2.0.0" - }, - "packaging": { - "hashes": [ - 
"sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", - "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" - ], - "markers": "python_version >= '3.8'", - "version": "==24.1" - }, - "pluggy": { - "hashes": [ - "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", - "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" - ], - "markers": "python_version >= '3.8'", - "version": "==1.5.0" - }, - "psutil": { - "hashes": [ - "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35", - "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0", - "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c", - "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1", - "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3", - "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c", - "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd", - "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3", - "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0", - "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2", - "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6", - "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d", - "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c", - "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0", - "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132", - "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14", - "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==6.0.0" - }, - "pycparser": { - "hashes": [ - 
"sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", - "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc" - ], - "markers": "python_version >= '3.8'", - "version": "==2.22" - }, - "pytest": { - "hashes": [ - "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181", - "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2" - ], - "markers": "python_version >= '3.8'", - "version": "==8.3.3" - }, - "pytest-cs": { - "git": "https://github.com/crowdsecurity/pytest-cs.git", - "ref": "aea7e8549faa32f5e1d1f17755a5db3712396a2a" - }, - "pytest-datadir": { - "hashes": [ - "sha256:1617ed92f9afda0c877e4eac91904b5f779d24ba8f5e438752e3ae39d8d2ee3f", - "sha256:34adf361bcc7b37961bbc1dfa8d25a4829e778bab461703c38a5c50ca9c36dc8" - ], - "markers": "python_version >= '3.8'", - "version": "==1.5.0" - }, - "pytest-dotenv": { - "hashes": [ - "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732", - "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f" - ], - "index": "pypi", - "version": "==0.5.2" - }, - "pytest-xdist": { - "hashes": [ - "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", - "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" - ], - "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==3.5.0" - }, - "python-dotenv": { - "hashes": [ - "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", - "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a" - ], - "markers": "python_version >= '3.8'", - "version": "==1.0.1" - }, - "pyyaml": { - "hashes": [ - "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", - "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", - "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", - "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", - 
"sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", - "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", - "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", - "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", - "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", - "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", - "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", - "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", - "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", - "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", - "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", - "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", - "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", - "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", - "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", - "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", - "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", - "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", - "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", - "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", - "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", - "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", - "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", - "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", - "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", - "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", 
- "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", - "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", - "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", - "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", - "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", - "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", - "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", - "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", - "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", - "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", - "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", - "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", - "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", - "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", - "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", - "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", - "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", - "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", - "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", - "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", - "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", - "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", - "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" - ], - "markers": "python_version >= '3.8'", - "version": "==6.0.2" - }, - "requests": { - "hashes": [ - "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", - 
"sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" - ], - "markers": "python_version >= '3.8'", - "version": "==2.32.3" - }, - "trustme": { - "hashes": [ - "sha256:5375ad7fb427074bec956592e0d4ee2a4cf4da68934e1ba4bcf4217126bc45e6", - "sha256:ce105b68fb9f6d7ac7a9ee6e95bb2347a22ce4d3be78ef9a6494d5ef890e1e16" - ], - "markers": "python_version >= '3.8'", - "version": "==1.1.0" - }, - "urllib3": { - "hashes": [ - "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", - "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" - ], - "markers": "python_version >= '3.8'", - "version": "==2.2.3" - } - }, - "develop": { - "asttokens": { - "hashes": [ - "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24", - "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0" - ], - "version": "==2.4.1" - }, - "decorator": { - "hashes": [ - "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", - "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186" - ], - "markers": "python_version >= '3.11'", - "version": "==5.1.1" - }, - "executing": { - "hashes": [ - "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", - "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab" - ], - "markers": "python_version >= '3.8'", - "version": "==2.1.0" - }, - "gnureadline": { - "hashes": [ - "sha256:17a651e0c49d4b44e8ccf8992edc5a544e33ed9695d3b940ef002858c2215744", - "sha256:194bafa818d0fc3d46f8d71a8811a297a493c1264d3e2d0a71b1b1ff05f8fc15", - "sha256:1e3a8aaf1d61d351c16ad2d3425caf5768603ff5d0e86ba61da9b8756bdd1b95", - "sha256:264f22e865975a3c2ac1183f431dddd8ff7de5a645b89a801c6a276d800f49f3", - "sha256:2753aa1e46b4260b38da424c6a7da7a3ddac161a0b4e6fb71c1093e9ef3d2e73", - "sha256:2816bac8be6bc0e3aa2301acac76e308137eeef1b618c9e0c95c1f89a139a4d8", - "sha256:2ce5c49ecc54e1df0193e90422806a5940f908553206689aeaa04bc959d3aa9a", - 
"sha256:33ea248385e0d87a3fada38c9164a5756861aa59d6ee010c8be30eeb41f41b49", - "sha256:3903cba2987d42340f1d85c38d3780e954c95e64bfe1839002c7818aa63f8ac3", - "sha256:4262a6aa356ab22ef642f43a7f94eb42a72d6f0c532edb4e8c6b933f573056d2", - "sha256:49df5a432e4ff39cee1b0632c6d0e5fb304757113e502d70b50e33d9ffa47372", - "sha256:4ad9b10409d969ba42acbf89e58352cf3043a5155c2ee677d061e292336b5479", - "sha256:5e1e2d34b0c4ad81c7b00019fafa6de2faf6969c55fa58229e26267cae34047e", - "sha256:5fde3e6417d9004381e8e9835e0a89d81d2d77eeace9364d2e3d9fb64054d449", - "sha256:72da8bac1eb24b6c8237a33d7019a3f004a3d5ba867337175ed764831d9a2c99", - "sha256:74f2538ac15ff4ef9534823abdef077bb34c7dd343e204a36d978f09e168462f", - "sha256:861936c9b362d96152af2d73ccb6f3e901e70f0e4a2e7e62f4e226e91d349edb", - "sha256:8c4690d6c89dbead0958b19263ae67ef995e6109d6bc880cb0e40720cb1ba301", - "sha256:aa29a18594277ea691f92b0c6627d594c0f3387a6685e2e42038ab3f718c794e", - "sha256:b422ff3a78e281ee2e19b0eff70efa48396284bbefa86b83438d668ea9d038a3", - "sha256:c1bcb32e3b63442570d6425055aa6d5c3b6e8b09b9c7d1f8333e70203166a5a3", - "sha256:c402bc6e107beb015ae18c3d2e11f28375f049e464423ead88b35affe80f9be0", - "sha256:c7971653083a48049abd52baa9c8c0188aee362e7b2dd236fe51ecd4e6bc9bbe", - "sha256:de3d8ea66f1b5d00ed843b8925fc07476b8c838c38e584af8639c6a976a43d08", - "sha256:deb921c2cbc14671bb81f3f33d9363a9d0720203b5d716baee32e51c399e914b", - "sha256:e84e903de1514043e6a22866a1973c2ad5f5717f78e9d54e4d6809c48fbd3d81", - "sha256:ecdc4368bd2f7ae9a22de31b024455222082cb49b98ee69ffd0a59734bf648e1" - ], - "index": "pypi", - "version": "==8.1.2" - }, - "ipdb": { - "hashes": [ - "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", - "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726" - ], - "index": "pypi", - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.13.13" - }, - "ipython": { - "hashes": [ - 
"sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a", - "sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35" - ], - "markers": "python_version >= '3.11'", - "version": "==8.28.0" - }, - "jedi": { - "hashes": [ - "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd", - "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0" - ], - "markers": "python_version >= '3.6'", - "version": "==0.19.1" - }, - "matplotlib-inline": { - "hashes": [ - "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", - "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca" - ], - "markers": "python_version >= '3.8'", - "version": "==0.1.7" - }, - "parso": { - "hashes": [ - "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", - "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d" - ], - "markers": "python_version >= '3.6'", - "version": "==0.8.4" - }, - "pexpect": { - "hashes": [ - "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", - "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f" - ], - "markers": "sys_platform != 'win32' and sys_platform != 'emscripten'", - "version": "==4.9.0" - }, - "prompt-toolkit": { - "hashes": [ - "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90", - "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e" - ], - "markers": "python_full_version >= '3.7.0'", - "version": "==3.0.48" - }, - "ptyprocess": { - "hashes": [ - "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", - "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220" - ], - "version": "==0.7.0" - }, - "pure-eval": { - "hashes": [ - "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", - "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42" - ], - "version": "==0.2.3" - }, - 
"pygments": { - "hashes": [ - "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", - "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a" - ], - "markers": "python_version >= '3.8'", - "version": "==2.18.0" - }, - "six": { - "hashes": [ - "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", - "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.16.0" - }, - "stack-data": { - "hashes": [ - "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", - "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695" - ], - "version": "==0.6.3" - }, - "traitlets": { - "hashes": [ - "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", - "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f" - ], - "markers": "python_version >= '3.8'", - "version": "==5.14.3" - }, - "wcwidth": { - "hashes": [ - "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", - "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5" - ], - "version": "==0.2.13" - } - } -} diff --git a/docker/test/README.md b/docker/test/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docker/test/pyproject.toml b/docker/test/pyproject.toml new file mode 100644 index 00000000000..d32d184424f --- /dev/null +++ b/docker/test/pyproject.toml @@ -0,0 +1,41 @@ +[project] +name = "crowdsec-docker-tests" +version = "0.1.0" +description = "Docker tests for Crowdsec" +readme = "README.md" +requires-python = ">=3.12" +dependencies = [ + "pytest>=8.3.4", + "pytest-cs", + "pytest-dotenv>=0.5.2", + "pytest-xdist>=3.6.1", +] + +[dependency-groups] +dev = [ + "ipdb>=0.13.13", + "ruff>=0.9.3", +] + +[tool.uv.sources] +pytest-cs = { git = "https://github.com/crowdsecurity/pytest-cs" } + +[tool.ruff] + +line-length 
= 120 + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "C", # flake8-comprehensions + "B", # flake8-bugbear + "UP", # pyupgrade + "C90", # macabe +] + +ignore = [ + "B008", # do not perform function calls in argument defaults +] diff --git a/docker/test/tests/conftest.py b/docker/test/tests/conftest.py index 3498da82660..d32ffa28c37 100644 --- a/docker/test/tests/conftest.py +++ b/docker/test/tests/conftest.py @@ -1,11 +1,6 @@ - pytest_plugins = ("cs",) def pytest_configure(config): - config.addinivalue_line( - 'markers', 'docker: mark tests for lone or manually orchestrated containers' - ) - config.addinivalue_line( - 'markers', 'compose: mark tests for docker compose projects' - ) + config.addinivalue_line("markers", "docker: mark tests for lone or manually orchestrated containers") + config.addinivalue_line("markers", "compose: mark tests for docker compose projects") diff --git a/docker/test/tests/test_agent.py b/docker/test/tests/test_agent.py index e55d11af850..aec1bbdaae8 100644 --- a/docker/test/tests/test_agent.py +++ b/docker/test/tests/test_agent.py @@ -10,12 +10,12 @@ def test_no_agent(crowdsec, flavor): """Test DISABLE_AGENT=true""" env = { - 'DISABLE_AGENT': 'true', + "DISABLE_AGENT": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*CrowdSec Local API listening on *:8080*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -24,23 +24,25 @@ def test_no_agent(crowdsec, flavor): def test_machine_register(crowdsec, flavor, tmp_path_factory): """A local agent is always registered for use by cscli""" - data_dir = tmp_path_factory.mktemp('data') + data_dir = 
tmp_path_factory.mktemp("data") env = { - 'DISABLE_AGENT': 'true', + "DISABLE_AGENT": "true", } volumes = { - data_dir: {'bind': '/var/lib/crowdsec/data', 'mode': 'rw'}, + data_dir: {"bind": "/var/lib/crowdsec/data", "mode": "rw"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -48,27 +50,31 @@ def test_machine_register(crowdsec, flavor, tmp_path_factory): # The local agent is not registered, because we didn't persist local_api_credentials.yaml with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - config_dir = tmp_path_factory.mktemp('config') + config_dir = tmp_path_factory.mktemp("config") - volumes[config_dir] = {'bind': '/etc/crowdsec', 'mode': 'rw'} + volumes[config_dir] = {"bind": "/etc/crowdsec", "mode": "rw"} with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) 
- res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -76,12 +82,14 @@ def test_machine_register(crowdsec, flavor, tmp_path_factory): # The local agent is now already registered with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Local agent already registered*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_agent_only.py b/docker/test/tests/test_agent_only.py index 038b726e324..4e1689e0b9b 100644 --- a/docker/test/tests/test_agent_only.py +++ b/docker/test/tests/test_agent_only.py @@ -1,7 +1,7 @@ #!/usr/bin/env python -from http import HTTPStatus import random +from http import HTTPStatus import pytest @@ -10,19 +10,19 @@ def test_split_lapi_agent(crowdsec, flavor): rand = str(random.randint(0, 10000)) - lapiname = f'lapi-{rand}' - agentname = f'agent-{rand}' + lapiname = f"lapi-{rand}" + agentname = f"agent-{rand}" lapi_env = { - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", } agent_env = { - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'DISABLE_LOCAL_API': 'true', - 'LOCAL_API_URL': f'http://{lapiname}:8080', + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", + "DISABLE_LOCAL_API": "true", + "LOCAL_API_URL": f"http://{lapiname}:8080", } cs_lapi = 
crowdsec(name=lapiname, environment=lapi_env, flavor=flavor) @@ -30,10 +30,10 @@ def test_split_lapi_agent(crowdsec, flavor): with cs_lapi as lapi: lapi.wait_for_log("*CrowdSec Local API listening on *:8080*") - lapi.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) + lapi.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_bouncer.py b/docker/test/tests/test_bouncer.py index 98b86de858c..d87aff734c5 100644 --- a/docker/test/tests/test_bouncer.py +++ b/docker/test/tests/test_bouncer.py @@ -5,8 +5,8 @@ """ import hashlib -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -21,36 +21,33 @@ def hex512(s): def test_register_bouncer_env(crowdsec, flavor): """Test installing bouncers at startup, from envvar""" - env = { - 'BOUNCER_KEY_bouncer1name': 'bouncer1key', - 'BOUNCER_KEY_bouncer2name': 'bouncer2key' - } + env = {"BOUNCER_KEY_bouncer1name": "bouncer1key", "BOUNCER_KEY_bouncer2name": "bouncer2key"} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli bouncers list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 2 bouncer1, bouncer2 = j - assert bouncer1['name'] == 'bouncer1name' - assert bouncer2['name'] == 'bouncer2name' + assert bouncer1["name"] == "bouncer1name" + assert bouncer2["name"] == "bouncer2name" # add a second bouncer at runtime - res = cs.cont.exec_run('cscli bouncers add bouncer3name -k 
bouncer3key') + res = cs.cont.exec_run("cscli bouncers add bouncer3name -k bouncer3key") assert res.exit_code == 0 - res = cs.cont.exec_run('cscli bouncers list -o json') + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 3 bouncer3 = j[2] - assert bouncer3['name'] == 'bouncer3name' + assert bouncer3["name"] == "bouncer3name" # remove all bouncers - res = cs.cont.exec_run('cscli bouncers delete bouncer1name bouncer2name bouncer3name') + res = cs.cont.exec_run("cscli bouncers delete bouncer1name bouncer2name bouncer3name") assert res.exit_code == 0 - res = cs.cont.exec_run('cscli bouncers list -o json') + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 0 diff --git a/docker/test/tests/test_capi.py b/docker/test/tests/test_capi.py index 08b3a70471e..ad25f7a766f 100644 --- a/docker/test/tests/test_capi.py +++ b/docker/test/tests/test_capi.py @@ -3,6 +3,7 @@ from http import HTTPStatus import pytest + pytestmark = pytest.mark.docker @@ -10,13 +11,13 @@ def test_no_capi(crowdsec, flavor): """Test no CAPI (disabled by default in tests)""" env = { - 'DISABLE_ONLINE_API': 'true', + "DISABLE_ONLINE_API": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli capi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli capi status") assert res.exit_code == 1 assert "You can successfully interact with Central API (CAPI)" not in res.output.decode() @@ -29,17 +30,19 @@ def test_capi(crowdsec, flavor): """Test CAPI""" env = { - 'DISABLE_ONLINE_API': 'false', + "DISABLE_ONLINE_API": "false", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', 
want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli capi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli capi status") assert res.exit_code == 0 assert "You can successfully interact with Central API (CAPI)" in res.output.decode() - cs.wait_for_log([ - "*Successfully registered to Central API (CAPI)*", - "*Registration to online API done*", - ]) + cs.wait_for_log( + [ + "*Successfully registered to Central API (CAPI)*", + "*Registration to online API done*", + ] + ) diff --git a/docker/test/tests/test_capi_whitelists.py b/docker/test/tests/test_capi_whitelists.py index 19378ba86f0..6cdd5f401f5 100644 --- a/docker/test/tests/test_capi_whitelists.py +++ b/docker/test/tests/test_capi_whitelists.py @@ -1,32 +1,32 @@ #!/usr/bin/env python from http import HTTPStatus -import yaml import pytest +import yaml pytestmark = pytest.mark.docker -def test_capi_whitelists(crowdsec, tmp_path_factory, flavor,): +def test_capi_whitelists( + crowdsec, + tmp_path_factory, + flavor, +): """Test CAPI_WHITELISTS_PATH""" - env = { - "CAPI_WHITELISTS_PATH": "/path/to/whitelists.yaml" - } + env = {"CAPI_WHITELISTS_PATH": "/path/to/whitelists.yaml"} whitelists = tmp_path_factory.mktemp("whitelists") with open(whitelists / "whitelists.yaml", "w") as f: yaml.dump({"ips": ["1.2.3.4", "2.3.4.5"], "cidrs": ["1.2.3.0/24"]}, f) - volumes = { - whitelists / "whitelists.yaml": {"bind": "/path/to/whitelists.yaml", "mode": "ro"} - } + volumes = {whitelists / "whitelists.yaml": {"bind": "/path/to/whitelists.yaml", "mode": "ro"}} with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show-yaml') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show-yaml") assert res.exit_code == 0 stdout = res.output.decode() y = 
yaml.safe_load(stdout) - assert y['api']['server']['capi_whitelists_path'] == '/path/to/whitelists.yaml' + assert y["api"]["server"]["capi_whitelists_path"] == "/path/to/whitelists.yaml" diff --git a/docker/test/tests/test_cold_logs.py b/docker/test/tests/test_cold_logs.py index 6f6c578ebe0..2eb3248ffd7 100644 --- a/docker/test/tests/test_cold_logs.py +++ b/docker/test/tests/test_cold_logs.py @@ -2,16 +2,15 @@ import datetime -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker def test_cold_logs(crowdsec, tmp_path_factory, flavor): env = { - 'DSN': 'file:///var/log/toto.log', + "DSN": "file:///var/log/toto.log", } logs = tmp_path_factory.mktemp("logs") @@ -20,11 +19,11 @@ def test_cold_logs(crowdsec, tmp_path_factory, flavor): with open(logs / "toto.log", "w") as f: # like date '+%b %d %H:%M:%S' but in python for i in range(10): - ts = (now + datetime.timedelta(seconds=i)).strftime('%b %d %H:%M:%S') - f.write(ts + ' sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424\n') + ts = (now + datetime.timedelta(seconds=i)).strftime("%b %d %H:%M:%S") + f.write(ts + " sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424\n") volumes = { - logs / "toto.log": {'bind': '/var/log/toto.log', 'mode': 'ro'}, + logs / "toto.log": {"bind": "/var/log/toto.log", "mode": "ro"}, } # missing type @@ -32,20 +31,22 @@ def test_cold_logs(crowdsec, tmp_path_factory, flavor): with crowdsec(flavor=flavor, environment=env, volumes=volumes, wait_status=Status.EXITED) as cs: cs.wait_for_log("*-dsn requires a -type argument*") - env['TYPE'] = 'syslog' + env["TYPE"] = "syslog" with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ - "*Adding file /var/log/toto.log to filelist*", - "*reading /var/log/toto.log at once*", - "*Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 5s)*", - "*crowdsec shutdown*" - ]) + cs.wait_for_log( + [ + "*Adding file /var/log/toto.log to 
filelist*", + "*reading /var/log/toto.log at once*", + "*Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 5s)*", + "*crowdsec shutdown*", + ] + ) def test_cold_logs_missing_dsn(crowdsec, flavor): env = { - 'TYPE': 'syslog', + "TYPE": "syslog", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: diff --git a/docker/test/tests/test_flavors.py b/docker/test/tests/test_flavors.py index 7e78b8d681b..a48fe428c7b 100644 --- a/docker/test/tests/test_flavors.py +++ b/docker/test/tests/test_flavors.py @@ -15,8 +15,8 @@ def test_cscli_lapi(crowdsec, flavor): """Test if cscli can talk to lapi""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -27,35 +27,34 @@ def test_flavor_content(crowdsec, flavor): """Test flavor contents""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - x = cs.cont.exec_run('ls -1 /var/lib/crowdsec/data/') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + x = cs.cont.exec_run("ls -1 /var/lib/crowdsec/data/") assert x.exit_code == 0 stdout = x.output.decode() - if 'slim' in flavor or 'plugins' in flavor: - assert 'GeoLite2-City.mmdb' not in stdout - assert 'GeoLite2-ASN.mmdb' not in stdout + if "slim" in flavor or "plugins" in flavor: + assert "GeoLite2-City.mmdb" not in stdout + assert "GeoLite2-ASN.mmdb" not in stdout else: - assert 'GeoLite2-City.mmdb' in stdout - assert 'GeoLite2-ASN.mmdb' in stdout - assert 'crowdsec.db' in stdout + assert "GeoLite2-City.mmdb" in stdout + assert "GeoLite2-ASN.mmdb" in stdout + assert "crowdsec.db" 
in stdout - x = cs.cont.exec_run( - 'ls -1 /usr/local/lib/crowdsec/plugins/') + x = cs.cont.exec_run("ls -1 /usr/local/lib/crowdsec/plugins/") stdout = x.output.decode() - if 'slim' in flavor: + if "slim" in flavor: # the exact return code and full message depend # on the 'ls' implementation (busybox vs coreutils) assert x.exit_code != 0 - assert 'No such file or directory' in stdout - assert 'notification-email' not in stdout - assert 'notification-http' not in stdout - assert 'notification-slack' not in stdout - assert 'notification-splunk' not in stdout - assert 'notification-sentinel' not in stdout + assert "No such file or directory" in stdout + assert "notification-email" not in stdout + assert "notification-http" not in stdout + assert "notification-slack" not in stdout + assert "notification-splunk" not in stdout + assert "notification-sentinel" not in stdout else: assert x.exit_code == 0 - assert 'notification-email' in stdout - assert 'notification-http' in stdout - assert 'notification-slack' in stdout - assert 'notification-splunk' in stdout - assert 'notification-sentinel' in stdout + assert "notification-email" in stdout + assert "notification-http" in stdout + assert "notification-slack" in stdout + assert "notification-splunk" in stdout + assert "notification-sentinel" in stdout diff --git a/docker/test/tests/test_hello.py b/docker/test/tests/test_hello.py index a21fde85044..a3ff4f07a93 100644 --- a/docker/test/tests/test_hello.py +++ b/docker/test/tests/test_hello.py @@ -13,24 +13,23 @@ def test_docker_cli_run(): """Test if docker run works from the command line. Capture stdout too""" - res = subprocess.run(['docker', 'run', '--rm', 'hello-world'], - capture_output=True, text=True) + res = subprocess.run(["docker", "run", "--rm", "hello-world"], capture_output=True, text=True) assert 0 == res.returncode - assert 'Hello from Docker!' in res.stdout + assert "Hello from Docker!" 
in res.stdout def test_docker_run(docker_client): """Test if docker run works from the python SDK.""" - output = docker_client.containers.run('hello-world', remove=True) + output = docker_client.containers.run("hello-world", remove=True) lines = output.decode().splitlines() assert "Hello from Docker!" in lines def test_docker_run_detach(docker_client): """Test with python SDK (async).""" - cont = docker_client.containers.run('hello-world', detach=True) - assert cont.status == 'created' - assert cont.attrs['State']['ExitCode'] == 0 + cont = docker_client.containers.run("hello-world", detach=True) + assert cont.status == "created" + assert cont.attrs["State"]["ExitCode"] == 0 lines = cont.logs().decode().splitlines() assert "Hello from Docker!" in lines cont.remove(force=True) diff --git a/docker/test/tests/test_hub.py b/docker/test/tests/test_hub.py index e70555ea855..a7134fcb5c8 100644 --- a/docker/test/tests/test_hub.py +++ b/docker/test/tests/test_hub.py @@ -4,8 +4,8 @@ Test pre-installed hub items. 
""" -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -16,12 +16,12 @@ def test_preinstalled_hub(crowdsec, flavor): """Test hub objects installed in the entrypoint""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli hub list -o json', stderr=False) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli hub list -o json", stderr=False) assert res.exit_code == 0 j = json.loads(res.output) - collections = {c['name']: c for c in j['collections']} - assert collections['crowdsecurity/linux']['status'] == 'enabled' - parsers = {c['name']: c for c in j['parsers']} - assert parsers['crowdsecurity/whitelists']['status'] == 'enabled' - assert parsers['crowdsecurity/docker-logs']['status'] == 'enabled' + collections = {c["name"]: c for c in j["collections"]} + assert collections["crowdsecurity/linux"]["status"] == "enabled" + parsers = {c["name"]: c for c in j["parsers"]} + assert parsers["crowdsecurity/whitelists"]["status"] == "enabled" + assert parsers["crowdsecurity/docker-logs"]["status"] == "enabled" diff --git a/docker/test/tests/test_hub_collections.py b/docker/test/tests/test_hub_collections.py index 0d1b3ee5e94..71fa698af06 100644 --- a/docker/test/tests/test_hub_collections.py +++ b/docker/test/tests/test_hub_collections.py @@ -4,8 +4,8 @@ Test collection management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,98 +14,98 @@ def test_install_two_collections(crowdsec, flavor): """Test installing collections at startup""" - it1 = 'crowdsecurity/apache2' - it2 = 'crowdsecurity/asterisk' - env = { - 'COLLECTIONS': f'{it1} {it2}' - } + it1 = "crowdsecurity/apache2" + it2 = "crowdsecurity/asterisk" + env = {"COLLECTIONS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_http(8080, 
'/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' - cs.wait_for_log([ - f'*enabling collections:{it1}*', - f'*enabling collections:{it2}*', - ]) + items = {c["name"]: c for c in j["collections"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" + cs.wait_for_log( + [ + f"*enabling collections:{it1}*", + f"*enabling collections:{it2}*", + ] + ) def test_disable_collection(crowdsec, flavor): """Test removing a pre-installed collection at startup""" - it = 'crowdsecurity/linux' - env = { - 'DISABLE_COLLECTIONS': it - } + it = "crowdsecurity/linux" + env = {"DISABLE_COLLECTIONS": it} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['collections']} + items = {c["name"] for c in j["collections"]} assert it not in items - cs.wait_for_log([ - f'*disabling collections:{it}*', - ]) + cs.wait_for_log( + [ + f"*disabling collections:{it}*", + ] + ) def test_install_and_disable_collection(crowdsec, flavor): """Declare a collection to install AND disable: disable wins""" - it = 'crowdsecurity/apache2' + it = "crowdsecurity/apache2" env = { - 'COLLECTIONS': it, - 'DISABLE_COLLECTIONS': it, + "COLLECTIONS": it, + "DISABLE_COLLECTIONS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting 
processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['collections']} + items = {c["name"] for c in j["collections"]} assert it not in items logs = cs.log_lines() # check that there was no attempt to install - assert not any(f'enabling collections:{it}' in line for line in logs) + assert not any(f"enabling collections:{it}" in line for line in logs) # already done in bats, prividing here as example of a somewhat complex test def test_taint_bubble_up(crowdsec, tmp_path_factory, flavor): - coll = 'crowdsecurity/nginx' - env = { - 'COLLECTIONS': f'{coll}' - } + coll = "crowdsecurity/nginx" + env = {"COLLECTIONS": f"{coll}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} + items = {c["name"]: c for c in j["collections"]} # implicit check for tainted=False - assert items[coll]['status'] == 'enabled' - cs.wait_for_log([ - f'*enabling collections:{coll}*', - ]) + assert items[coll]["status"] == "enabled" + cs.wait_for_log( + [ + f"*enabling collections:{coll}*", + ] + ) - scenario = 'crowdsecurity/http-crawl-non_statics' + scenario = "crowdsecurity/http-crawl-non_statics" # the description won't be read back, it's from the index yq_command = f"yq -e -i '.description=\"tainted\"' /etc/crowdsec/hub/scenarios/{scenario}.yaml" res = cs.cont.exec_run(yq_command) assert res.exit_code == 0 - res = cs.cont.exec_run(f'cscli scenarios inspect 
{scenario} -o json') + res = cs.cont.exec_run(f"cscli scenarios inspect {scenario} -o json") assert res.exit_code == 0 j = json.loads(res.output) - assert j['tainted'] is True + assert j["tainted"] is True - res = cs.cont.exec_run('cscli collections list -o json') + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} - assert items['crowdsecurity/nginx']['status'] == 'enabled,tainted' - assert items['crowdsecurity/base-http-scenarios']['status'] == 'enabled,tainted' + items = {c["name"]: c for c in j["collections"]} + assert items["crowdsecurity/nginx"]["status"] == "enabled,tainted" + assert items["crowdsecurity/base-http-scenarios"]["status"] == "enabled,tainted" diff --git a/docker/test/tests/test_hub_parsers.py b/docker/test/tests/test_hub_parsers.py index 8cfaeecf94c..42794d20b42 100644 --- a/docker/test/tests/test_hub_parsers.py +++ b/docker/test/tests/test_hub_parsers.py @@ -4,8 +4,8 @@ Test parser management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,60 +14,54 @@ def test_install_two_parsers(crowdsec, flavor): """Test installing parsers at startup""" - it1 = 'crowdsecurity/cpanel-logs' - it2 = 'crowdsecurity/cowrie-logs' - env = { - 'PARSERS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cpanel-logs" + it2 = "crowdsecurity/cowrie-logs" + env = {"PARSERS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*parsers install "{it1}"*', - f'*parsers install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_log([f'*parsers install "{it1}"*', f'*parsers install "{it2}"*', "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code 
== 0 j = json.loads(res.output) - items = {c['name']: c for c in j['parsers']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["parsers"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" # XXX check that the parser is preinstalled by default def test_disable_parser(crowdsec, flavor): """Test removing a pre-installed parser at startup""" - it = 'crowdsecurity/whitelists' - env = { - 'DISABLE_PARSERS': it - } + it = "crowdsecurity/whitelists" + env = {"DISABLE_PARSERS": it} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*parsers remove "{it}"*', - "*Starting processing data*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_log( + [ + f'*parsers remove "{it}"*', + "*Starting processing data*", + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['parsers']} + items = {c["name"] for c in j["parsers"]} assert it not in items def test_install_and_disable_parser(crowdsec, flavor): """Declare a parser to install AND disable: disable wins""" - it = 'crowdsecurity/cpanel-logs' + it = "crowdsecurity/cpanel-logs" env = { - 'PARSERS': it, - 'DISABLE_PARSERS': it, + "PARSERS": it, + "DISABLE_PARSERS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['parsers']} + items = {c["name"] for c in j["parsers"]} assert it not in items 
logs = cs.log_lines() # check that there was no attempt to install diff --git a/docker/test/tests/test_hub_postoverflows.py b/docker/test/tests/test_hub_postoverflows.py index 80fdbc2b7bd..69f383cda24 100644 --- a/docker/test/tests/test_hub_postoverflows.py +++ b/docker/test/tests/test_hub_postoverflows.py @@ -4,8 +4,9 @@ Test postoverflow management """ -from http import HTTPStatus import json +from http import HTTPStatus + import pytest pytestmark = pytest.mark.docker @@ -13,24 +14,20 @@ def test_install_two_postoverflows(crowdsec, flavor): """Test installing postoverflows at startup""" - it1 = 'crowdsecurity/cdn-whitelist' - it2 = 'crowdsecurity/ipv6_to_range' - env = { - 'POSTOVERFLOWS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cdn-whitelist" + it2 = "crowdsecurity/ipv6_to_range" + env = {"POSTOVERFLOWS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*postoverflows install "{it1}"*', - f'*postoverflows install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli postoverflows list -o json') + cs.wait_for_log( + [f'*postoverflows install "{it1}"*', f'*postoverflows install "{it2}"*', "*Starting processing data*"] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli postoverflows list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['postoverflows']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["postoverflows"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" def test_disable_postoverflow(): @@ -40,18 +37,18 @@ def test_disable_postoverflow(): def test_install_and_disable_postoverflow(crowdsec, flavor): """Declare a postoverflow to install AND disable: disable wins""" - it = 'crowdsecurity/cdn-whitelist' + it = 
"crowdsecurity/cdn-whitelist" env = { - 'POSTOVERFLOWS': it, - 'DISABLE_POSTOVERFLOWS': it, + "POSTOVERFLOWS": it, + "DISABLE_POSTOVERFLOWS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli postoverflows list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli postoverflows list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['postoverflows']} + items = {c["name"] for c in j["postoverflows"]} assert it not in items logs = cs.log_lines() # check that there was no attempt to install diff --git a/docker/test/tests/test_hub_scenarios.py b/docker/test/tests/test_hub_scenarios.py index 2a8c3a275f2..4376a3ce64a 100644 --- a/docker/test/tests/test_hub_scenarios.py +++ b/docker/test/tests/test_hub_scenarios.py @@ -4,8 +4,8 @@ Test scenario management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,59 +14,48 @@ def test_install_two_scenarios(crowdsec, flavor): """Test installing scenarios at startup""" - it1 = 'crowdsecurity/cpanel-bf-attempt' - it2 = 'crowdsecurity/asterisk_bf' - env = { - 'SCENARIOS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cpanel-bf-attempt" + it2 = "crowdsecurity/asterisk_bf" + env = {"SCENARIOS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*scenarios install "{it1}"*', - f'*scenarios install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_log([f'*scenarios install "{it1}"*', f'*scenarios install "{it2}"*', "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 
j = json.loads(res.output) - items = {c['name']: c for c in j['scenarios']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["scenarios"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" def test_disable_scenario(crowdsec, flavor): """Test removing a pre-installed scenario at startup""" - it = 'crowdsecurity/ssh-bf' - env = { - 'DISABLE_SCENARIOS': it - } + it = "crowdsecurity/ssh-bf" + env = {"DISABLE_SCENARIOS": it} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*scenarios remove "{it}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_log([f'*scenarios remove "{it}"*', "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['scenarios']} + items = {c["name"] for c in j["scenarios"]} assert it not in items def test_install_and_disable_scenario(crowdsec, flavor): """Declare a scenario to install AND disable: disable wins""" - it = 'crowdsecurity/asterisk_bf' + it = "crowdsecurity/asterisk_bf" env = { - 'SCENARIOS': it, - 'DISABLE_SCENARIOS': it, + "SCENARIOS": it, + "DISABLE_SCENARIOS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['scenarios']} + items = {c["name"] for c in j["scenarios"]} assert it not in items logs = 
cs.cont.logs().decode().splitlines() # check that there was no attempt to install diff --git a/docker/test/tests/test_local_api_url.py b/docker/test/tests/test_local_api_url.py index aa90c9fb798..e38af3fedbe 100644 --- a/docker/test/tests/test_local_api_url.py +++ b/docker/test/tests/test_local_api_url.py @@ -10,12 +10,9 @@ def test_local_api_url_default(crowdsec, flavor): """Test LOCAL_API_URL (default)""" with crowdsec(flavor=flavor) as cs: - cs.wait_for_log([ - "*CrowdSec Local API listening on *:8080*", - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log(["*CrowdSec Local API listening on *:8080*", "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://0.0.0.0:8080/" in stdout @@ -24,16 +21,11 @@ def test_local_api_url_default(crowdsec, flavor): def test_local_api_url(crowdsec, flavor): """Test LOCAL_API_URL (custom)""" - env = { - "LOCAL_API_URL": "http://127.0.0.1:8080" - } + env = {"LOCAL_API_URL": "http://127.0.0.1:8080"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - "*CrowdSec Local API listening on *:8080*", - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log(["*CrowdSec Local API listening on *:8080*", "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://127.0.0.1:8080/" in stdout @@ -48,16 +40,16 @@ def test_local_api_url_ipv6(crowdsec, flavor): # FIXME: https://forums.docker.com/t/assigning-default-ipv6-addresses/128665/3 # FIXME: https://github.com/moby/moby/issues/41438 - env = { - 
"LOCAL_API_URL": "http://[::1]:8080" - } + env = {"LOCAL_API_URL": "http://[::1]:8080"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - "*Starting processing data*", - "*CrowdSec Local API listening on [::1]:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log( + [ + "*Starting processing data*", + "*CrowdSec Local API listening on [::1]:8080*", + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://[::1]:8080/" in stdout diff --git a/docker/test/tests/test_local_item.py b/docker/test/tests/test_local_item.py index 3d6ac2fc954..e4c8e3c165a 100644 --- a/docker/test/tests/test_local_item.py +++ b/docker/test/tests/test_local_item.py @@ -4,8 +4,8 @@ Test bind-mounting local items """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -15,33 +15,29 @@ def test_inject_local_item(crowdsec, tmp_path_factory, flavor): """Test mounting a custom whitelist at startup""" - localitems = tmp_path_factory.mktemp('localitems') - custom_whitelists = localitems / 'custom_whitelists.yaml' + localitems = tmp_path_factory.mktemp("localitems") + custom_whitelists = localitems / "custom_whitelists.yaml" - with open(custom_whitelists, 'w') as f: + with open(custom_whitelists, "w") as f: f.write('{"whitelist":{"reason":"Good IPs","ip":["1.2.3.4"]}}') - volumes = { - custom_whitelists: {'bind': '/etc/crowdsec/parsers/s02-enrich/custom_whitelists.yaml'} - } + volumes = {custom_whitelists: {"bind": "/etc/crowdsec/parsers/s02-enrich/custom_whitelists.yaml"}} with crowdsec(flavor=flavor, volumes=volumes) as cs: - cs.wait_for_log([ - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) + cs.wait_for_log(["*Starting processing data*"]) + cs.wait_for_http(8080, "/health", 
want_status=HTTPStatus.OK) # the parser should be enabled - res = cs.cont.exec_run('cscli parsers list -o json') + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['parsers']} - assert items['custom_whitelists.yaml']['status'] == 'enabled,local' + items = {c["name"]: c for c in j["parsers"]} + assert items["custom_whitelists.yaml"]["status"] == "enabled,local" # regression test: the linux collection should not be tainted # (the parsers were not copied from /staging when using "cp -an" with local parsers) - res = cs.cont.exec_run('cscli collections inspect crowdsecurity/linux -o json') + res = cs.cont.exec_run("cscli collections inspect crowdsecurity/linux -o json") assert res.exit_code == 0 j = json.loads(res.output) # crowdsec <= 1.5.5 omits a "tainted" when it's false - assert j.get('tainted', False) is False + assert j.get("tainted", False) is False diff --git a/docker/test/tests/test_metrics.py b/docker/test/tests/test_metrics.py index 8a6d5318156..bd41bdcea41 100644 --- a/docker/test/tests/test_metrics.py +++ b/docker/test/tests/test_metrics.py @@ -12,12 +12,12 @@ def test_metrics_port_default(crowdsec, flavor): metrics_port = 6060 with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - cs.wait_for_http(metrics_port, '/metrics', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://127.0.0.1:{metrics_port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + cs.wait_for_http(metrics_port, "/metrics", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://127.0.0.1:{metrics_port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 
stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." in stdout @@ -25,15 +25,15 @@ def test_metrics_port_default(crowdsec, flavor): def test_metrics_port_default_ipv6(crowdsec, flavor): """Test metrics (ipv6)""" - pytest.skip('ipv6 not supported yet') + pytest.skip("ipv6 not supported yet") port = 6060 with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://[::1]:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://[::1]:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." in stdout @@ -42,16 +42,14 @@ def test_metrics_port_default_ipv6(crowdsec, flavor): def test_metrics_port(crowdsec, flavor): """Test metrics (custom METRICS_PORT)""" port = 7070 - env = { - "METRICS_PORT": port - } + env = {"METRICS_PORT": port} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://127.0.0.1:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://127.0.0.1:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." 
in stdout @@ -59,18 +57,16 @@ def test_metrics_port(crowdsec, flavor): def test_metrics_port_ipv6(crowdsec, flavor): """Test metrics (custom METRICS_PORT, ipv6)""" - pytest.skip('ipv6 not supported yet') + pytest.skip("ipv6 not supported yet") port = 7070 - env = { - "METRICS_PORT": port - } + env = {"METRICS_PORT": port} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://[::1]:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://[::1]:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." 
in stdout diff --git a/docker/test/tests/test_nolapi.py b/docker/test/tests/test_nolapi.py index 6edb354fe75..e5dbc3c2624 100644 --- a/docker/test/tests/test_nolapi.py +++ b/docker/test/tests/test_nolapi.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker @@ -10,7 +9,7 @@ def test_no_agent(crowdsec, flavor): """Test DISABLE_LOCAL_API=true (failing stand-alone container)""" env = { - 'DISABLE_LOCAL_API': 'true', + "DISABLE_LOCAL_API": "true", } # if an alternative lapi url is not defined, the container should exit diff --git a/docker/test/tests/test_simple.py b/docker/test/tests/test_simple.py index 951d8be4b24..b5c8425b371 100644 --- a/docker/test/tests/test_simple.py +++ b/docker/test/tests/test_simple.py @@ -13,4 +13,4 @@ def test_crowdsec(crowdsec, flavor): matcher.fnmatch_lines(["*Starting processing data*"]) res = cs.cont.exec_run('sh -c "echo $CI_TESTING"') assert res.exit_code == 0 - assert 'true' == res.output.decode().strip() + assert "true" == res.output.decode().strip() diff --git a/docker/test/tests/test_tls.py b/docker/test/tests/test_tls.py index d2f512fcbc1..220738a9f07 100644 --- a/docker/test/tests/test_tls.py +++ b/docker/test/tests/test_tls.py @@ -6,9 +6,8 @@ import uuid -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker @@ -17,8 +16,8 @@ def test_missing_key_file(crowdsec, flavor): """Test that cscli and agent can communicate to LAPI with TLS""" env = { - 'CERT_FILE': '/etc/ssl/crowdsec/cert.pem', - 'USE_TLS': 'true', + "CERT_FILE": "/etc/ssl/crowdsec/cert.pem", + "USE_TLS": "true", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: @@ -29,8 +28,8 @@ def test_missing_cert_file(crowdsec, flavor): """Test that cscli and agent can communicate to LAPI with TLS""" env = { - 'KEY_FILE': '/etc/ssl/crowdsec/cert.key', - 'USE_TLS': 'true', + "KEY_FILE": 
"/etc/ssl/crowdsec/cert.key", + "USE_TLS": "true", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: @@ -41,14 +40,14 @@ def test_tls_missing_ca(crowdsec, flavor, certs_dir): """Missing CA cert, unknown authority""" env = { - 'CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes, wait_status=Status.EXITED) as cs: @@ -59,22 +58,22 @@ def test_tls_legacy_var(crowdsec, flavor, certs_dir): """Test server-only certificate, legacy variables""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in 
stdout @@ -84,24 +83,24 @@ def test_tls_mutual_monolith(crowdsec, flavor, certs_dir): """Server and client certificates, on the same container""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -111,26 +110,27 @@ def test_tls_lapi_var(crowdsec, flavor, certs_dir): """Test server-only certificate, lapi variables""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - 
certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout + # TODO: bad lapi hostname # the cert is valid, but has a CN that doesn't match the hostname # we must set insecure_skip_verify to true to use it @@ -140,50 +140,49 @@ def test_tls_split_lapi_agent(crowdsec, flavor, certs_dir): """Server-only certificate, split containers""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'LOCAL_API_URL': 'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "AGENT_USERNAME": 
"testagent", + "AGENT_PASSWORD": "testpassword", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -193,48 +192,47 @@ def test_tls_mutual_split_lapi_agent(crowdsec, flavor, certs_dir): """Server and client certificates, split containers""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'LOCAL_API_URL': 
'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = 
lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -244,78 +242,78 @@ def test_tls_client_ou(crowdsec, flavor, certs_dir): """Check behavior of client certificate vs AGENTS_ALLOWED_OU""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'LOCAL_API_URL': 'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname, agent_ou='custom-client-ou'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname, agent_ou="custom-client-ou"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) 
Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: - lapi.wait_for_log([ - "*client certificate OU ?custom-client-ou? doesn't match expected OU ?agent-ou?*", - ]) + lapi.wait_for_log( + [ + "*client certificate OU ?custom-client-ou? doesn't match expected OU ?agent-ou?*", + ] + ) - lapi_env['AGENTS_ALLOWED_OU'] = 'custom-client-ou' + lapi_env["AGENTS_ALLOWED_OU"] = "custom-client-ou" # change container names to avoid conflict # recreate certificates because they need the new hostname rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) - agent_env['LOCAL_API_URL'] = f'https://{lapiname}:8080' + agent_env["LOCAL_API_URL"] = f"https://{lapiname}:8080" volumes = { - certs_dir(lapi_hostname=lapiname, agent_ou='custom-client-ou'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname, agent_ou="custom-client-ou"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting 
processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_version.py b/docker/test/tests/test_version.py index c152d2e4e6c..baac61c36ab 100644 --- a/docker/test/tests/test_version.py +++ b/docker/test/tests/test_version.py @@ -10,9 +10,9 @@ def test_version_docker_platform(crowdsec, flavor): for waiter in cs.log_waiters(): with waiter as matcher: matcher.fnmatch_lines(["*Starting processing data*"]) - res = cs.cont.exec_run('cscli version') + res = cs.cont.exec_run("cscli version") assert res.exit_code == 0 - assert 'Platform: docker' in res.output.decode() - res = cs.cont.exec_run('crowdsec -version') + assert "Platform: docker" in res.output.decode() + res = cs.cont.exec_run("crowdsec -version") assert res.exit_code == 0 - assert 'Platform: docker' in res.output.decode() + assert "Platform: docker" in res.output.decode() diff --git a/docker/test/tests/test_wal.py b/docker/test/tests/test_wal.py index e3edbcaf385..e1fe3d260be 100644 --- a/docker/test/tests/test_wal.py +++ b/docker/test/tests/test_wal.py @@ -11,8 +11,8 @@ def test_use_wal_default(crowdsec, flavor): """Test USE_WAL default""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "false" in 
stdout @@ -21,12 +21,12 @@ def test_use_wal_default(crowdsec, flavor): def test_use_wal_true(crowdsec, flavor): """Test USE_WAL=true""" env = { - 'USE_WAL': 'true', + "USE_WAL": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "true" in stdout @@ -35,12 +35,12 @@ def test_use_wal_true(crowdsec, flavor): def test_use_wal_false(crowdsec, flavor): """Test USE_WAL=false""" env = { - 'USE_WAL': 'false', + "USE_WAL": "false", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "false" in stdout diff --git a/docker/test/uv.lock b/docker/test/uv.lock new file mode 100644 index 00000000000..d8cc42c89ab --- /dev/null +++ b/docker/test/uv.lock @@ -0,0 +1,587 @@ +version = 1 +requires-python = ">=3.12" + +[[package]] +name = "asttokens" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, +] + +[[package]] +name = "certifi" +version = "2024.12.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927 }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 }, + { url = 
"https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", 
size = 488736 }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, + { url = 
"https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } +wheels = 
[ + { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 }, + { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 }, + { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 }, + { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 }, + { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 }, + { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 }, + { url = 
"https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 }, + { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 }, + { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 }, + { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 }, + { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, + { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, + { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, + { url = 
"https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, + { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, + { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, + { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, + { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, + { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, + { url = 
"https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, + { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, + { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, + { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, + { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, + { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, + { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, + { url = 
"https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "crowdsec-docker-tests" +version = "0.1.0" +source = { virtual = "." } +dependencies = [ + { name = "pytest" }, + { name = "pytest-cs" }, + { name = "pytest-dotenv" }, + { name = "pytest-xdist" }, +] + +[package.dev-dependencies] +dev = [ + { name = "ipdb" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "pytest", specifier = ">=8.3.4" }, + { name = "pytest-cs", git = "https://github.com/crowdsecurity/pytest-cs" }, + { name = "pytest-dotenv", specifier = ">=0.5.2" }, + { name = "pytest-xdist", specifier = ">=3.6.1" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "ipdb", specifier = ">=0.13.13" }, + { name = "ruff", specifier = ">=0.9.3" }, +] + +[[package]] +name = "cryptography" +version = "44.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/4c/45dfa6829acffa344e3967d6006ee4ae8be57af746ae2eba1c431949b32c/cryptography-44.0.0.tar.gz", hash = 
"sha256:cd4e834f340b4293430701e772ec543b0fbe6c2dea510a5286fe0acabe153a02", size = 710657 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/09/8cc67f9b84730ad330b3b72cf867150744bf07ff113cda21a15a1c6d2c7c/cryptography-44.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:84111ad4ff3f6253820e6d3e58be2cc2a00adb29335d4cacb5ab4d4d34f2a123", size = 6541833 }, + { url = "https://files.pythonhosted.org/packages/7e/5b/3759e30a103144e29632e7cb72aec28cedc79e514b2ea8896bb17163c19b/cryptography-44.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15492a11f9e1b62ba9d73c210e2416724633167de94607ec6069ef724fad092", size = 3922710 }, + { url = "https://files.pythonhosted.org/packages/5f/58/3b14bf39f1a0cfd679e753e8647ada56cddbf5acebffe7db90e184c76168/cryptography-44.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831c3c4d0774e488fdc83a1923b49b9957d33287de923d58ebd3cec47a0ae43f", size = 4137546 }, + { url = "https://files.pythonhosted.org/packages/98/65/13d9e76ca19b0ba5603d71ac8424b5694415b348e719db277b5edc985ff5/cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb", size = 3915420 }, + { url = "https://files.pythonhosted.org/packages/b1/07/40fe09ce96b91fc9276a9ad272832ead0fddedcba87f1190372af8e3039c/cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b", size = 4154498 }, + { url = "https://files.pythonhosted.org/packages/75/ea/af65619c800ec0a7e4034207aec543acdf248d9bffba0533342d1bd435e1/cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543", size = 3932569 }, + { url = "https://files.pythonhosted.org/packages/c7/af/d1deb0c04d59612e3d5e54203159e284d3e7a6921e565bb0eeb6269bdd8a/cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e", size = 4016721 }, + { url = "https://files.pythonhosted.org/packages/bd/69/7ca326c55698d0688db867795134bdfac87136b80ef373aaa42b225d6dd5/cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e", size = 4240915 }, + { url = "https://files.pythonhosted.org/packages/ef/d4/cae11bf68c0f981e0413906c6dd03ae7fa864347ed5fac40021df1ef467c/cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053", size = 2757925 }, + { url = "https://files.pythonhosted.org/packages/64/b1/50d7739254d2002acae64eed4fc43b24ac0cc44bf0a0d388d1ca06ec5bb1/cryptography-44.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:abc998e0c0eee3c8a1904221d3f67dcfa76422b23620173e28c11d3e626c21bd", size = 3202055 }, + { url = "https://files.pythonhosted.org/packages/11/18/61e52a3d28fc1514a43b0ac291177acd1b4de00e9301aaf7ef867076ff8a/cryptography-44.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:660cb7312a08bc38be15b696462fa7cc7cd85c3ed9c576e81f4dc4d8b2b31591", size = 6542801 }, + { url = "https://files.pythonhosted.org/packages/1a/07/5f165b6c65696ef75601b781a280fc3b33f1e0cd6aa5a92d9fb96c410e97/cryptography-44.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1923cb251c04be85eec9fda837661c67c1049063305d6be5721643c22dd4e2b7", size = 3922613 }, + { url = "https://files.pythonhosted.org/packages/28/34/6b3ac1d80fc174812486561cf25194338151780f27e438526f9c64e16869/cryptography-44.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404fdc66ee5f83a1388be54300ae978b2efd538018de18556dde92575e05defc", size = 4137925 }, + { url = "https://files.pythonhosted.org/packages/d0/c7/c656eb08fd22255d21bc3129625ed9cd5ee305f33752ef2278711b3fa98b/cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = 
"sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289", size = 3915417 }, + { url = "https://files.pythonhosted.org/packages/ef/82/72403624f197af0db6bac4e58153bc9ac0e6020e57234115db9596eee85d/cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7", size = 4155160 }, + { url = "https://files.pythonhosted.org/packages/a2/cd/2f3c440913d4329ade49b146d74f2e9766422e1732613f57097fea61f344/cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c", size = 3932331 }, + { url = "https://files.pythonhosted.org/packages/7f/df/8be88797f0a1cca6e255189a57bb49237402b1880d6e8721690c5603ac23/cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64", size = 4017372 }, + { url = "https://files.pythonhosted.org/packages/af/36/5ccc376f025a834e72b8e52e18746b927f34e4520487098e283a719c205e/cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285", size = 4239657 }, + { url = "https://files.pythonhosted.org/packages/46/b0/f4f7d0d0bcfbc8dd6296c1449be326d04217c57afb8b2594f017eed95533/cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417", size = 2758672 }, + { url = "https://files.pythonhosted.org/packages/97/9b/443270b9210f13f6ef240eff73fd32e02d381e7103969dc66ce8e89ee901/cryptography-44.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:708ee5f1bafe76d041b53a4f95eb28cdeb8d18da17e597d46d7833ee59b97ede", size = 3202071 }, +] + +[[package]] +name = "decorator" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = 
"sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, +] + +[[package]] +name = "docker" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774 }, +] + +[[package]] +name = "execnet" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612 }, +] + +[[package]] +name = "executing" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = 
"sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "ipdb" +version = "0.13.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "decorator" }, + { name = "ipython" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/1b/7e07e7b752017f7693a0f4d41c13e5ca29ce8cbcfdcc1fd6c4ad8c0a27a0/ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726", size = 17042 } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/4c/b075da0092003d9a55cf2ecc1cae9384a1ca4f650d51b00fc59875fe76f6/ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", size = 12130 }, +] + +[[package]] +name = "ipython" +version = "8.31.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/35/6f90fdddff7a08b7b715fccbd2427b5212c9525cd043d26fdc45bee0708d/ipython-8.31.0.tar.gz", hash = "sha256:b6a2274606bec6166405ff05e54932ed6e5cfecaca1fc05f2cacde7bb074d70b", size = 5501011 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/60/d0feb6b6d9fe4ab89fe8fe5b47cbf6cd936bfd9f1e7ffa9d0015425aeed6/ipython-8.31.0-py3-none-any.whl", hash = "sha256:46ec58f8d3d076a61d128fe517a51eb730e3aaf0c184ea8c17d16e366660c6a6", size = 821583 }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.50" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816 }, +] + +[[package]] +name = "psutil" +version = "6.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, + { url = "https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, + { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, + { url = "https://files.pythonhosted.org/packages/9c/39/0f88a830a1c8a3aba27fededc642da37613c57cbff143412e3536f89784f/psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160", size = 287477 }, + { url = "https://files.pythonhosted.org/packages/47/da/99f4345d4ddf2845cb5b5bd0d93d554e84542d116934fde07a0c50bd4e9f/psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3", size = 289017 }, + { url = "https://files.pythonhosted.org/packages/38/53/bd755c2896f4461fd4f36fa6a6dcb66a88a9e4b9fd4e5b66a77cf9d4a584/psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53", size = 250602 }, + { url = 
"https://files.pythonhosted.org/packages/7b/d7/7831438e6c3ebbfa6e01a927127a6cb42ad3ab844247f3c5b96bea25d73d/psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649", size = 254444 }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "pytest-cs" +version = "0.7.20" +source = { git = "https://github.com/crowdsecurity/pytest-cs#73380b837a80337f361414bebbaf4b914713c4ae" } +dependencies = [ + { name = "docker" }, + { name = "psutil" }, + { name = "pytest" }, + { name = "pytest-datadir" }, + { name = "pytest-dotenv" }, + { name = "pyyaml" }, + { name = "requests" }, + { 
name = "trustme" }, +] + +[[package]] +name = "pytest-datadir" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/97/a93900d82635aa3f419c3cd2059b4de7d7fe44e415eaf00c298854582dcc/pytest-datadir-1.5.0.tar.gz", hash = "sha256:1617ed92f9afda0c877e4eac91904b5f779d24ba8f5e438752e3ae39d8d2ee3f", size = 8821 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/90/96b9474cddda5ef9e10e6f1871c0fadfa153b605e0e749ba30437bfb62a0/pytest_datadir-1.5.0-py3-none-any.whl", hash = "sha256:34adf361bcc7b37961bbc1dfa8d25a4829e778bab461703c38a5c50ca9c36dc8", size = 5095 }, +] + +[[package]] +name = "pytest-dotenv" +version = "0.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cd/b0/cafee9c627c1bae228eb07c9977f679b3a7cb111b488307ab9594ba9e4da/pytest-dotenv-0.5.2.tar.gz", hash = "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732", size = 3782 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/da/9da67c67b3d0963160e3d2cbc7c38b6fae342670cc8e6d5936644b2cf944/pytest_dotenv-0.5.2-py3-none-any.whl", hash = "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f", size = 3993 }, +] + +[[package]] +name = "pytest-xdist" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/c4/3c310a19bc1f1e9ef50075582652673ef2bfc8cd62afef9585683821902f/pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d", size = 84060 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/82/1d96bf03ee4c0fdc3c0cbe61470070e659ca78dc0086fb88b66c185e2449/pytest_xdist-3.6.1-py3-none-any.whl", hash 
= "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7", size = 46108 }, +] + +[[package]] +name = "python-dotenv" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 }, +] + +[[package]] +name = "pywin32" +version = "308" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size = 5939729 }, + { url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 }, + { url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 }, + { url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 }, + { url = 
"https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 }, + { url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = 
"https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = 
"https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "ruff" +version = "0.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/7f/60fda2eec81f23f8aa7cbbfdf6ec2ca11eb11c273827933fb2541c2ce9d8/ruff-0.9.3.tar.gz", hash = "sha256:8293f89985a090ebc3ed1064df31f3b4b56320cdfcec8b60d3295bddb955c22a", size = 3586740 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/77/4fb790596d5d52c87fd55b7160c557c400e90f6116a56d82d76e95d9374a/ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624", size = 11656815 }, + { url = "https://files.pythonhosted.org/packages/a2/a8/3338ecb97573eafe74505f28431df3842c1933c5f8eae615427c1de32858/ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c", size = 11594821 }, + { url = "https://files.pythonhosted.org/packages/8e/89/320223c3421962762531a6b2dd58579b858ca9916fb2674874df5e97d628/ruff-0.9.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c59ab92f8e92d6725b7ded9d4a31be3ef42688a115c6d3da9457a5bda140e2b4", size = 11040475 }, + { url = "https://files.pythonhosted.org/packages/b2/bd/1d775eac5e51409535804a3a888a9623e87a8f4b53e2491580858a083692/ruff-0.9.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:2dc153c25e715be41bb228bc651c1e9b1a88d5c6e5ed0194fa0dfea02b026439", size = 11856207 }, + { url = "https://files.pythonhosted.org/packages/7f/c6/3e14e09be29587393d188454064a4aa85174910d16644051a80444e4fd88/ruff-0.9.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:646909a1e25e0dc28fbc529eab8eb7bb583079628e8cbe738192853dbbe43af5", size = 11420460 }, + { url = "https://files.pythonhosted.org/packages/ef/42/b7ca38ffd568ae9b128a2fa76353e9a9a3c80ef19746408d4ce99217ecc1/ruff-0.9.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a5a46e09355695fbdbb30ed9889d6cf1c61b77b700a9fafc21b41f097bfbba4", size = 12605472 }, + { url = "https://files.pythonhosted.org/packages/a6/a1/3167023f23e3530fde899497ccfe239e4523854cb874458ac082992d206c/ruff-0.9.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c4bb09d2bbb394e3730d0918c00276e79b2de70ec2a5231cd4ebb51a57df9ba1", size = 13243123 }, + { url = "https://files.pythonhosted.org/packages/d0/b4/3c600758e320f5bf7de16858502e849f4216cb0151f819fa0d1154874802/ruff-0.9.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96a87ec31dc1044d8c2da2ebbed1c456d9b561e7d087734336518181b26b3aa5", size = 12744650 }, + { url = "https://files.pythonhosted.org/packages/be/38/266fbcbb3d0088862c9bafa8b1b99486691d2945a90b9a7316336a0d9a1b/ruff-0.9.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb7554aca6f842645022fe2d301c264e6925baa708b392867b7a62645304df4", size = 14458585 }, + { url = "https://files.pythonhosted.org/packages/63/a6/47fd0e96990ee9b7a4abda62de26d291bd3f7647218d05b7d6d38af47c30/ruff-0.9.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabc332b7075a914ecea912cd1f3d4370489c8018f2c945a30bcc934e3bc06a6", size = 12419624 }, + { url = "https://files.pythonhosted.org/packages/84/5d/de0b7652e09f7dda49e1a3825a164a65f4998175b6486603c7601279baad/ruff-0.9.3-py3-none-musllinux_1_2_aarch64.whl", hash = 
"sha256:33866c3cc2a575cbd546f2cd02bdd466fed65118e4365ee538a3deffd6fcb730", size = 11843238 }, + { url = "https://files.pythonhosted.org/packages/9e/be/3f341ceb1c62b565ec1fb6fd2139cc40b60ae6eff4b6fb8f94b1bb37c7a9/ruff-0.9.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:006e5de2621304c8810bcd2ee101587712fa93b4f955ed0985907a36c427e0c2", size = 11484012 }, + { url = "https://files.pythonhosted.org/packages/a3/c8/ff8acbd33addc7e797e702cf00bfde352ab469723720c5607b964491d5cf/ruff-0.9.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ba6eea4459dbd6b1be4e6bfc766079fb9b8dd2e5a35aff6baee4d9b1514ea519", size = 12038494 }, + { url = "https://files.pythonhosted.org/packages/73/b1/8d9a2c0efbbabe848b55f877bc10c5001a37ab10aca13c711431673414e5/ruff-0.9.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90230a6b8055ad47d3325e9ee8f8a9ae7e273078a66401ac66df68943ced029b", size = 12473639 }, + { url = "https://files.pythonhosted.org/packages/cb/44/a673647105b1ba6da9824a928634fe23186ab19f9d526d7bdf278cd27bc3/ruff-0.9.3-py3-none-win32.whl", hash = "sha256:eabe5eb2c19a42f4808c03b82bd313fc84d4e395133fb3fc1b1516170a31213c", size = 9834353 }, + { url = "https://files.pythonhosted.org/packages/c3/01/65cadb59bf8d4fbe33d1a750103e6883d9ef302f60c28b73b773092fbde5/ruff-0.9.3-py3-none-win_amd64.whl", hash = "sha256:040ceb7f20791dfa0e78b4230ee9dce23da3b64dd5848e40e3bf3ab76468dcf4", size = 10821444 }, + { url = "https://files.pythonhosted.org/packages/69/cb/b3fe58a136a27d981911cba2f18e4b29f15010623b79f0f2510fd0d31fd3/ruff-0.9.3-py3-none-win_arm64.whl", hash = "sha256:800d773f6d4d33b0a3c60e2c6ae8f4c202ea2de056365acfa519aa48acf28e0b", size = 10038168 }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, +] + +[[package]] +name = "trustme" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/c5/931476f4cf1cd9e736f32651005078061a50dc164a2569fb874e00eb2786/trustme-1.2.1.tar.gz", hash = "sha256:6528ba2bbc7f2db41f33825c8dd13e3e3eb9d334ba0f909713c8c3139f4ae47f", size = 26844 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/f3/c34dbabf6da5eda56fe923226769d40e11806952cd7f46655dd06e10f018/trustme-1.2.1-py3-none-any.whl", hash = "sha256:d768e5fc57c86dfc5ec9365102e9b092541cd6954b35d8c1eea01a84f35a762a", size = 16530 }, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, +] From 5178a919c8c8e99550d43b454d6c005f7cc6262c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 29 Jan 2025 14:10:49 +0100 Subject: [PATCH 412/581] CI: setup go from version in go.mod (#3432) this should fix a caching issue with github --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .github/workflows/publish-tarball-release.yml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index e631c3ebc71..42f1252c8b9 100644 --- a/.github/workflows/bats-hub.yml 
+++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index a94e28b1f97..394b85427fe 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index a1054463341..25c302da787 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index ac685bf4e87..a5b2758b6b0 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 07e29071e05..5f26b0fccbf 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4128cb435f9..cd37c7afaa9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - 
name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 44abbbe24a3..68cb9715b18 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build run: | diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 649c47ebd26..462c5ac39d2 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -125,7 +125,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Run "go generate" and check for changes run: | diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 6a41c3fba53..18541f86e41 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build the binaries run: | From 172d6c6dc6a5a5484fa1de0938ab2be56ae7dd44 Mon Sep 17 00:00:00 2001 From: Zakhar Bessarab Date: Wed, 29 Jan 2025 19:59:03 +0400 Subject: [PATCH 413/581] acquisition/victorialogs: add new datasource (#3310) * acquisition/victorialogs: add new datasource Data source supports: - cat mode with automatic adjustment of poll interval (same as one at Loki datasource) - tail mode by using tailing API --- .github/workflows/go-tests.yml | 11 + .golangci.yml | 6 + Makefile | 1 + .../victorialogs/internal/vlclient/types.go | 12 + .../internal/vlclient/vl_client.go | 405 +++++++++++++++ .../modules/victorialogs/victorialogs.go | 369 ++++++++++++++ 
.../modules/victorialogs/victorialogs_test.go | 479 ++++++++++++++++++ pkg/acquisition/victorialogs.go | 12 + pkg/cwversion/component/component.go | 29 +- 9 files changed, 1310 insertions(+), 14 deletions(-) create mode 100644 pkg/acquisition/modules/victorialogs/internal/vlclient/types.go create mode 100644 pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go create mode 100644 pkg/acquisition/modules/victorialogs/victorialogs.go create mode 100644 pkg/acquisition/modules/victorialogs/victorialogs_test.go create mode 100644 pkg/acquisition/victorialogs.go diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 462c5ac39d2..5a8148c473e 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -114,6 +114,17 @@ jobs: --health-retries 5 --health-start-period 30s + victorialogs: + image: victoriametrics/victoria-logs:v1.5.0-victorialogs + ports: + - "9428:9428" + options: >- + --name=victorialogs1 + --health-cmd "wget -q -O - http://0.0.0.0:9428" + --health-interval 30s + --health-timeout 10s + --health-retries 5 + --health-start-period 30s steps: - name: Check out CrowdSec repository diff --git a/.golangci.yml b/.golangci.yml index fe77aec2d3c..b3be5adb687 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -97,6 +97,7 @@ linters-settings: - "!**/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go" - "!**/pkg/acquisition/modules/loki/loki.go" - "!**/pkg/acquisition/modules/loki/timestamp_test.go" + - "!**/pkg/acquisition/modules/victorialogs/victorialogs.go" - "!**/pkg/acquisition/modules/s3/s3.go" - "!**/pkg/acquisition/modules/syslog/syslog.go" - "!**/pkg/acquisition/modules/wineventlog/wineventlog_windows.go" @@ -398,6 +399,11 @@ issues: path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + - linters: + - revive + path: 
pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go + text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + - linters: - revive path: cmd/crowdsec-cli/copyfile.go diff --git a/Makefile b/Makefile index f8ae66e1cb6..93387488001 100644 --- a/Makefile +++ b/Makefile @@ -138,6 +138,7 @@ COMPONENTS := \ datasource_journalctl \ datasource_kinesis \ datasource_loki \ + datasource_victorialogs \ datasource_s3 \ datasource_syslog \ datasource_wineventlog \ diff --git a/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go b/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go new file mode 100644 index 00000000000..167a84e41b1 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go @@ -0,0 +1,12 @@ +package vlclient + +import ( + "time" +) + +// Log represents a VictoriaLogs log line +// See: https://docs.victoriametrics.com/victorialogs/querying/#querying-logs +type Log struct { + Message string `json:"_msg"` + Time time.Time `json:"_time"` +} diff --git a/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go b/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go new file mode 100644 index 00000000000..402754a1307 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go @@ -0,0 +1,405 @@ +package vlclient + +import ( + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" + + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" +) + +type VLClient struct { + Logger *log.Entry + + config Config + t *tomb.Tomb + failStart time.Time + currentTickerInterval time.Duration + requestHeaders map[string]string + + client *http.Client +} + +type Config struct { + URL string + Prefix string + Query string + Headers map[string]string + + 
Username string + Password string + + Since time.Duration + + FailMaxDuration time.Duration + + Limit int +} + +func updateURI(uri string, newStart time.Time) string { + u, _ := url.Parse(uri) + queryParams := u.Query() + + if !newStart.IsZero() { + // +1 the last timestamp to avoid getting the same result again. + updatedStart := newStart.Add(1 * time.Nanosecond) + queryParams.Set("start", updatedStart.Format(time.RFC3339Nano)) + } + + u.RawQuery = queryParams.Encode() + + return u.String() +} + +func (lc *VLClient) SetTomb(t *tomb.Tomb) { + lc.t = t +} + +func (lc *VLClient) shouldRetry() bool { + if lc.failStart.IsZero() { + lc.Logger.Warningf("VictoriaLogs is not available, will retry for %s", lc.config.FailMaxDuration) + lc.failStart = time.Now() + + return true + } + + if time.Since(lc.failStart) > lc.config.FailMaxDuration { + lc.Logger.Errorf("VictoriaLogs didn't manage to recover after %s, giving up", lc.config.FailMaxDuration) + return false + } + + return true +} + +func (lc *VLClient) increaseTicker(ticker *time.Ticker) { + maxTicker := 10 * time.Second + if lc.currentTickerInterval < maxTicker { + lc.currentTickerInterval *= 2 + if lc.currentTickerInterval > maxTicker { + lc.currentTickerInterval = maxTicker + } + + ticker.Reset(lc.currentTickerInterval) + } +} + +func (lc *VLClient) decreaseTicker(ticker *time.Ticker) { + minTicker := 100 * time.Millisecond + if lc.currentTickerInterval != minTicker { + lc.currentTickerInterval = minTicker + ticker.Reset(lc.currentTickerInterval) + } +} + +func (lc *VLClient) doQueryRange(ctx context.Context, uri string, c chan *Log, infinite bool) error { + lc.currentTickerInterval = 100 * time.Millisecond + ticker := time.NewTicker(lc.currentTickerInterval) + + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-lc.t.Dying(): + return lc.t.Err() + case <-ticker.C: + resp, err := lc.Get(ctx, uri) + if err != nil { + if ok := lc.shouldRetry(); !ok { + return fmt.Errorf("error 
querying range: %w", err) + } + + lc.increaseTicker(ticker) + + continue + } + + if resp.StatusCode != http.StatusOK { + lc.Logger.Warnf("bad HTTP response code for query range: %d", resp.StatusCode) + body, _ := io.ReadAll(resp.Body) + resp.Body.Close() + + if ok := lc.shouldRetry(); !ok { + return fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err) + } + + lc.increaseTicker(ticker) + + continue + } + + n, largestTime, err := lc.readResponse(ctx, resp, c) + if err != nil { + return err + } + + if !infinite && n < lc.config.Limit { + lc.Logger.Infof("Got less than %d results (%d), stopping", lc.config.Limit, n) + close(c) + + return nil + } + + lc.Logger.Debugf("(timer:%v) %d results (uri:%s)", lc.currentTickerInterval, n, uri) + + if infinite { + if n > 0 { + // as long as we get results, we keep lowest ticker + lc.decreaseTicker(ticker) + } else { + lc.increaseTicker(ticker) + } + } + + uri = updateURI(uri, largestTime) + } + } +} + +// Parses response from body in JSON-LD format and sends results to the channel +func (lc *VLClient) readResponse(ctx context.Context, resp *http.Response, c chan *Log) (int, time.Time, error) { + br := bufio.NewReaderSize(resp.Body, 64*1024) + + var ( + finishedReading bool + n int + latestTs time.Time + ) + + for !finishedReading { + select { + case <-ctx.Done(): + return n, latestTs, nil + default: + } + + b, err := br.ReadBytes('\n') + if err != nil { + if errors.Is(err, bufio.ErrBufferFull) { + lc.Logger.Infof("skipping line number #%d: line too long", n) + continue + } + + if errors.Is(err, io.EOF) { + // b can be != nil when EOF is returned, so we need to process it + finishedReading = true + } else if errors.Is(err, context.Canceled) { + return n, latestTs, nil + } else { + return n, latestTs, fmt.Errorf("cannot read line in response: %w", err) + } + } + + if len(b) == 0 { + continue + } + + b = bytes.Trim(b, "\n") + + var logLine Log + + if err := json.Unmarshal(b, &logLine); err != nil { + 
lc.Logger.Warnf("cannot unmarshal line in response: %s", string(b)) + continue + } + + n++ + + lc.Logger.Tracef("Got response: %+v", logLine) + c <- &logLine + + if logLine.Time.After(latestTs) { + latestTs = logLine.Time + } + } + + return n, latestTs, nil +} + +func (lc *VLClient) getURLFor(endpoint string, params map[string]string) string { + u, err := url.Parse(lc.config.URL) + if err != nil { + return "" + } + + queryParams := u.Query() + + for k, v := range params { + queryParams.Set(k, v) + } + + u.RawQuery = queryParams.Encode() + + u.Path, err = url.JoinPath(lc.config.Prefix, u.Path, endpoint) + if err != nil { + return "" + } + + return u.String() +} + +func (lc *VLClient) Ready(ctx context.Context) error { + tick := time.NewTicker(500 * time.Millisecond) + u := lc.getURLFor("", nil) + + for { + select { + case <-ctx.Done(): + tick.Stop() + return ctx.Err() + case <-lc.t.Dying(): + tick.Stop() + return lc.t.Err() + case <-tick.C: + lc.Logger.Debug("Checking if VictoriaLogs is ready") + + resp, err := lc.Get(ctx, u) + if err != nil { + lc.Logger.Warnf("Error checking if VictoriaLogs is ready: %s", err) + continue + } + + _ = resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + lc.Logger.Debugf("VictoriaLogs is not ready, status code: %d", resp.StatusCode) + continue + } + + lc.Logger.Info("VictoriaLogs is ready") + + return nil + } + } +} + +// Tail live-tailing for logs +// See: https://docs.victoriametrics.com/victorialogs/querying/#live-tailing +func (lc *VLClient) Tail(ctx context.Context) (chan *Log, error) { + t := time.Now().Add(-1 * lc.config.Since) + u := lc.getURLFor("select/logsql/tail", map[string]string{ + "limit": strconv.Itoa(lc.config.Limit), + "start": t.Format(time.RFC3339Nano), + "query": lc.config.Query, + }) + + lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, t) + lc.Logger.Infof("Connecting to %s", u) + + var ( + resp *http.Response + err error + ) + + for { + resp, err = lc.Get(ctx, u) + lc.Logger.Tracef("Tail request 
done: %v | %s", resp, err) + + if err != nil { + if errors.Is(err, context.Canceled) { + return nil, nil + } + + if ok := lc.shouldRetry(); !ok { + return nil, fmt.Errorf("error tailing logs: %w", err) + } + + continue + } + + break + } + + if resp.StatusCode != http.StatusOK { + lc.Logger.Warnf("bad HTTP response code for tail request: %d", resp.StatusCode) + body, _ := io.ReadAll(resp.Body) + resp.Body.Close() + + if ok := lc.shouldRetry(); !ok { + return nil, fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err) + } + } + + responseChan := make(chan *Log) + + lc.t.Go(func() error { + _, _, err = lc.readResponse(ctx, resp, responseChan) + if err != nil { + return fmt.Errorf("error while reading tail response: %w", err) + } + + return nil + }) + + return responseChan, nil +} + +// QueryRange queries the logs +// See: https://docs.victoriametrics.com/victorialogs/querying/#querying-logs +func (lc *VLClient) QueryRange(ctx context.Context, infinite bool) chan *Log { + t := time.Now().Add(-1 * lc.config.Since) + u := lc.getURLFor("select/logsql/query", map[string]string{ + "query": lc.config.Query, + "start": t.Format(time.RFC3339Nano), + "limit": strconv.Itoa(lc.config.Limit), + }) + + c := make(chan *Log) + + lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, t) + + lc.Logger.Infof("Connecting to %s", u) + lc.t.Go(func() error { + return lc.doQueryRange(ctx, u, c, infinite) + }) + + return c +} + +func (lc *VLClient) Get(ctx context.Context, url string) (*http.Response, error) { + request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + for k, v := range lc.requestHeaders { + request.Header.Add(k, v) + } + + lc.Logger.Debugf("GET %s", url) + + return lc.client.Do(request) +} + +func NewVLClient(config Config) *VLClient { + headers := make(map[string]string) + for k, v := range config.Headers { + headers[k] = v + } + + if config.Username != "" || config.Password != "" { + 
headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) + } + + headers["User-Agent"] = useragent.Default() + + return &VLClient{ + Logger: log.WithField("component", "victorialogs-client"), + config: config, + requestHeaders: headers, + client: &http.Client{}, + } +} diff --git a/pkg/acquisition/modules/victorialogs/victorialogs.go b/pkg/acquisition/modules/victorialogs/victorialogs.go new file mode 100644 index 00000000000..c6bb3b320ba --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/victorialogs.go @@ -0,0 +1,369 @@ +package victorialogs + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs/internal/vlclient" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +const ( + defaultLimit int = 100 +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_victorialogssource_hits_total", + Help: "Total lines that were read.", + }, + []string{"source"}) + +type VLAuthConfiguration struct { + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +type VLConfiguration struct { + URL string `yaml:"url"` // VictoriaLogs url + Prefix string `yaml:"prefix"` // VictoriaLogs prefix + Query string `yaml:"query"` // LogsQL query + Limit int `yaml:"limit"` // Limit of logs to read + Since time.Duration `yaml:"since"` + Headers map[string]string `yaml:"headers"` // HTTP headers for talking to VictoriaLogs + WaitForReady time.Duration `yaml:"wait_for_ready"` // Retry interval, default is 10 seconds + Auth VLAuthConfiguration `yaml:"auth"` + MaxFailureDuration time.Duration `yaml:"max_failure_duration"` // Max duration of failure before stopping the source + 
configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type VLSource struct { + metricsLevel int + Config VLConfiguration + + Client *vlclient.VLClient + + logger *log.Entry +} + +func (l *VLSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (l *VLSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (l *VLSource) UnmarshalConfig(yamlConfig []byte) error { + err := yaml.UnmarshalStrict(yamlConfig, &l.Config) + if err != nil { + return fmt.Errorf("cannot parse VictoriaLogs acquisition configuration: %w", err) + } + + if l.Config.Query == "" { + return errors.New("VictoriaLogs query is mandatory") + } + + if l.Config.WaitForReady == 0 { + l.Config.WaitForReady = 10 * time.Second + } + + if l.Config.Mode == "" { + l.Config.Mode = configuration.TAIL_MODE + } + if l.Config.Prefix == "" { + l.Config.Prefix = "/" + } + + if !strings.HasSuffix(l.Config.Prefix, "/") { + l.Config.Prefix += "/" + } + + if l.Config.Limit == 0 { + l.Config.Limit = defaultLimit + } + + if l.Config.Mode == configuration.TAIL_MODE { + l.logger.Infof("Resetting since") + l.Config.Since = 0 + } + + if l.Config.MaxFailureDuration == 0 { + l.Config.MaxFailureDuration = 30 * time.Second + } + + return nil +} + +func (l *VLSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { + l.Config = VLConfiguration{} + l.logger = logger + l.metricsLevel = metricsLevel + err := l.UnmarshalConfig(config) + if err != nil { + return err + } + + l.logger.Infof("Since value: %s", l.Config.Since.String()) + + clientConfig := vlclient.Config{ + URL: l.Config.URL, + Headers: l.Config.Headers, + Limit: l.Config.Limit, + Query: l.Config.Query, + Since: l.Config.Since, + Username: l.Config.Auth.Username, + Password: l.Config.Auth.Password, + FailMaxDuration: l.Config.MaxFailureDuration, + } + + l.Client = vlclient.NewVLClient(clientConfig) + l.Client.Logger = logger.WithFields(log.Fields{"component": 
"victorialogs-client", "source": l.Config.URL}) + return nil +} + +func (l *VLSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { + l.logger = logger + l.Config = VLConfiguration{} + l.Config.Mode = configuration.CAT_MODE + l.Config.Labels = labels + l.Config.UniqueId = uuid + + u, err := url.Parse(dsn) + if err != nil { + return fmt.Errorf("while parsing dsn '%s': %w", dsn, err) + } + if u.Scheme != "victorialogs" { + return fmt.Errorf("invalid DSN %s for VictoriaLogs source, must start with victorialogs://", dsn) + } + if u.Host == "" { + return errors.New("empty host") + } + scheme := "http" + + params := u.Query() + if q := params.Get("ssl"); q != "" { + scheme = "https" + } + if q := params.Get("query"); q != "" { + l.Config.Query = q + } + if w := params.Get("wait_for_ready"); w != "" { + l.Config.WaitForReady, err = time.ParseDuration(w) + if err != nil { + return err + } + } else { + l.Config.WaitForReady = 10 * time.Second + } + + if s := params.Get("since"); s != "" { + l.Config.Since, err = time.ParseDuration(s) + if err != nil { + return fmt.Errorf("invalid since in dsn: %w", err) + } + } + + if maxFailureDuration := params.Get("max_failure_duration"); maxFailureDuration != "" { + duration, err := time.ParseDuration(maxFailureDuration) + if err != nil { + return fmt.Errorf("invalid max_failure_duration in dsn: %w", err) + } + l.Config.MaxFailureDuration = duration + } else { + l.Config.MaxFailureDuration = 5 * time.Second // for OneShot mode it doesn't make sense to have longer duration + } + + if limit := params.Get("limit"); limit != "" { + limit, err := strconv.Atoi(limit) + if err != nil { + return fmt.Errorf("invalid limit in dsn: %w", err) + } + l.Config.Limit = limit + } + + if logLevel := params.Get("log_level"); logLevel != "" { + level, err := log.ParseLevel(logLevel) + if err != nil { + return fmt.Errorf("invalid log_level in dsn: %w", err) + } + l.Config.LogLevel = &level + 
l.logger.Logger.SetLevel(level) + } + + l.Config.URL = fmt.Sprintf("%s://%s", scheme, u.Host) + if u.User != nil { + l.Config.Auth.Username = u.User.Username() + l.Config.Auth.Password, _ = u.User.Password() + } + + clientConfig := vlclient.Config{ + URL: l.Config.URL, + Headers: l.Config.Headers, + Limit: l.Config.Limit, + Query: l.Config.Query, + Since: l.Config.Since, + Username: l.Config.Auth.Username, + Password: l.Config.Auth.Password, + } + + l.Client = vlclient.NewVLClient(clientConfig) + l.Client.Logger = logger.WithFields(log.Fields{"component": "victorialogs-client", "source": l.Config.URL}) + + return nil +} + +func (l *VLSource) GetMode() string { + return l.Config.Mode +} + +func (l *VLSource) GetName() string { + return "victorialogs" +} + +// OneShotAcquisition reads a set of file and returns when done +func (l *VLSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + l.logger.Debug("VictoriaLogs one shot acquisition") + l.Client.SetTomb(t) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer cancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("VictoriaLogs is not ready: %w", err) + } + + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + respChan, err := l.getResponseChan(ctx, false) + if err != nil { + return fmt.Errorf("error when starting acquisition: %w", err) + } + + for { + select { + case <-t.Dying(): + l.logger.Debug("VictoriaLogs one shot acquisition stopped") + return nil + case resp, ok := <-respChan: + if !ok { + l.logger.Info("VictoriaLogs acquisition completed") + return nil + } + l.readOneEntry(resp, l.Config.Labels, out) + } + } +} + +func (l *VLSource) readOneEntry(entry *vlclient.Log, labels map[string]string, out chan types.Event) { + ll := types.Line{} + ll.Raw = entry.Message + ll.Time = entry.Time + ll.Src = l.Config.URL + ll.Labels = labels + ll.Process = true + ll.Module = l.GetName() + + if l.metricsLevel != 
configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() + } + expectMode := types.LIVE + if l.Config.UseTimeMachine { + expectMode = types.TIMEMACHINE + } + out <- types.Event{ + Line: ll, + Process: true, + Type: types.LOG, + ExpectMode: expectMode, + } +} + +func (l *VLSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + l.Client.SetTomb(t) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer cancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("VictoriaLogs is not ready: %w", err) + } + + lctx, clientCancel := context.WithCancel(ctx) + //Don't defer clientCancel(), the client outlives this function call + + t.Go(func() error { + <-t.Dying() + clientCancel() + return nil + }) + + t.Go(func() error { + respChan, err := l.getResponseChan(lctx, true) + if err != nil { + clientCancel() + l.logger.Errorf("could not start VictoriaLogs tail: %s", err) + return fmt.Errorf("while starting VictoriaLogs tail: %w", err) + } + for { + select { + case resp, ok := <-respChan: + if !ok { + l.logger.Warnf("VictoriaLogs channel closed") + clientCancel() + return err + } + l.readOneEntry(resp, l.Config.Labels, out) + case <-t.Dying(): + clientCancel() + return nil + } + } + }) + return nil +} + +func (l *VLSource) getResponseChan(ctx context.Context, infinite bool) (chan *vlclient.Log, error) { + var ( + respChan chan *vlclient.Log + err error + ) + + if l.Config.Mode == configuration.TAIL_MODE { + respChan, err = l.Client.Tail(ctx) + if err != nil { + l.logger.Errorf("could not start VictoriaLogs tail: %s", err) + return respChan, fmt.Errorf("while starting VictoriaLogs tail: %w", err) + } + } else { + respChan = l.Client.QueryRange(ctx, infinite) + } + return respChan, err +} + +func (l *VLSource) CanRun() error { + return nil +} + +func (l *VLSource) GetUuid() string { + return l.Config.UniqueId +} + +func (l *VLSource) Dump() interface{} { + 
return l +} + +// SupportedModes returns the supported modes by the acquisition module +func (l *VLSource) SupportedModes() []string { + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} +} diff --git a/pkg/acquisition/modules/victorialogs/victorialogs_test.go b/pkg/acquisition/modules/victorialogs/victorialogs_test.go new file mode 100644 index 00000000000..182b009c414 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/victorialogs_test.go @@ -0,0 +1,479 @@ +package victorialogs_test + +import ( + "bytes" + "context" + "fmt" + "io" + "math/rand" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func TestConfiguration(t *testing.T) { + log.Infof("Test 'TestConfigure'") + + tests := []struct { + config string + expectedErr string + password string + waitForReady time.Duration + testName string + }{ + { + config: `foobar: asd`, + expectedErr: "line 1: field foobar not found in type victorialogs.VLConfiguration", + testName: "Unknown field", + }, + { + config: ` +mode: tail +source: victorialogs`, + expectedErr: "query is mandatory", + testName: "Missing url", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +`, + expectedErr: "query is mandatory", + testName: "Missing query", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +query: > + {server="demo"} +`, + expectedErr: "", + testName: "Correct config", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +wait_for_ready: 5s +query: > + {server="demo"} +`, + expectedErr: "", + testName: "Correct config with 
wait_for_ready", + waitForReady: 5 * time.Second, + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +auth: + username: foo + password: bar +query: > + {server="demo"} +`, + expectedErr: "", + password: "bar", + testName: "Correct config with password", + }, + } + subLogger := log.WithField("type", "victorialogs") + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + vlSource := victorialogs.VLSource{} + err := vlSource.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + cstest.AssertErrorContains(t, err, test.expectedErr) + + if test.password != "" { + p := vlSource.Config.Auth.Password + if test.password != p { + t.Fatalf("Password mismatch : %s != %s", test.password, p) + } + } + + if test.waitForReady != 0 { + if vlSource.Config.WaitForReady != test.waitForReady { + t.Fatalf("Wrong WaitForReady %v != %v", vlSource.Config.WaitForReady, test.waitForReady) + } + } + }) + } +} + +func TestConfigureDSN(t *testing.T) { + log.Infof("Test 'TestConfigureDSN'") + + tests := []struct { + name string + dsn string + expectedErr string + since time.Time + password string + scheme string + waitForReady time.Duration + }{ + { + name: "Wrong scheme", + dsn: "wrong://", + expectedErr: "invalid DSN wrong:// for VictoriaLogs source, must start with victorialogs://", + }, + { + name: "Correct DSN", + dsn: `victorialogs://localhost:9428/?query={server="demo"}`, + expectedErr: "", + }, + { + name: "Empty host", + dsn: "victorialogs://", + expectedErr: "empty host", + }, + { + name: "Invalid DSN", + dsn: "victorialogs", + expectedErr: "invalid DSN victorialogs for VictoriaLogs source, must start with victorialogs://", + }, + { + name: "Bad since param", + dsn: `victorialogs://127.0.0.1:9428/?since=3h&query={server="demo"}`, + since: time.Now().Add(-3 * time.Hour), + }, + { + name: "Basic Auth", + dsn: `victorialogs://login:password@localhost:3102/?query={server="demo"}`, + password: "password", + }, + { + 
name: "Correct DSN", + dsn: `victorialogs://localhost:9428/?query={server="demo"}&wait_for_ready=5s`, + expectedErr: "", + waitForReady: 5 * time.Second, + }, + { + name: "SSL DSN", + dsn: `victorialogs://localhost:9428/?ssl=true`, + scheme: "https", + }, + } + + for _, test := range tests { + subLogger := log.WithFields(log.Fields{ + "type": "victorialogs", + "name": test.name, + }) + + t.Logf("Test : %s", test.name) + + vlSource := &victorialogs.VLSource{} + err := vlSource.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") + cstest.AssertErrorContains(t, err, test.expectedErr) + + noDuration, _ := time.ParseDuration("0s") + if vlSource.Config.Since != noDuration && vlSource.Config.Since.Round(time.Second) != time.Since(test.since).Round(time.Second) { + t.Fatalf("Invalid since %v", vlSource.Config.Since) + } + + if test.password != "" { + p := vlSource.Config.Auth.Password + if test.password != p { + t.Fatalf("Password mismatch : %s != %s", test.password, p) + } + } + + if test.scheme != "" { + url, _ := url.Parse(vlSource.Config.URL) + if test.scheme != url.Scheme { + t.Fatalf("Schema mismatch : %s != %s", test.scheme, url.Scheme) + } + } + + if test.waitForReady != 0 { + if vlSource.Config.WaitForReady != test.waitForReady { + t.Fatalf("Wrong WaitForReady %v != %v", vlSource.Config.WaitForReady, test.waitForReady) + } + } + } +} + +// Ingestion format docs: https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api +func feedVLogs(ctx context.Context, logger *log.Entry, n int, title string) error { + bb := bytes.NewBuffer(nil) + for i := range n { + fmt.Fprintf(bb, + `{ "_time": %q,"_msg":"Log line #%d %v", "server": "demo", "key": %q} +`, time.Now().Format(time.RFC3339), i, title, title) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://127.0.0.1:9428/insert/jsonline?_stream_fields=server,key", bb) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") 
+ + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + b, _ := io.ReadAll(resp.Body) + logger.Error(string(b)) + + return fmt.Errorf("Bad post status %d", resp.StatusCode) + } + + logger.Info(n, " Events sent") + // VictoriaLogs buffers data before saving to disk + // Default flush deadline is 2s, waiting 3s to be safe + time.Sleep(3 * time.Second) + + return nil +} + +func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() + + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.InfoLevel) + log.Info("Test 'TestStreamingAcquisition'") + + key := strconv.Itoa(rand.Intn(1000)) + tests := []struct { + config string + }{ + { + config: fmt.Sprintf(` +mode: cat +source: victorialogs +url: http://127.0.0.1:9428 +query: > + {server=demo, key=%q} +since: 1h +`, key), + }, + } + + for _, ts := range tests { + logger := log.New() + subLogger := logger.WithField("type", "victorialogs") + vlSource := victorialogs.VLSource{} + + err := vlSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = feedVLogs(ctx, subLogger, 20, key) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + out := make(chan types.Event) + read := 0 + + go func() { + for { + <-out + + read++ + } + }() + + vlTomb := tomb.Tomb{} + + err = vlSource.OneShotAcquisition(ctx, out, &vlTomb) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + // Some logs might be buffered + assert.Greater(t, read, 10) + } +} + +func TestStreamingAcquisition(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.InfoLevel) + log.Info("Test 'TestStreamingAcquisition'") + + title := time.Now().String() + tests := []struct { + name string + config string + 
expectedErr string + streamErr string + expectedLines int + }{ + { + name: "Bad port", + config: `mode: tail +source: victorialogs +url: "http://127.0.0.1:9429" +query: > + server:"demo"`, // Wrong port + expectedErr: "", + streamErr: `VictoriaLogs is not ready`, + expectedLines: 0, + }, + { + name: "ok", + config: `mode: tail +source: victorialogs +url: "http://127.0.0.1:9428" +query: > + server:"demo"`, + expectedErr: "", + streamErr: "", + expectedLines: 20, + }, + } + + ctx := context.Background() + + for _, ts := range tests { + t.Run(ts.name, func(t *testing.T) { + logger := log.New() + subLogger := logger.WithFields(log.Fields{ + "type": "victorialogs", + "name": ts.name, + }) + + out := make(chan types.Event) + vlTomb := tomb.Tomb{} + vlSource := victorialogs.VLSource{} + + err := vlSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = vlSource.StreamingAcquisition(ctx, out, &vlTomb) + cstest.AssertErrorContains(t, err, ts.streamErr) + + if ts.streamErr != "" { + return + } + + time.Sleep(time.Second * 2) // We need to give time to start reading from the WS + + readTomb := tomb.Tomb{} + readCtx, cancel := context.WithTimeout(ctx, time.Second*10) + count := 0 + + readTomb.Go(func() error { + defer cancel() + + for { + select { + case <-readCtx.Done(): + return readCtx.Err() + case evt := <-out: + count++ + + if !strings.HasSuffix(evt.Line.Raw, title) { + return fmt.Errorf("Incorrect suffix : %s", evt.Line.Raw) + } + + if count == ts.expectedLines { + return nil + } + } + } + }) + + err = feedVLogs(ctx, subLogger, ts.expectedLines, title) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = readTomb.Wait() + + cancel() + + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + assert.Equal(t, ts.expectedLines, count) + }) + } +} + +func TestStopStreaming(t *testing.T) { + ctx := context.Background() + + if runtime.GOOS == "windows" { + 
t.Skip("Skipping test on windows") + } + + config := ` +mode: tail +source: victorialogs +url: http://127.0.0.1:9428 +query: > + server:"demo" +` + logger := log.New() + subLogger := logger.WithField("type", "victorialogs") + title := time.Now().String() + vlSource := victorialogs.VLSource{} + + err := vlSource.Configure([]byte(config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + out := make(chan types.Event, 10) + + vlTomb := &tomb.Tomb{} + + err = vlSource.StreamingAcquisition(ctx, out, vlTomb) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + time.Sleep(time.Second * 2) + + err = feedVLogs(ctx, subLogger, 1, title) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + vlTomb.Kill(nil) + + err = vlTomb.Wait() + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } +} diff --git a/pkg/acquisition/victorialogs.go b/pkg/acquisition/victorialogs.go new file mode 100644 index 00000000000..b097f0c8dfc --- /dev/null +++ b/pkg/acquisition/victorialogs.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_victorialogs + +package acquisition + +import ( + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("victorialogs", func() DataSource { return &victorialogs.VLSource{} }) +} diff --git a/pkg/cwversion/component/component.go b/pkg/cwversion/component/component.go index 7ed596525e0..2c6374e4bb7 100644 --- a/pkg/cwversion/component/component.go +++ b/pkg/cwversion/component/component.go @@ -8,20 +8,21 @@ package component // Built is a map of all the known components, and whether they are built-in or not. 
// This is populated as soon as possible by the respective init() functions var Built = map[string]bool{ - "datasource_appsec": false, - "datasource_cloudwatch": false, - "datasource_docker": false, - "datasource_file": false, - "datasource_journalctl": false, - "datasource_k8s-audit": false, - "datasource_kafka": false, - "datasource_kinesis": false, - "datasource_loki": false, - "datasource_s3": false, - "datasource_syslog": false, - "datasource_wineventlog": false, - "datasource_http": false, - "cscli_setup": false, + "datasource_appsec": false, + "datasource_cloudwatch": false, + "datasource_docker": false, + "datasource_file": false, + "datasource_journalctl": false, + "datasource_k8s-audit": false, + "datasource_kafka": false, + "datasource_kinesis": false, + "datasource_loki": false, + "datasource_s3": false, + "datasource_syslog": false, + "datasource_wineventlog": false, + "datasource_victorialogs": false, + "datasource_http": false, + "cscli_setup": false, } func Register(name string) { From 5260cf16cc1eb0a2b3fec29a72ebfe4bae3c055b Mon Sep 17 00:00:00 2001 From: Gilbert Gilb's Date: Thu, 30 Jan 2025 17:14:06 +0100 Subject: [PATCH 414/581] fix parsing of noncompliant RFC3339 timestamps missing only a timezone (#3346) --- pkg/parser/enrich_date.go | 1 + pkg/parser/enrich_date_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go index 40c8de39da5..0a4bc51b862 100644 --- a/pkg/parser/enrich_date.go +++ b/pkg/parser/enrich_date.go @@ -44,6 +44,7 @@ func GenDateParse(date string) (string, time.Time) { "2006-01-02 15:04", "2006/01/02 15:04:05", "2006-01-02 15:04:05", + "2006-01-02T15:04:05", } ) diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 930633feb35..13e106f3049 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -40,6 +40,38 @@ func TestDateParse(t *testing.T) { }, expected: "2011-12-17T08:17:43Z", 
}, + { + name: "ISO 8601, no timezone", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32Z", + }, + { + name: "ISO 8601, no timezone, milliseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123Z", + }, + { + name: "ISO 8601, no timezone, microseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123456", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123456Z", + }, + { + name: "ISO 8601, no timezone, nanoseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123456789", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123456789Z", + }, } logger := log.WithField("test", "test") From 6827f065faf5143a78512b5ca51cd0275d6f0473 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 30 Jan 2025 19:19:57 +0100 Subject: [PATCH 415/581] bucket: avoid crashing on malformed expression (fix #3351) (#3368) --- pkg/leakybucket/blackhole.go | 1 - pkg/leakybucket/manager_load.go | 16 ++++++++++++++++ pkg/leakybucket/manager_load_test.go | 14 ++++++++++++++ pkg/leakybucket/uniq.go | 10 +++++----- 4 files changed, 35 insertions(+), 6 deletions(-) diff --git a/pkg/leakybucket/blackhole.go b/pkg/leakybucket/blackhole.go index bda2e7c9ed1..95ea18f723b 100644 --- a/pkg/leakybucket/blackhole.go +++ b/pkg/leakybucket/blackhole.go @@ -21,7 +21,6 @@ type Blackhole struct { func NewBlackhole(bucketFactory *BucketFactory) (*Blackhole, error) { duration, err := time.ParseDuration(bucketFactory.Blackhole) if err != nil { - bucketFactory.logger.Warning("Blackhole duration not valid, using 1h") return nil, fmt.Errorf("blackhole duration not valid '%s'", bucketFactory.Blackhole) } return &Blackhole{ diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 6e601bb2ec1..13ce1df75ae 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ 
-405,11 +405,22 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Distinct != "" { bucketFactory.logger.Tracef("Adding a non duplicate filter") bucketFactory.processors = append(bucketFactory.processors, &Uniq{}) + bucketFactory.logger.Infof("Compiling distinct '%s'", bucketFactory.Distinct) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + if err != nil { + return fmt.Errorf("invalid distinct '%s' in %s: %w", bucketFactory.Distinct, bucketFactory.Filename, err) + } } if bucketFactory.CancelOnFilter != "" { bucketFactory.logger.Tracef("Adding a cancel_on filter") bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{}) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + if err != nil { + return fmt.Errorf("invalid cancel_on '%s' in %s: %w", bucketFactory.CancelOnFilter, bucketFactory.Filename, err) + } } if bucketFactory.OverflowFilter != "" { @@ -439,6 +450,11 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.ConditionalOverflow != "" { bucketFactory.logger.Tracef("Adding conditional overflow") bucketFactory.processors = append(bucketFactory.processors, &ConditionalOverflow{}) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]interface{}{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...) 
+ if err != nil { + return fmt.Errorf("invalid condition '%s' in %s: %w", bucketFactory.ConditionalOverflow, bucketFactory.Filename, err) + } } if bucketFactory.BayesianThreshold != 0 { diff --git a/pkg/leakybucket/manager_load_test.go b/pkg/leakybucket/manager_load_test.go index 9d207da164e..6b40deb8c9e 100644 --- a/pkg/leakybucket/manager_load_test.go +++ b/pkg/leakybucket/manager_load_test.go @@ -64,10 +64,24 @@ func TestLeakyBucketsConfig(t *testing.T) { {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, // leaky with invalid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "xu"}, false, true}, + // leaky with invalid uniq + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", Distinct: "foo"}, false, true}, + // leaky with valid uniq + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", Distinct: "evt.Parsed.foobar"}, true, true}, // leaky with valid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, // leaky with bad overflow filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", OverflowFilter: "xu"}, false, true}, + // leaky with valid overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", OverflowFilter: "true"}, true, true}, + // leaky with invalid cancel_on filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", CancelOnFilter: "xu"}, false, true}, + // leaky with valid cancel_on filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", CancelOnFilter: "true"}, 
true, true}, + // leaky with invalid conditional overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", ConditionalOverflow: "xu"}, false, true}, + // leaky with valid conditional overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", ConditionalOverflow: "true"}, true, true}, } if err := runTest(CfgTests); err != nil { diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index 3a4683ae309..8a97f30b092 100644 --- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -60,9 +60,6 @@ func (u *Uniq) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, * } func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error { - var err error - var compiledExpr *vm.Program - if uniqExprCache == nil { uniqExprCache = make(map[string]vm.Program) } @@ -74,14 +71,17 @@ func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error { } else { uniqExprCacheLock.Unlock() //release the lock during compile - compiledExpr, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + compiledExpr, err := expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
+ if err != nil { + return err + } u.DistinctCompiled = compiledExpr uniqExprCacheLock.Lock() uniqExprCache[bucketFactory.Distinct] = *compiledExpr uniqExprCacheLock.Unlock() } u.KeyCache = make(map[string]bool) - return err + return nil } // getElement computes a string from an event and a filter From 763959fb68c101ce25e72835e5beaae3c7a82ebc Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 31 Jan 2025 10:12:19 +0100 Subject: [PATCH 416/581] ignore zero value variables for context (#3436) --- pkg/alertcontext/alertcontext.go | 5 +++ pkg/alertcontext/alertcontext_test.go | 46 +++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 0afcb2abd3f..0b38336a698 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "reflect" "slices" "strconv" @@ -202,6 +203,10 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h } } default: + r := reflect.ValueOf(output) + if r.IsZero() || r.IsNil() { + continue + } val := fmt.Sprintf("%v", output) if val != "" && !slices.Contains(tmpContext[key], val) { tmpContext[key] = append(tmpContext[key], val) diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index b1572edd76b..9d9373bcd36 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -363,3 +363,49 @@ func TestAppsecEventToContext(t *testing.T) { assert.ElementsMatch(t, test.expectedResult, metas) } } + +func TestEvalAlertContextRules(t *testing.T) { + tests := []struct { + name string + contextToSend map[string][]string + event types.Event + match types.MatchedRule + req *http.Request + expectedResult map[string][]string + expectedErrLen int + }{ + { + name: "no appsec match", + contextToSend: map[string][]string{ + "source_ip": {"evt.Parsed.source_ip"}, + "id": {"match.id"}, + }, + event: 
types.Event{ + Parsed: map[string]string{ + "source_ip": "1.2.3.4", + "source_machine": "mymachine", + "uri": "/test/test/test/../../../../../../../../", + }, + }, + expectedResult: map[string][]string{ + "source_ip": {"1.2.3.4"}, + "id": {}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + contextDict := make(map[string][]string) + + alertContext = Context{} + if err := NewAlertContext(test.contextToSend, 100); err != nil { + t.Fatalf("failed to compile %s: %s", test.name, err) + } + + errs := EvalAlertContextRules(test.event, &test.match, test.req, contextDict) + assert.Len(t, errs, test.expectedErrLen) + assert.Equal(t, test.expectedResult, contextDict) + }) + } +} From bfed861ba7c99e9ea4be4a9e7e1d9748c180a224 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:15:28 +0100 Subject: [PATCH 417/581] don't ask user to reload systemd service when running in docker (#3434) * don't ask user to reload systemd service when running in docker * refactor + give appropriate message if terminal is attached * remove explicit filetype --- Dockerfile | 1 - Dockerfile.debian | 1 - cmd/crowdsec-cli/clicapi/capi.go | 4 +++- cmd/crowdsec-cli/cliconsole/console.go | 8 +++++-- cmd/crowdsec-cli/clihub/hub.go | 4 ++-- cmd/crowdsec-cli/cliitem/cmdinstall.go | 4 ++-- cmd/crowdsec-cli/cliitem/cmdremove.go | 4 ++-- cmd/crowdsec-cli/cliitem/cmdupgrade.go | 4 ++-- cmd/crowdsec-cli/clilapi/register.go | 4 +++- cmd/crowdsec-cli/clisimulation/simulation.go | 4 ++-- cmd/crowdsec-cli/reload/message.go | 6 +++++ .../{reload_freebsd.go => message_freebsd.go} | 2 +- .../{reload_linux.go => message_linux.go} | 2 +- cmd/crowdsec-cli/reload/message_windows.go | 3 +++ cmd/crowdsec-cli/reload/reload.go | 22 +++++++++++++++---- cmd/crowdsec-cli/reload/reload_windows.go | 3 --- 16 files changed, 51 insertions(+), 25 deletions(-) create mode 100644 cmd/crowdsec-cli/reload/message.go rename 
cmd/crowdsec-cli/reload/{reload_freebsd.go => message_freebsd.go} (64%) rename cmd/crowdsec-cli/reload/{reload_linux.go => message_linux.go} (62%) create mode 100644 cmd/crowdsec-cli/reload/message_windows.go delete mode 100644 cmd/crowdsec-cli/reload/reload_windows.go diff --git a/Dockerfile b/Dockerfile index ee6d54abb02..d368f0f6ede 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,3 @@ -# vim: set ft=dockerfile: FROM docker.io/golang:1.23-alpine3.20 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index f37ba02a7c2..a9b58c633ed 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,4 +1,3 @@ -# vim: set ft=dockerfile: FROM docker.io/golang:1.23-bookworm AS build ARG BUILD_VERSION diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 120acca8b59..14637a26e1a 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -123,7 +123,9 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF fmt.Println(string(apiConfigDump)) } - log.Warning(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Warning(msg) + } return nil } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index dbbe2487cd4..fcc128bd5b5 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -214,7 +214,9 @@ Enable given information push to the central API. 
Allows to empower the console` log.Infof("%v have been enabled", args) } - log.Info(reload.Message) + if reload.UserMessage() != "" { + log.Info(reload.UserMessage()) + } return nil }, @@ -248,7 +250,9 @@ Disable given information push to the central API.`, log.Infof("%v have been disabled", args) } - log.Info(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Info(msg) + } return nil }, diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 0d1e625f715..66fbe7c405a 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -193,8 +193,8 @@ func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force boo return err } - if plan.ReloadNeeded { - fmt.Println("\n" + reload.Message) + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) } return nil diff --git a/cmd/crowdsec-cli/cliitem/cmdinstall.go b/cmd/crowdsec-cli/cliitem/cmdinstall.go index daddbe84a4b..74ffbe727f4 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinstall.go +++ b/cmd/crowdsec-cli/cliitem/cmdinstall.go @@ -88,8 +88,8 @@ func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun log.Error(err) } - if plan.ReloadNeeded { - fmt.Println("\n" + reload.Message) + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) } return nil diff --git a/cmd/crowdsec-cli/cliitem/cmdremove.go b/cmd/crowdsec-cli/cliitem/cmdremove.go index ac9410c047d..c8ea041acbf 100644 --- a/cmd/crowdsec-cli/cliitem/cmdremove.go +++ b/cmd/crowdsec-cli/cliitem/cmdremove.go @@ -104,8 +104,8 @@ func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun b return err } - if plan.ReloadNeeded { - fmt.Println("\n" + reload.Message) + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) } return nil diff --git a/cmd/crowdsec-cli/cliitem/cmdupgrade.go b/cmd/crowdsec-cli/cliitem/cmdupgrade.go index 
1ddd07485d4..5320bc04bc6 100644 --- a/cmd/crowdsec-cli/cliitem/cmdupgrade.go +++ b/cmd/crowdsec-cli/cliitem/cmdupgrade.go @@ -66,8 +66,8 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun return err } - if plan.ReloadNeeded { - fmt.Println("\n" + reload.Message) + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) } return nil diff --git a/cmd/crowdsec-cli/clilapi/register.go b/cmd/crowdsec-cli/clilapi/register.go index e8eb7ddc543..7430c73c3c8 100644 --- a/cmd/crowdsec-cli/clilapi/register.go +++ b/cmd/crowdsec-cli/clilapi/register.go @@ -87,7 +87,9 @@ func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile stri fmt.Printf("%s\n", string(apiConfigDump)) } - log.Warning(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Warning(msg) + } return nil } diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index c06db56f200..1b46c70c90a 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -47,8 +47,8 @@ cscli simulation disable crowdsecurity/ssh-bf`, return nil }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { - if cmd.Name() != "status" { - log.Info(reload.Message) + if msg := reload.UserMessage(); msg != "" && cmd.Name() != "status" { + log.Info(msg) } }, } diff --git a/cmd/crowdsec-cli/reload/message.go b/cmd/crowdsec-cli/reload/message.go new file mode 100644 index 00000000000..cd8e7d4795f --- /dev/null +++ b/cmd/crowdsec-cli/reload/message.go @@ -0,0 +1,6 @@ +//go:build !windows && !freebsd && !linux + +package reload + +// generic message since we don't know the platform +const message = "Please reload the crowdsec process for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/reload/reload_freebsd.go b/cmd/crowdsec-cli/reload/message_freebsd.go similarity index 64% rename from cmd/crowdsec-cli/reload/reload_freebsd.go rename to cmd/crowdsec-cli/reload/message_freebsd.go index 0dac99f2315..9328f935be8 100644 --- a/cmd/crowdsec-cli/reload/reload_freebsd.go +++ b/cmd/crowdsec-cli/reload/message_freebsd.go @@ -1,4 +1,4 @@ package reload // actually sudo is not that popular on freebsd, but this will do -const Message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." +const message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_linux.go b/cmd/crowdsec-cli/reload/message_linux.go similarity index 62% rename from cmd/crowdsec-cli/reload/reload_linux.go rename to cmd/crowdsec-cli/reload/message_linux.go index fbe16e5f168..11c95165372 100644 --- a/cmd/crowdsec-cli/reload/reload_linux.go +++ b/cmd/crowdsec-cli/reload/message_linux.go @@ -1,4 +1,4 @@ package reload // assume systemd, although gentoo and others may differ -const Message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." +const message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/message_windows.go b/cmd/crowdsec-cli/reload/message_windows.go new file mode 100644 index 00000000000..888cb44b0d2 --- /dev/null +++ b/cmd/crowdsec-cli/reload/message_windows.go @@ -0,0 +1,3 @@ +package reload + +const message = "Please restart the crowdsec service for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/reload/reload.go b/cmd/crowdsec-cli/reload/reload.go index fe03af1ea79..44d001fda0c 100644 --- a/cmd/crowdsec-cli/reload/reload.go +++ b/cmd/crowdsec-cli/reload/reload.go @@ -1,6 +1,20 @@ -//go:build !windows && !freebsd && !linux - package reload -// generic message since we don't know the platform -const Message = "Please reload the crowdsec process for the new configuration to be effective." +import ( + "os" + + "github.com/crowdsecurity/go-cs-lib/version" + isatty "github.com/mattn/go-isatty" +) + +func UserMessage() string { + if version.System == "docker" { + if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) { + return "You may need to restart the container to apply the changes." + } + + return "" + } + + return message +} diff --git a/cmd/crowdsec-cli/reload/reload_windows.go b/cmd/crowdsec-cli/reload/reload_windows.go deleted file mode 100644 index 88642425ae2..00000000000 --- a/cmd/crowdsec-cli/reload/reload_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package reload - -const Message = "Please restart the crowdsec service for the new configuration to be effective." 
From dc28ae58dc59e72981ee4724b1c72a79ba586ad8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Feb 2025 16:51:02 +0100 Subject: [PATCH 418/581] run 'hub upgrade' in rpm/deb postinst, improve hub message (#3440) --- debian/postinst | 2 ++ pkg/cwhub/sync.go | 2 +- rpm/SPECS/crowdsec.spec | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/debian/postinst b/debian/postinst index ed537325c44..b73619b9e6f 100644 --- a/debian/postinst +++ b/debian/postinst @@ -68,6 +68,8 @@ if [ "$1" = configure ]; then echo Updating hub /usr/bin/cscli hub update + /usr/bin/cscli hub upgrade + if [ "$COLLECTIONS" = true ]; then set +e CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 59c1383d7c2..ed99f4806d5 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -467,7 +467,7 @@ func (i *Item) checkSubItemVersions() []string { if !sub.State.UpToDate { i.State.UpToDate = false - warn = append(warn, fmt.Sprintf("%s is tainted by outdated %s", i.Name, sub.FQName())) + warn = append(warn, fmt.Sprintf("%s is outdated because of %s", i.Name, sub.FQName())) continue } diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index eba022d9bda..ca912d58e49 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -176,6 +176,7 @@ if [ $1 == 1 ]; then fi cscli hub update + cscli hub upgrade CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection GREEN='\033[0;32m' From c7ef2a9a69e75bde26660f9fda1f4ad326641621 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Feb 2025 14:34:55 +0100 Subject: [PATCH 419/581] merge from master (dc28ae58dc59e72981ee4724b1c72a79ba586ad8) for 1.6.5 (#3444) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- 
.github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/docker-tests.yml | 32 +- .github/workflows/go-tests-windows.yml | 4 +- .github/workflows/go-tests.yml | 20 +- .github/workflows/publish-tarball-release.yml | 2 +- .gitignore | 3 + .golangci.yml | 123 ++-- Dockerfile | 5 +- Dockerfile.debian | 5 +- Makefile | 1 + README.md | 146 +---- cmd/crowdsec-cli/clialert/alerts.go | 2 +- cmd/crowdsec-cli/clialert/table.go | 2 +- cmd/crowdsec-cli/clibouncer/inspect.go | 2 +- cmd/crowdsec-cli/clibouncer/list.go | 3 +- cmd/crowdsec-cli/clicapi/capi.go | 13 +- cmd/crowdsec-cli/cliconfig/backup.go | 20 + cmd/crowdsec-cli/{ => cliconfig}/config.go | 14 +- .../feature_flags.go} | 4 +- cmd/crowdsec-cli/cliconfig/restore.go | 20 + .../{config_show.go => cliconfig/show.go} | 2 +- .../showyaml.go} | 8 +- cmd/crowdsec-cli/cliconsole/console.go | 10 +- cmd/crowdsec-cli/clidecision/decisions.go | 4 +- cmd/crowdsec-cli/clidecision/table.go | 13 +- cmd/crowdsec-cli/clihub/hub.go | 83 ++- cmd/crowdsec-cli/clihub/items.go | 51 +- cmd/crowdsec-cli/clihub/utils_table.go | 64 +- cmd/crowdsec-cli/clihubtest/explain.go | 7 +- cmd/crowdsec-cli/clihubtest/run.go | 4 +- cmd/crowdsec-cli/cliitem/appsec.go | 123 ---- cmd/crowdsec-cli/cliitem/cmdinspect.go | 236 +++++++ cmd/crowdsec-cli/cliitem/cmdinstall.go | 150 +++++ cmd/crowdsec-cli/cliitem/cmdremove.go | 151 +++++ cmd/crowdsec-cli/cliitem/cmdupgrade.go | 106 +++ cmd/crowdsec-cli/cliitem/collection.go | 41 -- cmd/crowdsec-cli/cliitem/context.go | 41 -- cmd/crowdsec-cli/cliitem/hubappsec.go | 255 ++++++++ cmd/crowdsec-cli/cliitem/hubcollection.go | 105 +++ cmd/crowdsec-cli/cliitem/hubcontext.go | 102 +++ cmd/crowdsec-cli/cliitem/hubparser.go | 105 +++ cmd/crowdsec-cli/cliitem/hubpostoverflow.go | 102 +++ cmd/crowdsec-cli/cliitem/hubscenario.go | 78 ++- cmd/crowdsec-cli/cliitem/item.go | 454 +------------ .../item_metrics.go => cliitem/metrics.go} | 75 ++- 
cmd/crowdsec-cli/cliitem/metrics_table.go | 70 ++ cmd/crowdsec-cli/cliitem/parser.go | 41 -- cmd/crowdsec-cli/cliitem/postoverflow.go | 41 -- cmd/crowdsec-cli/cliitem/suggest.go | 77 --- cmd/crowdsec-cli/clilapi/context.go | 6 +- cmd/crowdsec-cli/clilapi/register.go | 11 +- cmd/crowdsec-cli/clilapi/status.go | 2 +- cmd/crowdsec-cli/climachine/add.go | 13 +- cmd/crowdsec-cli/climachine/inspect.go | 4 +- cmd/crowdsec-cli/climachine/list.go | 3 +- cmd/crowdsec-cli/climetrics/list.go | 3 +- cmd/crowdsec-cli/climetrics/show.go | 15 + cmd/crowdsec-cli/climetrics/statacquis.go | 6 +- cmd/crowdsec-cli/climetrics/statalert.go | 6 +- .../climetrics/statappsecengine.go | 6 +- cmd/crowdsec-cli/climetrics/statappsecrule.go | 5 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 13 +- cmd/crowdsec-cli/climetrics/statbucket.go | 6 +- cmd/crowdsec-cli/climetrics/statdecision.go | 6 +- cmd/crowdsec-cli/climetrics/statlapi.go | 6 +- .../climetrics/statlapibouncer.go | 6 +- .../climetrics/statlapidecision.go | 6 +- .../climetrics/statlapimachine.go | 6 +- cmd/crowdsec-cli/climetrics/statparser.go | 6 +- cmd/crowdsec-cli/climetrics/statstash.go | 6 +- cmd/crowdsec-cli/climetrics/statwhitelist.go | 6 +- cmd/crowdsec-cli/climetrics/store.go | 3 +- .../clinotifications/notifications.go | 2 +- cmd/crowdsec-cli/clisetup/setup.go | 19 +- cmd/crowdsec-cli/clisimulation/simulation.go | 6 +- cmd/crowdsec-cli/clisupport/support.go | 8 +- cmd/crowdsec-cli/completion.go | 8 +- cmd/crowdsec-cli/config_backup.go | 236 ------- cmd/crowdsec-cli/config_restore.go | 274 -------- cmd/crowdsec-cli/copyfile.go | 82 --- cmd/crowdsec-cli/dashboard.go | 24 +- cmd/crowdsec-cli/idgen/machineid.go | 6 +- cmd/crowdsec-cli/idgen/password.go | 9 +- cmd/crowdsec-cli/main.go | 13 +- cmd/crowdsec-cli/reload/message.go | 6 + .../{reload_freebsd.go => message_freebsd.go} | 2 +- .../{reload_linux.go => message_linux.go} | 2 +- cmd/crowdsec-cli/reload/message_windows.go | 3 + cmd/crowdsec-cli/reload/reload.go | 22 +- 
cmd/crowdsec-cli/reload/reload_windows.go | 3 - cmd/crowdsec-cli/require/branch.go | 2 +- cmd/crowdsec-cli/require/require.go | 14 +- cmd/crowdsec-cli/setup.go | 1 + cmd/crowdsec/appsec.go | 2 +- cmd/crowdsec/fatalhook.go | 24 +- cmd/crowdsec/main.go | 26 +- cmd/crowdsec/pour.go | 2 +- cmd/crowdsec/serve.go | 6 +- cmd/crowdsec/win_service.go | 2 +- cmd/notification-email/main.go | 2 +- debian/install | 1 - debian/postinst | 34 +- debian/preinst | 43 -- debian/prerm | 3 +- debian/rules | 2 +- docker/test/.python-version | 1 + docker/test/Pipfile | 11 - docker/test/Pipfile.lock | 604 ------------------ docker/test/README.md | 0 docker/test/pyproject.toml | 41 ++ docker/test/tests/conftest.py | 9 +- docker/test/tests/test_agent.py | 56 +- docker/test/tests/test_agent_only.py | 22 +- docker/test/tests/test_bouncer.py | 25 +- docker/test/tests/test_capi.py | 23 +- docker/test/tests/test_capi_whitelists.py | 22 +- docker/test/tests/test_cold_logs.py | 29 +- docker/test/tests/test_flavors.py | 47 +- docker/test/tests/test_hello.py | 13 +- docker/test/tests/test_hub.py | 16 +- docker/test/tests/test_hub_collections.py | 103 ++- docker/test/tests/test_hub_parsers.py | 60 +- docker/test/tests/test_hub_postoverflows.py | 41 +- docker/test/tests/test_hub_scenarios.py | 55 +- docker/test/tests/test_local_api_url.py | 40 +- docker/test/tests/test_local_item.py | 28 +- docker/test/tests/test_metrics.py | 46 +- docker/test/tests/test_nolapi.py | 5 +- docker/test/tests/test_simple.py | 2 +- docker/test/tests/test_tls.py | 234 ++++--- docker/test/tests/test_version.py | 8 +- docker/test/tests/test_wal.py | 16 +- docker/test/uv.lock | 587 +++++++++++++++++ go.mod | 103 +-- go.sum | 191 +++--- pkg/acquisition/acquisition.go | 69 +- pkg/acquisition/acquisition_test.go | 30 +- .../configuration/configuration.go | 10 +- pkg/acquisition/modules/appsec/appsec.go | 31 +- .../modules/appsec/appsec_hooks_test.go | 4 +- .../modules/appsec/appsec_runner.go | 27 +- 
.../modules/appsec/appsec_runner_test.go | 99 ++- pkg/acquisition/modules/appsec/appsec_test.go | 22 +- .../modules/appsec/bodyprocessors/raw.go | 7 +- pkg/acquisition/modules/appsec/utils.go | 1 + .../modules/cloudwatch/cloudwatch.go | 39 +- pkg/acquisition/modules/docker/docker.go | 132 ++-- pkg/acquisition/modules/docker/docker_test.go | 81 ++- pkg/acquisition/modules/file/file.go | 4 +- pkg/acquisition/modules/file/file_test.go | 18 +- pkg/acquisition/modules/http/http.go | 37 +- pkg/acquisition/modules/http/http_test.go | 98 +-- .../modules/journalctl/journalctl.go | 37 +- .../modules/journalctl/journalctl_test.go | 9 +- pkg/acquisition/modules/kafka/kafka.go | 7 +- pkg/acquisition/modules/kafka/kafka_test.go | 6 +- pkg/acquisition/modules/kinesis/kinesis.go | 132 +++- .../modules/kinesis/kinesis_test.go | 101 +-- .../modules/kubernetesaudit/k8s_audit.go | 31 +- .../modules/kubernetesaudit/k8s_audit_test.go | 6 +- pkg/acquisition/modules/loki/loki.go | 4 +- .../syslog/internal/parser/rfc3164/parse.go | 1 - .../syslog/internal/parser/rfc5424/parse.go | 3 - .../internal/parser/rfc5424/parse_test.go | 58 +- .../syslog/internal/server/syslogserver.go | 1 - pkg/acquisition/modules/syslog/syslog.go | 4 +- pkg/acquisition/modules/syslog/syslog_test.go | 4 +- .../victorialogs/internal/vlclient/types.go | 12 + .../internal/vlclient/vl_client.go | 405 ++++++++++++ .../modules/victorialogs/victorialogs.go | 369 +++++++++++ .../modules/victorialogs/victorialogs_test.go | 479 ++++++++++++++ .../wineventlog/wineventlog_windows.go | 4 +- .../wineventlog/wineventlog_windows_test.go | 56 +- pkg/acquisition/test_files/env.yaml | 6 + pkg/acquisition/victorialogs.go | 12 + pkg/alertcontext/alertcontext.go | 29 +- pkg/alertcontext/alertcontext_test.go | 57 +- pkg/apiclient/alerts_service_test.go | 18 +- pkg/apiclient/auth_jwt.go | 3 - pkg/apiclient/auth_key_test.go | 6 +- pkg/apiclient/client.go | 4 +- pkg/apiclient/client_http.go | 9 +- pkg/apiclient/client_http_test.go | 4 +- 
pkg/apiclient/client_test.go | 36 +- pkg/apiclient/decisions_service_test.go | 31 +- pkg/apiserver/alerts_test.go | 109 ++-- pkg/apiserver/apic.go | 1 - pkg/apiserver/apiserver.go | 44 +- pkg/apiserver/apiserver_test.go | 4 +- pkg/apiserver/controllers/v1/decisions.go | 5 +- pkg/apiserver/controllers/v1/errors.go | 12 - pkg/apiserver/decisions_test.go | 16 +- pkg/apiserver/jwt_test.go | 10 +- pkg/apiserver/machines_test.go | 6 +- pkg/apiserver/middlewares/v1/api_key.go | 1 - pkg/appsec/appsec.go | 1 - pkg/appsec/appsec_rule/appsec_rule.go | 1 - pkg/appsec/appsec_rule/modsec_rule_test.go | 2 - pkg/appsec/appsec_rules_collection.go | 5 +- pkg/appsec/coraza_logger.go | 2 +- pkg/appsec/request_test.go | 3 - pkg/cache/cache_test.go | 13 +- pkg/csconfig/api.go | 4 +- pkg/csconfig/common.go | 8 +- pkg/csconfig/config.go | 9 +- pkg/csconfig/cscli.go | 1 + pkg/csconfig/fflag.go | 2 +- pkg/csplugin/broker.go | 4 +- pkg/csplugin/listfiles_test.go | 19 +- pkg/csplugin/watcher_test.go | 13 +- pkg/csprofiles/csprofiles.go | 44 +- pkg/csprofiles/csprofiles_test.go | 10 +- pkg/cticlient/example/fire.go | 10 +- pkg/cticlient/types.go | 34 +- pkg/cticlient/types_test.go | 12 +- pkg/cwhub/cwhub.go | 8 +- pkg/cwhub/cwhub_test.go | 36 +- pkg/cwhub/dataset.go | 72 --- pkg/cwhub/doc.go | 29 +- pkg/cwhub/download.go | 126 ++++ pkg/cwhub/download_test.go | 182 ++++++ pkg/cwhub/errors.go | 19 - pkg/cwhub/fetch.go | 70 ++ pkg/cwhub/hub.go | 60 +- pkg/cwhub/hub_test.go | 263 ++++++-- pkg/cwhub/item.go | 307 ++++----- pkg/cwhub/item_test.go | 25 +- pkg/cwhub/iteminstall.go | 73 --- pkg/cwhub/iteminstall_test.go | 10 +- pkg/cwhub/itemlink.go | 78 --- pkg/cwhub/itemremove.go | 138 ---- pkg/cwhub/itemupgrade.go | 254 -------- pkg/cwhub/itemupgrade_test.go | 15 +- pkg/cwhub/remote.go | 84 --- pkg/cwhub/state.go | 62 ++ pkg/cwhub/state_test.go | 77 +++ pkg/cwhub/sync.go | 401 +++++++----- pkg/cwversion/component/component.go | 29 +- pkg/cwversion/version.go | 18 +- pkg/cwversion/version_test.go | 
68 ++ pkg/database/alertfilter.go | 258 ++++++++ pkg/database/alerts.go | 288 +-------- pkg/database/database.go | 2 +- pkg/database/errors.go | 1 - pkg/database/flush.go | 16 +- pkg/database/machines.go | 9 - pkg/dumps/parser_dump.go | 20 +- pkg/emoji/emoji.go | 4 + pkg/exprhelpers/crowdsec_cti.go | 44 +- pkg/exprhelpers/debugger.go | 112 +++- pkg/exprhelpers/debugger_test.go | 1 + pkg/exprhelpers/debuggerstub_test.go | 10 + pkg/exprhelpers/exprlib_test.go | 6 +- pkg/exprhelpers/geoip.go | 3 - pkg/exprhelpers/helpers.go | 50 +- pkg/fflag/crowdsec.go | 14 +- pkg/fflag/features_test.go | 10 +- pkg/hubops/colorize.go | 38 ++ pkg/hubops/datarefresh.go | 75 +++ pkg/hubops/disable.go | 121 ++++ pkg/hubops/doc.go | 45 ++ pkg/hubops/download.go | 212 ++++++ pkg/hubops/enable.go | 113 ++++ pkg/hubops/plan.go | 250 ++++++++ pkg/hubops/purge.go | 88 +++ pkg/hubtest/hubtest.go | 12 +- pkg/hubtest/hubtest_item.go | 36 +- pkg/hubtest/parser_assert.go | 6 +- pkg/leakybucket/bayesian.go | 4 +- pkg/leakybucket/blackhole.go | 3 - pkg/leakybucket/bucket.go | 3 +- pkg/leakybucket/buckets.go | 1 - pkg/leakybucket/buckets_test.go | 41 +- pkg/leakybucket/conditional.go | 6 +- pkg/leakybucket/manager_load.go | 132 ++-- pkg/leakybucket/manager_load_test.go | 73 ++- pkg/leakybucket/manager_run.go | 13 +- pkg/leakybucket/overflow_filter.go | 4 +- pkg/leakybucket/overflows.go | 32 +- pkg/leakybucket/processor.go | 3 +- pkg/leakybucket/reset_filter.go | 10 +- pkg/leakybucket/uniq.go | 16 +- pkg/longpollclient/client.go | 2 +- pkg/metabase/container.go | 56 +- pkg/metabase/metabase.go | 43 +- pkg/parser/enrich.go | 6 +- pkg/parser/enrich_date.go | 1 + pkg/parser/enrich_date_test.go | 32 + pkg/parser/enrich_geoip.go | 3 - pkg/parser/node.go | 18 +- pkg/parser/parsing_test.go | 83 +-- pkg/parser/runtime.go | 49 +- pkg/parser/stage.go | 6 +- pkg/parser/whitelist_test.go | 4 +- pkg/setup/detect_test.go | 18 +- pkg/setup/install.go | 71 +- pkg/types/appsec_event.go | 1 - pkg/types/constants.go | 34 
+- pkg/types/event.go | 6 +- pkg/types/event_test.go | 2 - pkg/types/getfstype.go | 1 - pkg/types/ip.go | 10 +- pkg/types/ip_test.go | 18 +- pkg/types/utils.go | 38 +- rpm/SPECS/crowdsec.spec | 41 +- test/bats/01_crowdsec.bats | 39 +- test/bats/01_cscli.bats | 44 +- test/bats/01_cscli_lapi.bats | 16 +- test/bats/02_nolapi.bats | 12 - test/bats/03_noagent.bats | 12 - test/bats/04_nocapi.bats | 13 +- test/bats/07_setup.bats | 31 +- test/bats/08_metrics.bats | 4 +- test/bats/08_metrics_bouncer.bats | 28 +- test/bats/10_bouncers.bats | 7 +- test/bats/20_hub.bats | 44 +- test/bats/20_hub_collections.bats | 381 ----------- test/bats/20_hub_collections_dep.bats | 26 +- test/bats/20_hub_items.bats | 82 ++- test/bats/20_hub_parsers.bats | 383 ----------- test/bats/20_hub_postoverflows.bats | 383 ----------- test/bats/20_hub_scenarios.bats | 382 ----------- test/bats/30_machines.bats | 5 +- test/bats/80_alerts.bats | 2 +- test/bats/90_decisions.bats | 17 +- test/bats/crowdsec-acquisition.bats | 78 +++ test/bats/cscli-hubtype-inspect.bats | 93 +++ test/bats/cscli-hubtype-install.bats | 301 +++++++++ test/bats/cscli-hubtype-list.bats | 130 ++++ test/bats/cscli-hubtype-remove.bats | 245 +++++++ test/bats/cscli-hubtype-upgrade.bats | 253 ++++++++ test/bats/cscli-parsers.bats | 44 ++ test/bats/cscli-postoverflows.bats | 44 ++ test/bats/hub-index.bats | 357 +++++++++++ test/bin/remove-all-hub-items | 2 +- test/lib/config/config-local | 2 +- test/lib/setup_file.sh | 24 +- test/localstack/docker-compose.yml | 1 - wizard.sh | 72 +-- 344 files changed, 10732 insertions(+), 7624 deletions(-) create mode 100644 cmd/crowdsec-cli/cliconfig/backup.go rename cmd/crowdsec-cli/{ => cliconfig}/config.go (58%) rename cmd/crowdsec-cli/{config_feature_flags.go => cliconfig/feature_flags.go} (96%) create mode 100644 cmd/crowdsec-cli/cliconfig/restore.go rename cmd/crowdsec-cli/{config_show.go => cliconfig/show.go} (99%) rename cmd/crowdsec-cli/{config_showyaml.go => cliconfig/showyaml.go} (62%) 
delete mode 100644 cmd/crowdsec-cli/cliitem/appsec.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdinspect.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdinstall.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdremove.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdupgrade.go delete mode 100644 cmd/crowdsec-cli/cliitem/collection.go delete mode 100644 cmd/crowdsec-cli/cliitem/context.go create mode 100644 cmd/crowdsec-cli/cliitem/hubappsec.go create mode 100644 cmd/crowdsec-cli/cliitem/hubcollection.go create mode 100644 cmd/crowdsec-cli/cliitem/hubcontext.go create mode 100644 cmd/crowdsec-cli/cliitem/hubparser.go create mode 100644 cmd/crowdsec-cli/cliitem/hubpostoverflow.go rename cmd/crowdsec-cli/{clihub/item_metrics.go => cliitem/metrics.go} (78%) create mode 100644 cmd/crowdsec-cli/cliitem/metrics_table.go delete mode 100644 cmd/crowdsec-cli/cliitem/parser.go delete mode 100644 cmd/crowdsec-cli/cliitem/postoverflow.go delete mode 100644 cmd/crowdsec-cli/cliitem/suggest.go delete mode 100644 cmd/crowdsec-cli/config_backup.go delete mode 100644 cmd/crowdsec-cli/config_restore.go delete mode 100644 cmd/crowdsec-cli/copyfile.go create mode 100644 cmd/crowdsec-cli/reload/message.go rename cmd/crowdsec-cli/reload/{reload_freebsd.go => message_freebsd.go} (64%) rename cmd/crowdsec-cli/reload/{reload_linux.go => message_linux.go} (62%) create mode 100644 cmd/crowdsec-cli/reload/message_windows.go delete mode 100644 cmd/crowdsec-cli/reload/reload_windows.go delete mode 100644 debian/preinst create mode 100644 docker/test/.python-version delete mode 100644 docker/test/Pipfile delete mode 100644 docker/test/Pipfile.lock create mode 100644 docker/test/README.md create mode 100644 docker/test/pyproject.toml create mode 100644 docker/test/uv.lock create mode 100644 pkg/acquisition/modules/victorialogs/internal/vlclient/types.go create mode 100644 pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go create mode 100644 
pkg/acquisition/modules/victorialogs/victorialogs.go create mode 100644 pkg/acquisition/modules/victorialogs/victorialogs_test.go create mode 100644 pkg/acquisition/test_files/env.yaml create mode 100644 pkg/acquisition/victorialogs.go delete mode 100644 pkg/cwhub/dataset.go create mode 100644 pkg/cwhub/download.go create mode 100644 pkg/cwhub/download_test.go delete mode 100644 pkg/cwhub/errors.go create mode 100644 pkg/cwhub/fetch.go delete mode 100644 pkg/cwhub/iteminstall.go delete mode 100644 pkg/cwhub/itemlink.go delete mode 100644 pkg/cwhub/itemremove.go delete mode 100644 pkg/cwhub/itemupgrade.go delete mode 100644 pkg/cwhub/remote.go create mode 100644 pkg/cwhub/state.go create mode 100644 pkg/cwhub/state_test.go create mode 100644 pkg/cwversion/version_test.go create mode 100644 pkg/database/alertfilter.go create mode 100644 pkg/exprhelpers/debuggerstub_test.go create mode 100644 pkg/hubops/colorize.go create mode 100644 pkg/hubops/datarefresh.go create mode 100644 pkg/hubops/disable.go create mode 100644 pkg/hubops/doc.go create mode 100644 pkg/hubops/download.go create mode 100644 pkg/hubops/enable.go create mode 100644 pkg/hubops/plan.go create mode 100644 pkg/hubops/purge.go delete mode 100644 test/bats/20_hub_collections.bats delete mode 100644 test/bats/20_hub_parsers.bats delete mode 100644 test/bats/20_hub_postoverflows.bats delete mode 100644 test/bats/20_hub_scenarios.bats create mode 100644 test/bats/crowdsec-acquisition.bats create mode 100644 test/bats/cscli-hubtype-inspect.bats create mode 100644 test/bats/cscli-hubtype-install.bats create mode 100644 test/bats/cscli-hubtype-list.bats create mode 100644 test/bats/cscli-hubtype-remove.bats create mode 100644 test/bats/cscli-hubtype-upgrade.bats create mode 100644 test/bats/cscli-parsers.bats create mode 100644 test/bats/cscli-postoverflows.bats create mode 100644 test/bats/hub-index.bats diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 
e631c3ebc71..42f1252c8b9 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index a94e28b1f97..394b85427fe 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index a1054463341..25c302da787 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index ac685bf4e87..a5b2758b6b0 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 07e29071e05..5f26b0fccbf 100644 --- a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4128cb435f9..cd37c7afaa9 100644 --- a/.github/workflows/codeql-analysis.yml 
+++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 918f3bcaf1d..647f3e55cdb 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -49,28 +49,30 @@ jobs: cache-from: type=gha cache-to: type=gha,mode=min - - name: "Setup Python" + - name: "Create Docker network" + run: docker network create net-test + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: 0.5.24 + enable-cache: true + cache-dependency-glob: "uv.lock" + + - name: "Set up Python" uses: actions/setup-python@v5 with: - python-version: "3.x" - cache: 'pipenv' + python-version-file: "./docker/test/.python-version" - - name: "Install dependencies" + # running serially to reduce test flakiness + - name: Lint and run the tests run: | cd docker/test - python -m pip install --upgrade pipenv wheel - pipenv install --deploy - - - name: "Create Docker network" - run: docker network create net-test - - - name: "Run tests" + uv sync --all-extras --dev --locked + uv run ruff check + uv run pytest tests -n 1 --durations=0 --color=yes env: CROWDSEC_TEST_VERSION: test CROWDSEC_TEST_FLAVORS: ${{ matrix.flavor }} CROWDSEC_TEST_NETWORK: net-test CROWDSEC_TEST_TIMEOUT: 90 - # running serially to reduce test flakiness - run: | - cd docker/test - pipenv run pytest -n 1 --durations=0 --color=yes diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 2966b999a4a..68cb9715b18 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build run: | @@ -61,6 +61,6 @@ jobs: - name: 
golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.61 + version: v1.63 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3f4aa67e139..5a8148c473e 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -42,7 +42,6 @@ jobs: DEBUG: "" LAMBDA_EXECUTOR: "" KINESIS_ERROR_PROBABILITY: "" - DOCKER_HOST: unix:///var/run/docker.sock KINESIS_INITIALIZE_STREAMS: ${{ env.KINESIS_INITIALIZE_STREAMS }} LOCALSTACK_HOST: ${{ env.AWS_HOST }} # Required so that resource urls are provided properly # e.g sqs url will get localhost if we don't set this env to map our service @@ -115,6 +114,17 @@ jobs: --health-retries 5 --health-start-period 30s + victorialogs: + image: victoriametrics/victoria-logs:v1.5.0-victorialogs + ports: + - "9428:9428" + options: >- + --name=victorialogs1 + --health-cmd "wget -q -O - http://0.0.0.0:9428" + --health-interval 30s + --health-timeout 10s + --health-retries 5 + --health-start-period 30s steps: - name: Check out CrowdSec repository @@ -126,7 +136,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Run "go generate" and check for changes run: | @@ -144,11 +154,11 @@ jobs: go generate ./... protoc --version if [[ $(git status --porcelain) ]]; then - echo "Error: Uncommitted changes found after running 'make generate'. Please commit all generated code." + echo "Error: Uncommitted changes found after running 'go generate'. Please commit all generated code." git diff exit 1 else - echo "No changes detected after running 'make generate'." + echo "No changes detected after running 'go generate'." 
fi - name: Create localstack streams @@ -190,6 +200,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.61 + version: v1.63 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 6a41c3fba53..18541f86e41 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build the binaries run: | diff --git a/.gitignore b/.gitignore index 6e6624fd282..cba570fdb84 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,9 @@ # Test dependencies test/tools/* +# Saved test status +test/bats/.bats/run-logs + # VMs used for dev/test .vagrant diff --git a/.golangci.yml b/.golangci.yml index acde901dbe6..b3be5adb687 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,41 @@ # https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +run: + build-tags: + - expr_debug + linters-settings: + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. 
+ exclude-functions: + - (*bytes.Buffer).ReadFrom # TODO: + - io.Copy # TODO: + - (net/http.ResponseWriter).Write # TODO: + - (*os/exec.Cmd).Start + - (*os/exec.Cmd).Wait + - (*os.Process).Kill + - (*text/template.Template).ExecuteTemplate + - syscall.FreeLibrary + - golang.org/x/sys/windows.CloseHandle + - golang.org/x/sys/windows.ResetEvent + - (*golang.org/x/sys/windows/svc/eventlog.Log).Info + - (*golang.org/x/sys/windows/svc/mgr.Mgr).Disconnect + + - (github.com/bluele/gcache.Cache).Set + - (github.com/gin-gonic/gin.ResponseWriter).WriteString + - (*github.com/segmentio/kafka-go.Reader).SetOffsetAt + - (*gopkg.in/tomb.v2.Tomb).Wait + + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterArgs + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterBody + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterHeaders + - (*github.com/crowdsecurity/crowdsec/pkg/longpollclient.LongPollClient).Stop + gci: sections: - standard @@ -62,6 +97,7 @@ linters-settings: - "!**/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go" - "!**/pkg/acquisition/modules/loki/loki.go" - "!**/pkg/acquisition/modules/loki/timestamp_test.go" + - "!**/pkg/acquisition/modules/victorialogs/victorialogs.go" - "!**/pkg/acquisition/modules/s3/s3.go" - "!**/pkg/acquisition/modules/syslog/syslog.go" - "!**/pkg/acquisition/modules/wineventlog/wineventlog_windows.go" @@ -118,7 +154,7 @@ linters-settings: arguments: [6] - name: function-length # lower this after refactoring - arguments: [110, 237] + arguments: [111, 238] - name: get-return disabled: true - name: increment-decrement @@ -183,23 +219,17 @@ linters-settings: - ifElseChain - importShadow - hugeParam - - rangeValCopy - commentedOutCode - commentedOutImport - unnamedResult - sloppyReassign - appendCombine - - captLocal - typeUnparen - commentFormatting - deferInLoop # - - sprintfQuotedString # - whyNoLint - equalFold # - unnecessaryBlock # - - ptrToRefParam # - - stringXbytes # - - 
appendAssign # - tooManyResultsChecker - unnecessaryDefer - docStub @@ -211,9 +241,7 @@ linters: # # DEPRECATED by golangi-lint # - - execinquery - exportloopref - - gomnd # # Redundant @@ -322,10 +350,6 @@ issues: - govet text: "shadow: declaration of \"(err|ctx)\" shadows declaration" - - linters: - - errcheck - text: "Error return value of `.*` is not checked" - # Will fix, trivial - just beware of merge conflicts - linters: @@ -340,14 +364,6 @@ issues: - errorlint text: "non-wrapping format verb for fmt.Errorf. Use `%w` to format errors" - - linters: - - errorlint - text: "type assertion on error will fail on wrapped errors. Use errors.As to check for specific errors" - - - linters: - - errorlint - text: "type switch on error will fail on wrapped errors. Use errors.As to check for specific errors" - - linters: - nosprintfhostport text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf" @@ -383,6 +399,11 @@ issues: path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + - linters: + - revive + path: pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go + text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + - linters: - revive path: cmd/crowdsec-cli/copyfile.go @@ -409,12 +430,6 @@ issues: path: "pkg/(.+)_test.go" text: "line-length-limit: .*" - # tolerate deep exit in tests, for now - - linters: - - revive - path: "pkg/(.+)_test.go" - text: "deep-exit: .*" - # we use t,ctx instead of ctx,t in tests - linters: - revive @@ -429,30 +444,62 @@ issues: - linters: - revive - path: "cmd/crowdsec-cli/clihub/item_metrics.go" + path: "cmd/crowdsec/crowdsec.go" text: "deep-exit: .*" - linters: - revive - path: "cmd/crowdsec-cli/idgen/password.go" + path: "cmd/crowdsec/api.go" text: "deep-exit: .*" - 
linters: - revive - path: "pkg/leakybucket/overflows.go" + path: "cmd/crowdsec/win_service.go" text: "deep-exit: .*" - linters: - - revive - path: "cmd/crowdsec/crowdsec.go" - text: "deep-exit: .*" + - recvcheck + path: "pkg/csplugin/hclog_adapter.go" + text: 'the methods of "HCLogAdapter" use pointer receiver and non-pointer receiver.' + # encoding to json/yaml requires value receivers - linters: - - revive - path: "cmd/crowdsec/api.go" - text: "deep-exit: .*" + - recvcheck + path: "pkg/cwhub/item.go" + text: 'the methods of "Item" use pointer receiver and non-pointer receiver.' + + - linters: + - gocritic + path: "cmd/crowdsec-cli" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "pkg/(cticlient|hubtest)" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "(.+)_test.go" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "pkg/(appsec|acquisition|dumps|alertcontext|leakybucket|exprhelpers)" + text: "rangeValCopy: .*" - linters: - revive - path: "cmd/crowdsec/win_service.go" - text: "deep-exit: .*" + path: "pkg/types/utils.go" + text: "argument-limit: .*" + + # need some cleanup first: to create db in memory and share the client, not the config + - linters: + - usetesting + path: "pkg/apiserver/(.+)_test.go" + text: "os.MkdirTemp.* could be replaced by t.TempDir.*" + + - linters: + - usetesting + path: "pkg/apiserver/(.+)_test.go" + text: "os.CreateTemp.* could be replaced by os.CreateTemp.*" diff --git a/Dockerfile b/Dockerfile index 880df88dc02..d368f0f6ede 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,4 @@ -# vim: set ft=dockerfile: -FROM golang:1.23-alpine3.20 AS build +FROM docker.io/golang:1.23-alpine3.20 AS build ARG BUILD_VERSION @@ -31,7 +30,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ # In case we need to remove agents here.. 
# cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM alpine:latest AS slim +FROM docker.io/alpine:latest AS slim RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata bash rsync && \ mkdir -p /staging/etc/crowdsec && \ diff --git a/Dockerfile.debian b/Dockerfile.debian index 5d47f167e99..a9b58c633ed 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,4 @@ -# vim: set ft=dockerfile: -FROM golang:1.23-bookworm AS build +FROM docker.io/golang:1.23-bookworm AS build ARG BUILD_VERSION @@ -36,7 +35,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ # In case we need to remove agents here.. # cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM debian:bookworm-slim AS slim +FROM docker.io/debian:bookworm-slim AS slim ENV DEBIAN_FRONTEND=noninteractive ENV DEBCONF_NOWARNINGS="yes" diff --git a/Makefile b/Makefile index f8ae66e1cb6..93387488001 100644 --- a/Makefile +++ b/Makefile @@ -138,6 +138,7 @@ COMPONENTS := \ datasource_journalctl \ datasource_kinesis \ datasource_loki \ + datasource_victorialogs \ datasource_s3 \ datasource_syslog \ datasource_wineventlog \ diff --git a/README.md b/README.md index 1e57d4e91c4..dc6d3ee6806 100644 --- a/README.md +++ b/README.md @@ -8,83 +8,47 @@

- - - - -Go Reference - - - - - +

+_CrowdSec is an open-source and participative security solution offering crowdsourced server detection and protection against malicious IPs. Detect and block with our Security Engine, contribute to the network, and enjoy our real-time community blocklist._ +

-:computer: Console (WebApp) -:books: Documentation -:diamond_shape_with_a_dot_inside: Configuration Hub -:speech_balloon: Discourse (Forum) -:speech_balloon: Discord (Live Chat) +CrowdSec schema

+## Features & Advantages -:dancer: This is a community-driven project, we need your feedback. - -## +### Versatile Security Engine -CrowdSec is a free, modern & collaborative behavior detection engine, coupled with a global IP reputation network. It stacks on fail2ban's philosophy but is IPV6 compatible and 60x faster (Go vs Python), it uses Grok patterns to parse logs and YAML scenarios to identify behaviors. CrowdSec is engineered for modern Cloud / Containers / VM-based infrastructures (by decoupling detection and remediation). Once detected you can remedy threats with various bouncers (firewall block, nginx http 403, Captchas, etc.) while the aggressive IP can be sent to CrowdSec for curation before being shared among all users to further improve everyone's security. See [FAQ](https://doc.crowdsec.net/docs/faq) or read below for more. +[CrowdSec Security Engine](https://doc.crowdsec.net/docs/next/intro/) is an all-in-one [IDS/IPS](https://doc.crowdsec.net/docs/next/log_processor/intro) and [WAF](https://doc.crowdsec.net/docs/next/appsec/intro). -## 2 mins install +It detects bad behaviors by analyzing log sources and HTTP requests, and allows active remedation thanks to the [Remediation Components](https://doc.crowdsec.net/u/bouncers/intro). -Installing it through the [Package system](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) of your OS is the easiest way to proceed. -Otherwise, you can install it from source. +[Detection rules are available on our hub](https://hub.crowdsec.net) under MIT license. -### From package (Debian) +### CrowdSec Community Blocklist -```sh -curl -s https://packagecloud.io/install/repositories/crowdsec/crowdsec/script.deb.sh | sudo bash -sudo apt-get update -sudo apt-get install crowdsec -``` + -### From package (rhel/centos/amazon linux) +The "Community Blocklist" is a curated list of IP addresses identified as malicious by CrowdSec. 
The Security Engine proactively block the IP addresses of this blocklist, preventing malevolent IPs from reaching your systems. -```sh -curl -s https://packagecloud.io/install/repositories/crowdsec/crowdsec/script.rpm.sh | sudo bash -sudo yum install crowdsec -``` +[![CrowdSec Community Blocklist](https://doc.crowdsec.net/assets/images/data_insights-1e7678f47cb672122cc847d068b6eadf.png)](https://doc.crowdsec.net/docs/next/central_api/community_blocklist) -### From package (FreeBSD) - -``` -sudo pkg update -sudo pkg install crowdsec -``` + -### From source +### Console - Monitoring & Automation of your security stack -```sh -wget https://github.com/crowdsecurity/crowdsec/releases/latest/download/crowdsec-release.tgz -tar xzvf crowdsec-release.tgz -cd crowdsec-v* && sudo ./wizard.sh -i -``` +[![CrowdSec Console](https://doc.crowdsec.net/assets/images/visualizer-summary-c8087e2eaef65d110bad6a7f274cf953.png)](https://doc.crowdsec.net/u/console/intro) -## :information_source: About the CrowdSec project +### Multiple Platforms support -Crowdsec is an open-source, lightweight software, detecting peers with aggressive behaviors to prevent them from accessing your systems. Its user-friendly design and assistance offer a low technical barrier of entry and nevertheless a high security gain. +[![Multiple Platforms support](https://github.com/crowdsecurity/crowdsec-docs/blob/main/crowdsec-docs/static/img/supported_platforms.png)](https://doc.crowdsec.net/) -The architecture is as follows : - -

- CrowdSec -

- -Once an unwanted behavior is detected, deal with it through a [bouncer](https://app.crowdsec.net/hub/remediation-components). The aggressive IP, scenario triggered and timestamp are sent for curation, to avoid poisoning & false positives. (This can be disabled). If verified, this IP is then redistributed to all CrowdSec users running the same scenario. ## Outnumbering hackers all together @@ -92,72 +56,18 @@ By sharing the threat they faced, all users are protecting each-others (hence th CrowdSec ships by default with scenarios (brute force, port scan, web scan, etc.) adapted for most contexts, but you can easily extend it by picking more of them from the **[HUB](https://hub.crowdsec.net)**. It is also easy to adapt an existing one or create one yourself. -## :point_right: What it is not - -CrowdSec is not a SIEM, storing your logs (neither locally nor remotely). Your data are analyzed locally and forgotten. - -Signals sent to the curation platform are limited to the very strict minimum: IP, Scenario, Timestamp. They are only used to allow the system to spot new rogue IPs, and rule out false positives or poisoning attempts. - -## :arrow_down: Install it ! - -Crowdsec is available for various platforms : - - - [Use our debian repositories](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) or the [official debian packages](https://packages.debian.org/search?keywords=crowdsec&searchon=names&suite=stable§ion=all) - - An [image](https://hub.docker.com/r/crowdsecurity/crowdsec) is available for docker - - [Prebuilt release packages](https://github.com/crowdsecurity/crowdsec/releases) are also available (suitable for `amd64`) - - You can as well [build it from source](https://doc.crowdsec.net/docs/user_guides/building) - -Or look directly at [installation documentation](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) for other methods and platforms. - -## :tada: Key benefits - -### Fast assisted installation, no technical barrier - -
- Initial configuration is automated, providing functional out-of-the-box setup - -
- -### Out of the box detection - -
- Baseline detection is effective out-of-the-box, no fine-tuning required (click to expand) - -
- -### Easy bouncer deployment - -
- It's trivial to add bouncers to enforce decisions of crowdsec (click to expand) - -
- -### Easy dashboard access - -
- It's easy to deploy a metabase interface to view your data simply with cscli (click to expand) - -
- -### Hot & Cold logs - -
- Process cold logs, for forensic, tests and chasing false positives & false negatives (click to expand) - -
- - -## 📦 About this repository - -This repository contains the code for the two main components of crowdsec : - - `crowdsec` : the daemon a-la-fail2ban that can read, parse, enrich and apply heuristics to logs. This is the component in charge of "detecting" the attacks - - `cscli` : the cli tool mainly used to interact with crowdsec : ban/unban/view current bans, enable/disable parsers and scenarios. +## Installation + -## Contributing +[Follow our documentation to install CrowdSec in a few minutes on Linux, Windows, Docker, OpnSense, Kubernetes, and more.](https://doc.crowdsec.net/) -If you wish to contribute to the core of crowdsec, you are welcome to open a PR in this repository. -If you wish to add a new parser, scenario or collection, please open a PR in the [hub repository](https://github.com/crowdsecurity/hub). +## Resources -If you wish to contribute to the documentation, please open a PR in the [documentation repository](http://github.com/crowdsecurity/crowdsec-docs). + - [Console](https://app.crowdsec.net): Supercharge your CrowdSec setup with visualization, management capabilities, extra blocklists and premium features. + - [Documentation](https://doc.crowdsec.net): Learn how to exploit your CrowdSec setup to deter more attacks. + - [Discord](https://discord.gg/crowdsec): A question or a suggestion? This is the place. + - [Hub](https://hub.crowdsec.net): Improve your stack protection, find the relevant remediation components for your infrastructure. + - [CrowdSec Academy](https://academy.crowdsec.net/): Learn and grow with our courses. + - [Corporate Website](https://crowdsec.net): For everything else. 
diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 5907d4a0fa8..4ae72919a9e 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -78,7 +78,7 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), decisionsFromAlert(alertItem), - *alertItem.StartAt, + alertItem.CreatedAt, } if printMachine { row = append(row, alertItem.MachineID) diff --git a/cmd/crowdsec-cli/clialert/table.go b/cmd/crowdsec-cli/clialert/table.go index 1416e1e435c..4fe7c4b99c6 100644 --- a/cmd/crowdsec-cli/clialert/table.go +++ b/cmd/crowdsec-cli/clialert/table.go @@ -86,7 +86,7 @@ func alertDecisionsTable(out io.Writer, wantColor string, alert *models.Alert) { } if foundActive { - fmt.Printf(" - Active Decisions :\n") + t.Writer.SetTitle("Active Decisions") t.Render() // Send output } } diff --git a/cmd/crowdsec-cli/clibouncer/inspect.go b/cmd/crowdsec-cli/clibouncer/inspect.go index b62344baa9b..9f1d56124d8 100644 --- a/cmd/crowdsec-cli/clibouncer/inspect.go +++ b/cmd/crowdsec-cli/clibouncer/inspect.go @@ -47,7 +47,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprint(out, t.Render()) } func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { diff --git a/cmd/crowdsec-cli/clibouncer/list.go b/cmd/crowdsec-cli/clibouncer/list.go index a13ca994e1e..4ed22ce752f 100644 --- a/cmd/crowdsec-cli/clibouncer/list.go +++ b/cmd/crowdsec-cli/clibouncer/list.go @@ -37,7 +37,7 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { @@ -71,7 +71,6 @@ func 
(cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { func (cli *cliBouncers) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - bouncers, err := db.ListBouncers(ctx) if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 61d59836fdd..14637a26e1a 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -66,7 +66,12 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF return fmt.Errorf("unable to generate machine id: %w", err) } - password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) + pstr, err := idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } + + password := strfmt.Password(pstr) apiurl, err := url.Parse(types.CAPIBaseURL) if err != nil { @@ -118,7 +123,9 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF fmt.Println(string(apiConfigDump)) } - log.Warning(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Warning(msg) + } return nil } @@ -256,7 +263,7 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/cliconfig/backup.go b/cmd/crowdsec-cli/cliconfig/backup.go new file mode 100644 index 00000000000..5cd34fcf07f --- /dev/null +++ b/cmd/crowdsec-cli/cliconfig/backup.go @@ -0,0 +1,20 @@ +package cliconfig + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliConfig) newBackupCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "backup", + DisableAutoGenTag: true, + 
RunE: func(_ *cobra.Command, _ []string) error { + configDir := cli.cfg().ConfigPaths.ConfigDir + return fmt.Errorf("'cscli config backup' has been removed, you can manually backup/restore %s instead", configDir) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/cliconfig/config.go similarity index 58% rename from cmd/crowdsec-cli/config.go rename to cmd/crowdsec-cli/cliconfig/config.go index 4cf8916ad4b..22095ac7d5b 100644 --- a/cmd/crowdsec-cli/config.go +++ b/cmd/crowdsec-cli/cliconfig/config.go @@ -1,20 +1,26 @@ -package main +package cliconfig import ( "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) +type configGetter func() *csconfig.Config + +type mergedConfigGetter func() string + type cliConfig struct { cfg configGetter } -func NewCLIConfig(cfg configGetter) *cliConfig { +func New(cfg configGetter) *cliConfig { return &cliConfig{ cfg: cfg, } } -func (cli *cliConfig) NewCommand() *cobra.Command { +func (cli *cliConfig) NewCommand(mergedConfigGetter mergedConfigGetter) *cobra.Command { cmd := &cobra.Command{ Use: "config [command]", Short: "Allows to view current config", @@ -23,7 +29,7 @@ func (cli *cliConfig) NewCommand() *cobra.Command { } cmd.AddCommand(cli.newShowCmd()) - cmd.AddCommand(cli.newShowYAMLCmd()) + cmd.AddCommand(cli.newShowYAMLCmd(mergedConfigGetter)) cmd.AddCommand(cli.newBackupCmd()) cmd.AddCommand(cli.newRestoreCmd()) cmd.AddCommand(cli.newFeatureFlagsCmd()) diff --git a/cmd/crowdsec-cli/config_feature_flags.go b/cmd/crowdsec-cli/cliconfig/feature_flags.go similarity index 96% rename from cmd/crowdsec-cli/config_feature_flags.go rename to cmd/crowdsec-cli/cliconfig/feature_flags.go index 760e2194bb3..c03db10ccce 100644 --- a/cmd/crowdsec-cli/config_feature_flags.go +++ b/cmd/crowdsec-cli/cliconfig/feature_flags.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "fmt" @@ -86,7 +86,7 @@ func (cli *cliConfig) featureFlags(showRetired bool) error { fmt.Println("To 
enable a feature you can: ") fmt.Println(" - set the environment variable CROWDSEC_FEATURE_ to true") - featurePath, err := filepath.Abs(csconfig.GetFeatureFilePath(ConfigFilePath)) + featurePath, err := filepath.Abs(csconfig.GetFeatureFilePath(cli.cfg().FilePath)) if err != nil { // we already read the file, shouldn't happen return err diff --git a/cmd/crowdsec-cli/cliconfig/restore.go b/cmd/crowdsec-cli/cliconfig/restore.go new file mode 100644 index 00000000000..d368b27ea30 --- /dev/null +++ b/cmd/crowdsec-cli/cliconfig/restore.go @@ -0,0 +1,20 @@ +package cliconfig + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliConfig) newRestoreCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "restore", + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + configDir := cli.cfg().ConfigPaths.ConfigDir + return fmt.Errorf("'cscli config restore' has been removed, you can manually backup/restore %s instead", configDir) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/cliconfig/show.go similarity index 99% rename from cmd/crowdsec-cli/config_show.go rename to cmd/crowdsec-cli/cliconfig/show.go index 3d17d264574..90c0ab71069 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/cliconfig/show.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "encoding/json" diff --git a/cmd/crowdsec-cli/config_showyaml.go b/cmd/crowdsec-cli/cliconfig/showyaml.go similarity index 62% rename from cmd/crowdsec-cli/config_showyaml.go rename to cmd/crowdsec-cli/cliconfig/showyaml.go index 10549648d09..2e46a0171ab 100644 --- a/cmd/crowdsec-cli/config_showyaml.go +++ b/cmd/crowdsec-cli/cliconfig/showyaml.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "fmt" @@ -6,19 +6,19 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliConfig) showYAML() error { +func (cli *cliConfig) showYAML(mergedConfig string) error { fmt.Println(mergedConfig) return nil } -func (cli *cliConfig) 
newShowYAMLCmd() *cobra.Command { +func (cli *cliConfig) newShowYAMLCmd(mergedConfigGetter mergedConfigGetter) *cobra.Command { cmd := &cobra.Command{ Use: "show-yaml", Short: "Displays merged config.yaml + config.yaml.local", Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.showYAML() + return cli.showYAML(mergedConfigGetter()) }, } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 448ddcee7fa..fcc128bd5b5 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -114,7 +114,7 @@ func (cli *cliConsole) enroll(ctx context.Context, key string, name string, over } } - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } @@ -214,7 +214,9 @@ Enable given information push to the central API. Allows to empower the console` log.Infof("%v have been enabled", args) } - log.Info(reload.Message) + if reload.UserMessage() != "" { + log.Info(reload.UserMessage()) + } return nil }, @@ -248,7 +250,9 @@ Disable given information push to the central API.`, log.Infof("%v have been disabled", args) } - log.Info(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Info(msg) + } return nil }, diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index 307cabffe51..b5865bab6e0 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -170,7 +170,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { return cmd } -func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { +func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOpts, noSimu *bool, contained *bool, printMachine bool) error { var err error *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, 
*filter.IPEquals, *filter.RangeEquals) @@ -181,7 +181,7 @@ func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOp filter.ActiveDecisionEquals = new(bool) *filter.ActiveDecisionEquals = true - if NoSimu != nil && *NoSimu { + if noSimu != nil && *noSimu { filter.IncludeSimulated = new(bool) } /* nullify the empty entries to avoid bad filter */ diff --git a/cmd/crowdsec-cli/clidecision/table.go b/cmd/crowdsec-cli/clidecision/table.go index 189eb80b8e5..4beda572d8e 100644 --- a/cmd/crowdsec-cli/clidecision/table.go +++ b/cmd/crowdsec-cli/clidecision/table.go @@ -3,13 +3,17 @@ package clidecision import ( "io" "strconv" + "strings" + + "github.com/fatih/color" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/models" ) func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { - t := cstable.New(out, cli.cfg().Cscli.Color) + wantColor := cli.cfg().Cscli.Color + t := cstable.New(out, wantColor) t.SetRowLines(false) header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"} @@ -25,6 +29,11 @@ func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsR *decisionItem.Type = "(simul)" + *decisionItem.Type } + duration := *decisionItem.Duration + if strings.HasPrefix(duration, "-") && wantColor != "no" { + duration = color.RedString(duration) + } + row := []string{ strconv.Itoa(int(decisionItem.ID)), *decisionItem.Origin, @@ -34,7 +43,7 @@ func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsR alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), strconv.Itoa(int(*alertItem.EventsCount)), - *decisionItem.Duration, + duration, strconv.Itoa(int(alertItem.ID)), } diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index f189d6a2e13..66fbe7c405a 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ 
b/cmd/crowdsec-cli/clihub/hub.go @@ -5,15 +5,18 @@ import ( "encoding/json" "fmt" "io" + "os" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" ) type configGetter = func() *csconfig.Config @@ -55,11 +58,11 @@ func (cli *cliHub) List(out io.Writer, hub *cwhub.Hub, all bool) error { cfg := cli.cfg() for _, v := range hub.Warnings { - log.Info(v) + fmt.Fprintln(os.Stderr, v) } for _, line := range hub.ItemStats() { - log.Info(line) + fmt.Fprintln(os.Stderr, line) } items := make(map[string][]*cwhub.Item) @@ -90,7 +93,7 @@ func (cli *cliHub) newListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), log.StandardLogger()) if err != nil { return err } @@ -100,23 +103,22 @@ func (cli *cliHub) newListCmd() *cobra.Command { } flags := cmd.Flags() - flags.BoolVarP(&all, "all", "a", false, "List disabled items as well") + flags.BoolVarP(&all, "all", "a", false, "List all available items, including those not installed") return cmd } func (cli *cliHub) update(ctx context.Context, withContent bool) error { local := cli.cfg().Hub - remote := require.RemoteHub(ctx, cli.cfg()) - remote.EmbedItemContent = withContent - // don't use require.Hub because if there is no index file, it would fail - hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) + hub, err := cwhub.NewHub(local, log.StandardLogger()) if err != nil { return err } - if err := hub.Update(ctx); err != nil { + indexProvider := require.HubDownloader(ctx, cli.cfg()) + + if err := hub.Update(ctx, indexProvider, withContent); err != nil { 
return fmt.Errorf("failed to update hub: %w", err) } @@ -125,7 +127,7 @@ func (cli *cliHub) update(ctx context.Context, withContent bool) error { } for _, v := range hub.Warnings { - log.Info(v) + fmt.Fprintln(os.Stderr, v) } return nil @@ -140,10 +142,18 @@ func (cli *cliHub) newUpdateCmd() *cobra.Command { Long: ` Fetches the .index.json file from the hub, containing the list of available configs. `, + Example: `# Download the last version of the index file. +cscli hub update + +# Download a 4x bigger version with all item contents (effectively pre-caching item downloads, but not data files). +cscli hub update --with-content`, Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.update(cmd.Context(), withContent) + if cmd.Flags().Changed("with-content") { + return cli.update(cmd.Context(), withContent) + } + return cli.update(cmd.Context(), cli.cfg().Cscli.HubWithContent) }, } @@ -153,36 +163,49 @@ Fetches the .index.json file from the hub, containing the list of available conf return cmd } -func (cli *cliHub) upgrade(ctx context.Context, force bool) error { - hub, err := require.Hub(cli.cfg(), require.RemoteHub(ctx, cli.cfg()), log.StandardLogger()) +func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, log.StandardLogger()) if err != nil { return err } - for _, itemType := range cwhub.ItemTypes { - updated := 0 + plan := hubops.NewActionPlan(hub) - log.Infof("Upgrading %s", itemType) + contentProvider := require.HubDownloader(ctx, cfg) + for _, itemType := range cwhub.ItemTypes { for _, item := range hub.GetInstalledByType(itemType, true) { - didUpdate, err := item.Upgrade(ctx, force) - if err != nil { + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { return err } - - if didUpdate { - updated++ - } } + } + + if err := plan.AddCommand(hubops.NewDataRefreshCommand(force)); 
err != nil { + return err + } - log.Infof("Upgraded %d %s", updated, itemType) + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) } return nil } func (cli *cliHub) newUpgradeCmd() *cobra.Command { - var force bool + var ( + yes bool + dryRun bool + force bool + ) cmd := &cobra.Command{ Use: "upgrade", @@ -190,15 +213,23 @@ func (cli *cliHub) newUpgradeCmd() *cobra.Command { Long: ` Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if you want the latest versions available. `, + Example: `# Upgrade all the collections, scenarios etc. to the latest version in the downloaded index. Update data files too. +cscli hub upgrade + +# Upgrade tainted items as well; force re-download of data files. +cscli hub upgrade --force`, Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.upgrade(cmd.Context(), force) + return cli.upgrade(cmd.Context(), yes, dryRun, force) }, } flags := cmd.Flags() - flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") + flags.BoolVar(&yes, "yes", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated items; always update data files") + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index f86fe65a2a1..87cb10b1f93 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -5,13 +5,9 @@ import ( "encoding/json" "fmt" "io" - "os" - "path/filepath" "slices" "strings" - "gopkg.in/yaml.v3" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -67,7 +63,7 @@ func 
ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st continue } - listHubItemTable(out, wantColor, "\n"+strings.ToUpper(itemType), items[itemType]) + listHubItemTable(out, wantColor, strings.ToUpper(itemType), items[itemType]) nothingToDisplay = false } @@ -97,7 +93,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st Name: item.Name, LocalVersion: item.State.LocalVersion, LocalPath: item.State.LocalPath, - Description: item.Description, + Description: strings.TrimSpace(item.Description), Status: status, UTF8Status: fmt.Sprintf("%v %s", statusEmo, status), } @@ -109,7 +105,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st return fmt.Errorf("failed to parse: %w", err) } - out.Write(x) + fmt.Fprint(out, string(x)) case "raw": csvwriter := csv.NewWriter(out) @@ -128,7 +124,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st item.Name, item.State.Text(), item.State.LocalVersion, - item.Description, + strings.TrimSpace(item.Description), } if len(itemTypes) > 1 { row = append(row, itemType) @@ -145,42 +141,3 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st return nil } - -func InspectItem(item *cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { - switch output { - case "human", "raw": - enc := yaml.NewEncoder(os.Stdout) - enc.SetIndent(2) - - if err := enc.Encode(item); err != nil { - return fmt.Errorf("unable to encode item: %w", err) - } - case "json": - b, err := json.MarshalIndent(*item, "", " ") - if err != nil { - return fmt.Errorf("unable to serialize item: %w", err) - } - - fmt.Print(string(b)) - } - - if output != "human" { - return nil - } - - if item.State.Tainted { - fmt.Println() - fmt.Printf(`This item is tainted. 
Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) - fmt.Println() - } - - if wantMetrics { - fmt.Printf("\nCurrent metrics: \n") - - if err := showMetrics(prometheusURL, item, wantColor); err != nil { - return err - } - } - - return nil -} diff --git a/cmd/crowdsec-cli/clihub/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go index 98f14341b10..b89f8447896 100644 --- a/cmd/crowdsec-cli/clihub/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -3,7 +3,6 @@ package clihub import ( "fmt" "io" - "strconv" "github.com/jedib0t/go-pretty/v6/table" @@ -21,65 +20,6 @@ func listHubItemTable(out io.Writer, wantColor string, title string, items []*cw t.AppendRow(table.Row{item.Name, status, item.State.LocalVersion, item.State.LocalPath}) } - io.WriteString(out, title+"\n") - io.WriteString(out, t.Render()+"\n") -} - -func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { - t := cstable.NewLight(out, wantColor).Writer - t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) - - t.AppendRow(table.Row{ - strconv.Itoa(metrics["inband_hits"]), - strconv.Itoa(metrics["outband_hits"]), - }) - - io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") -} - -func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { - if metrics["instantiation"] == 0 { - return - } - - t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) - - t.AppendRow(table.Row{ - strconv.Itoa(metrics["curr_count"]), - strconv.Itoa(metrics["overflow"]), - strconv.Itoa(metrics["instantiation"]), - strconv.Itoa(metrics["pour"]), - strconv.Itoa(metrics["underflow"]), - }) - - io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") -} - -func parserMetricsTable(out io.Writer, wantColor 
string, itemName string, metrics map[string]map[string]int) { - t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) - - // don't show table if no hits - showTable := false - - for source, stats := range metrics { - if stats["hits"] > 0 { - t.AppendRow(table.Row{ - source, - strconv.Itoa(stats["hits"]), - strconv.Itoa(stats["parsed"]), - strconv.Itoa(stats["unparsed"]), - }) - - showTable = true - } - } - - if showTable { - io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") - } + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index dbe10fa7ec0..877aec98a37 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -14,9 +14,12 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error return fmt.Errorf("can't load test: %+v", err) } + cfg := cli.cfg() + patternDir := cfg.ConfigPaths.PatternDir + err = test.ParserAssert.LoadTest(test.ParserResultFile) if err != nil { - if err = test.Run(); err != nil { + if err = test.Run(patternDir); err != nil { return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) } @@ -27,7 +30,7 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) if err != nil { - if err = test.Run(); err != nil { + if err = test.Run(patternDir); err != nil { return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) } diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 31cceb81884..94a3b0c10f3 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -42,12 +42,14 @@ func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost stri // set timezone to avoid DST issues 
os.Setenv("TZ", "UTC") + patternDir := cfg.ConfigPaths.PatternDir + for _, test := range hubPtr.Tests { if cfg.Cscli.Output == "human" { log.Infof("Running test '%s'", test.Name) } - err := test.Run() + err := test.Run(patternDir) if err != nil { log.Errorf("running test '%s' failed: %+v", test.Name, err) } diff --git a/cmd/crowdsec-cli/cliitem/appsec.go b/cmd/crowdsec-cli/cliitem/appsec.go deleted file mode 100644 index 44afa2133bd..00000000000 --- a/cmd/crowdsec-cli/cliitem/appsec.go +++ /dev/null @@ -1,123 +0,0 @@ -package cliitem - -import ( - "fmt" - "os" - - "golang.org/x/text/cases" - "golang.org/x/text/language" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewAppsecConfig(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.APPSEC_CONFIGS, - singular: "appsec-config", - oneOrMore: "appsec-config(s)", - help: cliHelp{ - example: `cscli appsec-configs list -a -cscli appsec-configs install crowdsecurity/vpatch -cscli appsec-configs inspect crowdsecurity/vpatch -cscli appsec-configs upgrade crowdsecurity/vpatch -cscli appsec-configs remove crowdsecurity/vpatch -`, - }, - installHelp: cliHelp{ - example: `cscli appsec-configs install crowdsecurity/vpatch`, - }, - removeHelp: cliHelp{ - example: `cscli appsec-configs remove crowdsecurity/vpatch`, - }, - upgradeHelp: cliHelp{ - example: `cscli appsec-configs upgrade crowdsecurity/vpatch`, - }, - inspectHelp: cliHelp{ - example: `cscli appsec-configs inspect crowdsecurity/vpatch`, - }, - listHelp: cliHelp{ - example: `cscli appsec-configs list -cscli appsec-configs list -a -cscli appsec-configs list crowdsecurity/vpatch`, - }, - } -} - -func NewAppsecRule(cfg configGetter) *cliItem { - inspectDetail := func(item *cwhub.Item) error { - // Only show the converted rules in human mode - if cfg().Cscli.Output != "human" { - return nil - } - - appsecRule 
:= appsec.AppsecCollectionConfig{} - - yamlContent, err := os.ReadFile(item.State.LocalPath) - if err != nil { - return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err) - } - - if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { - return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) - } - - for _, ruleType := range appsec_rule.SupportedTypes() { - fmt.Printf("\n%s format:\n", cases.Title(language.Und, cases.NoLower).String(ruleType)) - - for _, rule := range appsecRule.Rules { - convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) - if err != nil { - return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) - } - - fmt.Println(convertedRule) - } - - switch ruleType { //nolint:gocritic - case appsec_rule.ModsecurityRuleType: - for _, rule := range appsecRule.SecLangRules { - fmt.Println(rule) - } - } - } - - return nil - } - - return &cliItem{ - cfg: cfg, - name: "appsec-rules", - singular: "appsec-rule", - oneOrMore: "appsec-rule(s)", - help: cliHelp{ - example: `cscli appsec-rules list -a -cscli appsec-rules install crowdsecurity/crs -cscli appsec-rules inspect crowdsecurity/crs -cscli appsec-rules upgrade crowdsecurity/crs -cscli appsec-rules remove crowdsecurity/crs -`, - }, - installHelp: cliHelp{ - example: `cscli appsec-rules install crowdsecurity/crs`, - }, - removeHelp: cliHelp{ - example: `cscli appsec-rules remove crowdsecurity/crs`, - }, - upgradeHelp: cliHelp{ - example: `cscli appsec-rules upgrade crowdsecurity/crs`, - }, - inspectHelp: cliHelp{ - example: `cscli appsec-rules inspect crowdsecurity/crs`, - }, - inspectDetail: inspectDetail, - listHelp: cliHelp{ - example: `cscli appsec-rules list -cscli appsec-rules list -a -cscli appsec-rules list crowdsecurity/crs`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/cmdinspect.go b/cmd/crowdsec-cli/cliitem/cmdinspect.go new file mode 100644 index 00000000000..b5ee0816d72 --- /dev/null +++ 
b/cmd/crowdsec-cli/cliitem/cmdinspect.go @@ -0,0 +1,236 @@ +package cliitem + +import ( + "cmp" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { + cfg := cli.cfg() + + if rev && !diff { + return errors.New("--rev can only be used with --diff") + } + + if url != "" { + cfg.Cscli.PrometheusUrl = url + } + + var contentProvider cwhub.ContentProvider + + if diff { + contentProvider = require.HubDownloader(ctx, cfg) + } + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + for _, name := range args { + item := hub.GetItem(cli.name, name) + if item == nil { + return fmt.Errorf("can't find '%s' in %s", name, cli.name) + } + + if diff { + fmt.Println(cli.whyTainted(ctx, hub, contentProvider, item, rev)) + + continue + } + + if err = inspectItem(hub, item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { + return err + } + + if cli.inspectDetail != nil { + if err = cli.inspectDetail(item); err != nil { + return err + } + } + } + + return nil +} + +// return the diff between the installed version and the latest version +func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { + if !item.State.Installed { + return "", fmt.Errorf("'%s' is not installed", item.FQName()) + } + + dest, err := os.CreateTemp("", "cscli-diff-*") + if err != nil { + return "", fmt.Errorf("while creating temporary file: %w", err) + } + defer os.Remove(dest.Name()) + + _, remoteURL, 
err := item.FetchContentTo(ctx, contentProvider, dest.Name()) + if err != nil { + return "", err + } + + latestContent, err := os.ReadFile(dest.Name()) + if err != nil { + return "", fmt.Errorf("while reading %s: %w", dest.Name(), err) + } + + localContent, err := os.ReadFile(item.State.LocalPath) + if err != nil { + return "", fmt.Errorf("while reading %s: %w", item.State.LocalPath, err) + } + + file1 := item.State.LocalPath + file2 := remoteURL + content1 := string(localContent) + content2 := string(latestContent) + + if reverse { + file1, file2 = file2, file1 + content1, content2 = content2, content1 + } + + edits := myers.ComputeEdits(span.URIFromPath(file1), content1, content2) + diff := gotextdiff.ToUnified(file1, file2, content1, edits) + + return fmt.Sprintf("%s", diff), nil +} + +func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, item *cwhub.Item, reverse bool) string { + if !item.State.Installed { + return fmt.Sprintf("# %s is not installed", item.FQName()) + } + + if !item.State.Tainted { + return fmt.Sprintf("# %s is not tainted", item.FQName()) + } + + if len(item.State.TaintedBy) == 0 { + return fmt.Sprintf("# %s is tainted but we don't know why. 
please report this as a bug", item.FQName()) + } + + ret := []string{ + fmt.Sprintf("# Let's see why %s is tainted.", item.FQName()), + } + + for _, fqsub := range item.State.TaintedBy { + ret = append(ret, fmt.Sprintf("\n-> %s\n", fqsub)) + + sub, err := hub.GetItemFQ(fqsub) + if err != nil { + ret = append(ret, err.Error()) + } + + diff, err := cli.itemDiff(ctx, sub, contentProvider, reverse) + if err != nil { + ret = append(ret, err.Error()) + } + + if diff != "" { + ret = append(ret, diff) + } else if len(sub.State.TaintedBy) > 0 { + taintList := strings.Join(sub.State.TaintedBy, ", ") + if sub.FQName() == taintList { + // hack: avoid message "item is tainted by itself" + continue + } + + ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList)) + } + } + + return strings.Join(ret, "\n") +} + +func (cli cliItem) newInspectCmd() *cobra.Command { + var ( + url string + diff bool + rev bool + noMetrics bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.inspectHelp.use, "inspect [item]..."), + Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), + Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), + Example: cli.inspectHelp.example, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.inspect(cmd.Context(), args, url, diff, rev, noMetrics) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Prometheus url") + flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)") + flags.BoolVar(&rev, "rev", false, "Reverse diff output") + flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)") + + return cmd +} + +func inspectItem(hub *cwhub.Hub, item 
*cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { + // This is dirty... + // We want to show current dependencies (from content), not latest (from index). + // The item is modifed but after this function the whole hub should be thrown away. + // A cleaner way would be to copy the struct first. + item.Dependencies = item.CurrentDependencies() + + switch output { + case "human", "raw": + enc := yaml.NewEncoder(os.Stdout) + enc.SetIndent(2) + + if err := enc.Encode(item); err != nil { + return fmt.Errorf("unable to encode item: %w", err) + } + case "json": + b, err := json.MarshalIndent(*item, "", " ") + if err != nil { + return fmt.Errorf("unable to serialize item: %w", err) + } + + fmt.Print(string(b)) + } + + if output != "human" { + return nil + } + + if item.State.Tainted { + fmt.Println() + fmt.Printf(`This item is tainted. Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) + fmt.Println() + } + + if wantMetrics { + fmt.Printf("\nCurrent metrics: \n") + + if err := showMetrics(prometheusURL, hub, item, wantColor); err != nil { + return err + } + } + + return nil +} diff --git a/cmd/crowdsec-cli/cliitem/cmdinstall.go b/cmd/crowdsec-cli/cliitem/cmdinstall.go new file mode 100644 index 00000000000..74ffbe727f4 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdinstall.go @@ -0,0 +1,150 @@ +package cliitem + +import ( + "cmp" + "context" + "errors" + "fmt" + "slices" + "strings" + + "github.com/agext/levenshtein" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +// suggestNearestMessage returns a message with the most similar item name, if one is found +func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) string { + const 
maxDistance = 7 + + score := 100 + nearest := "" + + for _, item := range hub.GetItemsByType(itemType, false) { + d := levenshtein.Distance(itemName, item.Name, nil) + if d < score { + score = d + nearest = item.Name + } + } + + msg := fmt.Sprintf("can't find '%s' in %s", itemName, itemType) + + if score < maxDistance { + msg += fmt.Sprintf(", did you mean '%s'?", nearest) + } + + return msg +} + +func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + plan := hubops.NewActionPlan(hub) + + contentProvider := require.HubDownloader(ctx, cfg) + + for _, name := range args { + item := hub.GetItem(cli.name, name) + if item == nil { + msg := suggestNearestMessage(hub, cli.name, name) + if !ignoreError { + return errors.New(msg) + } + + log.Error(msg) + + continue + } + + if err = plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return err + } + + if !downloadOnly { + if err = plan.AddCommand(hubops.NewEnableCommand(item, force)); err != nil { + return err + } + } + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + if !ignoreError { + return err + } + + log.Error(err) + } + + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) + } + + return nil +} + +func compAllItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil) + if err != nil { + return nil, cobra.ShellCompDirectiveDefault + } + + comp := make([]string, 0) + + for _, item := range hub.GetItemsByType(itemType, false) { + if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) + } + } + + cobra.CompDebugln(fmt.Sprintf("%s: 
%+v", itemType, comp), true) + + return comp, cobra.ShellCompDirectiveNoFileComp +} + +func (cli cliItem) newInstallCmd() *cobra.Command { + var ( + yes bool + dryRun bool + downloadOnly bool + force bool + ignoreError bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.installHelp.use, "install [item]..."), + Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), + Long: cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), + Example: cli.installHelp.example, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compAllItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(cmd.Context(), args, yes, dryRun, downloadOnly, force, ignoreError) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") + flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/cmdremove.go b/cmd/crowdsec-cli/cliitem/cmdremove.go new file mode 100644 index 00000000000..c8ea041acbf --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdremove.go @@ -0,0 +1,151 @@ +package cliitem + +import ( + "cmp" + "context" + "errors" + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + 
"github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force bool, all bool) (*hubops.ActionPlan, error) { + plan := hubops.NewActionPlan(hub) + + if all { + itemGetter := hub.GetInstalledByType + if purge { + itemGetter = hub.GetItemsByType + } + + for _, item := range itemGetter(cli.name, true) { + if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { + return nil, err + } + + if purge { + if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { + return nil, err + } + } + } + + return plan, nil + } + + if len(args) == 0 { + return nil, fmt.Errorf("specify at least one %s to remove or '--all'", cli.singular) + } + + for _, itemName := range args { + item := hub.GetItem(cli.name, itemName) + if item == nil { + return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) + } + + parents := installedParentNames(item) + + if !force && len(parents) > 0 { + log.Warningf("%s belongs to collections: %s", item.Name, parents) + log.Warningf("Run 'sudo cscli %s remove %s --force' if you want to force remove this %s", item.Type, item.Name, cli.singular) + + continue + } + + if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { + return nil, err + } + + if purge { + if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { + return nil, err + } + } + } + + return plan, nil +} + +// return the names of the installed parents of an item, used to check if we can remove it +func installedParentNames(item *cwhub.Item) []string { + ret := make([]string, 0) + + for _, parent := range item.Ancestors() { + if parent.State.Installed { + ret = append(ret, parent.Name) + } + } + + return ret +} + +func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun bool, purge bool, force bool, all bool) error { + cfg := cli.cfg() + + hub, err := 
require.Hub(cli.cfg(), log.StandardLogger()) + if err != nil { + return err + } + + plan, err := cli.removePlan(hub, args, purge, force, all) + if err != nil { + return err + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) + } + + return nil +} + +func (cli cliItem) newRemoveCmd() *cobra.Command { + var ( + yes bool + dryRun bool + purge bool + force bool + all bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.removeHelp.use, "remove [item]..."), + Short: cmp.Or(cli.removeHelp.short, "Remove given "+cli.oneOrMore), + Long: cmp.Or(cli.removeHelp.long, "Remove one or more "+cli.name), + Example: cli.removeHelp.example, + Aliases: []string{"delete"}, + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 && all { + return errors.New("can't specify items and '--all' at the same time") + } + + return cli.remove(cmd.Context(), args, yes, dryRun, purge, force, all) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVar(&purge, "purge", false, "Delete source file too") + flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") + flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/cmdupgrade.go b/cmd/crowdsec-cli/cliitem/cmdupgrade.go new file mode 100644 index 00000000000..5320bc04bc6 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdupgrade.go @@ 
-0,0 +1,106 @@ +package cliitem + +import ( + "cmp" + "context" + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +func (cli cliItem) upgradePlan(hub *cwhub.Hub, contentProvider cwhub.ContentProvider, args []string, force bool, all bool) (*hubops.ActionPlan, error) { + plan := hubops.NewActionPlan(hub) + + if all { + for _, item := range hub.GetInstalledByType(cli.name, true) { + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return nil, err + } + } + + return plan, nil + } + + if len(args) == 0 { + return nil, fmt.Errorf("specify at least one %s to upgrade or '--all'", cli.singular) + } + + for _, itemName := range args { + item := hub.GetItem(cli.name, itemName) + if item == nil { + return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) + } + + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return nil, err + } + } + + return plan, nil +} + +func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun bool, force bool, all bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + contentProvider := require.HubDownloader(ctx, cfg) + + plan, err := cli.upgradePlan(hub, contentProvider, args, force, all) + if err != nil { + return err + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) + } + + return nil +} + +func (cli cliItem) newUpgradeCmd() *cobra.Command { + var ( + yes bool + dryRun bool + all bool + force bool + ) + + cmd := &cobra.Command{ 
+ Use: cmp.Or(cli.upgradeHelp.use, "upgrade [item]..."), + Short: cmp.Or(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), + Long: cmp.Or(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), + Example: cli.upgradeHelp.example, + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.upgrade(cmd.Context(), args, yes, dryRun, force, all) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/collection.go b/cmd/crowdsec-cli/cliitem/collection.go deleted file mode 100644 index ea91c1e537a..00000000000 --- a/cmd/crowdsec-cli/cliitem/collection.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewCollection(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.COLLECTIONS, - singular: "collection", - oneOrMore: "collection(s)", - help: cliHelp{ - example: `cscli collections list -a -cscli collections install crowdsecurity/http-cve crowdsecurity/iptables -cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables -cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables -cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables -`, - }, - installHelp: cliHelp{ - example: `cscli collections install crowdsecurity/http-cve crowdsecurity/iptables`, 
- }, - removeHelp: cliHelp{ - example: `cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables`, - }, - upgradeHelp: cliHelp{ - example: `cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables`, - }, - inspectHelp: cliHelp{ - example: `cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables`, - }, - listHelp: cliHelp{ - example: `cscli collections list -cscli collections list -a -cscli collections list crowdsecurity/http-cve crowdsecurity/iptables - -List only enabled collections unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/context.go b/cmd/crowdsec-cli/cliitem/context.go deleted file mode 100644 index 7d110b8203d..00000000000 --- a/cmd/crowdsec-cli/cliitem/context.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewContext(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.CONTEXTS, - singular: "context", - oneOrMore: "context(s)", - help: cliHelp{ - example: `cscli contexts list -a -cscli contexts install crowdsecurity/yyy crowdsecurity/zzz -cscli contexts inspect crowdsecurity/yyy crowdsecurity/zzz -cscli contexts upgrade crowdsecurity/yyy crowdsecurity/zzz -cscli contexts remove crowdsecurity/yyy crowdsecurity/zzz -`, - }, - installHelp: cliHelp{ - example: `cscli contexts install crowdsecurity/yyy crowdsecurity/zzz`, - }, - removeHelp: cliHelp{ - example: `cscli contexts remove crowdsecurity/yyy crowdsecurity/zzz`, - }, - upgradeHelp: cliHelp{ - example: `cscli contexts upgrade crowdsecurity/yyy crowdsecurity/zzz`, - }, - inspectHelp: cliHelp{ - example: `cscli contexts inspect crowdsecurity/yyy crowdsecurity/zzz`, - }, - listHelp: cliHelp{ - example: `cscli contexts list -cscli contexts list -a -cscli contexts list crowdsecurity/yyy crowdsecurity/zzz - -List only enabled contexts unless "-a" or names are specified.`, - }, - } -} diff --git 
a/cmd/crowdsec-cli/cliitem/hubappsec.go b/cmd/crowdsec-cli/cliitem/hubappsec.go new file mode 100644 index 00000000000..7f9143d35b8 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubappsec.go @@ -0,0 +1,255 @@ +package cliitem + +import ( + "fmt" + "os" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewAppsecConfig(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.APPSEC_CONFIGS, + singular: "appsec-config", + oneOrMore: "appsec-config(s)", + help: cliHelp{ + example: `cscli appsec-configs list -a +cscli appsec-configs install crowdsecurity/virtual-patching +cscli appsec-configs inspect crowdsecurity/virtual-patching +cscli appsec-configs upgrade crowdsecurity/virtual-patching +cscli appsec-configs remove crowdsecurity/virtual-patching +`, + }, + installHelp: cliHelp{ + example: `# Install some appsec-configs. +cscli appsec-configs install crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs install crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs install crowdsecurity/virtual-patching --dry-run -o raw + +# Download only, to be installed later. +cscli appsec-configs install crowdsecurity/virtual-patching --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-configs install crowdsecurity/virtual-patching --force + +# Proceed without prompting. 
+cscli appsec-configs install crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some appsec-configs. +cscli appsec-configs remove crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs remove crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs remove crowdsecurity/virtual-patching --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli appsec-configs remove crowdsecurity/virtual-patching --purge + +# Remove tainted items. +cscli appsec-configs remove crowdsecurity/virtual-patching --force + +# Proceed without prompting. +cscli appsec-configs remove crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some appsec-configs. If they are not currently installed, they are downloaded but not installed. +cscli appsec-configs upgrade crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --force + +# Proceed without prompting. 
+cscli appsec-configs upgrade crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and ancestor collections of appsec-configs (installed or not). +cscli appsec-configs inspect crowdsecurity/virtual-patching + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli appsec-configs inspect crowdsecurity/virtual-patching --no-metrics + +# Display difference between a tainted item and the latest one. +cscli appsec-configs inspect crowdsecurity/virtual-patching --diff + +# Reverse the above diff +cscli appsec-configs inspect crowdsecurity/virtual-patching --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) appsec-configs. +cscli appsec-configs list + +# List all available appsec-configs (installed or not). +cscli appsec-configs list -a + +# List specific appsec-configs (installed or not). 
+cscli appsec-configs list crowdsecurity/virtual-patching crowdsecurity/generic-rules`, + }, + } +} + +func NewAppsecRule(cfg configGetter) *cliItem { + inspectDetail := func(item *cwhub.Item) error { + // Only show the converted rules in human mode + if cfg().Cscli.Output != "human" { + return nil + } + + appsecRule := appsec.AppsecCollectionConfig{} + + yamlContent, err := os.ReadFile(item.State.LocalPath) + if err != nil { + return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err) + } + + if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { + return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) + } + + for _, ruleType := range appsec_rule.SupportedTypes() { + fmt.Printf("\n%s format:\n", cases.Title(language.Und, cases.NoLower).String(ruleType)) + + for _, rule := range appsecRule.Rules { + convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) + if err != nil { + return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) + } + + fmt.Println(convertedRule) + } + + switch ruleType { //nolint:gocritic + case appsec_rule.ModsecurityRuleType: + for _, rule := range appsecRule.SecLangRules { + fmt.Println(rule) + } + } + } + + return nil + } + + return &cliItem{ + cfg: cfg, + name: "appsec-rules", + singular: "appsec-rule", + oneOrMore: "appsec-rule(s)", + help: cliHelp{ + example: `cscli appsec-rules list -a +cscli appsec-rules install crowdsecurity/crs +cscli appsec-rules inspect crowdsecurity/crs +cscli appsec-rules upgrade crowdsecurity/crs +cscli appsec-rules remove crowdsecurity/crs +`, + }, + installHelp: cliHelp{ + example: `# Install some appsec-rules. +cscli appsec-rules install crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules install crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. 
+cscli appsec-rules install crowdsecurity/crs --dry-run -o raw + +# Download only, to be installed later. +cscli appsec-rules install crowdsecurity/crs --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-rules install crowdsecurity/crs --force + +# Proceed without prompting. +cscli appsec-rules install crowdsecurity/crs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some appsec-rules. +cscli appsec-rules remove crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules remove crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-rules remove crowdsecurity/crs --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli appsec-rules remove crowdsecurity/crs --purge + +# Remove tainted items. +cscli appsec-rules remove crowdsecurity/crs --force + +# Proceed without prompting. +cscli appsec-rules remove crowdsecurity/crs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some appsec-rules. If they are not currently installed, they are downloaded but not installed. +cscli appsec-rules upgrade crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules upgrade crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-rules upgrade crowdsecurity/crs --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. 
+cscli appsec-rules upgrade crowdsecurity/crs --force
+
+# Proceed without prompting.
+cscli appsec-rules upgrade crowdsecurity/crs --yes
+
+# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`,
+		},
+		inspectHelp: cliHelp{
+			example: `# Display metadata, state, metrics and ancestor collections of appsec-rules (installed or not).
+cscli appsec-rules inspect crowdsecurity/crs
+
+# Don't collect metrics (avoid error if crowdsec is not running).
+cscli appsec-rules inspect crowdsecurity/crs --no-metrics
+
+# Display difference between a tainted item and the latest one.
+cscli appsec-rules inspect crowdsecurity/crs --diff
+
+# Reverse the above diff
+cscli appsec-rules inspect crowdsecurity/crs --diff --rev`,
+		},
+		inspectDetail: inspectDetail,
+		listHelp: cliHelp{
+			example: `# List enabled (installed) appsec-rules.
+cscli appsec-rules list
+
+# List all available appsec-rules (installed or not).
+cscli appsec-rules list -a
+
+# List specific appsec-rules (installed or not).
+cscli appsec-rules list crowdsecurity/crs crowdsecurity/vpatch-git-config`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubcollection.go b/cmd/crowdsec-cli/cliitem/hubcollection.go new file mode 100644 index 00000000000..b45f956e0ac --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubcollection.go @@ -0,0 +1,105 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewCollection(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.COLLECTIONS, + singular: "collection", + oneOrMore: "collection(s)", + help: cliHelp{ + example: `cscli collections list -a +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables +`, + }, + installHelp: cliHelp{ + example: `# Install some collections. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Download only, to be installed later. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. 
+cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some collections. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --purge + +# Remove tainted items. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some collections. If they are not currently installed, they are downloaded but not installed. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. 
+cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and dependencies of collections (installed or not). +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables --no-metrics + +# Display difference between a tainted item and the latest one, or the reason for the taint if it's a dependency. +cscli collections inspect crowdsecurity/http-cve --diff + +# Reverse the above diff +cscli collections inspect crowdsecurity/http-cve --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) collections. +cscli collections list + +# List all available collections (installed or not). +cscli collections list -a + +# List specific collections (installed or not). +cscli collections list crowdsecurity/http-cve crowdsecurity/iptables`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubcontext.go b/cmd/crowdsec-cli/cliitem/hubcontext.go new file mode 100644 index 00000000000..3a94687843d --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubcontext.go @@ -0,0 +1,102 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewContext(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.CONTEXTS, + singular: "context", + oneOrMore: "context(s)", + help: cliHelp{ + example: `cscli contexts list -a +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts inspect crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet +`, + }, + installHelp: cliHelp{ + example: `# Install some contexts. 
+cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Download only, to be installed later. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some contexts. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --purge + +# Remove tainted items. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some contexts. 
If they are not currently installed, they are downloaded but not installed. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state and ancestor collections of contexts (installed or not). +cscli contexts inspect crowdsecurity/bf_base crowdsecurity/fortinet + +# Display difference between a tainted item and the latest one. +cscli contexts inspect crowdsecurity/bf_base --diff + +# Reverse the above diff +cscli contexts inspect crowdsecurity/bf_base --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) contexts. +cscli contexts list + +# List all available contexts (installed or not). +cscli contexts list -a + +# List specific contexts (installed or not). 
+cscli contexts list crowdsecurity/bf_base crowdsecurity/fortinet`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubparser.go b/cmd/crowdsec-cli/cliitem/hubparser.go new file mode 100644 index 00000000000..440cb61204f --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubparser.go @@ -0,0 +1,105 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewParser(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.PARSERS, + singular: "parser", + oneOrMore: "parser(s)", + help: cliHelp{ + example: `cscli parsers list -a +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers inspect crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs +`, + }, + installHelp: cliHelp{ + example: `# Install some parsers. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Download only, to be installed later. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some parsers. 
+cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --purge + +# Remove tainted items. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some parsers. If they are not currently installed, they are downloaded but not installed. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. 
+cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and ancestor collections of parsers (installed or not). +cscli parsers inspect crowdsecurity/httpd-logs crowdsecurity/sshd-logs + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli parsers inspect crowdsecurity/httpd-logs --no-metrics + +# Display difference between a tainted item and the latest one. +cscli parsers inspect crowdsecurity/httpd-logs --diff + +# Reverse the above diff +cscli parsers inspect crowdsecurity/httpd-logs --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) parsers. +cscli parsers list + +# List all available parsers (installed or not). +cscli parsers list -a + +# List specific parsers (installed or not). +cscli parsers list crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubpostoverflow.go b/cmd/crowdsec-cli/cliitem/hubpostoverflow.go new file mode 100644 index 00000000000..cfd5f7c95aa --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubpostoverflow.go @@ -0,0 +1,102 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewPostOverflow(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.POSTOVERFLOWS, + singular: "postoverflow", + oneOrMore: "postoverflow(s)", + help: cliHelp{ + example: `cscli postoverflows list -a +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns +`, + }, + installHelp: cliHelp{ + example: `# Install some postoverflows. 
+cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run -o raw + +# Download only, to be installed later. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --force + +# Proceed without prompting. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some postoverflows. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --purge + +# Remove tainted items. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --force + +# Proceed without prompting. 
+cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some postoverflows. If they are not currently installed, they are downloaded but not installed. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss --force + +# Proceed without prompting. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state and ancestor collections of postoverflows (installed or not). +cscli postoverflows inspect crowdsecurity/cdn-whitelist + +# Display difference between a tainted item and the latest one. +cscli postoverflows inspect crowdsecurity/cdn-whitelist --diff + +# Reverse the above diff +cscli postoverflows inspect crowdsecurity/cdn-whitelist --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) postoverflows. +cscli postoverflows list + +# List all available postoverflows (installed or not). +cscli postoverflows list -a + +# List specific postoverflows (installed or not). 
+cscli postoverflows list crowdsecurity/cdn-whitelists crowdsecurity/rdns`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubscenario.go b/cmd/crowdsec-cli/cliitem/hubscenario.go index a5e854b3c82..5dee3323f6f 100644 --- a/cmd/crowdsec-cli/cliitem/hubscenario.go +++ b/cmd/crowdsec-cli/cliitem/hubscenario.go @@ -19,23 +19,87 @@ cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing `, }, installHelp: cliHelp{ - example: `cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Install some scenarios. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Download only, to be installed later. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, removeHelp: cliHelp{ - example: `cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Uninstall some scenarios. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. 
+cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --purge + +# Remove tainted items. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, upgradeHelp: cliHelp{ - example: `cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Upgrade some scenarios. If they are not currently installed, they are downloaded but not installed. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. 
+cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, inspectHelp: cliHelp{ - example: `cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Display metadata, state, metrics and ancestor collections of scenarios (installed or not). +cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics + +# Display difference between a tainted item and the latest one. +cscli scenarios inspect crowdsecurity/ssh-bf --diff + +# Reverse the above diff +cscli scenarios inspect crowdsecurity/ssh-bf --diff --rev`, }, listHelp: cliHelp{ - example: `cscli scenarios list + example: `# List enabled (installed) scenarios. +cscli scenarios list + +# List all available scenarios (installed or not). cscli scenarios list -a -cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/http-probing -List only enabled scenarios unless "-a" or names are specified.`, +# List specific scenarios (installed or not). 
+cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/http-probing`, }, } } diff --git a/cmd/crowdsec-cli/cliitem/item.go b/cmd/crowdsec-cli/cliitem/item.go index 28828eb9c95..3dcc0665a89 100644 --- a/cmd/crowdsec-cli/cliitem/item.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -2,21 +2,14 @@ package cliitem import ( "cmp" - "context" - "errors" "fmt" - "os" "strings" "github.com/fatih/color" - "github.com/hexops/gotextdiff" - "github.com/hexops/gotextdiff/myers" - "github.com/hexops/gotextdiff/span" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -67,365 +60,10 @@ func (cli cliItem) NewCommand() *cobra.Command { return cmd } -func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool, force bool, ignoreError bool) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) - if err != nil { - return err - } - - for _, name := range args { - item := hub.GetItem(cli.name, name) - if item == nil { - msg := suggestNearestMessage(hub, cli.name, name) - if !ignoreError { - return errors.New(msg) - } - - log.Error(msg) - - continue - } - - if err := item.Install(ctx, force, downloadOnly); err != nil { - if !ignoreError { - return fmt.Errorf("error while installing '%s': %w", item.Name, err) - } - - log.Errorf("Error while installing '%s': %s", item.Name, err) - } - } - - log.Info(reload.Message) - - return nil -} - -func (cli cliItem) newInstallCmd() *cobra.Command { - var ( - downloadOnly bool - force bool - ignoreError bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.installHelp.use, "install [item]..."), - Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), - Long: 
cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), - Example: cli.installHelp.example, - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compAllItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), args, downloadOnly, force, ignoreError) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") - flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") - flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) - - return cmd -} - -// return the names of the installed parents of an item, used to check if we can remove it -func istalledParentNames(item *cwhub.Item) []string { - ret := make([]string, 0) - - for _, parent := range item.Ancestors() { - if parent.State.Installed { - ret = append(ret, parent.Name) - } - } - - return ret -} - -func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error { - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) - if err != nil { - return err - } - - if all { - itemGetter := hub.GetInstalledByType - if purge { - itemGetter = hub.GetItemsByType - } - - removed := 0 - - for _, item := range itemGetter(cli.name, true) { - didRemove, err := item.Remove(purge, force) - if err != nil { - return err - } - - if didRemove { - log.Infof("Removed %s", item.Name) - - removed++ - } - } - - log.Infof("Removed %d %s", removed, cli.name) - - if removed > 0 { - log.Info(reload.Message) - } - - return nil - } - - if len(args) == 0 { - return fmt.Errorf("specify at least one %s to remove or '--all'", cli.singular) - } - - removed := 0 - - for _, itemName := range args { - item := 
hub.GetItem(cli.name, itemName) - if item == nil { - return fmt.Errorf("can't find '%s' in %s", itemName, cli.name) - } - - parents := istalledParentNames(item) - - if !force && len(parents) > 0 { - log.Warningf("%s belongs to collections: %s", item.Name, parents) - log.Warningf("Run 'sudo cscli %s remove %s --force' if you want to force remove this %s", item.Type, item.Name, cli.singular) - - continue - } - - didRemove, err := item.Remove(purge, force) - if err != nil { - return err - } - - if didRemove { - log.Infof("Removed %s", item.Name) - - removed++ - } - } - - log.Infof("Removed %d %s", removed, cli.name) - - if removed > 0 { - log.Info(reload.Message) - } - - return nil -} - -func (cli cliItem) newRemoveCmd() *cobra.Command { - var ( - purge bool - force bool - all bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.removeHelp.use, "remove [item]..."), - Short: cmp.Or(cli.removeHelp.short, "Remove given "+cli.oneOrMore), - Long: cmp.Or(cli.removeHelp.long, "Remove one or more "+cli.name), - Example: cli.removeHelp.example, - Aliases: []string{"delete"}, - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.remove(args, purge, force, all) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&purge, "purge", false, "Delete source file too") - flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") - flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) - - return cmd -} - -func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all bool) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) - if err != nil { - return err - } - - if all { - updated := 0 - - for _, item := range hub.GetInstalledByType(cli.name, true) { - 
didUpdate, err := item.Upgrade(ctx, force) - if err != nil { - return err - } - - if didUpdate { - updated++ - } - } - - log.Infof("Updated %d %s", updated, cli.name) - - if updated > 0 { - log.Info(reload.Message) - } - - return nil - } - - if len(args) == 0 { - return fmt.Errorf("specify at least one %s to upgrade or '--all'", cli.singular) - } - - updated := 0 - - for _, itemName := range args { - item := hub.GetItem(cli.name, itemName) - if item == nil { - return fmt.Errorf("can't find '%s' in %s", itemName, cli.name) - } - - didUpdate, err := item.Upgrade(ctx, force) - if err != nil { - return err - } - - if didUpdate { - log.Infof("Updated %s", item.Name) - - updated++ - } - } - - if updated > 0 { - log.Info(reload.Message) - } - - return nil -} - -func (cli cliItem) newUpgradeCmd() *cobra.Command { - var ( - all bool - force bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.upgradeHelp.use, "upgrade [item]..."), - Short: cmp.Or(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), - Long: cmp.Or(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), - Example: cli.upgradeHelp.example, - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.upgrade(cmd.Context(), args, force, all) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) - flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") - - return cmd -} - -func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { - cfg := cli.cfg() - - if rev && !diff { - return errors.New("--rev can only be used with --diff") - } - - if url != "" { - cfg.Cscli.PrometheusUrl = url - } - - remote := 
(*cwhub.RemoteHubCfg)(nil) - - if diff { - remote = require.RemoteHub(ctx, cfg) - } - - hub, err := require.Hub(cfg, remote, log.StandardLogger()) - if err != nil { - return err - } - - for _, name := range args { - item := hub.GetItem(cli.name, name) - if item == nil { - return fmt.Errorf("can't find '%s' in %s", name, cli.name) - } - - if diff { - fmt.Println(cli.whyTainted(ctx, hub, item, rev)) - - continue - } - - if err = clihub.InspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { - return err - } - - if cli.inspectDetail != nil { - if err = cli.inspectDetail(item); err != nil { - return err - } - } - } - - return nil -} - -func (cli cliItem) newInspectCmd() *cobra.Command { - var ( - url string - diff bool - rev bool - noMetrics bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.inspectHelp.use, "inspect [item]..."), - Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), - Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), - Example: cli.inspectHelp.example, - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.inspect(cmd.Context(), args, url, diff, rev, noMetrics) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&url, "url", "u", "", "Prometheus url") - flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)") - flags.BoolVar(&rev, "rev", false, "Reverse diff output") - flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)") - - return cmd -} - func (cli cliItem) list(args []string, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), 
log.StandardLogger()) if err != nil { return err } @@ -460,91 +98,23 @@ func (cli cliItem) newListCmd() *cobra.Command { return cmd } -// return the diff between the installed version and the latest version -func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, reverse bool) (string, error) { - if !item.State.Installed { - return "", fmt.Errorf("'%s' is not installed", item.FQName()) - } - - dest, err := os.CreateTemp("", "cscli-diff-*") - if err != nil { - return "", fmt.Errorf("while creating temporary file: %w", err) - } - defer os.Remove(dest.Name()) - - _, remoteURL, err := item.FetchContentTo(ctx, dest.Name()) - if err != nil { - return "", err - } - - latestContent, err := os.ReadFile(dest.Name()) - if err != nil { - return "", fmt.Errorf("while reading %s: %w", dest.Name(), err) - } - - localContent, err := os.ReadFile(item.State.LocalPath) +func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil) if err != nil { - return "", fmt.Errorf("while reading %s: %w", item.State.LocalPath, err) - } - - file1 := item.State.LocalPath - file2 := remoteURL - content1 := string(localContent) - content2 := string(latestContent) - - if reverse { - file1, file2 = file2, file1 - content1, content2 = content2, content1 - } - - edits := myers.ComputeEdits(span.URIFromPath(file1), content1, content2) - diff := gotextdiff.ToUnified(file1, file2, content1, edits) - - return fmt.Sprintf("%s", diff), nil -} - -func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, reverse bool) string { - if !item.State.Installed { - return fmt.Sprintf("# %s is not installed", item.FQName()) - } - - if !item.State.Tainted { - return fmt.Sprintf("# %s is not tainted", item.FQName()) - } - - if len(item.State.TaintedBy) == 0 { - return fmt.Sprintf("# %s is tainted but we don't know why. 
please report this as a bug", item.FQName()) - } - - ret := []string{ - fmt.Sprintf("# Let's see why %s is tainted.", item.FQName()), + return nil, cobra.ShellCompDirectiveDefault } - for _, fqsub := range item.State.TaintedBy { - ret = append(ret, fmt.Sprintf("\n-> %s\n", fqsub)) - - sub, err := hub.GetItemFQ(fqsub) - if err != nil { - ret = append(ret, err.Error()) - } - - diff, err := cli.itemDiff(ctx, sub, reverse) - if err != nil { - ret = append(ret, err.Error()) - } + items := hub.GetInstalledByType(itemType, true) - if diff != "" { - ret = append(ret, diff) - } else if len(sub.State.TaintedBy) > 0 { - taintList := strings.Join(sub.State.TaintedBy, ", ") - if sub.FQName() == taintList { - // hack: avoid message "item is tainted by itself" - continue - } + comp := make([]string, 0) - ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList)) + for _, item := range items { + if strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) } } - return strings.Join(ret, "\n") + cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) + + return comp, cobra.ShellCompDirectiveNoFileComp } diff --git a/cmd/crowdsec-cli/clihub/item_metrics.go b/cmd/crowdsec-cli/cliitem/metrics.go similarity index 78% rename from cmd/crowdsec-cli/clihub/item_metrics.go rename to cmd/crowdsec-cli/cliitem/metrics.go index f4af8f635db..4999ea38078 100644 --- a/cmd/crowdsec-cli/clihub/item_metrics.go +++ b/cmd/crowdsec-cli/cliitem/metrics.go @@ -1,6 +1,7 @@ -package clihub +package cliitem import ( + "fmt" "net/http" "strconv" "strings" @@ -16,22 +17,31 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { +func showMetrics(prometheusURL string, hub *cwhub.Hub, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: - metrics := getParserMetric(prometheusURL, hubItem.Name) + metrics, err := getParserMetric(prometheusURL, 
hubItem.Name) + if err != nil { + return err + } parserMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.SCENARIOS: - metrics := getScenarioMetric(prometheusURL, hubItem.Name) + metrics, err := getScenarioMetric(prometheusURL, hubItem.Name) + if err != nil { + return err + } scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: - for _, sub := range hubItem.SubItems() { - if err := showMetrics(prometheusURL, sub, wantColor); err != nil { + for sub := range hubItem.CurrentDependencies().SubItems(hub) { + if err := showMetrics(prometheusURL, hub, sub, wantColor); err != nil { return err } } case cwhub.APPSEC_RULES: - metrics := getAppsecRuleMetric(prometheusURL, hubItem.Name) + metrics, err := getAppsecRuleMetric(prometheusURL, hubItem.Name) + if err != nil { + return err + } appsecMetricsTable(color.Output, wantColor, hubItem.Name, metrics) default: // no metrics for this item type } @@ -40,11 +50,15 @@ func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) er } // getParserMetric is a complete rip from prom2json -func getParserMetric(url string, itemName string) map[string]map[string]int { +func getParserMetric(url string, itemName string) (map[string]map[string]int, error) { stats := make(map[string]map[string]int) - result := getPrometheusMetric(url) - for idx, fam := range result { + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue } @@ -128,10 +142,10 @@ func getParserMetric(url string, itemName string) map[string]map[string]int { } } - return stats + return stats, nil } -func getScenarioMetric(url string, itemName string) map[string]int { +func getScenarioMetric(url string, itemName string) (map[string]int, error) { stats := make(map[string]int) stats["instantiation"] = 0 @@ -140,8 +154,12 @@ func getScenarioMetric(url string, itemName string) map[string]int { 
stats["pour"] = 0 stats["underflow"] = 0 - result := getPrometheusMetric(url) - for idx, fam := range result { + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue } @@ -192,16 +210,20 @@ func getScenarioMetric(url string, itemName string) map[string]int { } } - return stats + return stats, nil } -func getAppsecRuleMetric(url string, itemName string) map[string]int { +func getAppsecRuleMetric(url string, itemName string) (map[string]int, error) { stats := make(map[string]int) stats["inband_hits"] = 0 stats["outband_hits"] = 0 - results := getPrometheusMetric(url) + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -257,10 +279,10 @@ func getAppsecRuleMetric(url string, itemName string) map[string]int { } } - return stats + return stats, nil } -func getPrometheusMetric(url string) []*prom2json.Family { +func getPrometheusMetric(url string) ([]*prom2json.Family, error) { mfChan := make(chan *dto.MetricFamily, 1024) // Start with the DefaultTransport for sane defaults. @@ -271,12 +293,15 @@ func getPrometheusMetric(url string) []*prom2json.Family { // Timeout early if the server doesn't even return the headers. transport.ResponseHeaderTimeout = time.Minute + var fetchErr error + go func() { defer trace.CatchPanic("crowdsec/GetPrometheusMetric") - err := prom2json.FetchMetricFamilies(url, mfChan, transport) - if err != nil { - log.Fatalf("failed to fetch prometheus metrics : %v", err) + // mfChan is closed by prom2json.FetchMetricFamilies in all cases. 
+ if err := prom2json.FetchMetricFamilies(url, mfChan, transport); err != nil { + fetchErr = fmt.Errorf("failed to fetch prometheus metrics: %w", err) + return } }() @@ -285,7 +310,11 @@ func getPrometheusMetric(url string) []*prom2json.Family { result = append(result, prom2json.NewFamily(mf)) } + if fetchErr != nil { + return nil, fetchErr + } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) - return result + return result, nil } diff --git a/cmd/crowdsec-cli/cliitem/metrics_table.go b/cmd/crowdsec-cli/cliitem/metrics_table.go new file mode 100644 index 00000000000..a41ea0fad39 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/metrics_table.go @@ -0,0 +1,70 @@ +package cliitem + +import ( + "fmt" + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/table" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { + t := cstable.NewLight(out, wantColor).Writer + t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) + + t.AppendRow(table.Row{ + strconv.Itoa(metrics["inband_hits"]), + strconv.Itoa(metrics["outband_hits"]), + }) + + t.SetTitle("(AppSec) " + itemName) + fmt.Fprintln(out, t.Render()) +} + +func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { + if metrics["instantiation"] == 0 { + return + } + + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) + + t.AppendRow(table.Row{ + strconv.Itoa(metrics["curr_count"]), + strconv.Itoa(metrics["overflow"]), + strconv.Itoa(metrics["instantiation"]), + strconv.Itoa(metrics["pour"]), + strconv.Itoa(metrics["underflow"]), + }) + + t.SetTitle("(Scenario) " + itemName) + fmt.Fprintln(out, t.Render()) +} + +func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { + t := cstable.New(out, 
wantColor).Writer + t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) + + // don't show table if no hits + showTable := false + + for source, stats := range metrics { + if stats["hits"] > 0 { + t.AppendRow(table.Row{ + source, + strconv.Itoa(stats["hits"]), + strconv.Itoa(stats["parsed"]), + strconv.Itoa(stats["unparsed"]), + }) + + showTable = true + } + } + + if showTable { + t.SetTitle("(Parser) " + itemName) + fmt.Fprintln(out, t.Render()) + } +} diff --git a/cmd/crowdsec-cli/cliitem/parser.go b/cmd/crowdsec-cli/cliitem/parser.go deleted file mode 100644 index bc1d96bdaf0..00000000000 --- a/cmd/crowdsec-cli/cliitem/parser.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewParser(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.PARSERS, - singular: "parser", - oneOrMore: "parser(s)", - help: cliHelp{ - example: `cscli parsers list -a -cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers inspect crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs -`, - }, - installHelp: cliHelp{ - example: `cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - removeHelp: cliHelp{ - example: `cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - upgradeHelp: cliHelp{ - example: `cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - inspectHelp: cliHelp{ - example: `cscli parsers inspect crowdsecurity/httpd-logs crowdsecurity/sshd-logs`, - }, - listHelp: cliHelp{ - example: `cscli parsers list -cscli parsers list -a -cscli parsers list crowdsecurity/caddy-logs crowdsecurity/sshd-logs - -List only enabled parsers unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/postoverflow.go 
b/cmd/crowdsec-cli/cliitem/postoverflow.go deleted file mode 100644 index ea53aef327d..00000000000 --- a/cmd/crowdsec-cli/cliitem/postoverflow.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewPostOverflow(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.POSTOVERFLOWS, - singular: "postoverflow", - oneOrMore: "postoverflow(s)", - help: cliHelp{ - example: `cscli postoverflows list -a -cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns -`, - }, - installHelp: cliHelp{ - example: `cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - removeHelp: cliHelp{ - example: `cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - upgradeHelp: cliHelp{ - example: `cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - inspectHelp: cliHelp{ - example: `cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - listHelp: cliHelp{ - example: `cscli postoverflows list -cscli postoverflows list -a -cscli postoverflows list crowdsecurity/cdn-whitelist crowdsecurity/rdns - -List only enabled postoverflows unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/suggest.go b/cmd/crowdsec-cli/cliitem/suggest.go deleted file mode 100644 index 5b080722af9..00000000000 --- a/cmd/crowdsec-cli/cliitem/suggest.go +++ /dev/null @@ -1,77 +0,0 @@ -package cliitem - -import ( - "fmt" - "slices" - "strings" - - "github.com/agext/levenshtein" - "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -// suggestNearestMessage 
returns a message with the most similar item name, if one is found -func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) string { - const maxDistance = 7 - - score := 100 - nearest := "" - - for _, item := range hub.GetItemsByType(itemType, false) { - d := levenshtein.Distance(itemName, item.Name, nil) - if d < score { - score = d - nearest = item.Name - } - } - - msg := fmt.Sprintf("can't find '%s' in %s", itemName, itemType) - - if score < maxDistance { - msg += fmt.Sprintf(", did you mean '%s'?", nearest) - } - - return msg -} - -func compAllItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(cfg(), nil, nil) - if err != nil { - return nil, cobra.ShellCompDirectiveDefault - } - - comp := make([]string, 0) - - for _, item := range hub.GetItemsByType(itemType, false) { - if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { - comp = append(comp, item.Name) - } - } - - cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) - - return comp, cobra.ShellCompDirectiveNoFileComp -} - -func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(cfg(), nil, nil) - if err != nil { - return nil, cobra.ShellCompDirectiveDefault - } - - items := hub.GetInstalledByType(itemType, true) - - comp := make([]string, 0) - - for _, item := range items { - if strings.Contains(item.Name, toComplete) { - comp = append(comp, item.Name) - } - } - - cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) - - return comp, cobra.ShellCompDirectiveNoFileComp -} diff --git a/cmd/crowdsec-cli/clilapi/context.go b/cmd/crowdsec-cli/clilapi/context.go index 20ceb2b9596..0730ba2b2a9 100644 --- a/cmd/crowdsec-cli/clilapi/context.go +++ b/cmd/crowdsec-cli/clilapi/context.go @@ -59,7 +59,7 @@ cscli lapi context add --value evt.Meta.source_ip --value 
evt.Meta.target_user `, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } @@ -101,7 +101,7 @@ func (cli *cliLapi) newContextStatusCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } @@ -153,7 +153,7 @@ cscli lapi context detect crowdsecurity/sshd-logs return fmt.Errorf("failed to init expr helpers: %w", err) } - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clilapi/register.go b/cmd/crowdsec-cli/clilapi/register.go index 4c9b0f39903..7430c73c3c8 100644 --- a/cmd/crowdsec-cli/clilapi/register.go +++ b/cmd/crowdsec-cli/clilapi/register.go @@ -28,7 +28,12 @@ func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile stri } } - password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) + pstr, err := idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } + + password := strfmt.Password(pstr) apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) if err != nil { @@ -82,7 +87,9 @@ func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile stri fmt.Printf("%s\n", string(apiConfigDump)) } - log.Warning(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Warning(msg) + } return nil } diff --git a/cmd/crowdsec-cli/clilapi/status.go b/cmd/crowdsec-cli/clilapi/status.go index 6ff88834602..039c75e585d 100644 --- a/cmd/crowdsec-cli/clilapi/status.go +++ b/cmd/crowdsec-cli/clilapi/status.go @@ -102,7 +102,7 @@ func (cli *cliLapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), 
nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/climachine/add.go b/cmd/crowdsec-cli/climachine/add.go index afddb4e4b65..b2595583823 100644 --- a/cmd/crowdsec-cli/climachine/add.go +++ b/cmd/crowdsec-cli/climachine/add.go @@ -65,12 +65,17 @@ func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword return errors.New("please specify a password with --password or use --auto") } - machinePassword = idgen.GeneratePassword(idgen.PasswordLength) + machinePassword, err = idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } } else if machinePassword == "" && interactive { qs := &survey.Password{ Message: "Please provide a password for the machine:", } - survey.AskOne(qs, &machinePassword) + if err := survey.AskOne(qs, &machinePassword); err != nil { + return err + } } password := strfmt.Password(machinePassword) @@ -144,9 +149,9 @@ cscli machines add -f- --auto > /tmp/mycreds.yaml`, flags.VarP(&password, "password", "p", "machine password to login to the API") flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API") - flags.BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password") + flags.BoolVarP(&interactive, "interactive", "i", false, "interactive mode to enter the password") flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)") - flags.BoolVar(&force, "force", false, "will force add the machine if it already exist") + flags.BoolVar(&force, "force", false, "will force add the machine if it already exists") return cmd } diff --git a/cmd/crowdsec-cli/climachine/inspect.go b/cmd/crowdsec-cli/climachine/inspect.go index b08f2f62794..e973d07e96b 100644 --- a/cmd/crowdsec-cli/climachine/inspect.go +++ 
b/cmd/crowdsec-cli/climachine/inspect.go @@ -44,7 +44,7 @@ func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } } @@ -80,7 +80,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Collections", coll.Name}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliMachines) inspect(machine *ent.Machine) error { diff --git a/cmd/crowdsec-cli/climachine/list.go b/cmd/crowdsec-cli/climachine/list.go index 6bedb2ad807..6fb45166aa2 100644 --- a/cmd/crowdsec-cli/climachine/list.go +++ b/cmd/crowdsec-cli/climachine/list.go @@ -55,7 +55,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { @@ -90,7 +90,6 @@ func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { func (cli *cliMachines) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - machines, err := db.ListMachines(ctx) if err != nil { return fmt.Errorf("unable to list machines: %w", err) diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index 27fa99710c8..32e2f8e0a80 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -3,7 +3,6 @@ package climetrics import ( "encoding/json" "fmt" - "io" "github.com/fatih/color" "github.com/jedib0t/go-pretty/v6/table" @@ -64,7 +63,7 @@ func 
(cli *cliMetrics) list() error { t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/climetrics/show.go b/cmd/crowdsec-cli/climetrics/show.go index 045959048f6..172d3799435 100644 --- a/cmd/crowdsec-cli/climetrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -4,11 +4,15 @@ import ( "context" "errors" "fmt" + "slices" + "strings" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) @@ -99,6 +103,17 @@ cscli metrics list; cscli metrics list -o json cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ms := NewMetricStore() + ret := []string{} + for _, section := range maptools.SortedKeys(ms) { + if !slices.Contains(args, section) && strings.Contains(section, toComplete) { + ret = append(ret, section) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp + }, RunE: func(cmd *cobra.Command, args []string) error { args = expandAlias(args) return cli.show(cmd.Context(), args, url, noUnit) diff --git a/cmd/crowdsec-cli/climetrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go index 0af2e796f40..da17b1d9480 100644 --- a/cmd/crowdsec-cli/climetrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -37,8 +38,7 @@ func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - 
io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go index 942eceaa75c..416b78f0508 100644 --- a/cmd/crowdsec-cli/climetrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -38,8 +39,7 @@ func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go index d924375247f..93cc1283c96 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -34,8 +35,7 @@ func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, sh log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go index e06a7c2e2b3..8e243aba642 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -40,9 +40,8 @@ func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, show if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } 
else if numRows > 0 || showEmpty { - io.WriteString(out, fmt.Sprintf("Appsec '%s' Rules Metrics:\n", appsecEngine)) - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(fmt.Sprintf("Appsec '%s' Rules Metrics", appsecEngine)) + fmt.Fprintln(out, t.Render()) } } } diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index bc0da152d6d..ac79074d506 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -176,17 +176,20 @@ func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricIte if item.Name == nil { logWarningOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + // no continue - keep checking the rest valid = false } if item.Unit == nil { logWarningOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + valid = false } if item.Value == nil { logWarningOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + valid = false } @@ -439,11 +442,8 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } - // don't use SetTitle() because it draws the title inside table box - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - // empty line between tables - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } // Table displays a table of metrics for each bouncer @@ -452,10 +452,11 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm for _, bouncerName := range maptools.SortedKeys(s.aggOverOrigin) { s.bouncerTable(out, bouncerName, wantColor, noUnit) + found = true } if !found && showEmpty { - io.WriteString(out, "No bouncer metrics found.\n\n") + fmt.Fprintln(out, "No bouncer metrics found.") } } diff --git a/cmd/crowdsec-cli/climetrics/statbucket.go 
b/cmd/crowdsec-cli/climetrics/statbucket.go index 1882fe21df1..4cddfeb3731 100644 --- a/cmd/crowdsec-cli/climetrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go index b862f49ff12..2f27410f56f 100644 --- a/cmd/crowdsec-cli/climetrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -53,8 +54,7 @@ func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go index 9559eacf0f4..2f460ca5a71 100644 --- a/cmd/crowdsec-cli/climetrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -49,8 +50,7 @@ func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go index 
5e5f63a79d3..2ea6b67cd0a 100644 --- a/cmd/crowdsec-cli/climetrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go index 44f0e8f4b87..3371cb0e8ff 100644 --- a/cmd/crowdsec-cli/climetrics/statlapidecision.go +++ b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -57,8 +58,7 @@ func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, sh if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go index 0e6693bea82..04fbb98ae8e 100644 --- a/cmd/crowdsec-cli/climetrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go index 520e68f9adf..bdc9caa8597 100644 --- 
a/cmd/crowdsec-cli/climetrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -36,8 +37,7 @@ func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go index 2729de931a1..496deaf0535 100644 --- a/cmd/crowdsec-cli/climetrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -52,8 +53,7 @@ func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go index 7f533b45b4b..a42f653d50d 100644 --- a/cmd/crowdsec-cli/climetrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -36,8 +37,7 @@ func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showE log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/store.go b/cmd/crowdsec-cli/climetrics/store.go index 
55fab5dbd7f..6c402447901 100644 --- a/cmd/crowdsec-cli/climetrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -262,7 +262,8 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, if err != nil { return fmt.Errorf("failed to serialize metrics: %w", err) } - out.Write(x) + + fmt.Fprint(out, string(x)) default: return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index baf899c10cf..80ffebeaa23 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -260,7 +260,7 @@ func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args [ return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli cliNotifications) newTestCmd() *cobra.Command { +func (cli *cliNotifications) newTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go index 269cdfb78e9..77c357e7251 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -94,7 +94,10 @@ func (cli *cliSetup) newDetectCmd() *cobra.Command { } func (cli *cliSetup) newInstallHubCmd() *cobra.Command { - var dryRun bool + var ( + yes bool + dryRun bool + ) cmd := &cobra.Command{ Use: "install-hub [setup_file] [flags]", @@ -102,12 +105,14 @@ func (cli *cliSetup) newInstallHubCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), dryRun, args[0]) + return cli.install(cmd.Context(), yes, dryRun, args[0]) }, } flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "confirm execution without prompt") flags.BoolVar(&dryRun, "dry-run", false, "don't install anything; print out what would have been") + 
cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } @@ -276,7 +281,7 @@ func (cli *cliSetup) dataSources(fromFile string, toDir string) error { return nil } -func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) error { +func (cli *cliSetup) install(ctx context.Context, yes bool, dryRun bool, fromFile string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading file %s: %w", fromFile, err) @@ -284,12 +289,16 @@ func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, log.StandardLogger()) if err != nil { return err } - return setup.InstallHubItems(ctx, hub, input, dryRun) + verbose := (cfg.Cscli.Output == "raw") + + contentProvider := require.HubDownloader(ctx, cfg) + + return setup.InstallHubItems(ctx, hub, contentProvider, input, yes, dryRun, verbose) } func (cli *cliSetup) validate(fromFile string) error { diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index 8136aa213c3..1b46c70c90a 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -47,8 +47,8 @@ cscli simulation disable crowdsecurity/ssh-bf`, return nil }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { - if cmd.Name() != "status" { - log.Info(reload.Message) + if msg := reload.UserMessage(); msg != "" && cmd.Name() != "status" { + log.Info(msg) } }, } @@ -71,7 +71,7 @@ func (cli *cliSimulation) newEnableCmd() *cobra.Command { Example: `cscli simulation enable`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 
4474f5c8f11..eb3e03df253 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -290,7 +290,7 @@ func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { cfg := cli.cfg() - config, err := os.ReadFile(*cfg.FilePath) + config, err := os.ReadFile(cfg.FilePath) if err != nil { return fmt.Errorf("could not read config file: %w", err) } @@ -314,7 +314,7 @@ func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, prometheus ctx, http.MethodGet, fmt.Sprintf( - "http://%s/debug/pprof/%s?debug=1", + "http://%s/debug/pprof/%s", net.JoinHostPort( prometheusCfg.ListenAddr, strconv.Itoa(prometheusCfg.ListenPort), @@ -491,9 +491,9 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { skipAgent = true } - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { - log.Warn("Could not init hub, running on LAPI ? Hub related information will not be collected") + log.Warn("Could not init hub, running on LAPI? 
Hub related information will not be collected") // XXX: lapi status check requires scenarios, will return an error } diff --git a/cmd/crowdsec-cli/completion.go b/cmd/crowdsec-cli/completion.go index 7b6531f5516..fb60f9afab0 100644 --- a/cmd/crowdsec-cli/completion.go +++ b/cmd/crowdsec-cli/completion.go @@ -71,13 +71,13 @@ func NewCompletionCmd() *cobra.Command { Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": - cmd.Root().GenBashCompletion(os.Stdout) + _ = cmd.Root().GenBashCompletion(os.Stdout) case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) + _ = cmd.Root().GenZshCompletion(os.Stdout) case "powershell": - cmd.Root().GenPowerShellCompletion(os.Stdout) + _ = cmd.Root().GenPowerShellCompletion(os.Stdout) case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) + _ = cmd.Root().GenFishCompletion(os.Stdout, true) } }, } diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go deleted file mode 100644 index d23aff80a78..00000000000 --- a/cmd/crowdsec-cli/config_backup.go +++ /dev/null @@ -1,236 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func (cli *cliConfig) backupHub(dirPath string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) - if err != nil { - return err - } - - for _, itemType := range cwhub.ItemTypes { - clog := log.WithField("type", itemType) - - itemMap := hub.GetItemMap(itemType) - if itemMap == nil { - clog.Infof("No %s to backup.", itemType) - continue - } - - itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itemType) - if err = os.MkdirAll(itemDirectory, os.ModePerm); err != nil { - return fmt.Errorf("error while creating %s: %w", itemDirectory, err) - } - - upstreamParsers := []string{} - - for k, v := range itemMap { - clog = clog.WithField("file", 
v.Name) - if !v.State.Installed { // only backup installed ones - clog.Debugf("[%s]: not installed", k) - continue - } - - // for the local/tainted ones, we back up the full file - if v.State.Tainted || v.State.IsLocal() || !v.State.UpToDate { - // we need to backup stages for parsers - if itemType == cwhub.PARSERS || itemType == cwhub.POSTOVERFLOWS { - fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage) - if err = os.MkdirAll(fstagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage dir %s: %w", fstagedir, err) - } - } - - clog.Debugf("[%s]: backing up file (tainted:%t local:%t up-to-date:%t)", k, v.State.Tainted, v.State.IsLocal(), v.State.UpToDate) - - tfile := fmt.Sprintf("%s%s/%s", itemDirectory, v.Stage, v.FileName) - if err = CopyFile(v.State.LocalPath, tfile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itemType, v.State.LocalPath, tfile, err) - } - - clog.Infof("local/tainted saved %s to %s", v.State.LocalPath, tfile) - - continue - } - - clog.Debugf("[%s]: from hub, just backup name (up-to-date:%t)", k, v.State.UpToDate) - clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.State.UpToDate) - upstreamParsers = append(upstreamParsers, v.Name) - } - // write the upstream items - upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType) - - upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") - if err != nil { - return fmt.Errorf("failed to serialize upstream parsers: %w", err) - } - - err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0o644) - if err != nil { - return fmt.Errorf("unable to write to %s %s: %w", itemType, upstreamParsersFname, err) - } - - clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname) - } - - return nil -} - -/* - Backup crowdsec configurations to directory : - -- Main config (config.yaml) -- Profiles config (profiles.yaml) -- Simulation config (simulation.yaml) -- 
Backup of API credentials (local API and online API) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Acquisition files (acquis.yaml, acquis.d/*.yaml) -*/ -func (cli *cliConfig) backup(dirPath string) error { - var err error - - cfg := cli.cfg() - - if dirPath == "" { - return errors.New("directory path can't be empty") - } - - log.Infof("Starting configuration backup") - - /*if parent directory doesn't exist, bail out. create final dir with Mkdir*/ - parentDir := filepath.Dir(dirPath) - if _, err = os.Stat(parentDir); err != nil { - return fmt.Errorf("while checking parent directory %s existence: %w", parentDir, err) - } - - if err = os.Mkdir(dirPath, 0o700); err != nil { - return fmt.Errorf("while creating %s: %w", dirPath, err) - } - - if cfg.ConfigPaths.SimulationFilePath != "" { - backupSimulation := filepath.Join(dirPath, "simulation.yaml") - if err = CopyFile(cfg.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.ConfigPaths.SimulationFilePath, backupSimulation, err) - } - - log.Infof("Saved simulation to %s", backupSimulation) - } - - /* - - backup AcquisitionFilePath - - backup the other files of acquisition directory - */ - if cfg.Crowdsec != nil && cfg.Crowdsec.AcquisitionFilePath != "" { - backupAcquisition := filepath.Join(dirPath, "acquis.yaml") - if err = CopyFile(cfg.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.Crowdsec.AcquisitionFilePath, backupAcquisition, err) - } - } - - acquisBackupDir := filepath.Join(dirPath, "acquis") - if err = os.Mkdir(acquisBackupDir, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %w", acquisBackupDir, err) - } - - if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { - /*if it was 
the default one, it was already backup'ed*/ - if cfg.Crowdsec.AcquisitionFilePath == acquisFile { - continue - } - - targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile))) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, acquisBackupDir, err) - } - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - - log.Infof("Saved acquis %s to %s", acquisFile, targetFname) - } - } - - if ConfigFilePath != "" { - backupMain := fmt.Sprintf("%s/config.yaml", dirPath) - if err = CopyFile(ConfigFilePath, backupMain); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", ConfigFilePath, backupMain, err) - } - - log.Infof("Saved default yaml to %s", backupMain) - } - - if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.OnlineClient != nil && cfg.API.Server.OnlineClient.CredentialsFilePath != "" { - backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if err = CopyFile(cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) - } - - log.Infof("Saved online API credentials to %s", backupCAPICreds) - } - - if cfg.API != nil && cfg.API.Client != nil && cfg.API.Client.CredentialsFilePath != "" { - backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if err = CopyFile(cfg.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Client.CredentialsFilePath, backupLAPICreds, err) - } - - log.Infof("Saved local API credentials to %s", backupLAPICreds) - } - - if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.ProfilesPath != "" { - backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if err = CopyFile(cfg.API.Server.ProfilesPath, backupProfiles); err != nil { - 
return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.ProfilesPath, backupProfiles, err) - } - - log.Infof("Saved profiles to %s", backupProfiles) - } - - if err = cli.backupHub(dirPath); err != nil { - return fmt.Errorf("failed to backup hub config: %w", err) - } - - return nil -} - -func (cli *cliConfig) newBackupCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: `backup "directory"`, - Short: "Backup current config", - Long: `Backup the current crowdsec configuration including : - -- Main config (config.yaml) -- Simulation config (simulation.yaml) -- Profiles config (profiles.yaml) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Backup of API credentials (local API and online API)`, - Example: `cscli config backup ./my-backup`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - if err := cli.backup(args[0]); err != nil { - return fmt.Errorf("failed to backup config: %w", err) - } - - return nil - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go deleted file mode 100644 index c32328485ec..00000000000 --- a/cmd/crowdsec-cli/config_restore.go +++ /dev/null @@ -1,274 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), nil) - if err != nil { - return err - } - - for _, itype := range cwhub.ItemTypes { - itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itype) - if _, err = os.Stat(itemDirectory); err != nil { - log.Infof("no %s in backup", itype) 
- continue - } - /*restore the upstream items*/ - upstreamListFN := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itype) - - file, err := os.ReadFile(upstreamListFN) - if err != nil { - return fmt.Errorf("error while opening %s: %w", upstreamListFN, err) - } - - var upstreamList []string - - err = json.Unmarshal(file, &upstreamList) - if err != nil { - return fmt.Errorf("error parsing %s: %w", upstreamListFN, err) - } - - for _, toinstall := range upstreamList { - item := hub.GetItem(itype, toinstall) - if item == nil { - log.Errorf("Item %s/%s not found in hub", itype, toinstall) - continue - } - - if err = item.Install(ctx, false, false); err != nil { - log.Errorf("Error while installing %s : %s", toinstall, err) - } - } - - /*restore the local and tainted items*/ - files, err := os.ReadDir(itemDirectory) - if err != nil { - return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory, err) - } - - for _, file := range files { - // this was the upstream data - if file.Name() == fmt.Sprintf("upstream-%s.json", itype) { - continue - } - - if itype == cwhub.PARSERS || itype == cwhub.POSTOVERFLOWS { - // we expect a stage here - if !file.IsDir() { - continue - } - - stage := file.Name() - stagedir := fmt.Sprintf("%s/%s/%s/", cfg.ConfigPaths.ConfigDir, itype, stage) - log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) - - if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage directory %s: %w", stagedir, err) - } - - // find items - ifiles, err := os.ReadDir(itemDirectory + "/" + stage + "/") - if err != nil { - return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory+"/"+stage, err) - } - - // finally copy item - for _, tfile := range ifiles { - log.Infof("Going to restore local/tainted [%s]", tfile.Name()) - sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name()) - - destinationFile := fmt.Sprintf("%s%s", stagedir, tfile.Name()) - if err = 
CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) - } - - log.Infof("restored %s to %s", sourceFile, destinationFile) - } - } else { - log.Infof("Going to restore local/tainted [%s]", file.Name()) - sourceFile := fmt.Sprintf("%s/%s", itemDirectory, file.Name()) - destinationFile := fmt.Sprintf("%s/%s/%s", cfg.ConfigPaths.ConfigDir, itype, file.Name()) - - if err = CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) - } - - log.Infof("restored %s to %s", sourceFile, destinationFile) - } - } - } - - return nil -} - -/* - Restore crowdsec configurations to directory : - -- Main config (config.yaml) -- Profiles config (profiles.yaml) -- Simulation config (simulation.yaml) -- Backup of API credentials (local API and online API) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Acquisition files (acquis.yaml, acquis.d/*.yaml) -*/ -func (cli *cliConfig) restore(ctx context.Context, dirPath string) error { - var err error - - cfg := cli.cfg() - - backupMain := fmt.Sprintf("%s/config.yaml", dirPath) - if _, err = os.Stat(backupMain); err == nil { - if cfg.ConfigPaths != nil && cfg.ConfigPaths.ConfigDir != "" { - if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir)); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupMain, cfg.ConfigPaths.ConfigDir, err) - } - } - } - - // Now we have config.yaml, we should regenerate config struct to have rights paths etc - ConfigFilePath = fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir) - - log.Debug("Reloading configuration") - - csConfig, _, err = loadConfigFor("config") - if err != nil { - return fmt.Errorf("failed to reload configuration: %w", err) - } - - cfg = cli.cfg() - - 
backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupCAPICreds); err == nil { - if err = CopyFile(backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath, err) - } - } - - backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupLAPICreds); err == nil { - if err = CopyFile(backupLAPICreds, cfg.API.Client.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, cfg.API.Client.CredentialsFilePath, err) - } - } - - backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if _, err = os.Stat(backupProfiles); err == nil { - if err = CopyFile(backupProfiles, cfg.API.Server.ProfilesPath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, cfg.API.Server.ProfilesPath, err) - } - } - - backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) - if _, err = os.Stat(backupSimulation); err == nil { - if err = CopyFile(backupSimulation, cfg.ConfigPaths.SimulationFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, cfg.ConfigPaths.SimulationFilePath, err) - } - } - - /*if there is a acquisition dir, restore its content*/ - if cfg.Crowdsec.AcquisitionDirPath != "" { - if err = os.MkdirAll(cfg.Crowdsec.AcquisitionDirPath, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %w", cfg.Crowdsec.AcquisitionDirPath, err) - } - } - - // if there was a single one - backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath) - if _, err = os.Stat(backupAcquisition); err == nil { - log.Debugf("restoring backup'ed %s", backupAcquisition) - - if err = CopyFile(backupAcquisition, cfg.Crowdsec.AcquisitionFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, cfg.Crowdsec.AcquisitionFilePath, err) - } - } - 
- // if there are files in the acquis backup dir, restore them - acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml") - if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil { - for _, acquisFile := range acquisFiles { - targetFname, err := filepath.Abs(cfg.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile)) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, targetFname, err) - } - - log.Debugf("restoring %s to %s", acquisFile, targetFname) - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - } - } - - if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { - log.Infof("backup filepath from dir -> %s", acquisFile) - - // if it was the default one, it has already been backed up - if cfg.Crowdsec.AcquisitionFilePath == acquisFile { - log.Infof("skip this one") - continue - } - - targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile))) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, acquisBackupDir, err) - } - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - - log.Infof("Saved acquis %s to %s", acquisFile, targetFname) - } - } - - if err = cli.restoreHub(ctx, dirPath); err != nil { - return fmt.Errorf("failed to restore hub config: %w", err) - } - - return nil -} - -func (cli *cliConfig) newRestoreCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: `restore "directory"`, - Short: `Restore config in backup "directory"`, - Long: `Restore the crowdsec configuration from specified backup "directory" including: - -- Main config (config.yaml) -- Simulation config (simulation.yaml) -- Profiles config (profiles.yaml) -- List of scenarios, parsers, postoverflows and collections that are up-to-date 
-- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Backup of API credentials (local API and online API)`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - dirPath := args[0] - - if err := cli.restore(cmd.Context(), dirPath); err != nil { - return fmt.Errorf("failed to restore config from %s: %w", dirPath, err) - } - - return nil - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/copyfile.go b/cmd/crowdsec-cli/copyfile.go deleted file mode 100644 index 272fb3f7851..00000000000 --- a/cmd/crowdsec-cli/copyfile.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" -) - -/*help to copy the file, ioutil doesn't offer the feature*/ - -func copyFileContents(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - - defer func() { - cerr := out.Close() - if err == nil { - err = cerr - } - }() - - if _, err = io.Copy(out, in); err != nil { - return - } - - err = out.Sync() - - return -} - -/*copy the file, ioutile doesn't offer the feature*/ -func CopyFile(sourceSymLink, destinationFile string) error { - sourceFile, err := filepath.EvalSymlinks(sourceSymLink) - if err != nil { - log.Infof("Not a symlink : %s", err) - - sourceFile = sourceSymLink - } - - sourceFileStat, err := os.Stat(sourceFile) - if err != nil { - return err - } - - if !sourceFileStat.Mode().IsRegular() { - // cannot copy non-regular files (e.g., directories, - // symlinks, devices, etc.) 
- return fmt.Errorf("copyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String()) - } - - destinationFileStat, err := os.Stat(destinationFile) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - if !(destinationFileStat.Mode().IsRegular()) { - return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) - } - - if os.SameFile(sourceFileStat, destinationFileStat) { - return err - } - } - - if err = os.Link(sourceFile, destinationFile); err != nil { - err = copyFileContents(sourceFile, destinationFile) - } - - return err -} diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 53a7dff85a0..c3c974eb9b8 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -36,10 +36,11 @@ var ( metabaseConfigFile = "metabase.yaml" metabaseImage = "metabase/metabase:v0.46.6.1" /**/ - metabaseListenAddress = "127.0.0.1" - metabaseListenPort = "3000" - metabaseContainerID = "crowdsec-metabase" - crowdsecGroup = "crowdsec" + metabaseListenAddress = "127.0.0.1" + metabaseListenPort = "3000" + metabaseContainerID = "crowdsec-metabase" + metabaseContainerEnvironmentVariables []string + crowdsecGroup = "crowdsec" forceYes bool @@ -144,7 +145,11 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if metabasePassword == "" { isValid := passwordIsValid(metabasePassword) for !isValid { - metabasePassword = idgen.GeneratePassword(16) + var err error + metabasePassword, err = idgen.GeneratePassword(16) + if err != nil { + return err + } isValid = passwordIsValid(metabasePassword) } } @@ -162,7 +167,9 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if err = cli.chownDatabase(dockerGroup.Gid); err != nil { return err } - mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, 
metabaseContainerID, metabaseImage) + mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, + metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, + metabaseContainerID, metabaseImage, metabaseContainerEnvironmentVariables) if err != nil { return err } @@ -189,6 +196,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") // flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") flags.StringVar(&metabasePassword, "password", "", "metabase password") + flags.StringSliceVarP(&metabaseContainerEnvironmentVariables, "env", "e", nil, "Additional environment variables to pass to the metabase container") return cmd } @@ -243,7 +251,8 @@ func (cli *cliDashboard) newStopCmd() *cobra.Command { } func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { - cmd := &cobra.Command{Use: "show-password", + cmd := &cobra.Command{ + Use: "show-password", Short: "displays password of metabase.", Args: cobra.NoArgs, DisableAutoGenTag: true, @@ -457,7 +466,6 @@ func checkGroups(forceYes *bool) (*user.Group, error) { func (cli *cliDashboard) chownDatabase(gid string) error { cfg := cli.cfg() intID, err := strconv.Atoi(gid) - if err != nil { return fmt.Errorf("unable to convert group ID to int: %s", err) } diff --git a/cmd/crowdsec-cli/idgen/machineid.go b/cmd/crowdsec-cli/idgen/machineid.go index 4bd356b3abc..434f79128e9 100644 --- a/cmd/crowdsec-cli/idgen/machineid.go +++ b/cmd/crowdsec-cli/idgen/machineid.go @@ -42,7 +42,11 @@ func GenerateMachineID(prefix string) (string, error) { } prefix = strings.ReplaceAll(prefix, "-", "")[:32] - suffix := GeneratePassword(16) + + suffix, err := GeneratePassword(16) + if err != nil { + return "", err + } return prefix + suffix, nil } diff --git a/cmd/crowdsec-cli/idgen/password.go b/cmd/crowdsec-cli/idgen/password.go index e0faa4daacc..9f1925288ce 100644 --- 
a/cmd/crowdsec-cli/idgen/password.go +++ b/cmd/crowdsec-cli/idgen/password.go @@ -2,14 +2,13 @@ package idgen import ( saferand "crypto/rand" + "fmt" "math/big" - - log "github.com/sirupsen/logrus" ) const PasswordLength = 64 -func GeneratePassword(length int) string { +func GeneratePassword(length int) (string, error) { upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" lower := "abcdefghijklmnopqrstuvwxyz" digits := "0123456789" @@ -22,11 +21,11 @@ func GeneratePassword(length int) string { for i := range length { rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) if err != nil { - log.Fatalf("failed getting data from prng for password generation : %s", err) + return "", fmt.Errorf("prng failed to generate unique id or password: %w", err) } buf[i] = charset[rInt.Int64()] } - return string(buf) + return string(buf), nil } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 1cca03b1d3d..a17bafb96d8 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconfig" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clidecision" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" @@ -91,7 +92,6 @@ func loadConfigFor(command string) (*csconfig.Config, string, error) { "help", "completion", "version", - "hubtest", } if !slices.Contains(noNeedConfig, command) { @@ -146,7 +146,10 @@ func (cli *cliRoot) initialize() error { return fmt.Errorf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) } - log.SetFormatter(&log.TextFormatter{DisableTimestamp: true}) + log.SetFormatter(&log.TextFormatter{ + DisableTimestamp: true, + DisableLevelTruncation: true, 
+ }) if csConfig.Cscli.Output == "json" { log.SetFormatter(&log.JSONFormatter{}) @@ -254,7 +257,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) - cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) + cmd.AddCommand(cliconfig.New(cli.cfg).NewCommand(func() string { return mergedConfig })) cmd.AddCommand(clihub.New(cli.cfg).NewCommand()) cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) @@ -302,6 +305,8 @@ func main() { } if err := cmd.Execute(); err != nil { - log.Fatal(err) + red := color.New(color.FgRed).SprintFunc() + fmt.Fprintln(os.Stderr, red("Error:"), err) + os.Exit(1) } } diff --git a/cmd/crowdsec-cli/reload/message.go b/cmd/crowdsec-cli/reload/message.go new file mode 100644 index 00000000000..cd8e7d4795f --- /dev/null +++ b/cmd/crowdsec-cli/reload/message.go @@ -0,0 +1,6 @@ +//go:build !windows && !freebsd && !linux + +package reload + +// generic message since we don't know the platform +const message = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_freebsd.go b/cmd/crowdsec-cli/reload/message_freebsd.go similarity index 64% rename from cmd/crowdsec-cli/reload/reload_freebsd.go rename to cmd/crowdsec-cli/reload/message_freebsd.go index 0dac99f2315..9328f935be8 100644 --- a/cmd/crowdsec-cli/reload/reload_freebsd.go +++ b/cmd/crowdsec-cli/reload/message_freebsd.go @@ -1,4 +1,4 @@ package reload // actually sudo is not that popular on freebsd, but this will do -const Message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." +const message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/reload/reload_linux.go b/cmd/crowdsec-cli/reload/message_linux.go similarity index 62% rename from cmd/crowdsec-cli/reload/reload_linux.go rename to cmd/crowdsec-cli/reload/message_linux.go index fbe16e5f168..11c95165372 100644 --- a/cmd/crowdsec-cli/reload/reload_linux.go +++ b/cmd/crowdsec-cli/reload/message_linux.go @@ -1,4 +1,4 @@ package reload // assume systemd, although gentoo and others may differ -const Message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." +const message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/message_windows.go b/cmd/crowdsec-cli/reload/message_windows.go new file mode 100644 index 00000000000..888cb44b0d2 --- /dev/null +++ b/cmd/crowdsec-cli/reload/message_windows.go @@ -0,0 +1,3 @@ +package reload + +const message = "Please restart the crowdsec service for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload.go b/cmd/crowdsec-cli/reload/reload.go index fe03af1ea79..44d001fda0c 100644 --- a/cmd/crowdsec-cli/reload/reload.go +++ b/cmd/crowdsec-cli/reload/reload.go @@ -1,6 +1,20 @@ -//go:build !windows && !freebsd && !linux - package reload -// generic message since we don't know the platform -const Message = "Please reload the crowdsec process for the new configuration to be effective." +import ( + "os" + + "github.com/crowdsecurity/go-cs-lib/version" + isatty "github.com/mattn/go-isatty" +) + +func UserMessage() string { + if version.System == "docker" { + if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) { + return "You may need to restart the container to apply the changes." 
+ } + + return "" + } + + return message +} diff --git a/cmd/crowdsec-cli/reload/reload_windows.go b/cmd/crowdsec-cli/reload/reload_windows.go deleted file mode 100644 index 88642425ae2..00000000000 --- a/cmd/crowdsec-cli/reload/reload_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package reload - -const Message = "Please restart the crowdsec service for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/require/branch.go b/cmd/crowdsec-cli/require/branch.go index 09acc0fef8a..ab9b8e50bdc 100644 --- a/cmd/crowdsec-cli/require/branch.go +++ b/cmd/crowdsec-cli/require/branch.go @@ -69,7 +69,7 @@ func chooseBranch(ctx context.Context, cfg *csconfig.Config) string { return "master" } - csVersion := cwversion.VersionStrip() + csVersion := cwversion.BaseVersion() if csVersion == "" { log.Warning("Crowdsec version is not set, using hub branch 'master'") return "master" diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 191eee55bc5..beffa29f3eb 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -27,7 +27,7 @@ func LAPI(c *csconfig.Config) error { func CAPI(c *csconfig.Config) error { if c.API.Server.OnlineClient == nil { - return fmt.Errorf("no configuration for Central API (CAPI) in '%s'", *c.FilePath) + return fmt.Errorf("no configuration for Central API (CAPI) in '%s'", c.FilePath) } return nil @@ -82,15 +82,13 @@ func Notifications(c *csconfig.Config) error { return nil } -// RemoteHub returns the configuration required to download hub index and items: url, branch, etc. 
-func RemoteHub(ctx context.Context, c *csconfig.Config) *cwhub.RemoteHubCfg { +func HubDownloader(ctx context.Context, c *csconfig.Config) *cwhub.Downloader { // set branch in config, and log if necessary branch := HubBranch(ctx, c) urlTemplate := HubURLTemplate(c) - remote := &cwhub.RemoteHubCfg{ + remote := &cwhub.Downloader{ Branch: branch, URLTemplate: urlTemplate, - IndexPath: ".index.json", } return remote @@ -98,7 +96,7 @@ func RemoteHub(ctx context.Context, c *csconfig.Config) *cwhub.RemoteHubCfg { // Hub initializes the hub. If a remote configuration is provided, it can be used to download the index and items. // If no remote parameter is provided, the hub can only be used for local operations. -func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) (*cwhub.Hub, error) { +func Hub(c *csconfig.Config, logger *logrus.Logger) (*cwhub.Hub, error) { local := c.Hub if local == nil { @@ -110,13 +108,13 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) logger.SetOutput(io.Discard) } - hub, err := cwhub.NewHub(local, remote, logger) + hub, err := cwhub.NewHub(local, logger) if err != nil { return nil, err } if err := hub.Load(); err != nil { - return nil, fmt.Errorf("failed to read Hub index: %w. 
Run 'sudo cscli hub update' to download the index again", err) + return nil, err } return hub, nil diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 66c0d71e777..3581d69f052 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -1,4 +1,5 @@ //go:build !no_cscli_setup + package main import ( diff --git a/cmd/crowdsec/appsec.go b/cmd/crowdsec/appsec.go index cb02b137dcd..4320133b063 100644 --- a/cmd/crowdsec/appsec.go +++ b/cmd/crowdsec/appsec.go @@ -1,4 +1,4 @@ -// +build !no_datasource_appsec +//go:build !no_datasource_appsec package main diff --git a/cmd/crowdsec/fatalhook.go b/cmd/crowdsec/fatalhook.go index 84a57406a21..56e945c84a5 100644 --- a/cmd/crowdsec/fatalhook.go +++ b/cmd/crowdsec/fatalhook.go @@ -2,6 +2,7 @@ package main import ( "io" + "os" log "github.com/sirupsen/logrus" ) @@ -9,16 +10,35 @@ import ( // FatalHook is used to log fatal messages to stderr when the rest goes to a file type FatalHook struct { Writer io.Writer + Formatter log.Formatter LogLevels []log.Level } +func newFatalHook() *FatalHook { + return &FatalHook{ + Writer: os.Stderr, + Formatter: &log.TextFormatter{ + DisableTimestamp: true, + // XXX: logrus.TextFormatter has either key pairs with no colors, + // or "LEVEL [optional timestamp] message", with colors. + // We force colors to make sure we get the latter, even if + // the output is not a terminal. + // There are more flexible formatters that don't conflate the two concepts, + // or we can write our own. 
+ ForceColors: true, + DisableLevelTruncation: true, + }, + LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, + } +} + func (hook *FatalHook) Fire(entry *log.Entry) error { - line, err := entry.String() + line, err := hook.Formatter.Format(entry) if err != nil { return err } - _, err = hook.Writer.Write([]byte(line)) + _, err = hook.Writer.Write(line) return err } diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 6d8ca24c335..02220e15216 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -86,20 +86,15 @@ func (f *Flags) haveTimeMachine() bool { type labelsMap map[string]string func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { - var ( - err error - files []string - ) - - for _, hubScenarioItem := range hub.GetInstalledByType(cwhub.SCENARIOS, false) { - files = append(files, hubScenarioItem.State.LocalPath) - } + var err error buckets = leakybucket.NewBuckets() - log.Infof("Loading %d scenario files", len(files)) + scenarios := hub.GetInstalledByType(cwhub.SCENARIOS, false) + + log.Infof("Loading %d scenario files", len(scenarios)) - holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) + holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, scenarios, &bucketsTomb, buckets, flags.OrderEvent) if err != nil { return fmt.Errorf("scenario loading failed: %w", err) } @@ -148,14 +143,14 @@ func (l *labelsMap) String() string { return "labels" } -func (l labelsMap) Set(label string) error { +func (l *labelsMap) Set(label string) error { for _, pair := range strings.Split(label, ",") { split := strings.Split(pair, ":") if len(split) != 2 { return fmt.Errorf("invalid format for label '%s', must be key:value", pair) } - l[split[0]] = split[1] + (*l)[split[0]] = split[1] } return nil @@ -254,16 +249,13 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if err := 
types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, cConfig.Common.LogDir, *cConfig.Common.LogLevel, cConfig.Common.LogMaxSize, cConfig.Common.LogMaxFiles, - cConfig.Common.LogMaxAge, cConfig.Common.CompressLogs, + cConfig.Common.LogMaxAge, cConfig.Common.LogFormat, cConfig.Common.CompressLogs, cConfig.Common.ForceColorLogs); err != nil { return nil, err } if cConfig.Common.LogMedia != "stdout" { - log.AddHook(&FatalHook{ - Writer: os.Stderr, - LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, - }) + log.AddHook(newFatalHook()) } if err := csconfig.LoadFeatureFlagsFile(configFile, log.StandardLogger()); err != nil { diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 2fc7d7e42c9..4c83b65bd48 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -46,7 +46,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea // here we can bucketify with parsed poured, err := leaky.PourItemToHolders(parsed, holders, buckets) if err != nil { - log.Errorf("bucketify failed for: %v", parsed) + log.Errorf("bucketify failed for: %v with %s", parsed, err) continue } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 14602c425fe..0f7a84ce5c7 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -85,7 +85,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, log.StandardLogger()) if err != nil { return nil, err } @@ -387,7 +387,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, log.StandardLogger()) if err != nil { return err } @@ -419,7 +419,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if cConfig.Common != nil && cConfig.Common.Daemonize { - csdaemon.Notify(csdaemon.Ready, 
log.StandardLogger()) + _ = csdaemon.Notify(csdaemon.Ready, log.StandardLogger()) // wait for signals return HandleSignals(cConfig) } diff --git a/cmd/crowdsec/win_service.go b/cmd/crowdsec/win_service.go index 6aa363ca3a7..ae48e77447c 100644 --- a/cmd/crowdsec/win_service.go +++ b/cmd/crowdsec/win_service.go @@ -67,7 +67,7 @@ func runService(name string) error { // All the calls to logging before the logger is configured are pretty much useless, but we keep them for clarity err := eventlog.InstallAsEventCreate("CrowdSec", eventlog.Error|eventlog.Warning|eventlog.Info) if err != nil { - if errno, ok := err.(syscall.Errno); ok { + if errno, ok := err.(syscall.Errno); ok { //nolint:errorlint if errno == windows.ERROR_ACCESS_DENIED { log.Warnf("Access denied when installing event source, running as non-admin ?") } else { diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 5fc02cdd1d7..b61644611b4 100644 --- a/cmd/notification-email/main.go +++ b/cmd/notification-email/main.go @@ -68,7 +68,7 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) ( EncryptionType: "ssltls", AuthType: "login", SenderEmail: "crowdsec@crowdsec.local", - HeloHost: "localhost", + HeloHost: "localhost", } if err := yaml.Unmarshal(config.Config, &d); err != nil { diff --git a/debian/install b/debian/install index fa422cac8d9..2d4cc6e1a7f 100644 --- a/debian/install +++ b/debian/install @@ -3,7 +3,6 @@ config/profiles.yaml etc/crowdsec/ config/simulation.yaml etc/crowdsec/ config/patterns/* etc/crowdsec/patterns -config/crowdsec.service lib/systemd/system # Referenced configs: cmd/notification-slack/slack.yaml etc/crowdsec/notifications/ diff --git a/debian/postinst b/debian/postinst index 77f2511f556..b73619b9e6f 100644 --- a/debian/postinst +++ b/debian/postinst @@ -11,14 +11,6 @@ if [ "$1" = configure ]; then mkdir -p /var/lib/crowdsec/data fi - if [[ -d /var/lib/crowdsec/backup ]]; then - cscli config restore 
/var/lib/crowdsec/backup/backup.config - rm -rf /var/lib/crowdsec/backup - /usr/bin/cscli hub update - /usr/bin/cscli hub upgrade - systemctl start crowdsec - fi - . /usr/share/crowdsec/wizard.sh -n if ! [[ -f /etc/crowdsec/acquis.yaml ]]; then echo Creating /etc/crowdsec/acquis.yaml @@ -76,18 +68,14 @@ if [ "$1" = configure ]; then echo Updating hub /usr/bin/cscli hub update + /usr/bin/cscli hub upgrade + if [ "$COLLECTIONS" = true ]; then set +e CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection set -e fi - - if [[ -f /var/lib/crowdsec/data/crowdsec.db.backup ]]; then - cp /var/lib/crowdsec/data/crowdsec.db.backup /var/lib/crowdsec/data/crowdsec.db - rm -f /var/lib/crowdsec/data/crowdsec.db.backup - fi - systemctl --quiet is-enabled crowdsec || systemctl unmask crowdsec && systemctl enable crowdsec API=$(cscli config show --key "Config.API.Server") @@ -103,12 +91,18 @@ if [ "$1" = configure ]; then echo "This port is configured through /etc/crowdsec/config.yaml and /etc/crowdsec/local_api_credentials.yaml" fi - echo "Get started with CrowdSec:" - echo " * Detailed guides are available in our documentation: https://docs.crowdsec.net" - echo " * Configuration items created by the community can be found at the Hub: https://hub.crowdsec.net" - echo " * Gain insights into your use of CrowdSec with the help of the console https://app.crowdsec.net" - - + GREEN='\033[0;32m' + BOLD='\033[1m' + RESET='\033[0m' + + echo -e "${BOLD}Get started with CrowdSec:${RESET}" + echo -e " * Go further by following our ${BOLD}post installation steps${RESET} : ${GREEN}${BOLD}https://docs.crowdsec.net/u/getting_started/next_steps${RESET}" + echo -e "====================================================================================================================" + echo -e " * Install a ${BOLD}remediation component${RESET} to block attackers: ${GREEN}${BOLD}https://docs.crowdsec.net/u/bouncers/intro${RESET}" + echo -e 
"====================================================================================================================" + echo -e " * Find more ${BOLD}collections${RESET}, ${BOLD}parsers${RESET} and ${BOLD}scenarios${RESET} created by the community with the Hub: ${GREEN}${BOLD}https://hub.crowdsec.net${RESET}" + echo -e "====================================================================================================================" + echo -e " * Subscribe to ${BOLD}additional blocklists${RESET}, ${BOLD}visualize${RESET} your alerts and more with the console: ${GREEN}${BOLD}https://app.crowdsec.net${RESET}" fi echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'" diff --git a/debian/preinst b/debian/preinst deleted file mode 100644 index 217b836caa6..00000000000 --- a/debian/preinst +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -set -e - -# Source debconf library. -. /usr/share/debconf/confmodule - - -OLD_MAJOR_VERSION=$(echo $2 | cut -d'.' -f1) -OLD_MINOR_VERSION=$(echo $2 | cut -d'.' -f2) -OLD_PATCH_VERSION=$(echo $2 | cut -d'.' -f3|cut -d'-' -f1) - -NEW_MAJOR_VERSION=$(echo $3 | cut -d'.' -f1) -NEW_MINOR_VERSION=$(echo $3 | cut -d'.' -f2) -NEW_PATCH_VERSION=$(echo $3 | cut -d'.' -f3|cut -d'-' -f1) - - - -if [ "$1" = upgrade ]; then - - OLD_MAJOR_VERSION=$(echo $2 | cut -d'.' -f1) - OLD_MINOR_VERSION=$(echo $2 | cut -d'.' -f2) - OLD_PATCH_VERSION=$(echo $2 | cut -d'.' -f3|cut -d'-' -f1) - - NEW_MAJOR_VERSION=$(echo $3 | cut -d'.' -f1) - NEW_MINOR_VERSION=$(echo $3 | cut -d'.' -f2) - NEW_PATCH_VERSION=$(echo $3 | cut -d'.' 
-f3|cut -d'-' -f1) - - - if [[ $OLD_MAJOR_VERSION -eq "1" ]] && [[ $OLD_MINOR_VERSION -eq "0" ]] && [[ $OLD_PATCH_VERSION -lt "9" ]]; then - if [[ -f /var/lib/crowdsec/data/crowdsec.db ]]; then - cp /var/lib/crowdsec/data/crowdsec.db /var/lib/crowdsec/data/crowdsec.db.backup - fi - fi - - if [[ $NEW_MAJOR_VERSION -gt $OLD_MAJOR_VERSION ]]; then - echo "Stopping crowdsec" - systemctl stop crowdsec || true - cscli config backup /var/lib/crowdsec/backup - fi -fi - -echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'" diff --git a/debian/prerm b/debian/prerm index a463a6a1c80..10afcf1906d 100644 --- a/debian/prerm +++ b/debian/prerm @@ -1,9 +1,8 @@ if [ "$1" = "remove" ]; then - cscli dashboard remove -f -y --error || echo "Ignore the above error if you never installed the local dashboard." systemctl stop crowdsec systemctl disable crowdsec fi if [ "$1" = "upgrade" ]; then systemctl stop crowdsec -fi \ No newline at end of file +fi diff --git a/debian/rules b/debian/rules index 5b8d6fc51f8..ec80caff985 100755 --- a/debian/rules +++ b/debian/rules @@ -1,6 +1,6 @@ #!/usr/bin/make -f -export DEB_VERSION=$(shell dpkg-parsechangelog | grep -E '^Version:' | cut -f 2 -d ' ') +export DEB_VERSION=$(shell dpkg-parsechangelog -SVersion) export BUILD_VERSION=v${DEB_VERSION}-debian-pragmatic export GO111MODULE=on diff --git a/docker/test/.python-version b/docker/test/.python-version new file mode 100644 index 00000000000..e4fba218358 --- /dev/null +++ b/docker/test/.python-version @@ -0,0 +1 @@ +3.12 diff --git a/docker/test/Pipfile b/docker/test/Pipfile deleted file mode 100644 index c57ccb628e8..00000000000 --- a/docker/test/Pipfile +++ /dev/null @@ -1,11 +0,0 @@ -[packages] -pytest-dotenv = "0.5.2" -pytest-xdist = "3.5.0" -pytest-cs = {ref = "0.7.19", git = "https://github.com/crowdsecurity/pytest-cs.git"} - -[dev-packages] -gnureadline = "8.1.2" -ipdb = "0.13.13" - -[requires] -python_version = "*" diff --git 
a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock deleted file mode 100644 index 99184d9f2a2..00000000000 --- a/docker/test/Pipfile.lock +++ /dev/null @@ -1,604 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "b5d25a7199d15a900b285be1af97cf7b7083c6637d631ad777b454471c8319fe" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "*" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - "default": { - "certifi": { - "hashes": [ - "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", - "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" - ], - "markers": "python_version >= '3.6'", - "version": "==2024.8.30" - }, - "cffi": { - "hashes": [ - "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", - "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", - "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1", - "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", - "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", - "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", - "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", - "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", - "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", - "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", - "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc", - "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", - "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", - "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", - "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", - "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", - 
"sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", - "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", - "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", - "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b", - "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", - "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", - "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c", - "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", - "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", - "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", - "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8", - "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1", - "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", - "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", - "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", - "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", - "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", - "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", - "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", - "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", - "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", - "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", - "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", - "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", - "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", - "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", 
- "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", - "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964", - "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", - "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", - "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", - "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", - "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", - "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", - "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", - "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", - "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", - "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", - "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", - "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", - "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", - "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9", - "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", - "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", - "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", - "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", - "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", - "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", - "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", - "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", - "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b" - ], - "markers": "platform_python_implementation != 'PyPy'", - "version": 
"==1.17.1" - }, - "charset-normalizer": { - "hashes": [ - "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027", - "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087", - "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786", - "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", - "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09", - "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185", - "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", - "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e", - "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519", - "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898", - "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269", - "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3", - "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f", - "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6", - "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8", - "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a", - "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73", - "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", - "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714", - "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2", - "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", - "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", - "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d", - "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", - "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", - 
"sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269", - "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", - "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d", - "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a", - "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", - "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", - "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d", - "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0", - "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", - "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", - "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac", - "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25", - "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", - "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", - "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", - "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2", - "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", - "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", - "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5", - "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99", - "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c", - "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", - "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811", - "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", - "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", - "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03", 
- "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", - "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04", - "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c", - "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", - "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458", - "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", - "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99", - "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985", - "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537", - "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238", - "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f", - "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d", - "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796", - "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a", - "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", - "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8", - "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c", - "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5", - "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5", - "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711", - "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4", - "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6", - "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c", - "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", - "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4", - 
"sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", - "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", - "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12", - "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c", - "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", - "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8", - "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", - "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b", - "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", - "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", - "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", - "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33", - "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519", - "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561" - ], - "markers": "python_full_version >= '3.7.0'", - "version": "==3.3.2" - }, - "cryptography": { - "hashes": [ - "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494", - "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806", - "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d", - "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062", - "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2", - "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4", - "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1", - "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85", - "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84", - "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042", - 
"sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d", - "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962", - "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2", - "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa", - "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d", - "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365", - "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96", - "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47", - "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d", - "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d", - "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c", - "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb", - "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277", - "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172", - "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034", - "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a", - "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289" - ], - "markers": "python_version >= '3.7'", - "version": "==43.0.1" - }, - "docker": { - "hashes": [ - "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", - "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0" - ], - "markers": "python_version >= '3.8'", - "version": "==7.1.0" - }, - "execnet": { - "hashes": [ - "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", - "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3" - ], - "markers": "python_version >= '3.8'", - "version": "==2.1.1" - }, - "idna": { - "hashes": [ - "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", - 
"sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" - ], - "markers": "python_version >= '3.6'", - "version": "==3.10" - }, - "iniconfig": { - "hashes": [ - "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", - "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" - ], - "markers": "python_version >= '3.7'", - "version": "==2.0.0" - }, - "packaging": { - "hashes": [ - "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", - "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" - ], - "markers": "python_version >= '3.8'", - "version": "==24.1" - }, - "pluggy": { - "hashes": [ - "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", - "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" - ], - "markers": "python_version >= '3.8'", - "version": "==1.5.0" - }, - "psutil": { - "hashes": [ - "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35", - "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0", - "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c", - "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1", - "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3", - "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c", - "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd", - "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3", - "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0", - "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2", - "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6", - "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d", - "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c", - 
"sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0", - "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132", - "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14", - "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==6.0.0" - }, - "pycparser": { - "hashes": [ - "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", - "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc" - ], - "markers": "python_version >= '3.8'", - "version": "==2.22" - }, - "pytest": { - "hashes": [ - "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181", - "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2" - ], - "markers": "python_version >= '3.8'", - "version": "==8.3.3" - }, - "pytest-cs": { - "git": "https://github.com/crowdsecurity/pytest-cs.git", - "ref": "aea7e8549faa32f5e1d1f17755a5db3712396a2a" - }, - "pytest-datadir": { - "hashes": [ - "sha256:1617ed92f9afda0c877e4eac91904b5f779d24ba8f5e438752e3ae39d8d2ee3f", - "sha256:34adf361bcc7b37961bbc1dfa8d25a4829e778bab461703c38a5c50ca9c36dc8" - ], - "markers": "python_version >= '3.8'", - "version": "==1.5.0" - }, - "pytest-dotenv": { - "hashes": [ - "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732", - "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f" - ], - "index": "pypi", - "version": "==0.5.2" - }, - "pytest-xdist": { - "hashes": [ - "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", - "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" - ], - "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==3.5.0" - }, - "python-dotenv": { - "hashes": [ - "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", - 
"sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a" - ], - "markers": "python_version >= '3.8'", - "version": "==1.0.1" - }, - "pyyaml": { - "hashes": [ - "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", - "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", - "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", - "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", - "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", - "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", - "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", - "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", - "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", - "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", - "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", - "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", - "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", - "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", - "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", - "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", - "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", - "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", - "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", - "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", - "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", - "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", - "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", - 
"sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", - "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", - "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", - "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", - "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", - "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", - "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", - "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", - "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", - "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", - "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", - "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", - "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", - "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", - "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", - "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", - "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", - "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", - "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", - "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", - "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", - "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", - "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", - "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", - "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", - "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", 
- "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", - "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", - "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", - "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" - ], - "markers": "python_version >= '3.8'", - "version": "==6.0.2" - }, - "requests": { - "hashes": [ - "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", - "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" - ], - "markers": "python_version >= '3.8'", - "version": "==2.32.3" - }, - "trustme": { - "hashes": [ - "sha256:5375ad7fb427074bec956592e0d4ee2a4cf4da68934e1ba4bcf4217126bc45e6", - "sha256:ce105b68fb9f6d7ac7a9ee6e95bb2347a22ce4d3be78ef9a6494d5ef890e1e16" - ], - "markers": "python_version >= '3.8'", - "version": "==1.1.0" - }, - "urllib3": { - "hashes": [ - "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", - "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" - ], - "markers": "python_version >= '3.8'", - "version": "==2.2.3" - } - }, - "develop": { - "asttokens": { - "hashes": [ - "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24", - "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0" - ], - "version": "==2.4.1" - }, - "decorator": { - "hashes": [ - "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", - "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186" - ], - "markers": "python_version >= '3.11'", - "version": "==5.1.1" - }, - "executing": { - "hashes": [ - "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", - "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab" - ], - "markers": "python_version >= '3.8'", - "version": "==2.1.0" - }, - "gnureadline": { - "hashes": [ - 
"sha256:17a651e0c49d4b44e8ccf8992edc5a544e33ed9695d3b940ef002858c2215744", - "sha256:194bafa818d0fc3d46f8d71a8811a297a493c1264d3e2d0a71b1b1ff05f8fc15", - "sha256:1e3a8aaf1d61d351c16ad2d3425caf5768603ff5d0e86ba61da9b8756bdd1b95", - "sha256:264f22e865975a3c2ac1183f431dddd8ff7de5a645b89a801c6a276d800f49f3", - "sha256:2753aa1e46b4260b38da424c6a7da7a3ddac161a0b4e6fb71c1093e9ef3d2e73", - "sha256:2816bac8be6bc0e3aa2301acac76e308137eeef1b618c9e0c95c1f89a139a4d8", - "sha256:2ce5c49ecc54e1df0193e90422806a5940f908553206689aeaa04bc959d3aa9a", - "sha256:33ea248385e0d87a3fada38c9164a5756861aa59d6ee010c8be30eeb41f41b49", - "sha256:3903cba2987d42340f1d85c38d3780e954c95e64bfe1839002c7818aa63f8ac3", - "sha256:4262a6aa356ab22ef642f43a7f94eb42a72d6f0c532edb4e8c6b933f573056d2", - "sha256:49df5a432e4ff39cee1b0632c6d0e5fb304757113e502d70b50e33d9ffa47372", - "sha256:4ad9b10409d969ba42acbf89e58352cf3043a5155c2ee677d061e292336b5479", - "sha256:5e1e2d34b0c4ad81c7b00019fafa6de2faf6969c55fa58229e26267cae34047e", - "sha256:5fde3e6417d9004381e8e9835e0a89d81d2d77eeace9364d2e3d9fb64054d449", - "sha256:72da8bac1eb24b6c8237a33d7019a3f004a3d5ba867337175ed764831d9a2c99", - "sha256:74f2538ac15ff4ef9534823abdef077bb34c7dd343e204a36d978f09e168462f", - "sha256:861936c9b362d96152af2d73ccb6f3e901e70f0e4a2e7e62f4e226e91d349edb", - "sha256:8c4690d6c89dbead0958b19263ae67ef995e6109d6bc880cb0e40720cb1ba301", - "sha256:aa29a18594277ea691f92b0c6627d594c0f3387a6685e2e42038ab3f718c794e", - "sha256:b422ff3a78e281ee2e19b0eff70efa48396284bbefa86b83438d668ea9d038a3", - "sha256:c1bcb32e3b63442570d6425055aa6d5c3b6e8b09b9c7d1f8333e70203166a5a3", - "sha256:c402bc6e107beb015ae18c3d2e11f28375f049e464423ead88b35affe80f9be0", - "sha256:c7971653083a48049abd52baa9c8c0188aee362e7b2dd236fe51ecd4e6bc9bbe", - "sha256:de3d8ea66f1b5d00ed843b8925fc07476b8c838c38e584af8639c6a976a43d08", - "sha256:deb921c2cbc14671bb81f3f33d9363a9d0720203b5d716baee32e51c399e914b", - "sha256:e84e903de1514043e6a22866a1973c2ad5f5717f78e9d54e4d6809c48fbd3d81", 
- "sha256:ecdc4368bd2f7ae9a22de31b024455222082cb49b98ee69ffd0a59734bf648e1" - ], - "index": "pypi", - "version": "==8.1.2" - }, - "ipdb": { - "hashes": [ - "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", - "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726" - ], - "index": "pypi", - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.13.13" - }, - "ipython": { - "hashes": [ - "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a", - "sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35" - ], - "markers": "python_version >= '3.11'", - "version": "==8.28.0" - }, - "jedi": { - "hashes": [ - "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd", - "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0" - ], - "markers": "python_version >= '3.6'", - "version": "==0.19.1" - }, - "matplotlib-inline": { - "hashes": [ - "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", - "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca" - ], - "markers": "python_version >= '3.8'", - "version": "==0.1.7" - }, - "parso": { - "hashes": [ - "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", - "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d" - ], - "markers": "python_version >= '3.6'", - "version": "==0.8.4" - }, - "pexpect": { - "hashes": [ - "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", - "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f" - ], - "markers": "sys_platform != 'win32' and sys_platform != 'emscripten'", - "version": "==4.9.0" - }, - "prompt-toolkit": { - "hashes": [ - "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90", - "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e" - ], - "markers": "python_full_version >= 
'3.7.0'", - "version": "==3.0.48" - }, - "ptyprocess": { - "hashes": [ - "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", - "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220" - ], - "version": "==0.7.0" - }, - "pure-eval": { - "hashes": [ - "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", - "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42" - ], - "version": "==0.2.3" - }, - "pygments": { - "hashes": [ - "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", - "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a" - ], - "markers": "python_version >= '3.8'", - "version": "==2.18.0" - }, - "six": { - "hashes": [ - "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", - "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.16.0" - }, - "stack-data": { - "hashes": [ - "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", - "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695" - ], - "version": "==0.6.3" - }, - "traitlets": { - "hashes": [ - "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", - "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f" - ], - "markers": "python_version >= '3.8'", - "version": "==5.14.3" - }, - "wcwidth": { - "hashes": [ - "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", - "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5" - ], - "version": "==0.2.13" - } - } -} diff --git a/docker/test/README.md b/docker/test/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docker/test/pyproject.toml b/docker/test/pyproject.toml new file mode 100644 index 00000000000..d32d184424f --- /dev/null +++ 
b/docker/test/pyproject.toml @@ -0,0 +1,41 @@ +[project] +name = "crowdsec-docker-tests" +version = "0.1.0" +description = "Docker tests for Crowdsec" +readme = "README.md" +requires-python = ">=3.12" +dependencies = [ + "pytest>=8.3.4", + "pytest-cs", + "pytest-dotenv>=0.5.2", + "pytest-xdist>=3.6.1", +] + +[dependency-groups] +dev = [ + "ipdb>=0.13.13", + "ruff>=0.9.3", +] + +[tool.uv.sources] +pytest-cs = { git = "https://github.com/crowdsecurity/pytest-cs" } + +[tool.ruff] + +line-length = 120 + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "C", # flake8-comprehensions + "B", # flake8-bugbear + "UP", # pyupgrade + "C90", # macabe +] + +ignore = [ + "B008", # do not perform function calls in argument defaults +] diff --git a/docker/test/tests/conftest.py b/docker/test/tests/conftest.py index 3498da82660..d32ffa28c37 100644 --- a/docker/test/tests/conftest.py +++ b/docker/test/tests/conftest.py @@ -1,11 +1,6 @@ - pytest_plugins = ("cs",) def pytest_configure(config): - config.addinivalue_line( - 'markers', 'docker: mark tests for lone or manually orchestrated containers' - ) - config.addinivalue_line( - 'markers', 'compose: mark tests for docker compose projects' - ) + config.addinivalue_line("markers", "docker: mark tests for lone or manually orchestrated containers") + config.addinivalue_line("markers", "compose: mark tests for docker compose projects") diff --git a/docker/test/tests/test_agent.py b/docker/test/tests/test_agent.py index e55d11af850..aec1bbdaae8 100644 --- a/docker/test/tests/test_agent.py +++ b/docker/test/tests/test_agent.py @@ -10,12 +10,12 @@ def test_no_agent(crowdsec, flavor): """Test DISABLE_AGENT=true""" env = { - 'DISABLE_AGENT': 'true', + "DISABLE_AGENT": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*CrowdSec Local API listening on *:8080*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = 
cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -24,23 +24,25 @@ def test_no_agent(crowdsec, flavor): def test_machine_register(crowdsec, flavor, tmp_path_factory): """A local agent is always registered for use by cscli""" - data_dir = tmp_path_factory.mktemp('data') + data_dir = tmp_path_factory.mktemp("data") env = { - 'DISABLE_AGENT': 'true', + "DISABLE_AGENT": "true", } volumes = { - data_dir: {'bind': '/var/lib/crowdsec/data', 'mode': 'rw'}, + data_dir: {"bind": "/var/lib/crowdsec/data", "mode": "rw"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -48,27 +50,31 @@ def test_machine_register(crowdsec, flavor, tmp_path_factory): # The local agent is not registered, because we didn't persist local_api_credentials.yaml with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local 
API (LAPI)" in stdout - config_dir = tmp_path_factory.mktemp('config') + config_dir = tmp_path_factory.mktemp("config") - volumes[config_dir] = {'bind': '/etc/crowdsec', 'mode': 'rw'} + volumes[config_dir] = {"bind": "/etc/crowdsec", "mode": "rw"} with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -76,12 +82,14 @@ def test_machine_register(crowdsec, flavor, tmp_path_factory): # The local agent is now already registered with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Local agent already registered*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_agent_only.py b/docker/test/tests/test_agent_only.py index 038b726e324..4e1689e0b9b 100644 --- a/docker/test/tests/test_agent_only.py +++ b/docker/test/tests/test_agent_only.py @@ -1,7 +1,7 @@ #!/usr/bin/env python -from http import HTTPStatus import random +from http import HTTPStatus import pytest @@ -10,19 +10,19 @@ def test_split_lapi_agent(crowdsec, flavor): rand = str(random.randint(0, 10000)) - lapiname = f'lapi-{rand}' - agentname = f'agent-{rand}' + lapiname = 
f"lapi-{rand}" + agentname = f"agent-{rand}" lapi_env = { - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", } agent_env = { - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'DISABLE_LOCAL_API': 'true', - 'LOCAL_API_URL': f'http://{lapiname}:8080', + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", + "DISABLE_LOCAL_API": "true", + "LOCAL_API_URL": f"http://{lapiname}:8080", } cs_lapi = crowdsec(name=lapiname, environment=lapi_env, flavor=flavor) @@ -30,10 +30,10 @@ def test_split_lapi_agent(crowdsec, flavor): with cs_lapi as lapi: lapi.wait_for_log("*CrowdSec Local API listening on *:8080*") - lapi.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) + lapi.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_bouncer.py b/docker/test/tests/test_bouncer.py index 98b86de858c..d87aff734c5 100644 --- a/docker/test/tests/test_bouncer.py +++ b/docker/test/tests/test_bouncer.py @@ -5,8 +5,8 @@ """ import hashlib -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -21,36 +21,33 @@ def hex512(s): def test_register_bouncer_env(crowdsec, flavor): """Test installing bouncers at startup, from envvar""" - env = { - 'BOUNCER_KEY_bouncer1name': 'bouncer1key', - 'BOUNCER_KEY_bouncer2name': 'bouncer2key' - } + env = {"BOUNCER_KEY_bouncer1name": "bouncer1key", "BOUNCER_KEY_bouncer2name": "bouncer2key"} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = 
cs.cont.exec_run('cscli bouncers list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 2 bouncer1, bouncer2 = j - assert bouncer1['name'] == 'bouncer1name' - assert bouncer2['name'] == 'bouncer2name' + assert bouncer1["name"] == "bouncer1name" + assert bouncer2["name"] == "bouncer2name" # add a second bouncer at runtime - res = cs.cont.exec_run('cscli bouncers add bouncer3name -k bouncer3key') + res = cs.cont.exec_run("cscli bouncers add bouncer3name -k bouncer3key") assert res.exit_code == 0 - res = cs.cont.exec_run('cscli bouncers list -o json') + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 3 bouncer3 = j[2] - assert bouncer3['name'] == 'bouncer3name' + assert bouncer3["name"] == "bouncer3name" # remove all bouncers - res = cs.cont.exec_run('cscli bouncers delete bouncer1name bouncer2name bouncer3name') + res = cs.cont.exec_run("cscli bouncers delete bouncer1name bouncer2name bouncer3name") assert res.exit_code == 0 - res = cs.cont.exec_run('cscli bouncers list -o json') + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 0 diff --git a/docker/test/tests/test_capi.py b/docker/test/tests/test_capi.py index 08b3a70471e..ad25f7a766f 100644 --- a/docker/test/tests/test_capi.py +++ b/docker/test/tests/test_capi.py @@ -3,6 +3,7 @@ from http import HTTPStatus import pytest + pytestmark = pytest.mark.docker @@ -10,13 +11,13 @@ def test_no_capi(crowdsec, flavor): """Test no CAPI (disabled by default in tests)""" env = { - 'DISABLE_ONLINE_API': 'true', + "DISABLE_ONLINE_API": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = 
cs.cont.exec_run('cscli capi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli capi status") assert res.exit_code == 1 assert "You can successfully interact with Central API (CAPI)" not in res.output.decode() @@ -29,17 +30,19 @@ def test_capi(crowdsec, flavor): """Test CAPI""" env = { - 'DISABLE_ONLINE_API': 'false', + "DISABLE_ONLINE_API": "false", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli capi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli capi status") assert res.exit_code == 0 assert "You can successfully interact with Central API (CAPI)" in res.output.decode() - cs.wait_for_log([ - "*Successfully registered to Central API (CAPI)*", - "*Registration to online API done*", - ]) + cs.wait_for_log( + [ + "*Successfully registered to Central API (CAPI)*", + "*Registration to online API done*", + ] + ) diff --git a/docker/test/tests/test_capi_whitelists.py b/docker/test/tests/test_capi_whitelists.py index 19378ba86f0..6cdd5f401f5 100644 --- a/docker/test/tests/test_capi_whitelists.py +++ b/docker/test/tests/test_capi_whitelists.py @@ -1,32 +1,32 @@ #!/usr/bin/env python from http import HTTPStatus -import yaml import pytest +import yaml pytestmark = pytest.mark.docker -def test_capi_whitelists(crowdsec, tmp_path_factory, flavor,): +def test_capi_whitelists( + crowdsec, + tmp_path_factory, + flavor, +): """Test CAPI_WHITELISTS_PATH""" - env = { - "CAPI_WHITELISTS_PATH": "/path/to/whitelists.yaml" - } + env = {"CAPI_WHITELISTS_PATH": "/path/to/whitelists.yaml"} whitelists = tmp_path_factory.mktemp("whitelists") with open(whitelists / "whitelists.yaml", "w") as f: yaml.dump({"ips": ["1.2.3.4", "2.3.4.5"], "cidrs": ["1.2.3.0/24"]}, f) - volumes = { - whitelists / "whitelists.yaml": {"bind": 
"/path/to/whitelists.yaml", "mode": "ro"} - } + volumes = {whitelists / "whitelists.yaml": {"bind": "/path/to/whitelists.yaml", "mode": "ro"}} with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show-yaml') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show-yaml") assert res.exit_code == 0 stdout = res.output.decode() y = yaml.safe_load(stdout) - assert y['api']['server']['capi_whitelists_path'] == '/path/to/whitelists.yaml' + assert y["api"]["server"]["capi_whitelists_path"] == "/path/to/whitelists.yaml" diff --git a/docker/test/tests/test_cold_logs.py b/docker/test/tests/test_cold_logs.py index 6f6c578ebe0..2eb3248ffd7 100644 --- a/docker/test/tests/test_cold_logs.py +++ b/docker/test/tests/test_cold_logs.py @@ -2,16 +2,15 @@ import datetime -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker def test_cold_logs(crowdsec, tmp_path_factory, flavor): env = { - 'DSN': 'file:///var/log/toto.log', + "DSN": "file:///var/log/toto.log", } logs = tmp_path_factory.mktemp("logs") @@ -20,11 +19,11 @@ def test_cold_logs(crowdsec, tmp_path_factory, flavor): with open(logs / "toto.log", "w") as f: # like date '+%b %d %H:%M:%S' but in python for i in range(10): - ts = (now + datetime.timedelta(seconds=i)).strftime('%b %d %H:%M:%S') - f.write(ts + ' sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424\n') + ts = (now + datetime.timedelta(seconds=i)).strftime("%b %d %H:%M:%S") + f.write(ts + " sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424\n") volumes = { - logs / "toto.log": {'bind': '/var/log/toto.log', 'mode': 'ro'}, + logs / "toto.log": {"bind": "/var/log/toto.log", "mode": "ro"}, } # missing type @@ -32,20 +31,22 @@ def test_cold_logs(crowdsec, tmp_path_factory, 
flavor): with crowdsec(flavor=flavor, environment=env, volumes=volumes, wait_status=Status.EXITED) as cs: cs.wait_for_log("*-dsn requires a -type argument*") - env['TYPE'] = 'syslog' + env["TYPE"] = "syslog" with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ - "*Adding file /var/log/toto.log to filelist*", - "*reading /var/log/toto.log at once*", - "*Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 5s)*", - "*crowdsec shutdown*" - ]) + cs.wait_for_log( + [ + "*Adding file /var/log/toto.log to filelist*", + "*reading /var/log/toto.log at once*", + "*Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 5s)*", + "*crowdsec shutdown*", + ] + ) def test_cold_logs_missing_dsn(crowdsec, flavor): env = { - 'TYPE': 'syslog', + "TYPE": "syslog", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: diff --git a/docker/test/tests/test_flavors.py b/docker/test/tests/test_flavors.py index 7e78b8d681b..a48fe428c7b 100644 --- a/docker/test/tests/test_flavors.py +++ b/docker/test/tests/test_flavors.py @@ -15,8 +15,8 @@ def test_cscli_lapi(crowdsec, flavor): """Test if cscli can talk to lapi""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -27,35 +27,34 @@ def test_flavor_content(crowdsec, flavor): """Test flavor contents""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - x = cs.cont.exec_run('ls -1 /var/lib/crowdsec/data/') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + x = cs.cont.exec_run("ls -1 
/var/lib/crowdsec/data/") assert x.exit_code == 0 stdout = x.output.decode() - if 'slim' in flavor or 'plugins' in flavor: - assert 'GeoLite2-City.mmdb' not in stdout - assert 'GeoLite2-ASN.mmdb' not in stdout + if "slim" in flavor or "plugins" in flavor: + assert "GeoLite2-City.mmdb" not in stdout + assert "GeoLite2-ASN.mmdb" not in stdout else: - assert 'GeoLite2-City.mmdb' in stdout - assert 'GeoLite2-ASN.mmdb' in stdout - assert 'crowdsec.db' in stdout + assert "GeoLite2-City.mmdb" in stdout + assert "GeoLite2-ASN.mmdb" in stdout + assert "crowdsec.db" in stdout - x = cs.cont.exec_run( - 'ls -1 /usr/local/lib/crowdsec/plugins/') + x = cs.cont.exec_run("ls -1 /usr/local/lib/crowdsec/plugins/") stdout = x.output.decode() - if 'slim' in flavor: + if "slim" in flavor: # the exact return code and full message depend # on the 'ls' implementation (busybox vs coreutils) assert x.exit_code != 0 - assert 'No such file or directory' in stdout - assert 'notification-email' not in stdout - assert 'notification-http' not in stdout - assert 'notification-slack' not in stdout - assert 'notification-splunk' not in stdout - assert 'notification-sentinel' not in stdout + assert "No such file or directory" in stdout + assert "notification-email" not in stdout + assert "notification-http" not in stdout + assert "notification-slack" not in stdout + assert "notification-splunk" not in stdout + assert "notification-sentinel" not in stdout else: assert x.exit_code == 0 - assert 'notification-email' in stdout - assert 'notification-http' in stdout - assert 'notification-slack' in stdout - assert 'notification-splunk' in stdout - assert 'notification-sentinel' in stdout + assert "notification-email" in stdout + assert "notification-http" in stdout + assert "notification-slack" in stdout + assert "notification-splunk" in stdout + assert "notification-sentinel" in stdout diff --git a/docker/test/tests/test_hello.py b/docker/test/tests/test_hello.py index a21fde85044..a3ff4f07a93 100644 --- 
a/docker/test/tests/test_hello.py +++ b/docker/test/tests/test_hello.py @@ -13,24 +13,23 @@ def test_docker_cli_run(): """Test if docker run works from the command line. Capture stdout too""" - res = subprocess.run(['docker', 'run', '--rm', 'hello-world'], - capture_output=True, text=True) + res = subprocess.run(["docker", "run", "--rm", "hello-world"], capture_output=True, text=True) assert 0 == res.returncode - assert 'Hello from Docker!' in res.stdout + assert "Hello from Docker!" in res.stdout def test_docker_run(docker_client): """Test if docker run works from the python SDK.""" - output = docker_client.containers.run('hello-world', remove=True) + output = docker_client.containers.run("hello-world", remove=True) lines = output.decode().splitlines() assert "Hello from Docker!" in lines def test_docker_run_detach(docker_client): """Test with python SDK (async).""" - cont = docker_client.containers.run('hello-world', detach=True) - assert cont.status == 'created' - assert cont.attrs['State']['ExitCode'] == 0 + cont = docker_client.containers.run("hello-world", detach=True) + assert cont.status == "created" + assert cont.attrs["State"]["ExitCode"] == 0 lines = cont.logs().decode().splitlines() assert "Hello from Docker!" in lines cont.remove(force=True) diff --git a/docker/test/tests/test_hub.py b/docker/test/tests/test_hub.py index 2365e3a9cef..a7134fcb5c8 100644 --- a/docker/test/tests/test_hub.py +++ b/docker/test/tests/test_hub.py @@ -4,8 +4,8 @@ Test pre-installed hub items. 
""" -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -16,12 +16,12 @@ def test_preinstalled_hub(crowdsec, flavor): """Test hub objects installed in the entrypoint""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli hub list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli hub list -o json", stderr=False) assert res.exit_code == 0 j = json.loads(res.output) - collections = {c['name']: c for c in j['collections']} - assert collections['crowdsecurity/linux']['status'] == 'enabled' - parsers = {c['name']: c for c in j['parsers']} - assert parsers['crowdsecurity/whitelists']['status'] == 'enabled' - assert parsers['crowdsecurity/docker-logs']['status'] == 'enabled' + collections = {c["name"]: c for c in j["collections"]} + assert collections["crowdsecurity/linux"]["status"] == "enabled" + parsers = {c["name"]: c for c in j["parsers"]} + assert parsers["crowdsecurity/whitelists"]["status"] == "enabled" + assert parsers["crowdsecurity/docker-logs"]["status"] == "enabled" diff --git a/docker/test/tests/test_hub_collections.py b/docker/test/tests/test_hub_collections.py index 962f8ff8df4..71fa698af06 100644 --- a/docker/test/tests/test_hub_collections.py +++ b/docker/test/tests/test_hub_collections.py @@ -4,8 +4,8 @@ Test collection management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,101 +14,98 @@ def test_install_two_collections(crowdsec, flavor): """Test installing collections at startup""" - it1 = 'crowdsecurity/apache2' - it2 = 'crowdsecurity/asterisk' - env = { - 'COLLECTIONS': f'{it1} {it2}' - } + it1 = "crowdsecurity/apache2" + it2 = "crowdsecurity/asterisk" + env = {"COLLECTIONS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_http(8080, '/health', 
want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' - cs.wait_for_log([ - # f'*collections install "{it1}"*' - # f'*collections install "{it2}"*' - f'*Enabled collections: {it1}*', - f'*Enabled collections: {it2}*', - ]) + items = {c["name"]: c for c in j["collections"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" + cs.wait_for_log( + [ + f"*enabling collections:{it1}*", + f"*enabling collections:{it2}*", + ] + ) def test_disable_collection(crowdsec, flavor): """Test removing a pre-installed collection at startup""" - it = 'crowdsecurity/linux' - env = { - 'DISABLE_COLLECTIONS': it - } + it = "crowdsecurity/linux" + env = {"DISABLE_COLLECTIONS": it} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['collections']} + items = {c["name"] for c in j["collections"]} assert it not in items - cs.wait_for_log([ - # f'*collections remove "{it}*", - f'*Removed symlink [[]{it}[]]*', - ]) + cs.wait_for_log( + [ + f"*disabling collections:{it}*", + ] + ) def test_install_and_disable_collection(crowdsec, flavor): """Declare a collection to install AND disable: disable wins""" - it = 'crowdsecurity/apache2' + it = "crowdsecurity/apache2" env = { - 'COLLECTIONS': it, - 'DISABLE_COLLECTIONS': it, + "COLLECTIONS": it, + 
"DISABLE_COLLECTIONS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['collections']} + items = {c["name"] for c in j["collections"]} assert it not in items logs = cs.log_lines() # check that there was no attempt to install - assert not any(f'Enabled collections: {it}' in line for line in logs) + assert not any(f"enabling collections:{it}" in line for line in logs) # already done in bats, prividing here as example of a somewhat complex test def test_taint_bubble_up(crowdsec, tmp_path_factory, flavor): - coll = 'crowdsecurity/nginx' - env = { - 'COLLECTIONS': f'{coll}' - } + coll = "crowdsecurity/nginx" + env = {"COLLECTIONS": f"{coll}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} + items = {c["name"]: c for c in j["collections"]} # implicit check for tainted=False - assert items[coll]['status'] == 'enabled' - cs.wait_for_log([ - f'*Enabled collections: {coll}*', - ]) + assert items[coll]["status"] == "enabled" + cs.wait_for_log( + [ + f"*enabling collections:{coll}*", + ] + ) - scenario = 'crowdsecurity/http-crawl-non_statics' + scenario = "crowdsecurity/http-crawl-non_statics" # the description won't be read back, it's from the index yq_command = f"yq -e -i '.description=\"tainted\"' /etc/crowdsec/hub/scenarios/{scenario}.yaml" res = 
cs.cont.exec_run(yq_command) assert res.exit_code == 0 - res = cs.cont.exec_run(f'cscli scenarios inspect {scenario} -o json') + res = cs.cont.exec_run(f"cscli scenarios inspect {scenario} -o json") assert res.exit_code == 0 j = json.loads(res.output) - assert j['tainted'] is True + assert j["tainted"] is True - res = cs.cont.exec_run('cscli collections list -o json') + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} - assert items['crowdsecurity/nginx']['status'] == 'enabled,tainted' - assert items['crowdsecurity/base-http-scenarios']['status'] == 'enabled,tainted' + items = {c["name"]: c for c in j["collections"]} + assert items["crowdsecurity/nginx"]["status"] == "enabled,tainted" + assert items["crowdsecurity/base-http-scenarios"]["status"] == "enabled,tainted" diff --git a/docker/test/tests/test_hub_parsers.py b/docker/test/tests/test_hub_parsers.py index 8cfaeecf94c..42794d20b42 100644 --- a/docker/test/tests/test_hub_parsers.py +++ b/docker/test/tests/test_hub_parsers.py @@ -4,8 +4,8 @@ Test parser management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,60 +14,54 @@ def test_install_two_parsers(crowdsec, flavor): """Test installing parsers at startup""" - it1 = 'crowdsecurity/cpanel-logs' - it2 = 'crowdsecurity/cowrie-logs' - env = { - 'PARSERS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cpanel-logs" + it2 = "crowdsecurity/cowrie-logs" + env = {"PARSERS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*parsers install "{it1}"*', - f'*parsers install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_log([f'*parsers install "{it1}"*', f'*parsers install "{it2}"*', "*Starting processing data*"]) + cs.wait_for_http(8080, 
"/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['parsers']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["parsers"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" # XXX check that the parser is preinstalled by default def test_disable_parser(crowdsec, flavor): """Test removing a pre-installed parser at startup""" - it = 'crowdsecurity/whitelists' - env = { - 'DISABLE_PARSERS': it - } + it = "crowdsecurity/whitelists" + env = {"DISABLE_PARSERS": it} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*parsers remove "{it}"*', - "*Starting processing data*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_log( + [ + f'*parsers remove "{it}"*', + "*Starting processing data*", + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['parsers']} + items = {c["name"] for c in j["parsers"]} assert it not in items def test_install_and_disable_parser(crowdsec, flavor): """Declare a parser to install AND disable: disable wins""" - it = 'crowdsecurity/cpanel-logs' + it = "crowdsecurity/cpanel-logs" env = { - 'PARSERS': it, - 'DISABLE_PARSERS': it, + "PARSERS": it, + "DISABLE_PARSERS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = 
json.loads(res.output) - items = {c['name'] for c in j['parsers']} + items = {c["name"] for c in j["parsers"]} assert it not in items logs = cs.log_lines() # check that there was no attempt to install diff --git a/docker/test/tests/test_hub_postoverflows.py b/docker/test/tests/test_hub_postoverflows.py index 80fdbc2b7bd..69f383cda24 100644 --- a/docker/test/tests/test_hub_postoverflows.py +++ b/docker/test/tests/test_hub_postoverflows.py @@ -4,8 +4,9 @@ Test postoverflow management """ -from http import HTTPStatus import json +from http import HTTPStatus + import pytest pytestmark = pytest.mark.docker @@ -13,24 +14,20 @@ def test_install_two_postoverflows(crowdsec, flavor): """Test installing postoverflows at startup""" - it1 = 'crowdsecurity/cdn-whitelist' - it2 = 'crowdsecurity/ipv6_to_range' - env = { - 'POSTOVERFLOWS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cdn-whitelist" + it2 = "crowdsecurity/ipv6_to_range" + env = {"POSTOVERFLOWS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*postoverflows install "{it1}"*', - f'*postoverflows install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli postoverflows list -o json') + cs.wait_for_log( + [f'*postoverflows install "{it1}"*', f'*postoverflows install "{it2}"*', "*Starting processing data*"] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli postoverflows list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['postoverflows']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["postoverflows"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" def test_disable_postoverflow(): @@ -40,18 +37,18 @@ def test_disable_postoverflow(): def 
test_install_and_disable_postoverflow(crowdsec, flavor): """Declare a postoverflow to install AND disable: disable wins""" - it = 'crowdsecurity/cdn-whitelist' + it = "crowdsecurity/cdn-whitelist" env = { - 'POSTOVERFLOWS': it, - 'DISABLE_POSTOVERFLOWS': it, + "POSTOVERFLOWS": it, + "DISABLE_POSTOVERFLOWS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli postoverflows list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli postoverflows list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['postoverflows']} + items = {c["name"] for c in j["postoverflows"]} assert it not in items logs = cs.log_lines() # check that there was no attempt to install diff --git a/docker/test/tests/test_hub_scenarios.py b/docker/test/tests/test_hub_scenarios.py index 2a8c3a275f2..4376a3ce64a 100644 --- a/docker/test/tests/test_hub_scenarios.py +++ b/docker/test/tests/test_hub_scenarios.py @@ -4,8 +4,8 @@ Test scenario management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,59 +14,48 @@ def test_install_two_scenarios(crowdsec, flavor): """Test installing scenarios at startup""" - it1 = 'crowdsecurity/cpanel-bf-attempt' - it2 = 'crowdsecurity/asterisk_bf' - env = { - 'SCENARIOS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cpanel-bf-attempt" + it2 = "crowdsecurity/asterisk_bf" + env = {"SCENARIOS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*scenarios install "{it1}"*', - f'*scenarios install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_log([f'*scenarios install "{it1}"*', f'*scenarios install "{it2}"*', 
"*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['scenarios']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["scenarios"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" def test_disable_scenario(crowdsec, flavor): """Test removing a pre-installed scenario at startup""" - it = 'crowdsecurity/ssh-bf' - env = { - 'DISABLE_SCENARIOS': it - } + it = "crowdsecurity/ssh-bf" + env = {"DISABLE_SCENARIOS": it} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*scenarios remove "{it}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_log([f'*scenarios remove "{it}"*', "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['scenarios']} + items = {c["name"] for c in j["scenarios"]} assert it not in items def test_install_and_disable_scenario(crowdsec, flavor): """Declare a scenario to install AND disable: disable wins""" - it = 'crowdsecurity/asterisk_bf' + it = "crowdsecurity/asterisk_bf" env = { - 'SCENARIOS': it, - 'DISABLE_SCENARIOS': it, + "SCENARIOS": it, + "DISABLE_SCENARIOS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 j = 
json.loads(res.output) - items = {c['name'] for c in j['scenarios']} + items = {c["name"] for c in j["scenarios"]} assert it not in items logs = cs.cont.logs().decode().splitlines() # check that there was no attempt to install diff --git a/docker/test/tests/test_local_api_url.py b/docker/test/tests/test_local_api_url.py index aa90c9fb798..e38af3fedbe 100644 --- a/docker/test/tests/test_local_api_url.py +++ b/docker/test/tests/test_local_api_url.py @@ -10,12 +10,9 @@ def test_local_api_url_default(crowdsec, flavor): """Test LOCAL_API_URL (default)""" with crowdsec(flavor=flavor) as cs: - cs.wait_for_log([ - "*CrowdSec Local API listening on *:8080*", - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log(["*CrowdSec Local API listening on *:8080*", "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://0.0.0.0:8080/" in stdout @@ -24,16 +21,11 @@ def test_local_api_url_default(crowdsec, flavor): def test_local_api_url(crowdsec, flavor): """Test LOCAL_API_URL (custom)""" - env = { - "LOCAL_API_URL": "http://127.0.0.1:8080" - } + env = {"LOCAL_API_URL": "http://127.0.0.1:8080"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - "*CrowdSec Local API listening on *:8080*", - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log(["*CrowdSec Local API listening on *:8080*", "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://127.0.0.1:8080/" in stdout @@ -48,16 +40,16 @@ def test_local_api_url_ipv6(crowdsec, flavor): # 
FIXME: https://forums.docker.com/t/assigning-default-ipv6-addresses/128665/3 # FIXME: https://github.com/moby/moby/issues/41438 - env = { - "LOCAL_API_URL": "http://[::1]:8080" - } + env = {"LOCAL_API_URL": "http://[::1]:8080"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - "*Starting processing data*", - "*CrowdSec Local API listening on [::1]:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log( + [ + "*Starting processing data*", + "*CrowdSec Local API listening on [::1]:8080*", + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://[::1]:8080/" in stdout diff --git a/docker/test/tests/test_local_item.py b/docker/test/tests/test_local_item.py index 3d6ac2fc954..e4c8e3c165a 100644 --- a/docker/test/tests/test_local_item.py +++ b/docker/test/tests/test_local_item.py @@ -4,8 +4,8 @@ Test bind-mounting local items """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -15,33 +15,29 @@ def test_inject_local_item(crowdsec, tmp_path_factory, flavor): """Test mounting a custom whitelist at startup""" - localitems = tmp_path_factory.mktemp('localitems') - custom_whitelists = localitems / 'custom_whitelists.yaml' + localitems = tmp_path_factory.mktemp("localitems") + custom_whitelists = localitems / "custom_whitelists.yaml" - with open(custom_whitelists, 'w') as f: + with open(custom_whitelists, "w") as f: f.write('{"whitelist":{"reason":"Good IPs","ip":["1.2.3.4"]}}') - volumes = { - custom_whitelists: {'bind': '/etc/crowdsec/parsers/s02-enrich/custom_whitelists.yaml'} - } + volumes = {custom_whitelists: {"bind": "/etc/crowdsec/parsers/s02-enrich/custom_whitelists.yaml"}} with crowdsec(flavor=flavor, volumes=volumes) as cs: - cs.wait_for_log([ - "*Starting processing data*" - ]) - 
cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) + cs.wait_for_log(["*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) # the parser should be enabled - res = cs.cont.exec_run('cscli parsers list -o json') + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['parsers']} - assert items['custom_whitelists.yaml']['status'] == 'enabled,local' + items = {c["name"]: c for c in j["parsers"]} + assert items["custom_whitelists.yaml"]["status"] == "enabled,local" # regression test: the linux collection should not be tainted # (the parsers were not copied from /staging when using "cp -an" with local parsers) - res = cs.cont.exec_run('cscli collections inspect crowdsecurity/linux -o json') + res = cs.cont.exec_run("cscli collections inspect crowdsecurity/linux -o json") assert res.exit_code == 0 j = json.loads(res.output) # crowdsec <= 1.5.5 omits a "tainted" when it's false - assert j.get('tainted', False) is False + assert j.get("tainted", False) is False diff --git a/docker/test/tests/test_metrics.py b/docker/test/tests/test_metrics.py index 8a6d5318156..bd41bdcea41 100644 --- a/docker/test/tests/test_metrics.py +++ b/docker/test/tests/test_metrics.py @@ -12,12 +12,12 @@ def test_metrics_port_default(crowdsec, flavor): metrics_port = 6060 with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - cs.wait_for_http(metrics_port, '/metrics', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://127.0.0.1:{metrics_port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + cs.wait_for_http(metrics_port, "/metrics", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://127.0.0.1:{metrics_port}/metrics") + if "executable file not found" in 
res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." in stdout @@ -25,15 +25,15 @@ def test_metrics_port_default(crowdsec, flavor): def test_metrics_port_default_ipv6(crowdsec, flavor): """Test metrics (ipv6)""" - pytest.skip('ipv6 not supported yet') + pytest.skip("ipv6 not supported yet") port = 6060 with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://[::1]:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://[::1]:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." 
in stdout @@ -42,16 +42,14 @@ def test_metrics_port_default_ipv6(crowdsec, flavor): def test_metrics_port(crowdsec, flavor): """Test metrics (custom METRICS_PORT)""" port = 7070 - env = { - "METRICS_PORT": port - } + env = {"METRICS_PORT": port} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://127.0.0.1:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://127.0.0.1:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." in stdout @@ -59,18 +57,16 @@ def test_metrics_port(crowdsec, flavor): def test_metrics_port_ipv6(crowdsec, flavor): """Test metrics (custom METRICS_PORT, ipv6)""" - pytest.skip('ipv6 not supported yet') + pytest.skip("ipv6 not supported yet") port = 7070 - env = { - "METRICS_PORT": port - } + env = {"METRICS_PORT": port} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://[::1]:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://[::1]:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." 
in stdout diff --git a/docker/test/tests/test_nolapi.py b/docker/test/tests/test_nolapi.py index 6edb354fe75..e5dbc3c2624 100644 --- a/docker/test/tests/test_nolapi.py +++ b/docker/test/tests/test_nolapi.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker @@ -10,7 +9,7 @@ def test_no_agent(crowdsec, flavor): """Test DISABLE_LOCAL_API=true (failing stand-alone container)""" env = { - 'DISABLE_LOCAL_API': 'true', + "DISABLE_LOCAL_API": "true", } # if an alternative lapi url is not defined, the container should exit diff --git a/docker/test/tests/test_simple.py b/docker/test/tests/test_simple.py index 951d8be4b24..b5c8425b371 100644 --- a/docker/test/tests/test_simple.py +++ b/docker/test/tests/test_simple.py @@ -13,4 +13,4 @@ def test_crowdsec(crowdsec, flavor): matcher.fnmatch_lines(["*Starting processing data*"]) res = cs.cont.exec_run('sh -c "echo $CI_TESTING"') assert res.exit_code == 0 - assert 'true' == res.output.decode().strip() + assert "true" == res.output.decode().strip() diff --git a/docker/test/tests/test_tls.py b/docker/test/tests/test_tls.py index d2f512fcbc1..220738a9f07 100644 --- a/docker/test/tests/test_tls.py +++ b/docker/test/tests/test_tls.py @@ -6,9 +6,8 @@ import uuid -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker @@ -17,8 +16,8 @@ def test_missing_key_file(crowdsec, flavor): """Test that cscli and agent can communicate to LAPI with TLS""" env = { - 'CERT_FILE': '/etc/ssl/crowdsec/cert.pem', - 'USE_TLS': 'true', + "CERT_FILE": "/etc/ssl/crowdsec/cert.pem", + "USE_TLS": "true", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: @@ -29,8 +28,8 @@ def test_missing_cert_file(crowdsec, flavor): """Test that cscli and agent can communicate to LAPI with TLS""" env = { - 'KEY_FILE': '/etc/ssl/crowdsec/cert.key', - 'USE_TLS': 'true', + "KEY_FILE": 
"/etc/ssl/crowdsec/cert.key", + "USE_TLS": "true", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: @@ -41,14 +40,14 @@ def test_tls_missing_ca(crowdsec, flavor, certs_dir): """Missing CA cert, unknown authority""" env = { - 'CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes, wait_status=Status.EXITED) as cs: @@ -59,22 +58,22 @@ def test_tls_legacy_var(crowdsec, flavor, certs_dir): """Test server-only certificate, legacy variables""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in 
stdout @@ -84,24 +83,24 @@ def test_tls_mutual_monolith(crowdsec, flavor, certs_dir): """Server and client certificates, on the same container""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -111,26 +110,27 @@ def test_tls_lapi_var(crowdsec, flavor, certs_dir): """Test server-only certificate, lapi variables""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - 
certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout + # TODO: bad lapi hostname # the cert is valid, but has a CN that doesn't match the hostname # we must set insecure_skip_verify to true to use it @@ -140,50 +140,49 @@ def test_tls_split_lapi_agent(crowdsec, flavor, certs_dir): """Server-only certificate, split containers""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'LOCAL_API_URL': 'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "AGENT_USERNAME": 
"testagent", + "AGENT_PASSWORD": "testpassword", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -193,48 +192,47 @@ def test_tls_mutual_split_lapi_agent(crowdsec, flavor, certs_dir): """Server and client certificates, split containers""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'LOCAL_API_URL': 
'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = 
lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -244,78 +242,78 @@ def test_tls_client_ou(crowdsec, flavor, certs_dir): """Check behavior of client certificate vs AGENTS_ALLOWED_OU""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'LOCAL_API_URL': 'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname, agent_ou='custom-client-ou'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname, agent_ou="custom-client-ou"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) 
Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: - lapi.wait_for_log([ - "*client certificate OU ?custom-client-ou? doesn't match expected OU ?agent-ou?*", - ]) + lapi.wait_for_log( + [ + "*client certificate OU ?custom-client-ou? doesn't match expected OU ?agent-ou?*", + ] + ) - lapi_env['AGENTS_ALLOWED_OU'] = 'custom-client-ou' + lapi_env["AGENTS_ALLOWED_OU"] = "custom-client-ou" # change container names to avoid conflict # recreate certificates because they need the new hostname rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) - agent_env['LOCAL_API_URL'] = f'https://{lapiname}:8080' + agent_env["LOCAL_API_URL"] = f"https://{lapiname}:8080" volumes = { - certs_dir(lapi_hostname=lapiname, agent_ou='custom-client-ou'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname, agent_ou="custom-client-ou"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting 
processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_version.py b/docker/test/tests/test_version.py index c152d2e4e6c..baac61c36ab 100644 --- a/docker/test/tests/test_version.py +++ b/docker/test/tests/test_version.py @@ -10,9 +10,9 @@ def test_version_docker_platform(crowdsec, flavor): for waiter in cs.log_waiters(): with waiter as matcher: matcher.fnmatch_lines(["*Starting processing data*"]) - res = cs.cont.exec_run('cscli version') + res = cs.cont.exec_run("cscli version") assert res.exit_code == 0 - assert 'Platform: docker' in res.output.decode() - res = cs.cont.exec_run('crowdsec -version') + assert "Platform: docker" in res.output.decode() + res = cs.cont.exec_run("crowdsec -version") assert res.exit_code == 0 - assert 'Platform: docker' in res.output.decode() + assert "Platform: docker" in res.output.decode() diff --git a/docker/test/tests/test_wal.py b/docker/test/tests/test_wal.py index e3edbcaf385..e1fe3d260be 100644 --- a/docker/test/tests/test_wal.py +++ b/docker/test/tests/test_wal.py @@ -11,8 +11,8 @@ def test_use_wal_default(crowdsec, flavor): """Test USE_WAL default""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "false" in 
stdout @@ -21,12 +21,12 @@ def test_use_wal_default(crowdsec, flavor): def test_use_wal_true(crowdsec, flavor): """Test USE_WAL=true""" env = { - 'USE_WAL': 'true', + "USE_WAL": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "true" in stdout @@ -35,12 +35,12 @@ def test_use_wal_true(crowdsec, flavor): def test_use_wal_false(crowdsec, flavor): """Test USE_WAL=false""" env = { - 'USE_WAL': 'false', + "USE_WAL": "false", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "false" in stdout diff --git a/docker/test/uv.lock b/docker/test/uv.lock new file mode 100644 index 00000000000..d8cc42c89ab --- /dev/null +++ b/docker/test/uv.lock @@ -0,0 +1,587 @@ +version = 1 +requires-python = ">=3.12" + +[[package]] +name = "asttokens" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, +] + +[[package]] +name = "certifi" +version = "2024.12.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927 }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 }, + { url = 
"https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", 
size = 488736 }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, + { url = 
"https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } +wheels = 
[ + { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 }, + { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 }, + { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 }, + { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 }, + { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 }, + { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 }, + { url = 
"https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 }, + { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 }, + { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 }, + { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 }, + { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, + { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, + { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, + { url = 
"https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, + { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, + { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, + { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, + { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, + { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, + { url = 
"https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, + { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, + { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, + { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, + { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, + { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, + { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, + { url = 
"https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "crowdsec-docker-tests" +version = "0.1.0" +source = { virtual = "." } +dependencies = [ + { name = "pytest" }, + { name = "pytest-cs" }, + { name = "pytest-dotenv" }, + { name = "pytest-xdist" }, +] + +[package.dev-dependencies] +dev = [ + { name = "ipdb" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "pytest", specifier = ">=8.3.4" }, + { name = "pytest-cs", git = "https://github.com/crowdsecurity/pytest-cs" }, + { name = "pytest-dotenv", specifier = ">=0.5.2" }, + { name = "pytest-xdist", specifier = ">=3.6.1" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "ipdb", specifier = ">=0.13.13" }, + { name = "ruff", specifier = ">=0.9.3" }, +] + +[[package]] +name = "cryptography" +version = "44.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/4c/45dfa6829acffa344e3967d6006ee4ae8be57af746ae2eba1c431949b32c/cryptography-44.0.0.tar.gz", hash = 
"sha256:cd4e834f340b4293430701e772ec543b0fbe6c2dea510a5286fe0acabe153a02", size = 710657 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/09/8cc67f9b84730ad330b3b72cf867150744bf07ff113cda21a15a1c6d2c7c/cryptography-44.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:84111ad4ff3f6253820e6d3e58be2cc2a00adb29335d4cacb5ab4d4d34f2a123", size = 6541833 }, + { url = "https://files.pythonhosted.org/packages/7e/5b/3759e30a103144e29632e7cb72aec28cedc79e514b2ea8896bb17163c19b/cryptography-44.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15492a11f9e1b62ba9d73c210e2416724633167de94607ec6069ef724fad092", size = 3922710 }, + { url = "https://files.pythonhosted.org/packages/5f/58/3b14bf39f1a0cfd679e753e8647ada56cddbf5acebffe7db90e184c76168/cryptography-44.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831c3c4d0774e488fdc83a1923b49b9957d33287de923d58ebd3cec47a0ae43f", size = 4137546 }, + { url = "https://files.pythonhosted.org/packages/98/65/13d9e76ca19b0ba5603d71ac8424b5694415b348e719db277b5edc985ff5/cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb", size = 3915420 }, + { url = "https://files.pythonhosted.org/packages/b1/07/40fe09ce96b91fc9276a9ad272832ead0fddedcba87f1190372af8e3039c/cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b", size = 4154498 }, + { url = "https://files.pythonhosted.org/packages/75/ea/af65619c800ec0a7e4034207aec543acdf248d9bffba0533342d1bd435e1/cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543", size = 3932569 }, + { url = "https://files.pythonhosted.org/packages/c7/af/d1deb0c04d59612e3d5e54203159e284d3e7a6921e565bb0eeb6269bdd8a/cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e", size = 4016721 }, + { url = "https://files.pythonhosted.org/packages/bd/69/7ca326c55698d0688db867795134bdfac87136b80ef373aaa42b225d6dd5/cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e", size = 4240915 }, + { url = "https://files.pythonhosted.org/packages/ef/d4/cae11bf68c0f981e0413906c6dd03ae7fa864347ed5fac40021df1ef467c/cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053", size = 2757925 }, + { url = "https://files.pythonhosted.org/packages/64/b1/50d7739254d2002acae64eed4fc43b24ac0cc44bf0a0d388d1ca06ec5bb1/cryptography-44.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:abc998e0c0eee3c8a1904221d3f67dcfa76422b23620173e28c11d3e626c21bd", size = 3202055 }, + { url = "https://files.pythonhosted.org/packages/11/18/61e52a3d28fc1514a43b0ac291177acd1b4de00e9301aaf7ef867076ff8a/cryptography-44.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:660cb7312a08bc38be15b696462fa7cc7cd85c3ed9c576e81f4dc4d8b2b31591", size = 6542801 }, + { url = "https://files.pythonhosted.org/packages/1a/07/5f165b6c65696ef75601b781a280fc3b33f1e0cd6aa5a92d9fb96c410e97/cryptography-44.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1923cb251c04be85eec9fda837661c67c1049063305d6be5721643c22dd4e2b7", size = 3922613 }, + { url = "https://files.pythonhosted.org/packages/28/34/6b3ac1d80fc174812486561cf25194338151780f27e438526f9c64e16869/cryptography-44.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404fdc66ee5f83a1388be54300ae978b2efd538018de18556dde92575e05defc", size = 4137925 }, + { url = "https://files.pythonhosted.org/packages/d0/c7/c656eb08fd22255d21bc3129625ed9cd5ee305f33752ef2278711b3fa98b/cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = 
"sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289", size = 3915417 }, + { url = "https://files.pythonhosted.org/packages/ef/82/72403624f197af0db6bac4e58153bc9ac0e6020e57234115db9596eee85d/cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7", size = 4155160 }, + { url = "https://files.pythonhosted.org/packages/a2/cd/2f3c440913d4329ade49b146d74f2e9766422e1732613f57097fea61f344/cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c", size = 3932331 }, + { url = "https://files.pythonhosted.org/packages/7f/df/8be88797f0a1cca6e255189a57bb49237402b1880d6e8721690c5603ac23/cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64", size = 4017372 }, + { url = "https://files.pythonhosted.org/packages/af/36/5ccc376f025a834e72b8e52e18746b927f34e4520487098e283a719c205e/cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285", size = 4239657 }, + { url = "https://files.pythonhosted.org/packages/46/b0/f4f7d0d0bcfbc8dd6296c1449be326d04217c57afb8b2594f017eed95533/cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417", size = 2758672 }, + { url = "https://files.pythonhosted.org/packages/97/9b/443270b9210f13f6ef240eff73fd32e02d381e7103969dc66ce8e89ee901/cryptography-44.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:708ee5f1bafe76d041b53a4f95eb28cdeb8d18da17e597d46d7833ee59b97ede", size = 3202071 }, +] + +[[package]] +name = "decorator" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = 
"sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, +] + +[[package]] +name = "docker" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774 }, +] + +[[package]] +name = "execnet" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612 }, +] + +[[package]] +name = "executing" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = 
"sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "ipdb" +version = "0.13.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "decorator" }, + { name = "ipython" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/1b/7e07e7b752017f7693a0f4d41c13e5ca29ce8cbcfdcc1fd6c4ad8c0a27a0/ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726", size = 17042 } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/4c/b075da0092003d9a55cf2ecc1cae9384a1ca4f650d51b00fc59875fe76f6/ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", size = 12130 }, +] + +[[package]] +name = "ipython" +version = "8.31.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/35/6f90fdddff7a08b7b715fccbd2427b5212c9525cd043d26fdc45bee0708d/ipython-8.31.0.tar.gz", hash = "sha256:b6a2274606bec6166405ff05e54932ed6e5cfecaca1fc05f2cacde7bb074d70b", size = 5501011 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/60/d0feb6b6d9fe4ab89fe8fe5b47cbf6cd936bfd9f1e7ffa9d0015425aeed6/ipython-8.31.0-py3-none-any.whl", hash = "sha256:46ec58f8d3d076a61d128fe517a51eb730e3aaf0c184ea8c17d16e366660c6a6", size = 821583 }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.50" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816 }, +] + +[[package]] +name = "psutil" +version = "6.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, + { url = "https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, + { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, + { url = "https://files.pythonhosted.org/packages/9c/39/0f88a830a1c8a3aba27fededc642da37613c57cbff143412e3536f89784f/psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160", size = 287477 }, + { url = "https://files.pythonhosted.org/packages/47/da/99f4345d4ddf2845cb5b5bd0d93d554e84542d116934fde07a0c50bd4e9f/psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3", size = 289017 }, + { url = "https://files.pythonhosted.org/packages/38/53/bd755c2896f4461fd4f36fa6a6dcb66a88a9e4b9fd4e5b66a77cf9d4a584/psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53", size = 250602 }, + { url = 
"https://files.pythonhosted.org/packages/7b/d7/7831438e6c3ebbfa6e01a927127a6cb42ad3ab844247f3c5b96bea25d73d/psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649", size = 254444 }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "pytest-cs" +version = "0.7.20" +source = { git = "https://github.com/crowdsecurity/pytest-cs#73380b837a80337f361414bebbaf4b914713c4ae" } +dependencies = [ + { name = "docker" }, + { name = "psutil" }, + { name = "pytest" }, + { name = "pytest-datadir" }, + { name = "pytest-dotenv" }, + { name = "pyyaml" }, + { name = "requests" }, + { 
name = "trustme" }, +] + +[[package]] +name = "pytest-datadir" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/97/a93900d82635aa3f419c3cd2059b4de7d7fe44e415eaf00c298854582dcc/pytest-datadir-1.5.0.tar.gz", hash = "sha256:1617ed92f9afda0c877e4eac91904b5f779d24ba8f5e438752e3ae39d8d2ee3f", size = 8821 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/90/96b9474cddda5ef9e10e6f1871c0fadfa153b605e0e749ba30437bfb62a0/pytest_datadir-1.5.0-py3-none-any.whl", hash = "sha256:34adf361bcc7b37961bbc1dfa8d25a4829e778bab461703c38a5c50ca9c36dc8", size = 5095 }, +] + +[[package]] +name = "pytest-dotenv" +version = "0.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cd/b0/cafee9c627c1bae228eb07c9977f679b3a7cb111b488307ab9594ba9e4da/pytest-dotenv-0.5.2.tar.gz", hash = "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732", size = 3782 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/da/9da67c67b3d0963160e3d2cbc7c38b6fae342670cc8e6d5936644b2cf944/pytest_dotenv-0.5.2-py3-none-any.whl", hash = "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f", size = 3993 }, +] + +[[package]] +name = "pytest-xdist" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/c4/3c310a19bc1f1e9ef50075582652673ef2bfc8cd62afef9585683821902f/pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d", size = 84060 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/82/1d96bf03ee4c0fdc3c0cbe61470070e659ca78dc0086fb88b66c185e2449/pytest_xdist-3.6.1-py3-none-any.whl", hash 
= "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7", size = 46108 }, +] + +[[package]] +name = "python-dotenv" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 }, +] + +[[package]] +name = "pywin32" +version = "308" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size = 5939729 }, + { url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 }, + { url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 }, + { url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 }, + { url = 
"https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 }, + { url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = 
"https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = 
"https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "ruff" +version = "0.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/7f/60fda2eec81f23f8aa7cbbfdf6ec2ca11eb11c273827933fb2541c2ce9d8/ruff-0.9.3.tar.gz", hash = "sha256:8293f89985a090ebc3ed1064df31f3b4b56320cdfcec8b60d3295bddb955c22a", size = 3586740 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/77/4fb790596d5d52c87fd55b7160c557c400e90f6116a56d82d76e95d9374a/ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624", size = 11656815 }, + { url = "https://files.pythonhosted.org/packages/a2/a8/3338ecb97573eafe74505f28431df3842c1933c5f8eae615427c1de32858/ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c", size = 11594821 }, + { url = "https://files.pythonhosted.org/packages/8e/89/320223c3421962762531a6b2dd58579b858ca9916fb2674874df5e97d628/ruff-0.9.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c59ab92f8e92d6725b7ded9d4a31be3ef42688a115c6d3da9457a5bda140e2b4", size = 11040475 }, + { url = "https://files.pythonhosted.org/packages/b2/bd/1d775eac5e51409535804a3a888a9623e87a8f4b53e2491580858a083692/ruff-0.9.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:2dc153c25e715be41bb228bc651c1e9b1a88d5c6e5ed0194fa0dfea02b026439", size = 11856207 }, + { url = "https://files.pythonhosted.org/packages/7f/c6/3e14e09be29587393d188454064a4aa85174910d16644051a80444e4fd88/ruff-0.9.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:646909a1e25e0dc28fbc529eab8eb7bb583079628e8cbe738192853dbbe43af5", size = 11420460 }, + { url = "https://files.pythonhosted.org/packages/ef/42/b7ca38ffd568ae9b128a2fa76353e9a9a3c80ef19746408d4ce99217ecc1/ruff-0.9.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a5a46e09355695fbdbb30ed9889d6cf1c61b77b700a9fafc21b41f097bfbba4", size = 12605472 }, + { url = "https://files.pythonhosted.org/packages/a6/a1/3167023f23e3530fde899497ccfe239e4523854cb874458ac082992d206c/ruff-0.9.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c4bb09d2bbb394e3730d0918c00276e79b2de70ec2a5231cd4ebb51a57df9ba1", size = 13243123 }, + { url = "https://files.pythonhosted.org/packages/d0/b4/3c600758e320f5bf7de16858502e849f4216cb0151f819fa0d1154874802/ruff-0.9.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96a87ec31dc1044d8c2da2ebbed1c456d9b561e7d087734336518181b26b3aa5", size = 12744650 }, + { url = "https://files.pythonhosted.org/packages/be/38/266fbcbb3d0088862c9bafa8b1b99486691d2945a90b9a7316336a0d9a1b/ruff-0.9.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb7554aca6f842645022fe2d301c264e6925baa708b392867b7a62645304df4", size = 14458585 }, + { url = "https://files.pythonhosted.org/packages/63/a6/47fd0e96990ee9b7a4abda62de26d291bd3f7647218d05b7d6d38af47c30/ruff-0.9.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabc332b7075a914ecea912cd1f3d4370489c8018f2c945a30bcc934e3bc06a6", size = 12419624 }, + { url = "https://files.pythonhosted.org/packages/84/5d/de0b7652e09f7dda49e1a3825a164a65f4998175b6486603c7601279baad/ruff-0.9.3-py3-none-musllinux_1_2_aarch64.whl", hash = 
"sha256:33866c3cc2a575cbd546f2cd02bdd466fed65118e4365ee538a3deffd6fcb730", size = 11843238 }, + { url = "https://files.pythonhosted.org/packages/9e/be/3f341ceb1c62b565ec1fb6fd2139cc40b60ae6eff4b6fb8f94b1bb37c7a9/ruff-0.9.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:006e5de2621304c8810bcd2ee101587712fa93b4f955ed0985907a36c427e0c2", size = 11484012 }, + { url = "https://files.pythonhosted.org/packages/a3/c8/ff8acbd33addc7e797e702cf00bfde352ab469723720c5607b964491d5cf/ruff-0.9.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ba6eea4459dbd6b1be4e6bfc766079fb9b8dd2e5a35aff6baee4d9b1514ea519", size = 12038494 }, + { url = "https://files.pythonhosted.org/packages/73/b1/8d9a2c0efbbabe848b55f877bc10c5001a37ab10aca13c711431673414e5/ruff-0.9.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90230a6b8055ad47d3325e9ee8f8a9ae7e273078a66401ac66df68943ced029b", size = 12473639 }, + { url = "https://files.pythonhosted.org/packages/cb/44/a673647105b1ba6da9824a928634fe23186ab19f9d526d7bdf278cd27bc3/ruff-0.9.3-py3-none-win32.whl", hash = "sha256:eabe5eb2c19a42f4808c03b82bd313fc84d4e395133fb3fc1b1516170a31213c", size = 9834353 }, + { url = "https://files.pythonhosted.org/packages/c3/01/65cadb59bf8d4fbe33d1a750103e6883d9ef302f60c28b73b773092fbde5/ruff-0.9.3-py3-none-win_amd64.whl", hash = "sha256:040ceb7f20791dfa0e78b4230ee9dce23da3b64dd5848e40e3bf3ab76468dcf4", size = 10821444 }, + { url = "https://files.pythonhosted.org/packages/69/cb/b3fe58a136a27d981911cba2f18e4b29f15010623b79f0f2510fd0d31fd3/ruff-0.9.3-py3-none-win_arm64.whl", hash = "sha256:800d773f6d4d33b0a3c60e2c6ae8f4c202ea2de056365acfa519aa48acf28e0b", size = 10038168 }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, +] + +[[package]] +name = "trustme" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/c5/931476f4cf1cd9e736f32651005078061a50dc164a2569fb874e00eb2786/trustme-1.2.1.tar.gz", hash = "sha256:6528ba2bbc7f2db41f33825c8dd13e3e3eb9d334ba0f909713c8c3139f4ae47f", size = 26844 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/f3/c34dbabf6da5eda56fe923226769d40e11806952cd7f46655dd06e10f018/trustme-1.2.1-py3-none-any.whl", hash = "sha256:d768e5fc57c86dfc5ec9365102e9b092541cd6954b35d8c1eea01a84f35a762a", size = 16530 }, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, +] diff --git a/go.mod b/go.mod index f4bd9379a2d..ed406e4aedc 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.23.3 +go 1.23.5 // Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). 
@@ -11,6 +11,7 @@ require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 github.com/appleboy/gin-jwt/v2 v2.9.2 @@ -22,16 +23,21 @@ require ( github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.8 github.com/cespare/xxhash/v2 v2.3.0 - github.com/corazawaf/libinjection-go v0.1.2 - github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 + github.com/containerd/log v0.1.0 // indirect + github.com/corazawaf/libinjection-go v0.2.2 + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/creack/pty v1.1.21 // indirect + github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.15 + github.com/crowdsecurity/go-cs-lib v0.0.16 github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/dghubble/sling v1.4.2 - github.com/docker/docker v24.0.9+incompatible - github.com/docker/go-connections v0.4.0 + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v27.3.1+incompatible + github.com/docker/go-connections v0.5.0 + github.com/docker/go-units v0.5.0 // indirect github.com/expr-lang/expr v1.16.9 github.com/fatih/color v1.16.0 github.com/fsnotify/fsnotify v1.7.0 @@ -43,8 +49,10 @@ require ( github.com/go-openapi/validate v0.20.0 github.com/go-sql-driver/mysql v1.6.0 github.com/goccy/go-yaml v1.11.0 - github.com/gofrs/uuid v4.0.0+incompatible - github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.1 + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 github.com/google/uuid v1.6.0 
github.com/google/winops v0.0.0-20230712152054-af9b550d0601 @@ -59,17 +67,24 @@ require ( github.com/jarcoal/httpmock v1.1.0 github.com/jedib0t/go-pretty/v6 v6.5.9 github.com/jszwec/csvutil v1.5.1 + github.com/klauspost/compress v1.17.9 // indirect github.com/lithammer/dedent v1.1.0 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/term v0.5.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 + github.com/morikuni/aec v1.0.0 // indirect github.com/nxadm/tail v1.4.8 + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/oschwald/geoip2-golang v1.9.0 github.com/oschwald/maxminddb-golang v1.12.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model v0.4.0 + github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_model v0.5.0 github.com/prometheus/prom2json v1.3.0 github.com/r3labs/diff/v2 v2.14.1 github.com/sanity-io/litter v1.5.5 @@ -77,28 +92,38 @@ require ( github.com/shirou/gopsutil/v3 v3.23.5 github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.12.2 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.9.0 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.26.0 - golang.org/x/mod v0.17.0 - golang.org/x/sys v0.24.0 - golang.org/x/text v0.17.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect + 
go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + golang.org/x/crypto v0.32.0 + golang.org/x/mod v0.20.0 + golang.org/x/net v0.34.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.29.0 + golang.org/x/text v0.21.0 + golang.org/x/time v0.6.0 // indirect google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.34.2 + google.golang.org/protobuf v1.36.3 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 + gotest.tools/v3 v3.5.1 // indirect k8s.io/apiserver v0.28.4 + ) require ( ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect @@ -106,14 +131,13 @@ require ( github.com/bytedance/sonic v1.10.2 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect github.com/chenzhuoyu/iasm v0.9.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/creack/pty v1.1.18 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/go-units v0.5.0 // indirect + github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect 
github.com/go-openapi/analysis v0.19.16 // indirect github.com/go-openapi/inflect v0.19.0 // indirect @@ -127,10 +151,7 @@ require ( github.com/go-playground/validator/v10 v10.17.0 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect @@ -144,49 +165,44 @@ require ( github.com/jackc/pgproto3/v2 v2.3.3 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgtype v1.14.0 // indirect + github.com/jcchavezs/mergefs v0.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.3 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a // indirect + github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect 
github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/oklog/run v1.0.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/pelletier/go-toml/v2 v2.1.1 // indirect - github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e // indirect + github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect github.com/tetratelabs/wazero v1.8.0 // indirect - github.com/tidwall/gjson v1.17.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect @@ -194,24 +210,21 @@ require ( github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect + github.com/valllabh/ocsf-schema-golang v1.0.3 // indirect 
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.6.7 // indirect + golang.org/x/term v0.28.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gotest.tools/v3 v3.5.0 // indirect k8s.io/api v0.28.4 // indirect k8s.io/apimachinery v0.28.4 // indirect k8s.io/klog/v2 v2.100.1 // indirect diff --git a/go.sum b/go.sum index b2bd77c9915..e9873f0d46f 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= 
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -56,8 +56,6 @@ github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7Rfg github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.52.0 h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI= github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU= -github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= github.com/beevik/etree v1.4.1 h1:PmQJDDYahBGNKDcpdX8uPy1xRCwoCGVUiW669MEirVI= github.com/beevik/etree v1.4.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -76,6 +74,8 @@ github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZF github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x 
v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -87,26 +87,32 @@ github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0 github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/corazawaf/libinjection-go v0.1.2 h1:oeiV9pc5rvJ+2oqOqXEAMJousPpGiup6f7Y3nZj5GoM= -github.com/corazawaf/libinjection-go v0.1.2/go.mod h1:OP4TM7xdJ2skyXqNX1AN1wN5nNZEmJNuWbNPOItn7aw= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc h1:OlJhrgI3I+FLUCTI3JJW8MoqyM78WbqJjecqMnqG+wc= +github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc/go.mod h1:7rsocqNDkTCira5T0M7buoKR2ehh7YZiPkzxRuAgvVU= +github.com/corazawaf/coraza/v3 v3.3.2 h1:eG1HPLySTR9lND6y6fPOajubwbuHRF6aXCsCtxyqKTY= +github.com/corazawaf/coraza/v3 v3.3.2/go.mod h1:4EqMZkRoil11FnResCT/2JIg61dH+6D7F48VG8SVzuA= +github.com/corazawaf/libinjection-go v0.2.2 h1:Chzodvb6+NXh6wew5/yhD0Ggioif9ACrQGR4qjTCs1g= +github.com/corazawaf/libinjection-go v0.2.2/go.mod h1:OP4TM7xdJ2skyXqNX1AN1wN5nNZEmJNuWbNPOItn7aw= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 
v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h8clMcRL2u5ooZ3tmwnmJftmhb9Ws1MKmavvI= -github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= +github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7 h1:nIwAjapWmiQD3W/uAWYE3z+DC5Coy/zTyPBCJ379fAw= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7/go.mod h1:A+uciRXu+yhZcHMtM052bSM6vyJsMMU37NJN+tVoGqo= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= -github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= +github.com/crowdsecurity/go-cs-lib v0.0.16 h1:2/htodjwc/sfsv4deX8F/2Fzg1bOI8w3O1/BPSvvsB0= +github.com/crowdsecurity/go-cs-lib v0.0.16/go.mod 
h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= @@ -117,12 +123,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dghubble/sling v1.4.2 h1:vs1HIGBbSl2SEALyU+irpYFLZMfc49Fp+jYryFebQjM= github.com/dghubble/sling v1.4.2/go.mod h1:o0arCOz0HwfqYQJLrRtqunaWOn4X6jxE/6ORKRpVTD4= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= -github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -132,8 +138,10 @@ github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= -github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -154,8 +162,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= @@ -294,8 +305,8 @@ github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -303,8 +314,9 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -334,6 +346,9 @@ github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod h1:A github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-plugin v1.4.10 h1:xUbmA4jC6Dq163/fWcp8P3JuHilrHHMLNRxzGQJ9hNk= @@ -407,6 +422,8 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v1.1.0 h1:F47ChZj1Y2zFsCXxNkBPwNNKnAyOATcdQibk0qEdVCE= github.com/jarcoal/httpmock v1.1.0/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jcchavezs/mergefs v0.1.0 h1:7oteO7Ocl/fnfFMkoVLJxTveCjrsd//UB0j89xmnpec= +github.com/jcchavezs/mergefs v0.1.0/go.mod 
h1:eRLTrsA+vFwQZ48hj8p8gki/5v9C2bFtHH5Mnn4bcGk= github.com/jedib0t/go-pretty/v6 v6.5.9 h1:ACteMBRrrmm1gMsXe9PSTOClQ63IXDUt03H5U+UV8OU= github.com/jedib0t/go-pretty/v6 v6.5.9/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= @@ -434,8 +451,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -467,8 +484,8 @@ github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffkt github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a h1:tdPcGgyiH0K+SbsJBBm2oPyEIOTAvLBwD9TuUwVtZho= -github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= 
+github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516 h1:aAO0L0ulox6m/CLRYvJff+jWXYYCKGpEm3os7dM/Z+M= +github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -504,8 +521,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -522,6 +539,8 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -543,8 +562,8 @@ github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= @@ -556,8 +575,8 @@ github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUr github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= 
-github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e h1:POJco99aNgosh92lGqmx7L1ei+kCymivB/419SD15PQ= -github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= +github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 h1:1Kw2vDBXmjop+LclnzCb/fFy+sgb3gYARwfmoUcQe6o= +github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -573,21 +592,21 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 
h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM= github.com/r3labs/diff/v2 v2.14.1 h1:wRZ3jB44Ny50DSXsoIcFQ27l2x+n5P31K/Pk+b9B0Ic= @@ -601,8 +620,8 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -637,8 +656,8 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -667,8 +686,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= -github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= -github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= 
+github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -687,6 +706,8 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 h1:UFHFmFfixpmfRBcxuu+LA9l8MdURWVdVNUHxO5n1d2w= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26/go.mod h1:IGhd0qMDsUa9acVjsbsT7bu3ktadtGOHI79+idTew/M= +github.com/valllabh/ocsf-schema-golang v1.0.3 h1:eR8k/3jP/OOqB8LRCtdJ4U+vlgd/gk5y3KMXoodrsrw= +github.com/valllabh/ocsf-schema-golang v1.0.3/go.mod h1:sZ3as9xqm1SSK5feFWIR2CuGeGRhsM7TR1MbpBctzPk= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= @@ -729,6 +750,22 @@ go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.9.4 h1:qXWlnK2WCOWSxJ/Hm3XyYOGKv3ujA2btBsCyuIFvQjc= go.mongodb.org/mongo-driver v1.9.4/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= 
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -765,8 +802,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= 
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -774,8 +811,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -799,8 +836,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net 
v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -810,8 +847,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -847,8 +884,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= 
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -856,8 +893,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -870,10 +907,10 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.3.0 
h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -895,27 +932,31 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -945,8 +986,8 @@ gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index ef5a413b91f..d3928270598 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -16,6 +16,7 @@ import ( tomb "gopkg.in/tomb.v2" "gopkg.in/yaml.v2" + "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/trace" 
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" @@ -116,7 +117,7 @@ func setupLogger(source, name string, level *log.Level) (*log.Entry, error) { // if the configuration is not valid it returns an error. // If the datasource can't be run (eg. journalctl not available), it still returns an error which // can be checked for the appropriate action. -func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metricsLevel int) (*DataSource, error) { +func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metricsLevel int) (DataSource, error) { // we dump it back to []byte, because we want to decode the yaml blob twice: // once to DataSourceCommonCfg, and then later to the dedicated type of the datasource yamlConfig, err := yaml.Marshal(commonConfig) @@ -140,10 +141,10 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics } /* configure the actual datasource */ if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { - return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) + return nil, err } - return &dataSrc, nil + return dataSrc, nil } // detectBackwardCompatAcquis: try to magically detect the type for backward compat (type was not mandatory then) @@ -164,8 +165,6 @@ func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string { } func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr string) ([]DataSource, error) { - var sources []DataSource - frags := strings.Split(dsn, ":") if len(frags) == 1 { return nil, fmt.Errorf("%s isn't valid dsn (no protocol)", dsn) @@ -197,9 +196,7 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr return nil, fmt.Errorf("while configuration datasource for %s: %w", dsn, err) } - sources = append(sources, dataSrc) - - return sources, nil + return []DataSource{dataSrc}, nil } func GetMetricsLevelFromPromCfg(prom 
*csconfig.PrometheusCfg) int { @@ -236,7 +233,16 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig return nil, err } - dec := yaml.NewDecoder(yamlFile) + defer yamlFile.Close() + + acquisContent, err := io.ReadAll(yamlFile) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", acquisFile, err) + } + + expandedAcquis := csstring.StrictExpand(string(acquisContent), os.LookupEnv) + + dec := yaml.NewDecoder(strings.NewReader(expandedAcquis)) dec.SetStrict(true) idx := -1 @@ -249,7 +255,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig err = dec.Decode(&sub) if err != nil { if !errors.Is(err, io.EOF) { - return nil, fmt.Errorf("failed to yaml decode %s: %w", acquisFile, err) + return nil, fmt.Errorf("failed to parse %s: %w", acquisFile, err) } log.Tracef("End of yaml file") @@ -259,6 +265,12 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig // for backward compat ('type' was not mandatory, detect it) if guessType := detectBackwardCompatAcquis(sub); guessType != "" { + log.Debugf("datasource type missing in %s (position %d): detected 'source=%s'", acquisFile, idx, guessType) + + if sub.Source != "" && sub.Source != guessType { + log.Warnf("datasource type mismatch in %s (position %d): found '%s' but should probably be '%s'", acquisFile, idx, sub.Source, guessType) + } + sub.Source = guessType } // it's an empty item, skip it @@ -270,18 +282,18 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig if sub.Source != "docker" { // docker is the only source that can be empty - return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) + return nil, fmt.Errorf("missing labels in %s (position %d)", acquisFile, idx) } } if sub.Source == "" { - return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx) + return nil, fmt.Errorf("data source type is empty ('source') in %s 
(position %d)", acquisFile, idx) } // pre-check that the source is valid _, err := GetDataSourceIface(sub.Source) if err != nil { - return nil, fmt.Errorf("in file %s (position: %d) - %w", acquisFile, idx, err) + return nil, fmt.Errorf("in file %s (position %d) - %w", acquisFile, idx, err) } uniqueId := uuid.NewString() @@ -295,19 +307,19 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig continue } - return nil, fmt.Errorf("while configuring datasource of type %s from %s (position: %d): %w", sub.Source, acquisFile, idx, err) + return nil, fmt.Errorf("while configuring datasource of type %s from %s (position %d): %w", sub.Source, acquisFile, idx, err) } if sub.TransformExpr != "" { vm, err := expr.Compile(sub.TransformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { - return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position: %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) + return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) } transformRuntimes[uniqueId] = vm } - sources = append(sources, *src) + sources = append(sources, src) } } @@ -326,7 +338,8 @@ func GetMetrics(sources []DataSource, aggregated bool) error { for _, metric := range metrics { if err := prometheus.Register(metric); err != nil { - if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { + var alreadyRegisteredErr prometheus.AlreadyRegisteredError + if !errors.As(err, &alreadyRegisteredErr) { return fmt.Errorf("could not register metrics for datasource %s: %w", sources[i].GetName(), err) } // ignore the error @@ -344,6 +357,7 @@ func copyEvent(evt types.Event, line string) types.Event { evtCopy.Line = evt.Line evtCopy.Line.Raw = line evtCopy.Line.Labels = make(map[string]string) + for k, v := range evt.Line.Labels { evtCopy.Line.Labels[k] = v } @@ 
-351,13 +365,13 @@ func copyEvent(evt types.Event, line string) types.Event { return evtCopy } -func transform(transformChan chan types.Event, output chan types.Event, AcquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { +func transform(transformChan chan types.Event, output chan types.Event, acquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { defer trace.CatchPanic("crowdsec/acquis") logger.Infof("transformer started") for { select { - case <-AcquisTomb.Dying(): + case <-acquisTomb.Dying(): logger.Debugf("transformer is dying") return case evt := <-transformChan: @@ -386,6 +400,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo if !ok { logger.Errorf("transform expression returned []interface{}, but cannot assert an element to string") output <- evt + continue } @@ -405,7 +420,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo } } -func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { +func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, acquisTomb *tomb.Tomb) error { // Don't wait if we have no sources, as it will hang forever if len(sources) == 0 { return nil @@ -415,7 +430,7 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ subsrc := sources[i] // ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) - AcquisTomb.Go(func() error { + acquisTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis") var err error @@ -434,21 +449,21 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ "datasource": subsrc.GetName(), }) - AcquisTomb.Go(func() error { - transform(outChan, output, AcquisTomb, transformRuntime, transformLogger) + acquisTomb.Go(func() error { + transform(outChan, output, acquisTomb, transformRuntime, transformLogger) return nil }) } if 
subsrc.GetMode() == configuration.TAIL_MODE { - err = subsrc.StreamingAcquisition(ctx, outChan, AcquisTomb) + err = subsrc.StreamingAcquisition(ctx, outChan, acquisTomb) } else { - err = subsrc.OneShotAcquisition(ctx, outChan, AcquisTomb) + err = subsrc.OneShotAcquisition(ctx, outChan, acquisTomb) } if err != nil { // if one of the acqusition returns an error, we kill the others to properly shutdown - AcquisTomb.Kill(err) + acquisTomb.Kill(err) } return nil @@ -456,7 +471,7 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ } /*return only when acquisition is over (cat) or never (tail)*/ - err := AcquisTomb.Wait() + err := acquisTomb.Wait() return err } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index dd70172cf62..1ea8f11c22a 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -140,7 +140,7 @@ log_level: debug source: mock toto: test_value1 `, - ExpectedError: "failed to configure datasource mock: mode ratata is not supported", + ExpectedError: "mode ratata is not supported", }, { TestName: "bad_type_config", @@ -182,7 +182,8 @@ wowo: ajsajasjas for _, tc := range tests { t.Run(tc.TestName, func(t *testing.T) { common := configuration.DataSourceCommonCfg{} - yaml.Unmarshal([]byte(tc.String), &common) + err := yaml.Unmarshal([]byte(tc.String), &common) + require.NoError(t, err) ds, err := DataSourceConfigure(common, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.ExpectedError) @@ -192,19 +193,19 @@ wowo: ajsajasjas switch tc.TestName { case "basic_valid_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "cat", mock.Mode) assert.Equal(t, log.InfoLevel, mock.logger.Logger.Level) assert.Equal(t, map[string]string{"test": "foobar"}, mock.Labels) case "basic_debug_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) 
assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "cat", mock.Mode) assert.Equal(t, log.DebugLevel, mock.logger.Logger.Level) assert.Equal(t, map[string]string{"test": "foobar"}, mock.Labels) case "basic_tailmode_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "tail", mock.Mode) assert.Equal(t, log.DebugLevel, mock.logger.Logger.Level) @@ -216,6 +217,7 @@ wowo: ajsajasjas func TestLoadAcquisitionFromFile(t *testing.T) { appendMockSource() + t.Setenv("TEST_ENV", "test_value2") tests := []struct { TestName string @@ -236,7 +238,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ AcquisitionFiles: []string{"test_files/badyaml.yaml"}, }, - ExpectedError: "failed to yaml decode test_files/badyaml.yaml: yaml: unmarshal errors", + ExpectedError: "failed to parse test_files/badyaml.yaml: yaml: unmarshal errors", ExpectedLen: 0, }, { @@ -272,7 +274,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ AcquisitionFiles: []string{"test_files/bad_source.yaml"}, }, - ExpectedError: "in file test_files/bad_source.yaml (position: 0) - unknown data source does_not_exist", + ExpectedError: "in file test_files/bad_source.yaml (position 0) - unknown data source does_not_exist", }, { TestName: "invalid_filetype_config", @@ -281,6 +283,13 @@ func TestLoadAcquisitionFromFile(t *testing.T) { }, ExpectedError: "while configuring datasource of type file from test_files/bad_filetype.yaml", }, + { + TestName: "from_env", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/env.yaml"}, + }, + ExpectedLen: 1, + }, } for _, tc := range tests { t.Run(tc.TestName, func(t *testing.T) { @@ -292,6 +301,13 @@ func TestLoadAcquisitionFromFile(t *testing.T) { } assert.Len(t, dss, tc.ExpectedLen) + if tc.TestName == "from_env" { + mock := dss[0].Dump().(*MockSource) + assert.Equal(t, "test_value2", 
mock.Toto) + assert.Equal(t, "foobar", mock.Labels["test"]) + assert.Equal(t, "${NON_EXISTING}", mock.Labels["non_existing"]) + assert.Equal(t, log.InfoLevel, mock.logger.Logger.Level) + } }) } } diff --git a/pkg/acquisition/configuration/configuration.go b/pkg/acquisition/configuration/configuration.go index 3e27da1b9e6..a9d570d2788 100644 --- a/pkg/acquisition/configuration/configuration.go +++ b/pkg/acquisition/configuration/configuration.go @@ -13,12 +13,14 @@ type DataSourceCommonCfg struct { UseTimeMachine bool `yaml:"use_time_machine,omitempty"` UniqueId string `yaml:"unique_id,omitempty"` TransformExpr string `yaml:"transform,omitempty"` - Config map[string]interface{} `yaml:",inline"` //to keep the datasource-specific configuration directives + Config map[string]interface{} `yaml:",inline"` // to keep the datasource-specific configuration directives } -var TAIL_MODE = "tail" -var CAT_MODE = "cat" -var SERVER_MODE = "server" // No difference with tail, just a bit more verbose +var ( + TAIL_MODE = "tail" + CAT_MODE = "cat" + SERVER_MODE = "server" // No difference with tail, just a bit more verbose +) const ( METRICS_NONE = iota diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 2f7861b32ff..78225d5f8c3 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -155,14 +155,14 @@ func (w *AppsecSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{AppsecReqCounter, AppsecBlockCounter, AppsecRuleHits, AppsecOutbandParsingHistogram, AppsecInbandParsingHistogram, AppsecGlobalParsingHistogram} } -func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := w.UnmarshalConfig(yamlConfig) if err != nil { return fmt.Errorf("unable to parse appsec configuration: %w", err) } w.logger = logger - 
w.metricsLevel = MetricsLevel + w.metricsLevel = metricsLevel w.logger.Tracef("Appsec configuration: %+v", w.config) if w.config.AuthCacheDuration == nil { @@ -180,7 +180,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.InChan = make(chan appsec.ParsedRequest) appsecCfg := appsec.AppsecConfig{Logger: w.logger.WithField("component", "appsec_config")} - //we keep the datasource name + // we keep the datasource name appsecCfg.Name = w.config.Name // let's load the associated appsec_config: @@ -275,6 +275,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. for _, runner := range w.AppsecRunners { runner.outChan = out + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live/runner") return runner.Run(t) @@ -285,16 +286,20 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. if w.config.ListenSocket != "" { w.logger.Infof("creating unix socket %s", w.config.ListenSocket) _ = os.RemoveAll(w.config.ListenSocket) + listener, err := net.Listen("unix", w.config.ListenSocket) if err != nil { return fmt.Errorf("appsec server failed: %w", err) } + defer listener.Close() + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { err = w.server.ServeTLS(listener, w.config.CertFilePath, w.config.KeyFilePath) } else { err = w.server.Serve(listener) } + if err != nil && !errors.Is(err, http.ErrServerClosed) { return fmt.Errorf("appsec server failed: %w", err) } @@ -304,8 +309,10 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. }) t.Go(func() error { var err error + if w.config.ListenAddr != "" { w.logger.Infof("creating TCP server on %s", w.config.ListenAddr) + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { err = w.server.ListenAndServeTLS(w.config.CertFilePath, w.config.KeyFilePath) } else { @@ -323,7 +330,11 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. 
w.logger.Info("Shutting down Appsec server") // xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) - w.server.Shutdown(ctx) + + if err := w.server.Shutdown(ctx); err != nil { + w.logger.Errorf("Error shutting down Appsec server: %s", err.Error()) + } + return nil }) @@ -354,11 +365,13 @@ func (w *AppsecSource) IsAuth(apiKey string) bool { } req.Header.Add("X-Api-Key", apiKey) + resp, err := client.Do(req) if err != nil { log.Errorf("Error performing request: %s", err) return false } + defer resp.Body.Close() return resp.StatusCode == http.StatusOK @@ -371,17 +384,21 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { apiKey := r.Header.Get(appsec.APIKeyHeaderName) clientIP := r.Header.Get(appsec.IPHeaderName) remoteIP := r.RemoteAddr + if apiKey == "" { w.logger.Errorf("Unauthorized request from '%s' (real IP = %s)", remoteIP, clientIP) rw.WriteHeader(http.StatusUnauthorized) + return } + expiration, exists := w.AuthCache.Get(apiKey) // if the apiKey is not in cache or has expired, just recheck the auth if !exists || time.Now().After(expiration) { if !w.IsAuth(apiKey) { rw.WriteHeader(http.StatusUnauthorized) w.logger.Errorf("Unauthorized request from '%s' (real IP = %s)", remoteIP, clientIP) + return } @@ -394,8 +411,10 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { if err != nil { w.logger.Errorf("%s", err) rw.WriteHeader(http.StatusInternalServerError) + return } + parsedRequest.AppsecEngine = w.config.Name logger := w.logger.WithFields(log.Fields{ @@ -427,6 +446,8 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { logger.Errorf("unable to serialize response: %s", err) rw.WriteHeader(http.StatusInternalServerError) } else { - rw.Write(body) + if _, err := rw.Write(body); err != nil { + logger.Errorf("unable to write response: %s", err) + } } } diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go 
b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index c549d2ef1d1..d87384a0189 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -341,7 +341,6 @@ func TestAppsecOnMatchHooks(t *testing.T) { } func TestAppsecPreEvalHooks(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic pre_eval hook to disable inband rule", @@ -403,7 +402,6 @@ func TestAppsecPreEvalHooks(t *testing.T) { require.Len(t, responses, 1) require.True(t, responses[0].InBandInterrupt) - }, }, { @@ -670,7 +668,6 @@ func TestAppsecPreEvalHooks(t *testing.T) { } func TestAppsecRemediationConfigHooks(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic matching rule", @@ -759,6 +756,7 @@ func TestAppsecRemediationConfigHooks(t *testing.T) { }) } } + func TestOnMatchRemediationHooks(t *testing.T) { tests := []appsecRuleTest{ { diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index 7ce43779591..8bdb6405d98 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -35,19 +35,24 @@ type AppsecRunner struct { func (r *AppsecRunner) MergeDedupRules(collections []appsec.AppsecCollection, logger *log.Entry) string { var rulesArr []string dedupRules := make(map[string]struct{}) + discarded := 0 for _, collection := range collections { + // Dedup *our* rules for _, rule := range collection.Rules { - if _, ok := dedupRules[rule]; !ok { - rulesArr = append(rulesArr, rule) - dedupRules[rule] = struct{}{} - } else { - logger.Debugf("Discarding duplicate rule : %s", rule) + if _, ok := dedupRules[rule]; ok { + discarded++ + logger.Debugf("Discarding duplicate rule : %s", rule) + continue } + rulesArr = append(rulesArr, rule) + dedupRules[rule] = struct{}{} } + // Don't mess up with native modsec rules + rulesArr = append(rulesArr, collection.NativeRules...) 
} - if len(rulesArr) != len(dedupRules) { - logger.Warningf("%d rules were discarded as they were duplicates", len(rulesArr)-len(dedupRules)) + if discarded > 0 { + logger.Warningf("%d rules were discarded as they were duplicates", discarded) } return strings.Join(rulesArr, "\n") @@ -90,6 +95,9 @@ func (r *AppsecRunner) Init(datadir string) error { outbandCfg = outbandCfg.WithRequestBodyInMemoryLimit(*r.AppsecRuntime.Config.OutOfBandOptions.RequestBodyInMemoryLimit) } r.AppsecOutbandEngine, err = coraza.NewWAF(outbandCfg) + if err != nil { + return fmt.Errorf("unable to initialize outband engine : %w", err) + } if r.AppsecRuntime.DisabledInBandRulesTags != nil { for _, tag := range r.AppsecRuntime.DisabledInBandRulesTags { @@ -118,10 +126,6 @@ func (r *AppsecRunner) Init(datadir string) error { r.logger.Tracef("Loaded inband rules: %+v", r.AppsecInbandEngine.GetRuleGroup().GetRules()) r.logger.Tracef("Loaded outband rules: %+v", r.AppsecOutbandEngine.GetRuleGroup().GetRules()) - if err != nil { - return fmt.Errorf("unable to initialize outband engine : %w", err) - } - return nil } @@ -379,7 +383,6 @@ func (r *AppsecRunner) handleRequest(request *appsec.ParsedRequest) { // time spent to process inband AND out of band rules globalParsingElapsed := time.Since(startGlobalParsing) AppsecGlobalParsingHistogram.With(prometheus.Labels{"source": request.RemoteAddrNormalized, "appsec_engine": request.AppsecEngine}).Observe(globalParsingElapsed.Seconds()) - } func (r *AppsecRunner) Run(t *tomb.Tomb) error { diff --git a/pkg/acquisition/modules/appsec/appsec_runner_test.go b/pkg/acquisition/modules/appsec/appsec_runner_test.go index 2027cf1d2c0..38d8bbe431f 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner_test.go +++ b/pkg/acquisition/modules/appsec/appsec_runner_test.go @@ -3,14 +3,35 @@ package appsecacquisition import ( "testing" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" + + 
"github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" ) -func TestAppsecRuleLoad(t *testing.T) { +func TestAppsecConflictRuleLoad(t *testing.T) { log.SetLevel(log.TraceLevel) tests := []appsecRuleTest{ + { + name: "simple native rule load", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "id conflict on native rule load", + expected_load_ok: false, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + }, + }, { name: "simple rule load", expected_load_ok: true, @@ -26,33 +47,66 @@ func TestAppsecRuleLoad(t *testing.T) { }, }, { - name: "simple native rule load", + name: "duplicate rule load", expected_load_ok: true, - inband_native_rules: []string{ - `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + 
afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRuleLoad(t *testing.T) { + log.SetLevel(log.TraceLevel) + + tests := []appsecRuleTest{ + { + name: "simple rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, }, afterload_asserts: func(runner AppsecRunner) { require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) }, }, { - name: "simple native rule load (2)", + name: "simple native rule load", expected_load_ok: true, inband_native_rules: []string{ `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, - `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, }, afterload_asserts: func(runner AppsecRunner) { - require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) }, }, { - name: "simple native rule load + dedup", + name: "simple native rule load (2)", expected_load_ok: true, inband_native_rules: []string{ `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, - `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, }, afterload_asserts: func(runner AppsecRunner) { require.Len(t, 
runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) @@ -105,21 +159,22 @@ func TestAppsecRuleLoad(t *testing.T) { Or: []appsec_rule.CustomRule{ { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "toto"}, }, { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "tutu"}, }, { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "tata"}, - }, { - //Name: "rule1", + }, + { + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "titi"}, }, @@ -130,6 +185,20 @@ func TestAppsecRuleLoad(t *testing.T) { require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 4) }, }, + { + name: "invalid inband rule", + expected_load_ok: false, + inband_native_rules: []string{ + "this_is_not_a_rule", + }, + }, + { + name: "invalid outofband rule", + expected_load_ok: false, + outofband_native_rules: []string{ + "this_is_not_a_rule", + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 1534f5cb7fa..5f2b93836f6 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -41,7 +41,9 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { log.SetLevel(log.WarnLevel) } inbandRules := []string{} + nativeInbandRules := []string{} outofbandRules := []string{} + nativeOutofbandRules := []string{} InChan := make(chan appsec.ParsedRequest) OutChan := make(chan types.Event) @@ -56,8 +58,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { inbandRules = append(inbandRules, strRule) } - inbandRules = append(inbandRules, test.inband_native_rules...) - outofbandRules = append(outofbandRules, test.outofband_native_rules...) 
+ nativeInbandRules = append(nativeInbandRules, test.inband_native_rules...) + nativeOutofbandRules = append(nativeOutofbandRules, test.outofband_native_rules...) for ridx, rule := range test.outofband_rules { strRule, _, err := rule.Convert(appsec_rule.ModsecurityRuleType, rule.Name) if err != nil { @@ -66,7 +68,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { outofbandRules = append(outofbandRules, strRule) } - appsecCfg := appsec.AppsecConfig{Logger: logger, + appsecCfg := appsec.AppsecConfig{ + Logger: logger, OnLoad: test.on_load, PreEval: test.pre_eval, PostEval: test.post_eval, @@ -75,13 +78,14 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { UserBlockedHTTPCode: test.UserBlockedHTTPCode, UserPassedHTTPCode: test.UserPassedHTTPCode, DefaultRemediation: test.DefaultRemediation, - DefaultPassAction: test.DefaultPassAction} + DefaultPassAction: test.DefaultPassAction, + } AppsecRuntime, err := appsecCfg.Build() if err != nil { t.Fatalf("unable to build appsec runtime : %s", err) } - AppsecRuntime.InBandRules = []appsec.AppsecCollection{{Rules: inbandRules}} - AppsecRuntime.OutOfBandRules = []appsec.AppsecCollection{{Rules: outofbandRules}} + AppsecRuntime.InBandRules = []appsec.AppsecCollection{{Rules: inbandRules, NativeRules: nativeInbandRules}} + AppsecRuntime.OutOfBandRules = []appsec.AppsecCollection{{Rules: outofbandRules, NativeRules: nativeOutofbandRules}} appsecRunnerUUID := uuid.New().String() //we copy AppsecRutime for each runner wrt := *AppsecRuntime @@ -96,8 +100,14 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { } err = runner.Init("/tmp/") if err != nil { + if !test.expected_load_ok { + return + } t.Fatalf("unable to initialize runner : %s", err) } + if !test.expected_load_ok { + t.Fatalf("expected load to fail but it didn't") + } if test.afterload_asserts != nil { //afterload asserts are just to evaluate the state of the runner after the rules have been loaded diff --git 
a/pkg/acquisition/modules/appsec/bodyprocessors/raw.go b/pkg/acquisition/modules/appsec/bodyprocessors/raw.go index e2e23eb57ae..aa467ecf048 100644 --- a/pkg/acquisition/modules/appsec/bodyprocessors/raw.go +++ b/pkg/acquisition/modules/appsec/bodyprocessors/raw.go @@ -9,8 +9,7 @@ import ( "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" ) -type rawBodyProcessor struct { -} +type rawBodyProcessor struct{} type setterInterface interface { Set(string) @@ -33,9 +32,7 @@ func (*rawBodyProcessor) ProcessResponse(reader io.Reader, v plugintypes.Transac return nil } -var ( - _ plugintypes.BodyProcessor = &rawBodyProcessor{} -) +var _ plugintypes.BodyProcessor = &rawBodyProcessor{} //nolint:gochecknoinits //Coraza recommends to use init() for registering plugins func init() { diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 8995b305680..65bb4601c21 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -296,6 +296,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR "hash": hash, "version": version, "matched_zones": matchedZones, + "logdata": rule.Data(), } evt.Appsec.MatchedRules = append(evt.Appsec.MatchedRules, corazaRule) } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index ba267c9050b..5739ebc3124 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -154,13 +154,13 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := cw.UnmarshalConfig(yamlConfig) if err != nil { return err } - cw.metricsLevel = MetricsLevel + cw.metricsLevel = metricsLevel 
cw.logger = logger.WithField("group", cw.Config.GroupName) @@ -330,9 +330,12 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha LastIngestionTime := time.Unix(0, *event.LastIngestionTime*int64(time.Millisecond)) if LastIngestionTime.Before(oldest) { cw.logger.Tracef("stop iteration, %s reached oldest age, stop (%s < %s)", *event.LogStreamName, LastIngestionTime, time.Now().UTC().Add(-*cw.Config.MaxStreamAge)) + hasMoreStreams = false + return false } + cw.logger.Tracef("stream %s is elligible for monitoring", *event.LogStreamName) // the stream has been updated recently, check if we should monitor it var expectMode int @@ -341,6 +344,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha } else { expectMode = types.TIMEMACHINE } + monitorStream := LogStreamTailConfig{ GroupName: cw.Config.GroupName, StreamName: *event.LogStreamName, @@ -354,16 +358,20 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha out <- monitorStream } } + if lastPage { cw.logger.Tracef("reached last page") + hasMoreStreams = false } + return true }, ) if err != nil { return fmt.Errorf("while describing group %s: %w", cw.Config.GroupName, err) } + cw.logger.Tracef("after DescribeLogStreamsPagesWithContext") } } @@ -373,12 +381,14 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha // LogStreamManager receives the potential streams to monitor, and starts a go routine when needed func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStreamTailConfig, outChan chan types.Event) error { cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName) + pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval) for { select { case newStream := <-in: //nolint:govet // copylocks won't matter if the tomb is not initialized shouldCreate := true + cw.logger.Tracef("received new streams to monitor : %s/%s", newStream.GroupName, 
newStream.StreamName) if cw.Config.StreamName != nil && newStream.StreamName != *cw.Config.StreamName { @@ -402,12 +412,16 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr if !stream.t.Alive() { cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName) cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...) + if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec() } + break } + shouldCreate = false + break } } @@ -417,19 +431,23 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() } + newStream.t = tomb.Tomb{} newStream.logger = cw.logger.WithField("stream", newStream.StreamName) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) newStream.t.Go(func() error { return cw.TailLogStream(ctx, &newStream, outChan) }) + cw.monitoredStreams = append(cw.monitoredStreams, &newStream) } case <-pollDeadStreamInterval.C: newMonitoredStreams := cw.monitoredStreams[:0] + for idx, stream := range cw.monitoredStreams { if !cw.monitoredStreams[idx].t.Alive() { cw.logger.Debugf("remove dead stream %s", stream.StreamName) + if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec() } @@ -437,20 +455,25 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr newMonitoredStreams = append(newMonitoredStreams, stream) } } + cw.monitoredStreams = newMonitoredStreams case <-cw.t.Dying(): cw.logger.Infof("LogStreamManager for %s is dying, %d alive streams", cw.Config.GroupName, len(cw.monitoredStreams)) + for idx, stream := range cw.monitoredStreams { if cw.monitoredStreams[idx].t.Alive() { cw.logger.Debugf("killing stream %s", stream.StreamName) 
cw.monitoredStreams[idx].t.Kill(nil) + if err := cw.monitoredStreams[idx].t.Wait(); err != nil { cw.logger.Debugf("error while waiting for death of %s : %s", stream.StreamName, err) } } } + cw.monitoredStreams = nil cw.logger.Debugf("routine cleanup done, return") + return nil } } @@ -458,12 +481,14 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string + lastReadMessage := time.Now().UTC() ticker := time.NewTicker(cfg.PollStreamInterval) // resume at existing index if we already had streamIndexMutex.Lock() v := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] streamIndexMutex.Unlock() + if v != "" { cfg.logger.Debugf("restarting on index %s", v) startFrom = &v @@ -474,7 +499,9 @@ func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTai select { case <-ticker.C: cfg.logger.Tracef("entering loop") + hasMorePages := true + for hasMorePages { /*for the first call, we only consume the last item*/ cfg.logger.Tracef("calling GetLogEventsPagesWithContext") @@ -489,36 +516,44 @@ func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTai func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool { cfg.logger.Tracef("%d results, last:%t", len(page.Events), lastPage) startFrom = page.NextForwardToken + if page.NextForwardToken != nil { streamIndexMutex.Lock() cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] = *page.NextForwardToken streamIndexMutex.Unlock() } + if lastPage { /*wait another ticker to check on new log availability*/ cfg.logger.Tracef("last page") + hasMorePages = false } + if len(page.Events) > 0 { lastReadMessage = time.Now().UTC() } + for _, event := range page.Events { evt, err := cwLogToEvent(event, cfg) if err != nil { cfg.logger.Warningf("cwLogToEvent error, discarded event : %s", err) } else { cfg.logger.Debugf("pushing message 
: %s", evt.Line.Raw) + if cw.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc() } outChan <- evt } } + return true }, ) if err != nil { newerr := fmt.Errorf("while reading %s/%s: %w", cfg.GroupName, cfg.StreamName, err) cfg.logger.Warningf("err : %s", newerr) + return newerr } cfg.logger.Tracef("done reading GetLogEventsPagesWithContext") diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index b27255ec13f..582da3d53a1 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -12,6 +12,7 @@ import ( "time" dockerTypes "github.com/docker/docker/api/types" + dockerContainer "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" @@ -56,7 +57,7 @@ type DockerSource struct { logger *log.Entry Client client.CommonAPIClient t *tomb.Tomb - containerLogsOptions *dockerTypes.ContainerLogsOptions + containerLogsOptions *dockerContainer.LogsOptions } type ContainerConfig struct { @@ -104,6 +105,7 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { if d.Config.Mode == "" { d.Config.Mode = configuration.TAIL_MODE } + if d.Config.Mode != configuration.CAT_MODE && d.Config.Mode != configuration.TAIL_MODE { return fmt.Errorf("unsupported mode %s for docker datasource", d.Config.Mode) } @@ -120,7 +122,7 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { d.Config.Since = time.Now().UTC().Format(time.RFC3339) } - d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{ + d.containerLogsOptions = &dockerContainer.LogsOptions{ ShowStdout: d.Config.FollowStdout, ShowStderr: d.Config.FollowStdErr, Follow: true, @@ -134,9 +136,10 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (d *DockerSource) Configure(yamlConfig []byte, logger 
*log.Entry, MetricsLevel int) error { +func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { d.logger = logger - d.metricsLevel = MetricsLevel + d.metricsLevel = metricsLevel + err := d.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -146,18 +149,19 @@ func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe d.logger.Tracef("Actual DockerAcquisition configuration %+v", d.Config) - dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err + opts := []client.Opt{ + client.FromEnv, + client.WithAPIVersionNegotiation(), } if d.Config.DockerHost != "" { - err = client.WithHost(d.Config.DockerHost)(dockerClient) - if err != nil { - return err - } + opts = append(opts, client.WithHost(d.Config.DockerHost)) + } + + d.Client, err = client.NewClientWithOpts(opts...) + if err != nil { + return err } - d.Client = dockerClient _, err = d.Client.Info(context.Background()) if err != nil { @@ -170,7 +174,12 @@ func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { var err error - if !strings.HasPrefix(dsn, d.GetName()+"://") { + parsedURL, err := url.Parse(dsn) + if err != nil { + return fmt.Errorf("failed to parse DSN %s: %w", dsn, err) + } + + if parsedURL.Scheme != d.GetName() { return fmt.Errorf("invalid DSN %s for docker source, must start with %s://", dsn, d.GetName()) } @@ -187,40 +196,28 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg d.logger = logger d.Config.Labels = labels - dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err + opts := []client.Opt{ + client.FromEnv, + client.WithAPIVersionNegotiation(), } - d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{ + 
d.containerLogsOptions = &dockerContainer.LogsOptions{ ShowStdout: d.Config.FollowStdout, ShowStderr: d.Config.FollowStdErr, Follow: false, } - dsn = strings.TrimPrefix(dsn, d.GetName()+"://") - args := strings.Split(dsn, "?") - if len(args) == 0 { - return fmt.Errorf("invalid dsn: %s", dsn) - } + containerNameOrID := parsedURL.Host - if len(args) == 1 && args[0] == "" { + if containerNameOrID == "" { return fmt.Errorf("empty %s DSN", d.GetName()+"://") } - d.Config.ContainerName = append(d.Config.ContainerName, args[0]) + + d.Config.ContainerName = append(d.Config.ContainerName, containerNameOrID) // we add it as an ID also so user can provide docker name or docker ID - d.Config.ContainerID = append(d.Config.ContainerID, args[0]) + d.Config.ContainerID = append(d.Config.ContainerID, containerNameOrID) - // no parameters - if len(args) == 1 { - d.Client = dockerClient - return nil - } - - parameters, err := url.ParseQuery(args[1]) - if err != nil { - return fmt.Errorf("while parsing parameters %s: %w", dsn, err) - } + parameters := parsedURL.Query() for k, v := range parameters { switch k { @@ -267,12 +264,15 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg if len(v) != 1 { return errors.New("only one 'docker_host' parameters is required, not many") } - if err := client.WithHost(v[0])(dockerClient); err != nil { - return err - } + opts = append(opts, client.WithHost(v[0])) } } - d.Client = dockerClient + + d.Client, err = client.NewClientWithOpts(opts...) 
+ if err != nil { + return err + } + return nil } @@ -288,33 +288,42 @@ func (d *DockerSource) SupportedModes() []string { // OneShotAcquisition reads a set of file and returns when done func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { d.logger.Debug("In oneshot") - runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) + + runningContainers, err := d.Client.ContainerList(ctx, dockerContainer.ListOptions{}) if err != nil { return err } + foundOne := false - for _, container := range runningContainer { + + for _, container := range runningContainers { if _, ok := d.runningContainerState[container.ID]; ok { d.logger.Debugf("container with id %s is already being read from", container.ID) continue } + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { d.logger.Infof("reading logs from container %s", containerConfig.Name) d.logger.Debugf("logs options: %+v", *d.containerLogsOptions) + dockerReader, err := d.Client.ContainerLogs(ctx, containerConfig.ID, *d.containerLogsOptions) if err != nil { d.logger.Errorf("unable to read logs from container: %+v", err) return err } + // we use this library to normalize docker API logs (cf. 
https://ahmet.im/blog/docker-logs-api-binary-format-explained/) foundOne = true + var scanner *bufio.Scanner + if containerConfig.Tty { scanner = bufio.NewScanner(dockerReader) } else { reader := dlog.NewReader(dockerReader) scanner = bufio.NewScanner(reader) } + for scanner.Scan() { select { case <-t.Dying(): @@ -324,6 +333,7 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev if line == "" { continue } + l := types.Line{} l.Raw = line l.Labels = d.Config.Labels @@ -331,9 +341,11 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev l.Src = containerConfig.Name l.Process = true l.Module = d.GetName() + if d.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() } + evt := types.MakeEvent(true, types.LOG, true) evt.Line = l evt.Process = true @@ -342,10 +354,12 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) } } + err = scanner.Err() if err != nil { d.logger.Errorf("Got error from docker read: %s", err) } + d.runningContainerState[container.ID] = containerConfig } } @@ -380,6 +394,7 @@ func (d *DockerSource) getContainerTTY(ctx context.Context, containerId string) if err != nil { return false } + return containerDetails.Config.Tty } @@ -388,6 +403,7 @@ func (d *DockerSource) getContainerLabels(ctx context.Context, containerId strin if err != nil { return map[string]interface{}{} } + return parseLabels(containerDetails.Config.Labels) } @@ -403,6 +419,7 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. if strings.HasPrefix(name, "/") && name != "" { name = name[1:] } + if name == containerName { return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } @@ -429,38 +446,49 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. 
d.logger.Tracef("container has no 'crowdsec' labels set, ignoring container: %s", container.ID) return nil } + if _, ok := parsedLabels["enable"]; !ok { d.logger.Errorf("container has 'crowdsec' labels set but no 'crowdsec.enable' key found") return nil } + enable, ok := parsedLabels["enable"].(string) if !ok { d.logger.Error("container has 'crowdsec.enable' label set but it's not a string") return nil } + if strings.ToLower(enable) != "true" { d.logger.Debugf("container has 'crowdsec.enable' label not set to true ignoring container: %s", container.ID) return nil } + if _, ok = parsedLabels["labels"]; !ok { d.logger.Error("container has 'crowdsec.enable' label set to true but no 'labels' keys found") return nil } + labelsTypeCast, ok := parsedLabels["labels"].(map[string]interface{}) if !ok { d.logger.Error("container has 'crowdsec.enable' label set to true but 'labels' is not a map") return nil } + d.logger.Debugf("container labels %+v", labelsTypeCast) + labels := make(map[string]string) + for k, v := range labelsTypeCast { if v, ok := v.(string); ok { log.Debugf("label %s is a string with value %s", k, v) labels[k] = v + continue } + d.logger.Errorf("label %s is not a string", k) } + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(ctx, container.ID)} } @@ -470,6 +498,7 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. 
func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { ticker := time.NewTicker(d.CheckIntervalDuration) d.logger.Infof("Container watcher started, interval: %s", d.CheckIntervalDuration.String()) + for { select { case <-d.t.Dying(): @@ -478,32 +507,37 @@ func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *Conta case <-ticker.C: // to track for garbage collection runningContainersID := make(map[string]bool) - runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) + + runningContainers, err := d.Client.ContainerList(ctx, dockerContainer.ListOptions{}) if err != nil { if strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon at") { for idx, container := range d.runningContainerState { if d.runningContainerState[idx].t.Alive() { d.logger.Infof("killing tail for container %s", container.Name) d.runningContainerState[idx].t.Kill(nil) + if err := d.runningContainerState[idx].t.Wait(); err != nil { d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) } } + delete(d.runningContainerState, idx) } } else { log.Errorf("container list err: %s", err) } + continue } - for _, container := range runningContainer { + for _, container := range runningContainers { runningContainersID[container.ID] = true // don't need to re eval an already monitored container if _, ok := d.runningContainerState[container.ID]; ok { continue } + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { monitChan <- containerConfig } @@ -514,6 +548,7 @@ func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *Conta deleteChan <- containerConfig } } + d.logger.Tracef("Reading logs from %d containers", len(d.runningContainerState)) ticker.Reset(d.CheckIntervalDuration) @@ -525,7 +560,9 @@ func (d *DockerSource) StreamingAcquisition(ctx context.Context, out chan types. 
d.t = t monitChan := make(chan *ContainerConfig) deleteChan := make(chan *ContainerConfig) + d.logger.Infof("Starting docker acquisition") + t.Go(func() error { return d.DockerManager(ctx, monitChan, deleteChan, out) }) @@ -546,6 +583,7 @@ func ReadTailScanner(scanner *bufio.Scanner, out chan string, t *tomb.Tomb) erro func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfig, outChan chan types.Event, deleteChan chan *ContainerConfig) error { container.logger.Infof("start tail for container %s", container.Name) + dockerReader, err := d.Client.ContainerLogs(ctx, container.ID, *d.containerLogsOptions) if err != nil { container.logger.Errorf("unable to read logs from container: %+v", err) @@ -560,11 +598,13 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi reader := dlog.NewReader(dockerReader) scanner = bufio.NewScanner(reader) } + readerChan := make(chan string) readerTomb := &tomb.Tomb{} readerTomb.Go(func() error { return ReadTailScanner(scanner, readerChan, readerTomb) }) + for { select { case <-container.t.Dying(): @@ -595,6 +635,7 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi // Also reset the Since to avoid re-reading logs d.Config.Since = time.Now().UTC().Format(time.RFC3339) d.containerLogsOptions.Since = d.Config.Since + return nil } } @@ -602,6 +643,7 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConfig, deleteChan chan *ContainerConfig, outChan chan types.Event) error { d.logger.Info("DockerSource Manager started") + for { select { case newContainer := <-in: @@ -611,6 +653,7 @@ func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConf newContainer.t.Go(func() error { return d.TailDocker(ctx, newContainer, outChan, deleteChan) }) + d.runningContainerState[newContainer.ID] = newContainer } case containerToDelete := 
<-deleteChan: @@ -624,13 +667,16 @@ func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConf if d.runningContainerState[idx].t.Alive() { d.logger.Infof("killing tail for container %s", container.Name) d.runningContainerState[idx].t.Kill(nil) + if err := d.runningContainerState[idx].t.Wait(); err != nil { d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) } } } + d.runningContainerState = nil d.logger.Debugf("routine cleanup done, return") + return nil } } diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index 5d8208637e8..73e26b1e497 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -82,6 +82,11 @@ func TestConfigureDSN(t *testing.T) { }{ { name: "invalid DSN", + dsn: "asdfasdf", + expectedErr: "invalid DSN asdfasdf for docker source, must start with docker://", + }, + { + name: "invalid DSN scheme", dsn: "asd://", expectedErr: "invalid DSN asd:// for docker source, must start with docker://", }, @@ -102,16 +107,18 @@ func TestConfigureDSN(t *testing.T) { }, { name: "DSN ok with multiple parameters", - dsn: fmt.Sprintf("docker://test_docker?since=42min&docker_host=%s", dockerHost), + dsn: "docker://test_docker?since=42min&docker_host=" + dockerHost, expectedErr: "", }, } subLogger := log.WithField("type", "docker") for _, test := range tests { - f := DockerSource{} - err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") - cstest.AssertErrorContains(t, err, test.expectedErr) + t.Run(test.name, func(t *testing.T) { + f := DockerSource{} + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") + cstest.AssertErrorContains(t, err, test.expectedErr) + }) } } @@ -121,6 +128,7 @@ type mockDockerCli struct { func TestStreamingAcquisition(t *testing.T) { ctx := context.Background() + log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) 
log.Info("Test 'TestStreamingAcquisition'") @@ -191,6 +199,7 @@ container_name_regexp: readerTomb.Go(func() error { time.Sleep(1 * time.Second) ticker := time.NewTicker(1 * time.Second) + for { select { case <-out: @@ -205,7 +214,7 @@ container_name_regexp: }) cstest.AssertErrorContains(t, err, ts.expectedErr) - if err := readerTomb.Wait(); err != nil { + if err = readerTomb.Wait(); err != nil { t.Fatal(err) } @@ -220,7 +229,7 @@ container_name_regexp: } } -func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error) { +func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerContainer.ListOptions) ([]dockerTypes.Container, error) { if readLogs { return []dockerTypes.Container{}, nil } @@ -235,7 +244,7 @@ func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes return containers, nil } -func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerTypes.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerContainer.LogsOptions) (io.ReadCloser, error) { if readLogs { return io.NopCloser(strings.NewReader("")), nil } @@ -298,38 +307,40 @@ func TestOneShot(t *testing.T) { } for _, ts := range tests { - var ( - subLogger *log.Entry - logger *log.Logger - ) - - if ts.expectedOutput != "" { - logger.SetLevel(ts.logLevel) - subLogger = logger.WithField("type", "docker") - } else { - log.SetLevel(ts.logLevel) - subLogger = log.WithField("type", "docker") - } + t.Run(ts.dsn, func(t *testing.T) { + var ( + subLogger *log.Entry + logger *log.Logger + ) + + if ts.expectedOutput != "" { + logger.SetLevel(ts.logLevel) + subLogger = logger.WithField("type", "docker") + } else { + log.SetLevel(ts.logLevel) + subLogger = log.WithField("type", "docker") + } - readLogs = false - dockerClient := &DockerSource{} - labels := make(map[string]string) - 
labels["type"] = ts.logType + readLogs = false + dockerClient := &DockerSource{} + labels := make(map[string]string) + labels["type"] = ts.logType - if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger, ""); err != nil { - t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err) - } + if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger, ""); err != nil { + t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err) + } - dockerClient.Client = new(mockDockerCli) - out := make(chan types.Event, 100) - tomb := tomb.Tomb{} - err := dockerClient.OneShotAcquisition(ctx, out, &tomb) - cstest.AssertErrorContains(t, err, ts.expectedErr) + dockerClient.Client = new(mockDockerCli) + out := make(chan types.Event, 100) + tomb := tomb.Tomb{} + err := dockerClient.OneShotAcquisition(ctx, out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) - // else we do the check before actualLines is incremented ... - if ts.expectedLines != 0 { - assert.Len(t, out, ts.expectedLines) - } + // else we do the check before actualLines is incremented ... 
+ if ts.expectedLines != 0 { + assert.Len(t, out, ts.expectedLines) + } + }) } } diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 9f439b0c82e..697a3d35dc2 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -102,9 +102,9 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { f.logger = logger - f.metricsLevel = MetricsLevel + f.metricsLevel = metricsLevel err := f.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index a26e44cc9c7..b9c6e65d8ce 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -333,14 +333,19 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "GlobInotifyChmod", afterConfigure: func() { - f, _ := os.Create("test_files/a.log") - f.Close() + f, err := os.Create("test_files/a.log") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) time.Sleep(1 * time.Second) - os.Chmod("test_files/a.log", 0o000) + err = os.Chmod("test_files/a.log", 0o000) + require.NoError(t, err) }, teardown: func() { - os.Chmod("test_files/a.log", 0o644) - os.Remove("test_files/a.log") + err := os.Chmod("test_files/a.log", 0o644) + require.NoError(t, err) + err = os.Remove("test_files/a.log") + require.NoError(t, err) }, }, { @@ -353,7 +358,8 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "InotifyMkDir", afterConfigure: func() { - os.Mkdir("test_files/pouet/", 0o700) + err := os.Mkdir("test_files/pouet/", 0o700) + require.NoError(t, err) }, teardown: func() { os.Remove("test_files/pouet/") diff --git a/pkg/acquisition/modules/http/http.go 
b/pkg/acquisition/modules/http/http.go index 98af134c84e..97e220570ff 100644 --- a/pkg/acquisition/modules/http/http.go +++ b/pkg/acquisition/modules/http/http.go @@ -16,7 +16,6 @@ import ( "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "gopkg.in/yaml.v3" @@ -26,9 +25,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - dataSourceName = "http" -) +var dataSourceName = "http" var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -38,8 +35,8 @@ var linesRead = prometheus.NewCounterVec( []string{"path", "src"}) type HttpConfiguration struct { - //IPFilter []string `yaml:"ip_filter"` - //ChunkSize *int64 `yaml:"chunk_size"` + // IPFilter []string `yaml:"ip_filter"` + // ChunkSize *int64 `yaml:"chunk_size"` ListenAddr string `yaml:"listen_addr"` Path string `yaml:"path"` AuthType string `yaml:"auth_type"` @@ -78,6 +75,7 @@ func (h *HTTPSource) GetUuid() string { func (h *HTTPSource) UnmarshalConfig(yamlConfig []byte) error { h.Config = HttpConfiguration{} + err := yaml.Unmarshal(yamlConfig, &h.Config) if err != nil { return fmt.Errorf("cannot parse %s datasource configuration: %w", dataSourceName, err) @@ -98,6 +96,7 @@ func (hc *HttpConfiguration) Validate() error { if hc.Path == "" { hc.Path = "/" } + if hc.Path[0] != '/' { return errors.New("path must start with /") } @@ -108,9 +107,11 @@ func (hc *HttpConfiguration) Validate() error { if hc.BasicAuth == nil { return errors.New(baseErr + " basic_auth is not provided") } + if hc.BasicAuth.Username == "" { return errors.New(baseErr + " username is not provided") } + if hc.BasicAuth.Password == "" { return errors.New(baseErr + " password is not provided") } @@ -130,6 +131,7 @@ func (hc *HttpConfiguration) Validate() error { if hc.TLS.ServerCert == "" { return errors.New("server_cert is required") } + if hc.TLS.ServerKey == "" { return errors.New("server_key is required") } @@ -155,9 +157,10 @@ func (hc *HttpConfiguration) Validate() 
error { return nil } -func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { h.logger = logger - h.metricsLevel = MetricsLevel + h.metricsLevel = metricsLevel + err := h.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -212,6 +215,7 @@ func (hc *HttpConfiguration) NewTLSConfig() (*tls.Config, error) { if err != nil { return nil, fmt.Errorf("failed to load server cert/key: %w", err) } + tlsConfig.Certificates = []tls.Certificate{cert} } @@ -229,6 +233,7 @@ func (hc *HttpConfiguration) NewTLSConfig() (*tls.Config, error) { if caCertPool == nil { caCertPool = x509.NewCertPool() } + caCertPool.AppendCertsFromPEM(caCert) tlsConfig.ClientCAs = caCertPool tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert @@ -243,10 +248,12 @@ func authorizeRequest(r *http.Request, hc *HttpConfiguration) error { if !ok { return errors.New("missing basic auth") } + if username != hc.BasicAuth.Username || password != hc.BasicAuth.Password { return errors.New("invalid basic auth") } } + if hc.AuthType == "headers" { for key, value := range *hc.Headers { if r.Header.Get(key) != value { @@ -254,6 +261,7 @@ func authorizeRequest(r *http.Request, hc *HttpConfiguration) error { } } } + return nil } @@ -282,6 +290,7 @@ func (h *HTTPSource) processRequest(w http.ResponseWriter, r *http.Request, hc * } decoder := json.NewDecoder(reader) + for { var message json.RawMessage @@ -289,7 +298,9 @@ func (h *HTTPSource) processRequest(w http.ResponseWriter, r *http.Request, hc * if err == io.EOF { break } + w.WriteHeader(http.StatusBadRequest) + return fmt.Errorf("failed to decode: %w", err) } @@ -328,13 +339,17 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { if r.Method != http.MethodPost { h.logger.Errorf("method not allowed: %s", r.Method) http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return } + if err := 
authorizeRequest(r, &h.Config); err != nil { h.logger.Errorf("failed to authorize request from '%s': %s", r.RemoteAddr, err) http.Error(w, "Unauthorized", http.StatusUnauthorized) + return } + err := h.processRequest(w, r, &h.Config, out) if err != nil { h.logger.Errorf("failed to process request from '%s': %s", r.RemoteAddr, err) @@ -346,6 +361,7 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { w.Header().Set(key, value) } } + if h.Config.CustomStatusCode != nil { w.WriteHeader(*h.Config.CustomStatusCode) } else { @@ -369,25 +385,30 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { if err != nil { return fmt.Errorf("failed to create tls config: %w", err) } + h.logger.Tracef("tls config: %+v", tlsConfig) h.Server.TLSConfig = tlsConfig } t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/http/server") + if h.Config.TLS != nil { h.logger.Infof("start https server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServeTLS(h.Config.TLS.ServerCert, h.Config.TLS.ServerKey) if err != nil && err != http.ErrServerClosed { return fmt.Errorf("https server failed: %w", err) } } else { h.logger.Infof("start http server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServe() if err != nil && err != http.ErrServerClosed { return fmt.Errorf("http server failed: %w", err) } } + return nil }) diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go index f89ba7aa8ba..b05979c5adf 100644 --- a/pkg/acquisition/modules/http/http_test.go +++ b/pkg/acquisition/modules/http/http_test.go @@ -14,13 +14,15 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + 
"github.com/crowdsecurity/crowdsec/pkg/types" ) const ( @@ -218,7 +220,7 @@ func TestGetName(t *testing.T) { assert.Equal(t, "http", h.GetName()) } -func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *tomb.Tomb) { +func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *prometheus.Registry, *tomb.Tomb) { ctx := context.Background() subLogger := log.WithFields(log.Fields{ "type": "http", @@ -230,16 +232,18 @@ func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLev err = h.StreamingAcquisition(ctx, out, &tomb) require.NoError(t, err) + testRegistry := prometheus.NewPedanticRegistry() for _, metric := range h.GetMetrics() { - prometheus.Register(metric) + err = testRegistry.Register(metric) + require.NoError(t, err) } - return out, &tomb + return out, testRegistry, &tomb } func TestStreamingAcquisitionWrongHTTPMethod(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -256,13 +260,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() - + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionUnknownPath(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -279,12 +283,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionBasicAuth(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -311,12 +316,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } 
func TestStreamingAcquisitionBadHeaders(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -338,12 +344,13 @@ headers: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionMaxBodySize(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -366,12 +373,13 @@ max_body_size: 5`), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionSuccess(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -397,16 +405,17 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 1) + assertMetrics(t, reg, h.GetMetrics(), 1) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionCustomStatusCodeAndCustomHeaders(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -436,11 +445,12 @@ custom_headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 1) + assertMetrics(t, reg, h.GetMetrics(), 1) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } type slowReader struct { @@ -496,7 +506,7 @@ func assertEvents(out chan types.Event, expected []string, errChan chan error) { func TestStreamingAcquisitionTimeout(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := 
SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -526,12 +536,13 @@ timeout: 1s`), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionTLSHTTPRequest(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 auth_type: mtls @@ -550,12 +561,13 @@ tls: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionTLSWithHeadersAuthSuccess(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -601,16 +613,17 @@ tls: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 0) + assertMetrics(t, reg, h.GetMetrics(), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionMTLS(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -658,16 +671,17 @@ tls: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 0) + assertMetrics(t, reg, h.GetMetrics(), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionGzipData(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -710,16 +724,17 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 2) + assertMetrics(t, reg, h.GetMetrics(), 2) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = 
tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionNDJson(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -748,15 +763,16 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 2) + assertMetrics(t, reg, h.GetMetrics(), 2) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } -func assertMetrics(t *testing.T, metrics []prometheus.Collector, expected int) { - promMetrics, err := prometheus.DefaultGatherer.Gather() +func assertMetrics(t *testing.T, reg *prometheus.Registry, metrics []prometheus.Collector, expected int) { + promMetrics, err := reg.Gather() require.NoError(t, err) isExist := false diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 27f20b9f446..f72878d9b3c 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -53,15 +53,18 @@ func readLine(scanner *bufio.Scanner, out chan string, errChan chan error) error txt := scanner.Text() out <- txt } + if errChan != nil && scanner.Err() != nil { errChan <- scanner.Err() close(errChan) // the error is already consumed by runJournalCtl return nil //nolint:nilerr } + if errChan != nil { close(errChan) } + return nil } @@ -69,15 +72,17 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve ctx, cancel := context.WithCancel(ctx) cmd := exec.CommandContext(ctx, journalctlCmd, j.args...) 
+ stdout, err := cmd.StdoutPipe() if err != nil { cancel() - return fmt.Errorf("could not get journalctl stdout: %s", err) + return fmt.Errorf("could not get journalctl stdout: %w", err) } + stderr, err := cmd.StderrPipe() if err != nil { cancel() - return fmt.Errorf("could not get journalctl stderr: %s", err) + return fmt.Errorf("could not get journalctl stderr: %w", err) } stderrChan := make(chan string) @@ -87,6 +92,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger := j.logger.WithField("src", j.src) logger.Infof("Running journalctl command: %s %s", cmd.Path, cmd.Args) + err = cmd.Start() if err != nil { cancel() @@ -109,9 +115,11 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve cmd.Wait() return errors.New("failed to create stderr scanner") } + t.Go(func() error { return readLine(stdoutscanner, stdoutChan, errChan) }) + t.Go(func() error { // looks like journalctl closes stderr quite early, so ignore its status (but not its output) return readLine(stderrScanner, stderrChan, nil) @@ -123,6 +131,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger.Infof("journalctl datasource %s stopping", j.src) cancel() cmd.Wait() // avoid zombie process + return nil case stdoutLine := <-stdoutChan: l := types.Line{} @@ -133,6 +142,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve l.Src = j.src l.Process = true l.Module = j.GetName() + if j.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": j.src}).Inc() } @@ -149,6 +159,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger.Debugf("errChan is closed, quitting") t.Kill(nil) } + if errScanner != nil { t.Kill(errScanner) } @@ -170,6 +181,7 @@ func (j *JournalCtlSource) GetAggregMetrics() []prometheus.Collector { func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { j.config = 
JournalCtlConfiguration{} + err := yaml.UnmarshalStrict(yamlConfig, &j.config) if err != nil { return fmt.Errorf("cannot parse JournalCtlSource configuration: %w", err) @@ -189,15 +201,18 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { if len(j.config.Filters) == 0 { return errors.New("journalctl_filter is required") } - j.args = append(args, j.config.Filters...) - j.src = fmt.Sprintf("journalctl-%s", strings.Join(j.config.Filters, ".")) + + args = append(args, j.config.Filters...) + + j.args = args + j.src = "journalctl-%s" + strings.Join(j.config.Filters, ".") return nil } -func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { j.logger = logger - j.metricsLevel = MetricsLevel + j.metricsLevel = metricsLevel err := j.UnmarshalConfig(yamlConfig) if err != nil { @@ -226,8 +241,9 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, params, err := url.ParseQuery(qs) if err != nil { - return fmt.Errorf("could not parse journalctl DSN : %s", err) + return fmt.Errorf("could not parse journalctl DSN: %w", err) } + for key, value := range params { switch key { case "filters": @@ -236,10 +252,12 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, if len(value) != 1 { return errors.New("expected zero or one value for 'log_level'") } + lvl, err := log.ParseLevel(value[0]) if err != nil { return fmt.Errorf("unknown level %s: %w", value[0], err) } + j.logger.Logger.SetLevel(lvl) case "since": j.args = append(j.args, "--since", value[0]) @@ -247,7 +265,9 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, return fmt.Errorf("unsupported key %s in journalctl DSN", key) } } + j.args = append(j.args, j.config.Filters...) 
+ return nil } @@ -261,8 +281,10 @@ func (j *JournalCtlSource) GetName() string { func (j *JournalCtlSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { defer trace.CatchPanic("crowdsec/acquis/journalctl/oneshot") + err := j.runJournalCtl(ctx, out, t) j.logger.Debug("Oneshot journalctl acquisition is done") + return err } @@ -271,6 +293,7 @@ func (j *JournalCtlSource) StreamingAcquisition(ctx context.Context, out chan ty defer trace.CatchPanic("crowdsec/acquis/journalctl/streaming") return j.runJournalCtl(ctx, out, t) }) + return nil } diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 687067c1881..48b034f41c6 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -81,7 +82,7 @@ func TestConfigureDSN(t *testing.T) { }, { dsn: "journalctl://filters=%ZZ", - expectedErr: "could not parse journalctl DSN : invalid URL escape \"%ZZ\"", + expectedErr: "could not parse journalctl DSN: invalid URL escape \"%ZZ\"", }, { dsn: "journalctl://filters=_UID=42?log_level=warn", @@ -191,6 +192,7 @@ journalctl_filter: func TestStreaming(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -267,10 +269,11 @@ journalctl_filter: } tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) output, _ := exec.Command("pgrep", "-x", "journalctl").CombinedOutput() - if string(output) != "" { + if len(output) != 0 { t.Fatalf("Found a journalctl process after killing the tomb !") } diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index 
77fc44e310d..f213b85814c 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -85,9 +85,9 @@ func (k *KafkaSource) UnmarshalConfig(yamlConfig []byte) error { return err } -func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { k.logger = logger - k.metricsLevel = MetricsLevel + k.metricsLevel = metricsLevel k.logger.Debugf("start configuring %s source", dataSourceName) @@ -160,6 +160,7 @@ func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) err k.logger.Errorln(fmt.Errorf("while reading %s message: %w", dataSourceName, err)) continue } + k.logger.Tracef("got message: %s", string(m.Value)) l := types.Line{ Raw: string(m.Value), @@ -170,9 +171,11 @@ func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) err Module: k.GetName(), } k.logger.Tracef("line with message read from topic '%s': %+v", k.Config.Topic, l) + if k.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) evt.Line = l out <- evt diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index d796166a6ca..2f3361c4f6b 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -194,7 +194,8 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } require.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } @@ -271,7 +272,8 @@ tls: } require.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 
3744e43f38d..16c91ad06bc 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -99,17 +99,22 @@ func (k *KinesisSource) newClient() error { if sess == nil { return errors.New("failed to create aws session") } + config := aws.NewConfig() + if k.Config.AwsRegion != "" { config = config.WithRegion(k.Config.AwsRegion) } + if k.Config.AwsEndpoint != "" { config = config.WithEndpoint(k.Config.AwsEndpoint) } + k.kClient = kinesis.New(sess, config) if k.kClient == nil { return errors.New("failed to create kinesis client") } + return nil } @@ -136,15 +141,19 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { if k.Config.StreamName == "" && !k.Config.UseEnhancedFanOut { return errors.New("stream_name is mandatory when use_enhanced_fanout is false") } + if k.Config.StreamARN == "" && k.Config.UseEnhancedFanOut { return errors.New("stream_arn is mandatory when use_enhanced_fanout is true") } + if k.Config.ConsumerName == "" && k.Config.UseEnhancedFanOut { return errors.New("consumer_name is mandatory when use_enhanced_fanout is true") } + if k.Config.StreamARN != "" && k.Config.StreamName != "" { return errors.New("stream_arn and stream_name are mutually exclusive") } + if k.Config.MaxRetries <= 0 { k.Config.MaxRetries = 10 } @@ -152,9 +161,9 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { k.logger = logger - k.metricsLevel = MetricsLevel + k.metricsLevel = metricsLevel err := k.UnmarshalConfig(yamlConfig) if err != nil { @@ -167,6 +176,7 @@ func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsL } k.shardReaderTomb = &tomb.Tomb{} + return nil } @@ -188,22 +198,27 @@ func (k *KinesisSource) OneShotAcquisition(_ context.Context, _ chan types.Event func 
(k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { b := bytes.NewBuffer(record) + r, err := gzip.NewReader(b) if err != nil { k.logger.Error(err) return nil, err } + decompressed, err := io.ReadAll(r) if err != nil { k.logger.Error(err) return nil, err } + var subscriptionRecord CloudWatchSubscriptionRecord + err = json.Unmarshal(decompressed, &subscriptionRecord) if err != nil { k.logger.Error(err) return nil, err } + return subscriptionRecord.LogEvents, nil } @@ -214,17 +229,20 @@ func (k *KinesisSource) WaitForConsumerDeregistration(consumerName string, strea ConsumerName: aws.String(consumerName), StreamARN: aws.String(streamARN), }) + + var resourceNotFoundErr *kinesis.ResourceNotFoundException + if errors.As(err, &resourceNotFoundErr) { + return nil + } + if err != nil { - switch err.(type) { - case *kinesis.ResourceNotFoundException: - return nil - default: - k.logger.Errorf("Error while waiting for consumer deregistration: %s", err) - return fmt.Errorf("cannot describe stream consumer: %w", err) - } + k.logger.Errorf("Error while waiting for consumer deregistration: %s", err) + return fmt.Errorf("cannot describe stream consumer: %w", err) } + time.Sleep(time.Millisecond * 200 * time.Duration(i+1)) } + return fmt.Errorf("consumer %s is not deregistered after %d tries", consumerName, maxTries) } @@ -234,17 +252,21 @@ func (k *KinesisSource) DeregisterConsumer() error { ConsumerName: aws.String(k.Config.ConsumerName), StreamARN: aws.String(k.Config.StreamARN), }) + + var resourceNotFoundErr *kinesis.ResourceNotFoundException + if errors.As(err, &resourceNotFoundErr) { + return nil + } + if err != nil { - switch err.(type) { - case *kinesis.ResourceNotFoundException: - default: - return fmt.Errorf("cannot deregister stream consumer: %w", err) - } + return fmt.Errorf("cannot deregister stream consumer: %w", err) } + err = k.WaitForConsumerDeregistration(k.Config.ConsumerName, k.Config.StreamARN) if err != nil { 
return fmt.Errorf("cannot wait for consumer deregistration: %w", err) } + return nil } @@ -257,18 +279,22 @@ func (k *KinesisSource) WaitForConsumerRegistration(consumerARN string) error { if err != nil { return fmt.Errorf("cannot describe stream consumer: %w", err) } + if *describeOutput.ConsumerDescription.ConsumerStatus == "ACTIVE" { k.logger.Debugf("Consumer %s is active", consumerARN) return nil } + time.Sleep(time.Millisecond * 200 * time.Duration(i+1)) k.logger.Debugf("Waiting for consumer registration %d", i) } + return fmt.Errorf("consumer %s is not active after %d tries", consumerARN, maxTries) } func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutput, error) { k.logger.Debugf("Registering consumer %s", k.Config.ConsumerName) + streamConsumer, err := k.kClient.RegisterStreamConsumer(&kinesis.RegisterStreamConsumerInput{ ConsumerName: aws.String(k.Config.ConsumerName), StreamARN: aws.String(k.Config.StreamARN), @@ -276,10 +302,12 @@ func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutpu if err != nil { return nil, fmt.Errorf("cannot register stream consumer: %w", err) } + err = k.WaitForConsumerRegistration(*streamConsumer.Consumer.ConsumerARN) if err != nil { return nil, fmt.Errorf("timeout while waiting for consumer to be active: %w", err) } + return streamConsumer, nil } @@ -296,8 +324,12 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan linesRead.With(prometheus.Labels{"stream": k.Config.StreamName}).Inc() } } - var data []CloudwatchSubscriptionLogEvent - var err error + + var ( + data []CloudwatchSubscriptionLogEvent + err error + ) + if k.Config.FromSubscription { // The AWS docs says that the data is base64 encoded // but apparently GetRecords decodes it for us ? 
@@ -309,19 +341,22 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan } else { data = []CloudwatchSubscriptionLogEvent{{Message: string(record.Data)}} } + for _, event := range data { logger.Tracef("got record %s", event.Message) + l := types.Line{} l.Raw = event.Message l.Labels = k.Config.Labels l.Time = time.Now().UTC() l.Process = true l.Module = k.GetName() - if k.Config.StreamARN != "" { - l.Src = k.Config.StreamARN - } else { + + l.Src = k.Config.StreamARN + if l.Src == "" { l.Src = k.Config.StreamName } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) evt.Line = l out <- evt @@ -335,20 +370,23 @@ func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEven // and we won't be able to start a new one if this is the first one started by the tomb // TODO: look into parent shards to see if a shard is closed before starting to read it ? time.Sleep(time.Second) + for { select { case <-k.shardReaderTomb.Dying(): logger.Infof("Subscribed shard reader is dying") - err := reader.Close() - if err != nil { + + if err := reader.Close(); err != nil { return fmt.Errorf("cannot close kinesis subscribed shard reader: %w", err) } + return nil case event, ok := <-reader.Events(): if !ok { logger.Infof("Event chan has been closed") return nil } + switch event := event.(type) { case *kinesis.SubscribeToShardEvent: k.ParseAndPushRecords(event.Records, out, logger, shardId) @@ -369,6 +407,7 @@ func (k *KinesisSource) SubscribeToShards(arn arn.ARN, streamConsumer *kinesis.R for _, shard := range shards.Shards { shardId := *shard.ShardId + r, err := k.kClient.SubscribeToShard(&kinesis.SubscribeToShardInput{ ShardId: aws.String(shardId), StartingPosition: &kinesis.StartingPosition{Type: aws.String(kinesis.ShardIteratorTypeLatest)}, @@ -377,10 +416,12 @@ func (k *KinesisSource) SubscribeToShards(arn arn.ARN, streamConsumer *kinesis.R if err != nil { return fmt.Errorf("cannot subscribe to shard: %w", err) } + 
k.shardReaderTomb.Go(func() error { return k.ReadFromSubscription(r.GetEventStream().Reader, out, shardId, arn.Resource[7:]) }) } + return nil } @@ -389,12 +430,14 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { if err != nil { return fmt.Errorf("cannot parse stream ARN: %w", err) } + if !strings.HasPrefix(parsedARN.Resource, "stream/") { return fmt.Errorf("resource part of stream ARN %s does not start with stream/", k.Config.StreamARN) } k.logger = k.logger.WithField("stream", parsedARN.Resource[7:]) k.logger.Info("starting kinesis acquisition with enhanced fan-out") + err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) @@ -417,18 +460,22 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { k.logger.Infof("Kinesis source is dying") k.shardReaderTomb.Kill(nil) _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves + err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) } + return nil case <-k.shardReaderTomb.Dying(): k.logger.Debugf("Kinesis subscribed shard reader is dying") + if k.shardReaderTomb.Err() != nil { return k.shardReaderTomb.Err() } // All goroutines have exited without error, so a resharding event, start again k.logger.Debugf("All reader goroutines have exited, resharding event or periodic resubscribe") + continue } } @@ -437,6 +484,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { logger := k.logger.WithField("shard", shardId) logger.Debugf("Starting to read shard") + sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ ShardId: aws.String(shardId), StreamName: &k.Config.StreamName, @@ -446,28 +494,35 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro logger.Errorf("Cannot get 
shard iterator: %s", err) return fmt.Errorf("cannot get shard iterator: %w", err) } + it := sharIt.ShardIterator // AWS recommends to wait for a second between calls to GetRecords for a given shard ticker := time.NewTicker(time.Second) + for { select { case <-ticker.C: records, err := k.kClient.GetRecords(&kinesis.GetRecordsInput{ShardIterator: it}) it = records.NextShardIterator + + var throughputErr *kinesis.ProvisionedThroughputExceededException + if errors.As(err, &throughputErr) { + logger.Warn("Provisioned throughput exceeded") + // TODO: implement exponential backoff + continue + } + + var expiredIteratorErr *kinesis.ExpiredIteratorException + if errors.As(err, &expiredIteratorErr) { + logger.Warn("Expired iterator") + continue + } + if err != nil { - switch err.(type) { - case *kinesis.ProvisionedThroughputExceededException: - logger.Warn("Provisioned throughput exceeded") - // TODO: implement exponential backoff - continue - case *kinesis.ExpiredIteratorException: - logger.Warn("Expired iterator") - continue - default: - logger.Error("Cannot get records") - return fmt.Errorf("cannot get records: %w", err) - } + logger.Error("Cannot get records") + return fmt.Errorf("cannot get records: %w", err) } + k.ParseAndPushRecords(records.Records, out, logger, shardId) if it == nil { @@ -477,6 +532,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro case <-k.shardReaderTomb.Dying(): logger.Infof("shardReaderTomb is dying, exiting ReadFromShard") ticker.Stop() + return nil } } @@ -485,6 +541,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error { k.logger = k.logger.WithField("stream", k.Config.StreamName) k.logger.Info("starting kinesis acquisition from shards") + for { shards, err := k.kClient.ListShards(&kinesis.ListShardsInput{ StreamName: aws.String(k.Config.StreamName), @@ -492,9 +549,12 @@ func (k *KinesisSource) 
ReadFromStream(out chan types.Event, t *tomb.Tomb) error if err != nil { return fmt.Errorf("cannot list shards: %w", err) } + k.shardReaderTomb = &tomb.Tomb{} + for _, shard := range shards.Shards { shardId := *shard.ShardId + k.shardReaderTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming/shard") return k.ReadFromShard(out, shardId) @@ -505,6 +565,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error k.logger.Info("kinesis source is dying") k.shardReaderTomb.Kill(nil) _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves + return nil case <-k.shardReaderTomb.Dying(): reason := k.shardReaderTomb.Err() @@ -512,7 +573,9 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error k.logger.Errorf("Unexpected error from shard reader : %s", reason) return reason } + k.logger.Infof("All shards have been closed, probably a resharding event, restarting acquisition") + continue } } @@ -521,11 +584,14 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error func (k *KinesisSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming") + if k.Config.UseEnhancedFanOut { return k.EnhancedRead(out, t) } + return k.ReadFromStream(out, t) }) + return nil } diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 027cbde9240..3f6d780b192 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -9,6 +9,7 @@ import ( "net" "os" "runtime" + "strconv" "strings" "testing" "time" @@ -18,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" 
"github.com/crowdsecurity/go-cs-lib/cstest" @@ -28,17 +30,20 @@ import ( func getLocalStackEndpoint() (string, error) { endpoint := "http://localhost:4566" + if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { v = strings.TrimPrefix(v, "http://") + _, err := net.Dial("tcp", v) if err != nil { return "", fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) } } + return endpoint, nil } -func GenSubObject(i int) []byte { +func GenSubObject(t *testing.T, i int) []byte { r := CloudWatchSubscriptionRecord{ MessageType: "subscription", Owner: "test", @@ -48,51 +53,51 @@ func GenSubObject(i int) []byte { LogEvents: []CloudwatchSubscriptionLogEvent{ { ID: "testid", - Message: fmt.Sprintf("%d", i), + Message: strconv.Itoa(i), Timestamp: time.Now().UTC().Unix(), }, }, } body, err := json.Marshal(r) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) + var b bytes.Buffer gz := gzip.NewWriter(&b) - gz.Write(body) + _, err = gz.Write(body) + require.NoError(t, err) gz.Close() // AWS actually base64 encodes the data, but it looks like kinesis automatically decodes it at some point // localstack does not do it, so let's just write a raw gzipped stream return b.Bytes() } -func WriteToStream(streamName string, count int, shards int, sub bool) { +func WriteToStream(t *testing.T, streamName string, count int, shards int, sub bool) { endpoint, err := getLocalStackEndpoint() - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) + sess := session.Must(session.NewSession()) kinesisClient := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1")) + for i := range count { partition := "partition" if shards != 1 { partition = fmt.Sprintf("partition-%d", i%shards) } + var data []byte + if sub { - data = GenSubObject(i) + data = GenSubObject(t, i) } else { - data = []byte(fmt.Sprintf("%d", i)) + data = []byte(strconv.Itoa(i)) } + _, err = kinesisClient.PutRecord(&kinesis.PutRecordInput{ Data: data, PartitionKey: 
aws.String(partition), StreamName: aws.String(streamName), }) - if err != nil { - fmt.Printf("Error writing to stream: %s\n", err) - log.Fatal(err) - } + require.NoError(t, err) } } @@ -111,6 +116,7 @@ func TestBadConfiguration(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string expectedErr string @@ -142,6 +148,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } subLogger := log.WithField("type", "kinesis") + for _, test := range tests { f := KinesisSource{} err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -151,9 +158,11 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, func TestReadFromStream(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string count int @@ -169,36 +178,39 @@ stream_name: stream-1-shard`, }, } endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) + tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, false) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, false) + for i := range test.count { e := <-out - assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) + assert.Equal(t, strconv.Itoa(i), e.Line.Raw) } + tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } func TestReadFromMultipleShards(t *testing.T) { ctx := 
context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string count int @@ -214,38 +226,40 @@ stream_name: stream-2-shards`, }, } endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, false) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, false) + c := 0 + for range test.count { <-out c += 1 } assert.Equal(t, test.count, c) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } func TestFromSubscription(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string count int @@ -266,24 +280,21 @@ from_subscription: true`, f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, true) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, 
true) for i := range test.count { e := <-out assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) } tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } @@ -310,15 +321,11 @@ use_enhanced_fanout: true`, f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis")) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) //Allow the datasource to start listening to the stream time.Sleep(10 * time.Second) WriteToStream("stream-1-shard", test.count, test.shards) diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 1fa6c894a32..b0650d3906e 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -66,6 +66,7 @@ func (ka *KubernetesAuditSource) GetAggregMetrics() []prometheus.Collector { func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { k8sConfig := KubernetesAuditConfiguration{} + err := yaml.UnmarshalStrict(yamlConfig, &k8sConfig) if err != nil { return fmt.Errorf("cannot parse k8s-audit configuration: %w", err) @@ -92,12 +93,13 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { if ka.config.Mode == "" { ka.config.Mode = configuration.TAIL_MODE } + return nil } -func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { +func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { ka.logger = logger - ka.metricsLevel = MetricsLevel + ka.metricsLevel = metricsLevel err := ka.UnmarshalConfig(config) if err != nil { @@ -116,6 +118,7 @@ func (ka *KubernetesAuditSource) 
Configure(config []byte, logger *log.Entry, Met } ka.mux.HandleFunc(ka.config.WebhookPath, ka.webhookHandler) + return nil } @@ -137,6 +140,7 @@ func (ka *KubernetesAuditSource) OneShotAcquisition(_ context.Context, _ chan ty func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { ka.outChan = out + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/k8s-audit/live") ka.logger.Infof("Starting k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) @@ -145,13 +149,18 @@ func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out c if err != nil && err != http.ErrServerClosed { return fmt.Errorf("k8s-audit server failed: %w", err) } + return nil }) <-t.Dying() ka.logger.Infof("Stopping k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) - ka.server.Shutdown(ctx) + if err := ka.server.Shutdown(ctx); err != nil { + ka.logger.Errorf("Error shutting down k8s-audit server: %s", err.Error()) + } + return nil }) + return nil } @@ -167,42 +176,52 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R if ka.metricsLevel != configuration.METRICS_NONE { requestCount.WithLabelValues(ka.addr).Inc() } + if r.Method != http.MethodPost { w.WriteHeader(http.StatusMethodNotAllowed) return } + ka.logger.Tracef("webhookHandler called") + var auditEvents audit.EventList jsonBody, err := io.ReadAll(r.Body) if err != nil { ka.logger.Errorf("Error reading request body: %v", err) w.WriteHeader(http.StatusInternalServerError) + return } + ka.logger.Tracef("webhookHandler receveid: %s", string(jsonBody)) + err = json.Unmarshal(jsonBody, &auditEvents) if err != nil { ka.logger.Errorf("Error decoding audit events: %s", err) w.WriteHeader(http.StatusInternalServerError) + return } remoteIP := strings.Split(r.RemoteAddr, ":")[0] - for _, auditEvent := range auditEvents.Items { + + for idx := 
range auditEvents.Items { if ka.metricsLevel != configuration.METRICS_NONE { eventCount.WithLabelValues(ka.addr).Inc() } - bytesEvent, err := json.Marshal(auditEvent) + + bytesEvent, err := json.Marshal(auditEvents.Items[idx]) if err != nil { ka.logger.Errorf("Error serializing audit event: %s", err) continue } + ka.logger.Tracef("Got audit event: %s", string(bytesEvent)) l := types.Line{ Raw: string(bytesEvent), Labels: ka.config.Labels, - Time: auditEvent.StageTimestamp.Time, + Time: auditEvents.Items[idx].StageTimestamp.Time, Src: remoteIP, Process: true, Module: ka.GetName(), diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index a086a756e4a..bf8a8cea02c 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -85,7 +85,8 @@ webhook_path: /k8s-audit`, err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) - f.StreamingAcquisition(ctx, out, tb) + err = f.StreamingAcquisition(ctx, out, tb) + require.NoError(t, err) time.Sleep(1 * time.Second) tb.Kill(nil) @@ -260,7 +261,8 @@ webhook_path: /k8s-audit`, req := httptest.NewRequest(test.method, "/k8s-audit", strings.NewReader(test.body)) w := httptest.NewRecorder() - f.StreamingAcquisition(ctx, out, tb) + err = f.StreamingAcquisition(ctx, out, tb) + require.NoError(t, err) f.webhookHandler(w, req) diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index c57e6a67c94..47493d8cdfe 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -120,10 +120,10 @@ func (l *LokiSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (l *LokiSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { +func (l *LokiSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { l.Config = 
LokiConfiguration{} l.logger = logger - l.metricsLevel = MetricsLevel + l.metricsLevel = metricsLevel err := l.UnmarshalConfig(config) if err != nil { return err diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go index 66d842ed519..04c7053ef27 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go @@ -48,7 +48,6 @@ func WithStrictHostname() RFC3164Option { } func (r *RFC3164) parsePRI() error { - pri := 0 if r.buf[r.position] != '<' { diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go index 639e91e1224..c9aa89f7256 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go @@ -48,7 +48,6 @@ func WithStrictHostname() RFC5424Option { } func (r *RFC5424) parsePRI() error { - pri := 0 if r.buf[r.position] != '<' { @@ -94,7 +93,6 @@ func (r *RFC5424) parseVersion() error { } func (r *RFC5424) parseTimestamp() error { - timestamp := []byte{} if r.buf[r.position] == NIL_VALUE { @@ -121,7 +119,6 @@ func (r *RFC5424) parseTimestamp() error { } date, err := time.Parse(VALID_TIMESTAMP, string(timestamp)) - if err != nil { return errors.New("timestamp is not valid") } diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go index 0938e947fe7..d3a68c196db 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go @@ -94,7 +94,8 @@ func TestParse(t *testing.T) { }{ { "valid msg", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{ + `<13>1 
2021-05-18T11:58:40.828081+02:42 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{ Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "sshd", @@ -102,11 +103,14 @@ func TestParse(t *testing.T) { MsgID: "", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with msgid", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{ + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{ Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "foobar", @@ -114,11 +118,14 @@ func TestParse(t *testing.T) { MsgID: "123123", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with repeating SD", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"][foo="bar][a] blabla`, expected{ + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"][foo="bar][a] blabla`, + expected{ Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "foobar", @@ -126,36 +133,53 @@ func TestParse(t *testing.T) { MsgID: "123123", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "invalid SD", - `<13>1 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality asd`, expected{}, "structured data must end with ']'", []RFC5424Option{}, + `<13>1 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality asd`, + expected{}, + "structured data must end with ']'", + []RFC5424Option{}, }, { "invalid version", - `<13>42 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality 
isSynced="0" tzKnown="1"] blabla`, expected{}, "version must be 1", []RFC5424Option{}, + `<13>42 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{}, + "version must be 1", + []RFC5424Option{}, }, { "invalid message", - `<13>1`, expected{}, "version must be followed by a space", []RFC5424Option{}, + `<13>1`, + expected{}, + "version must be followed by a space", + []RFC5424Option{}, }, { "valid msg with empty fields", - `<13>1 - foo - - - - blabla`, expected{ + `<13>1 - foo - - - - blabla`, + expected{ Timestamp: time.Now().UTC(), Hostname: "foo", PRI: 13, Message: "blabla", - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with empty fields", - `<13>1 - - - - - - blabla`, expected{ + `<13>1 - - - - - - blabla`, + expected{ Timestamp: time.Now().UTC(), PRI: 13, Message: "blabla", - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with escaped SD", @@ -167,7 +191,9 @@ func TestParse(t *testing.T) { Hostname: "testhostname", MsgID: `sn="msgid"`, Message: `testmessage`, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid complex msg", @@ -179,7 +205,9 @@ func TestParse(t *testing.T) { PRI: 13, MsgID: `sn="msgid"`, Message: `source: sn="www.foobar.com" | message: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 "https://www.foobar.com/sw.js" "Mozilla/5.0 (Linux; Android 9; ANE-LX1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.61 Mobile Safari/537.36" "-" "www.foobar.com" sn="www.foobar.com" rt=0.000 ua="-" us="-" ut="-" ul="-" cs=HIT { request: /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js | src_ip_geo_country: DE | MONTH: May | COMMONAPACHELOG: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 | auth: - | HOUR: 10 | gl2_remote_ip: 172.31.32.142 | ident: - | 
gl2_remote_port: 43375 | BASE10NUM: [2.0, 304, 0] | pid: -1 | program: nginx | gl2_source_input: 623ed3440183476d61cff974 | INT: +0200 | is_private_ip: false | YEAR: 2022 | src_ip_geo_city: Achern | clientip: 1.1.1.1 | USERNAME:`, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "partial message", diff --git a/pkg/acquisition/modules/syslog/internal/server/syslogserver.go b/pkg/acquisition/modules/syslog/internal/server/syslogserver.go index 7118c295b54..83f5e5a57e5 100644 --- a/pkg/acquisition/modules/syslog/internal/server/syslogserver.go +++ b/pkg/acquisition/modules/syslog/internal/server/syslogserver.go @@ -25,7 +25,6 @@ type SyslogMessage struct { } func (s *SyslogServer) Listen(listenAddr string, port int) error { - s.listenAddr = listenAddr s.port = port udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", s.listenAddr, s.port)) diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index fb6a04600c1..df805d08cae 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -124,10 +124,10 @@ func (s *SyslogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { s.logger = logger s.logger.Infof("Starting syslog datasource configuration") - s.metricsLevel = MetricsLevel + s.metricsLevel = metricsLevel err := s.UnmarshalConfig(yamlConfig) if err != nil { return err diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 57fa3e8747b..3008ba5507b 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" 
"gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -168,7 +169,8 @@ listen_addr: 127.0.0.1`, } assert.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } diff --git a/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go b/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go new file mode 100644 index 00000000000..167a84e41b1 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go @@ -0,0 +1,12 @@ +package vlclient + +import ( + "time" +) + +// Log represents a VictoriaLogs log line +// See: https://docs.victoriametrics.com/victorialogs/querying/#querying-logs +type Log struct { + Message string `json:"_msg"` + Time time.Time `json:"_time"` +} diff --git a/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go b/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go new file mode 100644 index 00000000000..402754a1307 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go @@ -0,0 +1,405 @@ +package vlclient + +import ( + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" + + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" +) + +type VLClient struct { + Logger *log.Entry + + config Config + t *tomb.Tomb + failStart time.Time + currentTickerInterval time.Duration + requestHeaders map[string]string + + client *http.Client +} + +type Config struct { + URL string + Prefix string + Query string + Headers map[string]string + + Username string + Password string + + Since time.Duration + + FailMaxDuration time.Duration + + Limit int +} + +func updateURI(uri string, newStart time.Time) string { + u, _ := url.Parse(uri) + queryParams := u.Query() + + if !newStart.IsZero() { + // +1 the last timestamp to avoid getting the same 
result again. + updatedStart := newStart.Add(1 * time.Nanosecond) + queryParams.Set("start", updatedStart.Format(time.RFC3339Nano)) + } + + u.RawQuery = queryParams.Encode() + + return u.String() +} + +func (lc *VLClient) SetTomb(t *tomb.Tomb) { + lc.t = t +} + +func (lc *VLClient) shouldRetry() bool { + if lc.failStart.IsZero() { + lc.Logger.Warningf("VictoriaLogs is not available, will retry for %s", lc.config.FailMaxDuration) + lc.failStart = time.Now() + + return true + } + + if time.Since(lc.failStart) > lc.config.FailMaxDuration { + lc.Logger.Errorf("VictoriaLogs didn't manage to recover after %s, giving up", lc.config.FailMaxDuration) + return false + } + + return true +} + +func (lc *VLClient) increaseTicker(ticker *time.Ticker) { + maxTicker := 10 * time.Second + if lc.currentTickerInterval < maxTicker { + lc.currentTickerInterval *= 2 + if lc.currentTickerInterval > maxTicker { + lc.currentTickerInterval = maxTicker + } + + ticker.Reset(lc.currentTickerInterval) + } +} + +func (lc *VLClient) decreaseTicker(ticker *time.Ticker) { + minTicker := 100 * time.Millisecond + if lc.currentTickerInterval != minTicker { + lc.currentTickerInterval = minTicker + ticker.Reset(lc.currentTickerInterval) + } +} + +func (lc *VLClient) doQueryRange(ctx context.Context, uri string, c chan *Log, infinite bool) error { + lc.currentTickerInterval = 100 * time.Millisecond + ticker := time.NewTicker(lc.currentTickerInterval) + + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-lc.t.Dying(): + return lc.t.Err() + case <-ticker.C: + resp, err := lc.Get(ctx, uri) + if err != nil { + if ok := lc.shouldRetry(); !ok { + return fmt.Errorf("error querying range: %w", err) + } + + lc.increaseTicker(ticker) + + continue + } + + if resp.StatusCode != http.StatusOK { + lc.Logger.Warnf("bad HTTP response code for query range: %d", resp.StatusCode) + body, _ := io.ReadAll(resp.Body) + resp.Body.Close() + + if ok := lc.shouldRetry(); !ok { + return 
fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err) + } + + lc.increaseTicker(ticker) + + continue + } + + n, largestTime, err := lc.readResponse(ctx, resp, c) + if err != nil { + return err + } + + if !infinite && n < lc.config.Limit { + lc.Logger.Infof("Got less than %d results (%d), stopping", lc.config.Limit, n) + close(c) + + return nil + } + + lc.Logger.Debugf("(timer:%v) %d results (uri:%s)", lc.currentTickerInterval, n, uri) + + if infinite { + if n > 0 { + // as long as we get results, we keep lowest ticker + lc.decreaseTicker(ticker) + } else { + lc.increaseTicker(ticker) + } + } + + uri = updateURI(uri, largestTime) + } + } +} + +// Parses response from body in JSON-LD format and sends results to the channel +func (lc *VLClient) readResponse(ctx context.Context, resp *http.Response, c chan *Log) (int, time.Time, error) { + br := bufio.NewReaderSize(resp.Body, 64*1024) + + var ( + finishedReading bool + n int + latestTs time.Time + ) + + for !finishedReading { + select { + case <-ctx.Done(): + return n, latestTs, nil + default: + } + + b, err := br.ReadBytes('\n') + if err != nil { + if errors.Is(err, bufio.ErrBufferFull) { + lc.Logger.Infof("skipping line number #%d: line too long", n) + continue + } + + if errors.Is(err, io.EOF) { + // b can be != nil when EOF is returned, so we need to process it + finishedReading = true + } else if errors.Is(err, context.Canceled) { + return n, latestTs, nil + } else { + return n, latestTs, fmt.Errorf("cannot read line in response: %w", err) + } + } + + if len(b) == 0 { + continue + } + + b = bytes.Trim(b, "\n") + + var logLine Log + + if err := json.Unmarshal(b, &logLine); err != nil { + lc.Logger.Warnf("cannot unmarshal line in response: %s", string(b)) + continue + } + + n++ + + lc.Logger.Tracef("Got response: %+v", logLine) + c <- &logLine + + if logLine.Time.After(latestTs) { + latestTs = logLine.Time + } + } + + return n, latestTs, nil +} + +func (lc *VLClient) getURLFor(endpoint 
string, params map[string]string) string { + u, err := url.Parse(lc.config.URL) + if err != nil { + return "" + } + + queryParams := u.Query() + + for k, v := range params { + queryParams.Set(k, v) + } + + u.RawQuery = queryParams.Encode() + + u.Path, err = url.JoinPath(lc.config.Prefix, u.Path, endpoint) + if err != nil { + return "" + } + + return u.String() +} + +func (lc *VLClient) Ready(ctx context.Context) error { + tick := time.NewTicker(500 * time.Millisecond) + u := lc.getURLFor("", nil) + + for { + select { + case <-ctx.Done(): + tick.Stop() + return ctx.Err() + case <-lc.t.Dying(): + tick.Stop() + return lc.t.Err() + case <-tick.C: + lc.Logger.Debug("Checking if VictoriaLogs is ready") + + resp, err := lc.Get(ctx, u) + if err != nil { + lc.Logger.Warnf("Error checking if VictoriaLogs is ready: %s", err) + continue + } + + _ = resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + lc.Logger.Debugf("VictoriaLogs is not ready, status code: %d", resp.StatusCode) + continue + } + + lc.Logger.Info("VictoriaLogs is ready") + + return nil + } + } +} + +// Tail live-tailing for logs +// See: https://docs.victoriametrics.com/victorialogs/querying/#live-tailing +func (lc *VLClient) Tail(ctx context.Context) (chan *Log, error) { + t := time.Now().Add(-1 * lc.config.Since) + u := lc.getURLFor("select/logsql/tail", map[string]string{ + "limit": strconv.Itoa(lc.config.Limit), + "start": t.Format(time.RFC3339Nano), + "query": lc.config.Query, + }) + + lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, t) + lc.Logger.Infof("Connecting to %s", u) + + var ( + resp *http.Response + err error + ) + + for { + resp, err = lc.Get(ctx, u) + lc.Logger.Tracef("Tail request done: %v | %s", resp, err) + + if err != nil { + if errors.Is(err, context.Canceled) { + return nil, nil + } + + if ok := lc.shouldRetry(); !ok { + return nil, fmt.Errorf("error tailing logs: %w", err) + } + + continue + } + + break + } + + if resp.StatusCode != http.StatusOK { + lc.Logger.Warnf("bad 
HTTP response code for tail request: %d", resp.StatusCode) + body, _ := io.ReadAll(resp.Body) + resp.Body.Close() + + if ok := lc.shouldRetry(); !ok { + return nil, fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err) + } + } + + responseChan := make(chan *Log) + + lc.t.Go(func() error { + _, _, err = lc.readResponse(ctx, resp, responseChan) + if err != nil { + return fmt.Errorf("error while reading tail response: %w", err) + } + + return nil + }) + + return responseChan, nil +} + +// QueryRange queries the logs +// See: https://docs.victoriametrics.com/victorialogs/querying/#querying-logs +func (lc *VLClient) QueryRange(ctx context.Context, infinite bool) chan *Log { + t := time.Now().Add(-1 * lc.config.Since) + u := lc.getURLFor("select/logsql/query", map[string]string{ + "query": lc.config.Query, + "start": t.Format(time.RFC3339Nano), + "limit": strconv.Itoa(lc.config.Limit), + }) + + c := make(chan *Log) + + lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, t) + + lc.Logger.Infof("Connecting to %s", u) + lc.t.Go(func() error { + return lc.doQueryRange(ctx, u, c, infinite) + }) + + return c +} + +func (lc *VLClient) Get(ctx context.Context, url string) (*http.Response, error) { + request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + for k, v := range lc.requestHeaders { + request.Header.Add(k, v) + } + + lc.Logger.Debugf("GET %s", url) + + return lc.client.Do(request) +} + +func NewVLClient(config Config) *VLClient { + headers := make(map[string]string) + for k, v := range config.Headers { + headers[k] = v + } + + if config.Username != "" || config.Password != "" { + headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) + } + + headers["User-Agent"] = useragent.Default() + + return &VLClient{ + Logger: log.WithField("component", "victorialogs-client"), + config: config, + requestHeaders: headers, + 
client: &http.Client{}, + } +} diff --git a/pkg/acquisition/modules/victorialogs/victorialogs.go b/pkg/acquisition/modules/victorialogs/victorialogs.go new file mode 100644 index 00000000000..c6bb3b320ba --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/victorialogs.go @@ -0,0 +1,369 @@ +package victorialogs + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs/internal/vlclient" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +const ( + defaultLimit int = 100 +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_victorialogssource_hits_total", + Help: "Total lines that were read.", + }, + []string{"source"}) + +type VLAuthConfiguration struct { + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +type VLConfiguration struct { + URL string `yaml:"url"` // VictoriaLogs url + Prefix string `yaml:"prefix"` // VictoriaLogs prefix + Query string `yaml:"query"` // LogsQL query + Limit int `yaml:"limit"` // Limit of logs to read + Since time.Duration `yaml:"since"` + Headers map[string]string `yaml:"headers"` // HTTP headers for talking to VictoriaLogs + WaitForReady time.Duration `yaml:"wait_for_ready"` // Retry interval, default is 10 seconds + Auth VLAuthConfiguration `yaml:"auth"` + MaxFailureDuration time.Duration `yaml:"max_failure_duration"` // Max duration of failure before stopping the source + configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type VLSource struct { + metricsLevel int + Config VLConfiguration + + Client *vlclient.VLClient + + logger *log.Entry +} + +func (l *VLSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (l 
*VLSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (l *VLSource) UnmarshalConfig(yamlConfig []byte) error { + err := yaml.UnmarshalStrict(yamlConfig, &l.Config) + if err != nil { + return fmt.Errorf("cannot parse VictoriaLogs acquisition configuration: %w", err) + } + + if l.Config.Query == "" { + return errors.New("VictoriaLogs query is mandatory") + } + + if l.Config.WaitForReady == 0 { + l.Config.WaitForReady = 10 * time.Second + } + + if l.Config.Mode == "" { + l.Config.Mode = configuration.TAIL_MODE + } + if l.Config.Prefix == "" { + l.Config.Prefix = "/" + } + + if !strings.HasSuffix(l.Config.Prefix, "/") { + l.Config.Prefix += "/" + } + + if l.Config.Limit == 0 { + l.Config.Limit = defaultLimit + } + + if l.Config.Mode == configuration.TAIL_MODE { + l.logger.Infof("Resetting since") + l.Config.Since = 0 + } + + if l.Config.MaxFailureDuration == 0 { + l.Config.MaxFailureDuration = 30 * time.Second + } + + return nil +} + +func (l *VLSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { + l.Config = VLConfiguration{} + l.logger = logger + l.metricsLevel = metricsLevel + err := l.UnmarshalConfig(config) + if err != nil { + return err + } + + l.logger.Infof("Since value: %s", l.Config.Since.String()) + + clientConfig := vlclient.Config{ + URL: l.Config.URL, + Headers: l.Config.Headers, + Limit: l.Config.Limit, + Query: l.Config.Query, + Since: l.Config.Since, + Username: l.Config.Auth.Username, + Password: l.Config.Auth.Password, + FailMaxDuration: l.Config.MaxFailureDuration, + } + + l.Client = vlclient.NewVLClient(clientConfig) + l.Client.Logger = logger.WithFields(log.Fields{"component": "victorialogs-client", "source": l.Config.URL}) + return nil +} + +func (l *VLSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { + l.logger = logger + l.Config = VLConfiguration{} + l.Config.Mode = configuration.CAT_MODE + l.Config.Labels = 
labels + l.Config.UniqueId = uuid + + u, err := url.Parse(dsn) + if err != nil { + return fmt.Errorf("while parsing dsn '%s': %w", dsn, err) + } + if u.Scheme != "victorialogs" { + return fmt.Errorf("invalid DSN %s for VictoriaLogs source, must start with victorialogs://", dsn) + } + if u.Host == "" { + return errors.New("empty host") + } + scheme := "http" + + params := u.Query() + if q := params.Get("ssl"); q != "" { + scheme = "https" + } + if q := params.Get("query"); q != "" { + l.Config.Query = q + } + if w := params.Get("wait_for_ready"); w != "" { + l.Config.WaitForReady, err = time.ParseDuration(w) + if err != nil { + return err + } + } else { + l.Config.WaitForReady = 10 * time.Second + } + + if s := params.Get("since"); s != "" { + l.Config.Since, err = time.ParseDuration(s) + if err != nil { + return fmt.Errorf("invalid since in dsn: %w", err) + } + } + + if maxFailureDuration := params.Get("max_failure_duration"); maxFailureDuration != "" { + duration, err := time.ParseDuration(maxFailureDuration) + if err != nil { + return fmt.Errorf("invalid max_failure_duration in dsn: %w", err) + } + l.Config.MaxFailureDuration = duration + } else { + l.Config.MaxFailureDuration = 5 * time.Second // for OneShot mode it doesn't make sense to have longer duration + } + + if limit := params.Get("limit"); limit != "" { + limit, err := strconv.Atoi(limit) + if err != nil { + return fmt.Errorf("invalid limit in dsn: %w", err) + } + l.Config.Limit = limit + } + + if logLevel := params.Get("log_level"); logLevel != "" { + level, err := log.ParseLevel(logLevel) + if err != nil { + return fmt.Errorf("invalid log_level in dsn: %w", err) + } + l.Config.LogLevel = &level + l.logger.Logger.SetLevel(level) + } + + l.Config.URL = fmt.Sprintf("%s://%s", scheme, u.Host) + if u.User != nil { + l.Config.Auth.Username = u.User.Username() + l.Config.Auth.Password, _ = u.User.Password() + } + + clientConfig := vlclient.Config{ + URL: l.Config.URL, + Headers: l.Config.Headers, + Limit: 
l.Config.Limit, + Query: l.Config.Query, + Since: l.Config.Since, + Username: l.Config.Auth.Username, + Password: l.Config.Auth.Password, + } + + l.Client = vlclient.NewVLClient(clientConfig) + l.Client.Logger = logger.WithFields(log.Fields{"component": "victorialogs-client", "source": l.Config.URL}) + + return nil +} + +func (l *VLSource) GetMode() string { + return l.Config.Mode +} + +func (l *VLSource) GetName() string { + return "victorialogs" +} + +// OneShotAcquisition reads a set of file and returns when done +func (l *VLSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + l.logger.Debug("VictoriaLogs one shot acquisition") + l.Client.SetTomb(t) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer cancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("VictoriaLogs is not ready: %w", err) + } + + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + respChan, err := l.getResponseChan(ctx, false) + if err != nil { + return fmt.Errorf("error when starting acquisition: %w", err) + } + + for { + select { + case <-t.Dying(): + l.logger.Debug("VictoriaLogs one shot acquisition stopped") + return nil + case resp, ok := <-respChan: + if !ok { + l.logger.Info("VictoriaLogs acquisition completed") + return nil + } + l.readOneEntry(resp, l.Config.Labels, out) + } + } +} + +func (l *VLSource) readOneEntry(entry *vlclient.Log, labels map[string]string, out chan types.Event) { + ll := types.Line{} + ll.Raw = entry.Message + ll.Time = entry.Time + ll.Src = l.Config.URL + ll.Labels = labels + ll.Process = true + ll.Module = l.GetName() + + if l.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() + } + expectMode := types.LIVE + if l.Config.UseTimeMachine { + expectMode = types.TIMEMACHINE + } + out <- types.Event{ + Line: ll, + Process: true, + Type: types.LOG, + ExpectMode: expectMode, + } +} + +func (l *VLSource) 
StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + l.Client.SetTomb(t) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer cancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("VictoriaLogs is not ready: %w", err) + } + + lctx, clientCancel := context.WithCancel(ctx) + //Don't defer clientCancel(), the client outlives this function call + + t.Go(func() error { + <-t.Dying() + clientCancel() + return nil + }) + + t.Go(func() error { + respChan, err := l.getResponseChan(lctx, true) + if err != nil { + clientCancel() + l.logger.Errorf("could not start VictoriaLogs tail: %s", err) + return fmt.Errorf("while starting VictoriaLogs tail: %w", err) + } + for { + select { + case resp, ok := <-respChan: + if !ok { + l.logger.Warnf("VictoriaLogs channel closed") + clientCancel() + return err + } + l.readOneEntry(resp, l.Config.Labels, out) + case <-t.Dying(): + clientCancel() + return nil + } + } + }) + return nil +} + +func (l *VLSource) getResponseChan(ctx context.Context, infinite bool) (chan *vlclient.Log, error) { + var ( + respChan chan *vlclient.Log + err error + ) + + if l.Config.Mode == configuration.TAIL_MODE { + respChan, err = l.Client.Tail(ctx) + if err != nil { + l.logger.Errorf("could not start VictoriaLogs tail: %s", err) + return respChan, fmt.Errorf("while starting VictoriaLogs tail: %w", err) + } + } else { + respChan = l.Client.QueryRange(ctx, infinite) + } + return respChan, err +} + +func (l *VLSource) CanRun() error { + return nil +} + +func (l *VLSource) GetUuid() string { + return l.Config.UniqueId +} + +func (l *VLSource) Dump() interface{} { + return l +} + +// SupportedModes returns the supported modes by the acquisition module +func (l *VLSource) SupportedModes() []string { + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} +} diff --git a/pkg/acquisition/modules/victorialogs/victorialogs_test.go 
b/pkg/acquisition/modules/victorialogs/victorialogs_test.go new file mode 100644 index 00000000000..182b009c414 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/victorialogs_test.go @@ -0,0 +1,479 @@ +package victorialogs_test + +import ( + "bytes" + "context" + "fmt" + "io" + "math/rand" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func TestConfiguration(t *testing.T) { + log.Infof("Test 'TestConfigure'") + + tests := []struct { + config string + expectedErr string + password string + waitForReady time.Duration + testName string + }{ + { + config: `foobar: asd`, + expectedErr: "line 1: field foobar not found in type victorialogs.VLConfiguration", + testName: "Unknown field", + }, + { + config: ` +mode: tail +source: victorialogs`, + expectedErr: "query is mandatory", + testName: "Missing url", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +`, + expectedErr: "query is mandatory", + testName: "Missing query", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +query: > + {server="demo"} +`, + expectedErr: "", + testName: "Correct config", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +wait_for_ready: 5s +query: > + {server="demo"} +`, + expectedErr: "", + testName: "Correct config with wait_for_ready", + waitForReady: 5 * time.Second, + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +auth: + username: foo + password: bar +query: > + {server="demo"} +`, + expectedErr: "", + password: "bar", + testName: "Correct config with password", + 
}, + } + subLogger := log.WithField("type", "victorialogs") + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + vlSource := victorialogs.VLSource{} + err := vlSource.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + cstest.AssertErrorContains(t, err, test.expectedErr) + + if test.password != "" { + p := vlSource.Config.Auth.Password + if test.password != p { + t.Fatalf("Password mismatch : %s != %s", test.password, p) + } + } + + if test.waitForReady != 0 { + if vlSource.Config.WaitForReady != test.waitForReady { + t.Fatalf("Wrong WaitForReady %v != %v", vlSource.Config.WaitForReady, test.waitForReady) + } + } + }) + } +} + +func TestConfigureDSN(t *testing.T) { + log.Infof("Test 'TestConfigureDSN'") + + tests := []struct { + name string + dsn string + expectedErr string + since time.Time + password string + scheme string + waitForReady time.Duration + }{ + { + name: "Wrong scheme", + dsn: "wrong://", + expectedErr: "invalid DSN wrong:// for VictoriaLogs source, must start with victorialogs://", + }, + { + name: "Correct DSN", + dsn: `victorialogs://localhost:9428/?query={server="demo"}`, + expectedErr: "", + }, + { + name: "Empty host", + dsn: "victorialogs://", + expectedErr: "empty host", + }, + { + name: "Invalid DSN", + dsn: "victorialogs", + expectedErr: "invalid DSN victorialogs for VictoriaLogs source, must start with victorialogs://", + }, + { + name: "Bad since param", + dsn: `victorialogs://127.0.0.1:9428/?since=3h&query={server="demo"}`, + since: time.Now().Add(-3 * time.Hour), + }, + { + name: "Basic Auth", + dsn: `victorialogs://login:password@localhost:3102/?query={server="demo"}`, + password: "password", + }, + { + name: "Correct DSN", + dsn: `victorialogs://localhost:9428/?query={server="demo"}&wait_for_ready=5s`, + expectedErr: "", + waitForReady: 5 * time.Second, + }, + { + name: "SSL DSN", + dsn: `victorialogs://localhost:9428/?ssl=true`, + scheme: "https", + }, + } + + for _, test := range tests 
{ + subLogger := log.WithFields(log.Fields{ + "type": "victorialogs", + "name": test.name, + }) + + t.Logf("Test : %s", test.name) + + vlSource := &victorialogs.VLSource{} + err := vlSource.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") + cstest.AssertErrorContains(t, err, test.expectedErr) + + noDuration, _ := time.ParseDuration("0s") + if vlSource.Config.Since != noDuration && vlSource.Config.Since.Round(time.Second) != time.Since(test.since).Round(time.Second) { + t.Fatalf("Invalid since %v", vlSource.Config.Since) + } + + if test.password != "" { + p := vlSource.Config.Auth.Password + if test.password != p { + t.Fatalf("Password mismatch : %s != %s", test.password, p) + } + } + + if test.scheme != "" { + url, _ := url.Parse(vlSource.Config.URL) + if test.scheme != url.Scheme { + t.Fatalf("Schema mismatch : %s != %s", test.scheme, url.Scheme) + } + } + + if test.waitForReady != 0 { + if vlSource.Config.WaitForReady != test.waitForReady { + t.Fatalf("Wrong WaitForReady %v != %v", vlSource.Config.WaitForReady, test.waitForReady) + } + } + } +} + +// Ingestion format docs: https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api +func feedVLogs(ctx context.Context, logger *log.Entry, n int, title string) error { + bb := bytes.NewBuffer(nil) + for i := range n { + fmt.Fprintf(bb, + `{ "_time": %q,"_msg":"Log line #%d %v", "server": "demo", "key": %q} +`, time.Now().Format(time.RFC3339), i, title, title) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://127.0.0.1:9428/insert/jsonline?_stream_fields=server,key", bb) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + b, _ := io.ReadAll(resp.Body) + logger.Error(string(b)) + + return fmt.Errorf("Bad post status %d", resp.StatusCode) + } + + logger.Info(n, 
" Events sent") + // VictoriaLogs buffers data before saving to disk + // Default flush deadline is 2s, waiting 3s to be safe + time.Sleep(3 * time.Second) + + return nil +} + +func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() + + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.InfoLevel) + log.Info("Test 'TestStreamingAcquisition'") + + key := strconv.Itoa(rand.Intn(1000)) + tests := []struct { + config string + }{ + { + config: fmt.Sprintf(` +mode: cat +source: victorialogs +url: http://127.0.0.1:9428 +query: > + {server=demo, key=%q} +since: 1h +`, key), + }, + } + + for _, ts := range tests { + logger := log.New() + subLogger := logger.WithField("type", "victorialogs") + vlSource := victorialogs.VLSource{} + + err := vlSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = feedVLogs(ctx, subLogger, 20, key) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + out := make(chan types.Event) + read := 0 + + go func() { + for { + <-out + + read++ + } + }() + + vlTomb := tomb.Tomb{} + + err = vlSource.OneShotAcquisition(ctx, out, &vlTomb) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + // Some logs might be buffered + assert.Greater(t, read, 10) + } +} + +func TestStreamingAcquisition(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.InfoLevel) + log.Info("Test 'TestStreamingAcquisition'") + + title := time.Now().String() + tests := []struct { + name string + config string + expectedErr string + streamErr string + expectedLines int + }{ + { + name: "Bad port", + config: `mode: tail +source: victorialogs +url: "http://127.0.0.1:9429" +query: > + server:"demo"`, // Wrong port + expectedErr: "", + streamErr: `VictoriaLogs is not ready`, + expectedLines: 0, + }, + { 
+ name: "ok", + config: `mode: tail +source: victorialogs +url: "http://127.0.0.1:9428" +query: > + server:"demo"`, + expectedErr: "", + streamErr: "", + expectedLines: 20, + }, + } + + ctx := context.Background() + + for _, ts := range tests { + t.Run(ts.name, func(t *testing.T) { + logger := log.New() + subLogger := logger.WithFields(log.Fields{ + "type": "victorialogs", + "name": ts.name, + }) + + out := make(chan types.Event) + vlTomb := tomb.Tomb{} + vlSource := victorialogs.VLSource{} + + err := vlSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = vlSource.StreamingAcquisition(ctx, out, &vlTomb) + cstest.AssertErrorContains(t, err, ts.streamErr) + + if ts.streamErr != "" { + return + } + + time.Sleep(time.Second * 2) // We need to give time to start reading from the WS + + readTomb := tomb.Tomb{} + readCtx, cancel := context.WithTimeout(ctx, time.Second*10) + count := 0 + + readTomb.Go(func() error { + defer cancel() + + for { + select { + case <-readCtx.Done(): + return readCtx.Err() + case evt := <-out: + count++ + + if !strings.HasSuffix(evt.Line.Raw, title) { + return fmt.Errorf("Incorrect suffix : %s", evt.Line.Raw) + } + + if count == ts.expectedLines { + return nil + } + } + } + }) + + err = feedVLogs(ctx, subLogger, ts.expectedLines, title) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = readTomb.Wait() + + cancel() + + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + assert.Equal(t, ts.expectedLines, count) + }) + } +} + +func TestStopStreaming(t *testing.T) { + ctx := context.Background() + + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + config := ` +mode: tail +source: victorialogs +url: http://127.0.0.1:9428 +query: > + server:"demo" +` + logger := log.New() + subLogger := logger.WithField("type", "victorialogs") + title := time.Now().String() + vlSource := victorialogs.VLSource{} + 
+ err := vlSource.Configure([]byte(config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + out := make(chan types.Event, 10) + + vlTomb := &tomb.Tomb{} + + err = vlSource.StreamingAcquisition(ctx, out, vlTomb) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + time.Sleep(time.Second * 2) + + err = feedVLogs(ctx, subLogger, 1, title) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + vlTomb.Kill(nil) + + err = vlTomb.Wait() + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } +} diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 8283bcc21a2..22186ea96cb 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -287,9 +287,9 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { w.logger = logger - w.metricsLevel = MetricsLevel + w.metricsLevel = metricsLevel err := w.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index 2f6fe15450f..b4998de76c4 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -7,18 +7,22 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" 
"golang.org/x/sys/windows/svc/eventlog" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { config string @@ -62,7 +66,8 @@ xpath_query: test`, } func TestQueryBuilder(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { config string @@ -111,23 +116,26 @@ event_level: bla`, } subLogger := log.WithField("type", "windowseventlog") for _, test := range tests { - f := WinEventLogSource{} - f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - q, err := f.buildXpathQuery() - if test.expectedErr != "" { - if err == nil { - t.Fatalf("expected error '%s' but got none", test.expectedErr) + t.Run(test.config, func(t *testing.T) { + f := WinEventLogSource{} + + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + cstest.AssertErrorContains(t, err, test.expectedErr) + if test.expectedErr != "" { + return } - assert.Contains(t, err.Error(), test.expectedErr) - } else { + + q, err := f.buildXpathQuery() require.NoError(t, err) assert.Equal(t, test.expectedQuery, q) - } + }) } } func TestLiveAcquisition(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) + ctx := context.Background() tests := []struct { @@ -185,8 +193,13 @@ event_ids: to := &tomb.Tomb{} c := make(chan types.Event) f := WinEventLogSource{} - f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - f.StreamingAcquisition(ctx, c, to) + + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + require.NoError(t, err) + + err = f.StreamingAcquisition(ctx, c, to) + require.NoError(t, err) + 
time.Sleep(time.Second) lines := test.expectedLines go func() { @@ -261,7 +274,8 @@ func TestOneShotAcquisition(t *testing.T) { }, } - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -269,15 +283,13 @@ func TestOneShotAcquisition(t *testing.T) { to := &tomb.Tomb{} c := make(chan types.Event) f := WinEventLogSource{} - err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + cstest.AssertErrorContains(t, err, test.expectedConfigureErr) if test.expectedConfigureErr != "" { - assert.Contains(t, err.Error(), test.expectedConfigureErr) return } - require.NoError(t, err) - go func() { for { select { diff --git a/pkg/acquisition/test_files/env.yaml b/pkg/acquisition/test_files/env.yaml new file mode 100644 index 00000000000..8abd4b16ca5 --- /dev/null +++ b/pkg/acquisition/test_files/env.yaml @@ -0,0 +1,6 @@ +labels: + test: foobar + non_existing: ${NON_EXISTING} +log_level: info +source: mock +toto: ${TEST_ENV} \ No newline at end of file diff --git a/pkg/acquisition/victorialogs.go b/pkg/acquisition/victorialogs.go new file mode 100644 index 00000000000..b097f0c8dfc --- /dev/null +++ b/pkg/acquisition/victorialogs.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_victorialogs + +package acquisition + +import ( + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("victorialogs", func() DataSource { return &victorialogs.VLSource{} }) +} diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 1b7d1e20018..0b38336a698 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "reflect" 
"slices" "strconv" @@ -16,9 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const ( - MaxContextValueLen = 4000 -) +const MaxContextValueLen = 4000 var alertContext = Context{} @@ -34,7 +33,8 @@ func ValidateContextExpr(key string, expressions []string) error { _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{ "evt": &types.Event{}, "match": &types.MatchedRule{}, - "req": &http.Request{}})...) + "req": &http.Request{}, + })...) if err != nil { return fmt.Errorf("compilation of '%s' failed: %w", expression, err) } @@ -79,7 +79,8 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{ "evt": &types.Event{}, "match": &types.MatchedRule{}, - "req": &http.Request{}})...) + "req": &http.Request{}, + })...) if err != nil { return fmt.Errorf("compilation of '%s' context value failed: %w", value, err) } @@ -114,6 +115,7 @@ func TruncateContextMap(contextMap map[string][]string, contextValueLen int) ([] } metas = append(metas, &meta) } + return metas, errors } @@ -150,20 +152,19 @@ func TruncateContext(values []string, contextValueLen int) (string, error) { } func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *http.Request, tmpContext map[string][]string) []error { - var errors []error - //if we're evaluating context for appsec event, match and request will be present. - //otherwise, only evt will be. + // if we're evaluating context for appsec event, match and request will be present. + // otherwise, only evt will be. 
if match == nil { match = types.NewMatchedRule() } + if request == nil { request = &http.Request{} } for key, values := range alertContext.ContextToSendCompiled { - if _, ok := tmpContext[key]; !ok { tmpContext[key] = make([]string, 0) } @@ -176,6 +177,7 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h errors = append(errors, fmt.Errorf("failed to get value for %s: %w", key, err)) continue } + switch out := output.(type) { case string: val = out @@ -201,6 +203,10 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h } } default: + r := reflect.ValueOf(output) + if r.IsZero() || r.IsNil() { + continue + } val := fmt.Sprintf("%v", output) if val != "" && !slices.Contains(tmpContext[key], val) { tmpContext[key] = append(tmpContext[key], val) @@ -208,6 +214,7 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h } } } + return errors } @@ -237,8 +244,8 @@ func EventToContext(events []types.Event) (models.Meta, []error) { tmpContext := make(map[string][]string) - for _, evt := range events { - tmpErrors := EvalAlertContextRules(evt, nil, nil, tmpContext) + for i := range events { + tmpErrors := EvalAlertContextRules(events[i], nil, nil, tmpContext) errors = append(errors, tmpErrors...) 
} diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index dc752ba8b09..9d9373bcd36 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -8,9 +8,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestNewAlertContext(t *testing.T) { @@ -229,6 +230,7 @@ func TestValidateContextExpr(t *testing.T) { } for _, test := range tests { fmt.Printf("Running test '%s'\n", test.name) + err := ValidateContextExpr(test.key, test.exprs) if test.expectedErr == nil { require.NoError(t, err) @@ -239,7 +241,6 @@ func TestValidateContextExpr(t *testing.T) { } func TestAppsecEventToContext(t *testing.T) { - tests := []struct { name string contextToSend map[string][]string @@ -349,16 +350,62 @@ func TestAppsecEventToContext(t *testing.T) { } for _, test := range tests { - //reset cache + // reset cache alertContext = Context{} - //compile + // compile if err := NewAlertContext(test.contextToSend, 100); err != nil { t.Fatalf("failed to compile %s: %s", test.name, err) } - //run + // run metas, errors := AppsecEventToContext(test.match, test.req) assert.Len(t, errors, test.expectedErrLen) assert.ElementsMatch(t, test.expectedResult, metas) } } + +func TestEvalAlertContextRules(t *testing.T) { + tests := []struct { + name string + contextToSend map[string][]string + event types.Event + match types.MatchedRule + req *http.Request + expectedResult map[string][]string + expectedErrLen int + }{ + { + name: "no appsec match", + contextToSend: map[string][]string{ + "source_ip": {"evt.Parsed.source_ip"}, + "id": {"match.id"}, + }, + event: types.Event{ + Parsed: map[string]string{ + "source_ip": "1.2.3.4", + "source_machine": "mymachine", + "uri": "/test/test/test/../../../../../../../../", 
+ }, + }, + expectedResult: map[string][]string{ + "source_ip": {"1.2.3.4"}, + "id": {}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + contextDict := make(map[string][]string) + + alertContext = Context{} + if err := NewAlertContext(test.contextToSend, 100); err != nil { + t.Fatalf("failed to compile %s: %s", test.name, err) + } + + errs := EvalAlertContextRules(test.event, &test.match, test.req, contextDict) + assert.Len(t, errs, test.expectedErrLen) + assert.Equal(t, test.expectedResult, contextDict) + }) + } +} diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 0d1ff41685f..9df633fa8be 100644 --- a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -23,7 +23,8 @@ func TestAlertsListAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -202,7 +203,8 @@ func TestAlertsGetAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -368,13 +370,15 @@ func TestAlertsCreateAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := 
w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`["3"]`)) + _, err := w.Write([]byte(`["3"]`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -408,14 +412,16 @@ func TestAlertsDeleteAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"message":"0 deleted alerts"}`)) + _, err := w.Write([]byte(`{"message":"0 deleted alerts"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 193486ff065..c43e9fc291c 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -62,7 +62,6 @@ func (t *JWTTransport) refreshJwtToken() error { enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) err = enc.Encode(auth) - if err != nil { return fmt.Errorf("could not encode jwt auth body: %w", err) } @@ -169,7 +168,6 @@ func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) // RoundTrip implements the RoundTripper interface. 
func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { - var resp *http.Response attemptsCount := make(map[int]int) @@ -229,7 +227,6 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { } } return resp, nil - } func (t *JWTTransport) Client() *http.Client { diff --git a/pkg/apiclient/auth_key_test.go b/pkg/apiclient/auth_key_test.go index f686de6227a..b7cce3e15c9 100644 --- a/pkg/apiclient/auth_key_test.go +++ b/pkg/apiclient/auth_key_test.go @@ -24,10 +24,12 @@ func TestApiAuth(t *testing.T) { if r.Header.Get("X-Api-Key") == "ixu" { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`null`)) + _, err := w.Write([]byte(`null`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusForbidden) - w.Write([]byte(`{"message":"access forbidden"}`)) + _, err := w.Write([]byte(`{"message":"access forbidden"}`)) + assert.NoError(t, err) } }) diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 47d97a28344..ec473beca77 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -125,8 +125,8 @@ func NewClient(config *Config) (*ApiClient, error) { return c, nil } -func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { - transport, baseURL := createTransport(URL) +func NewDefaultClient(url *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { + transport, baseURL := createTransport(url) if client == nil { client = &http.Client{} diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go index eeca929ea6e..c64404dc7ee 100644 --- a/pkg/apiclient/client_http.go +++ b/pkg/apiclient/client_http.go @@ -78,10 +78,11 @@ func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (* } // If the error type is *url.Error, sanitize its URL before returning. 
- if e, ok := err.(*url.Error); ok { - if url, err := url.Parse(e.URL); err == nil { - e.URL = url.String() - return newResponse(resp), e + var urlErr *url.Error + if errors.As(err, &urlErr) { + if parsedURL, parseErr := url.Parse(urlErr.URL); parseErr == nil { + urlErr.URL = parsedURL.String() + return newResponse(resp), urlErr } return newResponse(resp), err diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index 45cd8410a8e..0d6cf3d993e 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -31,7 +32,8 @@ func TestNewRequestInvalid(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index d1f58f33ad2..c172849c21e 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -56,13 +56,11 @@ func toUNCPath(path string) (string, error) { return uncPath, nil } -func setupUnixSocketWithPrefix(socket string, urlPrefix string) (mux *http.ServeMux, serverURL string, teardown func()) { +func setupUnixSocketWithPrefix(t *testing.T, socket string, urlPrefix string) (mux *http.ServeMux, serverURL string, teardown func()) { var err error if runtime.GOOS == "windows" { socket, err = toUNCPath(socket) - if err != nil { - log.Fatalf("converting to UNC path: %s", err) - } + require.NoError(t, err, "converting to UNC path") } mux = http.NewServeMux() @@ -103,7 +101,8 @@ func TestNewClientOk(t *testing.T) { /*mock login*/ 
mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -120,7 +119,7 @@ func TestNewClientOk_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() apiURL, err := url.Parse(urlx) @@ -140,7 +139,8 @@ func TestNewClientOk_UnixSocket(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -176,7 +176,8 @@ func TestNewClientKo(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -202,7 +203,8 @@ func TestNewDefaultClient(t *testing.T) { mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + assert.NoError(t, err) }) _, _, err = client.Alerts.List(context.Background(), 
AlertsListOpts{}) @@ -215,7 +217,7 @@ func TestNewDefaultClient_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() apiURL, err := url.Parse(urlx) @@ -230,7 +232,8 @@ func TestNewDefaultClient_UnixSocket(t *testing.T) { mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + assert.NoError(t, err) }) _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) @@ -268,7 +271,8 @@ func TestNewClientRegisterOK(t *testing.T) { mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx + "/") @@ -293,14 +297,15 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() /*mock login*/ mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx) @@ -333,7 +338,8 @@ func TestNewClientBadAnswer(t *testing.T) { mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { 
testMethod(t, r, "POST") w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`bad`)) + _, err := w.Write([]byte(`bad`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx + "/") diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index 942d14689ff..b8bc327a7d7 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -31,11 +31,12 @@ func TestDecisionsList(t *testing.T) { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) w.WriteHeader(http.StatusOK) - w.Write([]byte(`[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]`)) + _, err := w.Write([]byte(`[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`null`)) - // no results + _, err := w.Write([]byte(`null`)) + assert.NoError(t, err) } }) @@ -90,10 +91,12 @@ func TestDecisionsStream(t *testing.T) { if r.Method == http.MethodGet { if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) + _, err := w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":null}`)) + _, err := 
w.Write([]byte(`{"deleted":null,"new":null}`)) + assert.NoError(t, err) } } }) @@ -163,10 +166,12 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { if r.Method == http.MethodGet { if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}],"new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}]}`)) + _, err := w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}],"new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}]}`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":null}`)) + _, err := w.Write([]byte(`{"deleted":null,"new":null}`)) + assert.NoError(t, err) } } }) @@ -227,9 +232,10 @@ func TestDecisionsStreamV3(t *testing.T) { if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], + _, err := w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], "new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}], "links": {"blocklists":[{"name":"blocklist1","url":"/v3/blocklist","scope":"ip","remediation":"ban","duration":"24h"}]}}`)) + assert.NoError(t, err) } }) @@ -303,7 +309,8 @@ func TestDecisionsFromBlocklist(t *testing.T) { if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) - w.Write([]byte("1.2.3.4\r\n1.2.3.5")) + _, err := w.Write([]byte("1.2.3.4\r\n1.2.3.5")) + assert.NoError(t, err) } }) @@ -388,14 +395,16 @@ func TestDeleteDecisions(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r 
*http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"nbDeleted":"1"}`)) + _, err := w.Write([]byte(`{"nbDeleted":"1"}`)) + assert.NoError(t, err) // w.Write([]byte(`{"message":"0 deleted alerts"}`)) }) diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index d86234e4813..c4edb42d475 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -103,13 +103,13 @@ func TestSimulatedAlert(t *testing.T) { // exclude decision in simulation mode w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", alertContent, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) // include decision in simulation mode w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", alertContent, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) } @@ -120,21 +120,21 @@ func TestCreateAlert(t *testing.T) { // Create Alert with invalid format w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") - assert.Equal(t, 400, w.Code) - assert.Equal(t, `{"message":"invalid character 
'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.JSONEq(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create Alert with invalid input alertContent := GetAlertReaderFromFile(t, "./tests/invalidAlert_sample.json") w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) // Create Valid Alert w = lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, `["1"]`, w.Body.String()) } @@ -142,7 +142,8 @@ func TestCreateAlertChannels(t *testing.T) { ctx := context.Background() apiServer, config := NewAPIServer(t, ctx) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) - apiServer.InitController() + err := apiServer.InitController() + require.NoError(t, err) loginResp := LoginToTestAPI(t, ctx, apiServer.router, config) lapi := LAPI{router: apiServer.router, loginResp: loginResp} @@ -175,13 +176,13 @@ func TestAlertListFilters(t *testing.T) { // bad filter w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", alertContent, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // get without filters w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") - assert.Equal(t, 
200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) // check alert and decision assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) @@ -189,150 +190,150 @@ func TestAlertListFilters(t *testing.T) { // test decision_type filter (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test decision_type filter (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test scope (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scope (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test scenario (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), 
`scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scenario (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test ip (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"invalid ip address 'gruueq'"}`, w.Body.String()) // test range (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test range w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", 
w.Body.String()) // test range (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=ratata", emptyBody, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"invalid ip address 'ratata'"}`, w.Body.String()) // test since (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1h", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test since (ok but yields no results) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1ns", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test since (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test until (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1ns", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test until (ok but no return) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1m", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test until 
(invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test simulated (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test simulated (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test has active decision (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: 
parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) } func TestAlertBulkInsert(t *testing.T) { @@ -343,7 +344,7 @@ func TestAlertBulkInsert(t *testing.T) { alertContent := GetAlertReaderFromFile(t, "./tests/alert_bulk.json") w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", alertContent, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) } func TestListAlert(t *testing.T) { @@ -353,13 +354,13 @@ func TestListAlert(t *testing.T) { // List Alert with invalid filter w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", emptyBody, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // List Alert w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "crowdsecurity/test") } @@ -374,7 +375,7 @@ func TestCreateAlertErrors(t *testing.T) { req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", "ratata")) lapi.router.ServeHTTP(w, req) - assert.Equal(t, 401, w.Code) + assert.Equal(t, http.StatusUnauthorized, w.Code) // test invalid bearer w = httptest.NewRecorder() @@ -382,7 +383,7 @@ func TestCreateAlertErrors(t *testing.T) { req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", lapi.loginResp.Token+"s")) lapi.router.ServeHTTP(w, req) - assert.Equal(t, 401, w.Code) + assert.Equal(t, http.StatusUnauthorized, 
w.Code) } func TestDeleteAlert(t *testing.T) { @@ -396,8 +397,8 @@ func TestDeleteAlert(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + assert.Equal(t, http.StatusForbidden, w.Code) + assert.JSONEq(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) // Delete Alert w = httptest.NewRecorder() @@ -405,8 +406,8 @@ func TestDeleteAlert(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteAlertByID(t *testing.T) { @@ -420,8 +421,8 @@ func TestDeleteAlertByID(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + assert.Equal(t, http.StatusForbidden, w.Code) + assert.JSONEq(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) // Delete Alert w = httptest.NewRecorder() @@ -429,8 +430,8 @@ func TestDeleteAlertByID(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteAlertTrustedIPS(t *testing.T) { @@ -463,7 +464,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { req.RemoteAddr = ip + ":1234" router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) + assert.Equal(t, http.StatusForbidden, w.Code) assert.Contains(t, 
w.Body.String(), fmt.Sprintf(`{"message":"access forbidden from this IP (%s)"}`, ip)) } @@ -474,8 +475,8 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { req.RemoteAddr = ip + ":1234" router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 51a85b1ea23..32847f7489a 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -332,7 +332,6 @@ func getScenarioTrustOfAlert(alert *models.Alert) string { } func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig, shareSignals bool) bool { - if !shareSignals { log.Debugf("sharing signals is disabled") return false diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 05f9150b037..88f1bd21dc4 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -46,10 +46,18 @@ type APIServer struct { consoleConfig *csconfig.ConsoleConfig } -func isBrokenConnection(err any) bool { - if ne, ok := err.(*net.OpError); ok { - if se, ok := ne.Err.(*os.SyscallError); ok { - if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { +func isBrokenConnection(maybeError any) bool { + err, ok := maybeError.(error) + if !ok { + return false + } + + var netOpError *net.OpError + if errors.As(err, &netOpError) { + var syscallError *os.SyscallError + if errors.As(netOpError.Err, &syscallError) { + if strings.Contains(strings.ToLower(syscallError.Error()), "broken pipe") || + strings.Contains(strings.ToLower(syscallError.Error()), "connection reset by peer") { return true } } @@ -57,21 +65,19 @@ func isBrokenConnection(err any) bool { // because of 
https://github.com/golang/net/blob/39120d07d75e76f0079fe5d27480bcb965a21e4c/http2/server.go // and because it seems gin doesn't handle those neither, we need to "hand define" some errors to properly catch them - if strErr, ok := err.(error); ok { - // stolen from http2/server.go in x/net - var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") - ) + // stolen from http2/server.go in x/net + var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") + ) - if errors.Is(strErr, errClientDisconnected) || - errors.Is(strErr, errClosedBody) || - errors.Is(strErr, errHandlerComplete) || - errors.Is(strErr, errStreamClosed) { - return true - } + if errors.Is(err, errClientDisconnected) || + errors.Is(err, errClosedBody) || + errors.Is(err, errHandlerComplete) || + errors.Is(err, errStreamClosed) { + return true } return false @@ -209,7 +215,7 @@ func NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APISer gin.DefaultWriter = clog.Writer() router.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string { - return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s \"%s\" %s\"\n", + return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s %q %s\"\n", param.ClientIP, param.TimeStamp.Format(time.RFC1123), param.Method, diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index cf4c91dedda..d8f24add75e 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -387,7 +387,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { cfg.LogLevel = ptr.Of(log.DebugLevel) // Configure logging - err := 
types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) + err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false) require.NoError(t, err) api, err := NewServer(ctx, &cfg) @@ -439,7 +439,7 @@ func TestLoggingErrorToFileConfig(t *testing.T) { cfg.LogLevel = ptr.Of(log.ErrorLevel) // Configure logging - err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) + err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false) require.NoError(t, err) api, err := NewServer(ctx, &cfg) diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index ffefffc226b..6a316d8a2e4 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -394,8 +394,6 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en func (c *Controller) StreamDecision(gctx *gin.Context) { var err error - ctx := gctx.Request.Context() - streamStartTime := time.Now().UTC() bouncerInfo, err := getBouncerFromContext(gctx) @@ -426,7 +424,8 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { if err == nil { // Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions - if err := c.DBClient.UpdateBouncerLastPull(ctx, streamStartTime, bouncerInfo.ID); err != nil { + // Do not reuse the context provided by gin because we already have sent the response to the client, so there's a chance for it to already be canceled + if err := c.DBClient.UpdateBouncerLastPull(context.Background(), streamStartTime, bouncerInfo.ID); err != nil { log.Errorf("unable to update bouncer 
'%s' pull: %v", bouncerInfo.Name, err) } } diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index d661de44b0e..d7b60c1a1b8 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -21,18 +21,6 @@ func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { case errors.Is(err, database.HashError): gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) return - case errors.Is(err, database.InsertFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.QueryFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.ParseTimeFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.ParseDurationFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return default: gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index a0af6956443..cb5d2e1c4f1 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -22,19 +22,19 @@ func TestDeleteDecisionRange(t *testing.T) { // delete by ip wrong w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by range w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"2"}`, w.Body.String()) // delete by range : ensure it was already deleted w = lapi.RecordResponse(t, ctx, "DELETE", 
"/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) } func TestDeleteDecisionFilter(t *testing.T) { @@ -48,19 +48,19 @@ func TestDeleteDecisionFilter(t *testing.T) { w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by ip good w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) // delete by scope/value w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteDecisionFilterByScenario(t *testing.T) { @@ -74,13 +74,13 @@ func TestDeleteDecisionFilterByScenario(t *testing.T) { w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by scenario good w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"2"}`, w.Body.String()) } func TestGetDecisionFilters(t *testing.T) { diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index f6f51763975..72ae0302ae4 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -23,7 +23,7 @@ func 
TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"machine test not validated"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"machine test not validated"}`, w.Body.String()) // Login with machine not exist w = httptest.NewRecorder() @@ -32,7 +32,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"ent: machine not found"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"ent: machine not found"}`, w.Body.String()) // Login with invalid body w = httptest.NewRecorder() @@ -41,7 +41,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"missing: invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"missing: invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Login with invalid format w = httptest.NewRecorder() @@ -50,7 +50,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) // Validate machine ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) @@ -62,7 +62,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"incorrect Username or Password"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"incorrect Username or Password"}`, w.Body.String()) // Login with valid machine w = httptest.NewRecorder() diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 969f75707d6..57b96f54ddd 100644 --- a/pkg/apiserver/machines_test.go +++ 
b/pkg/apiserver/machines_test.go @@ -25,7 +25,7 @@ func TestCreateMachine(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusBadRequest, w.Code) - assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create machine with invalid input w = httptest.NewRecorder() @@ -34,7 +34,7 @@ func TestCreateMachine(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnprocessableEntity, w.Code) - assert.Equal(t, `{"message":"validation failure list:\nmachine_id in body is required\npassword in body is required"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"validation failure list:\nmachine_id in body is required\npassword in body is required"}`, w.Body.String()) // Create machine b, err := json.Marshal(MachineTest) @@ -144,7 +144,7 @@ func TestCreateMachineAlreadyExist(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusForbidden, w.Code) - assert.Equal(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) } func TestAutoRegistration(t *testing.T) { diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 3c154be4fab..df2f68930d6 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -174,7 +174,6 @@ func (a *APIKey) authPlain(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger.Infof("Creating bouncer %s", bouncerName) bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, clientIP, hashStr, types.ApiKeyAuthType, true) - if err != nil { logger.Errorf("while creating bouncer db entry: %s", err) return nil diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 553db205b5d..5f01f76d993 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -158,7 +158,6 @@ 
func (wc *AppsecConfig) SetUpLogger() { /* wc.Name is actually the datasource name.*/ wc.Logger = wc.Logger.Dup().WithField("name", wc.Name) wc.Logger.Logger.SetLevel(*wc.LogLevel) - } func (wc *AppsecConfig) LoadByPath(file string) error { diff --git a/pkg/appsec/appsec_rule/appsec_rule.go b/pkg/appsec/appsec_rule/appsec_rule.go index 136d8b11cb7..9d47c0eed5c 100644 --- a/pkg/appsec/appsec_rule/appsec_rule.go +++ b/pkg/appsec/appsec_rule/appsec_rule.go @@ -47,7 +47,6 @@ type CustomRule struct { } func (v *CustomRule) Convert(ruleType string, appsecRuleName string) (string, []uint32, error) { - if v.Zones == nil && v.And == nil && v.Or == nil { return "", nil, errors.New("no zones defined") } diff --git a/pkg/appsec/appsec_rule/modsec_rule_test.go b/pkg/appsec/appsec_rule/modsec_rule_test.go index ffb8a15ff1f..74e9b85426e 100644 --- a/pkg/appsec/appsec_rule/modsec_rule_test.go +++ b/pkg/appsec/appsec_rule/modsec_rule_test.go @@ -88,7 +88,6 @@ func TestVPatchRuleString(t *testing.T) { rule: CustomRule{ And: []CustomRule{ { - Zones: []string{"ARGS"}, Variables: []string{"foo"}, Match: Match{Type: "regex", Value: "[^a-zA-Z]"}, @@ -161,7 +160,6 @@ SecRule ARGS_GET:foo "@rx [^a-zA-Z]" "id:1519945803,phase:2,deny,log,msg:'OR AND for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { actual, _, err := tt.rule.Convert(ModsecurityRuleType, tt.name) - if err != nil { t.Errorf("Error converting rule: %s", err) } diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index d283f95cb19..33e442e7f5b 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -15,6 +15,7 @@ import ( type AppsecCollection struct { collectionName string Rules []string + NativeRules []string } var APPSEC_RULE = "appsec-rule" @@ -88,14 +89,14 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro if strings.TrimSpace(line) == "" { continue } - appsecCol.Rules = append(appsecCol.Rules, line) 
+ appsecCol.NativeRules = append(appsecCol.NativeRules, line) } } } if appsecRule.SecLangRules != nil { logger.Tracef("Adding inline rules %+v", appsecRule.SecLangRules) - appsecCol.Rules = append(appsecCol.Rules, appsecRule.SecLangRules...) + appsecCol.NativeRules = append(appsecCol.NativeRules, appsecRule.SecLangRules...) } if appsecRule.Rules != nil { diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index d2c1612cbd7..93e31be5876 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -124,7 +124,7 @@ func (e *crzLogEvent) Stringer(key string, val fmt.Stringer) dbg.Event { return e } -func (e crzLogEvent) IsEnabled() bool { +func (e *crzLogEvent) IsEnabled() bool { return !e.muted } diff --git a/pkg/appsec/request_test.go b/pkg/appsec/request_test.go index f8333e4e5f9..8b457e24dab 100644 --- a/pkg/appsec/request_test.go +++ b/pkg/appsec/request_test.go @@ -3,7 +3,6 @@ package appsec import "testing" func TestBodyDumper(t *testing.T) { - tests := []struct { name string req *ParsedRequest @@ -159,7 +158,6 @@ func TestBodyDumper(t *testing.T) { } for idx, test := range tests { - t.Run(test.name, func(t *testing.T) { orig_dr := test.req.DumpRequest() result := test.filter(orig_dr).GetFilteredRequest() @@ -177,5 +175,4 @@ func TestBodyDumper(t *testing.T) { } }) } - } diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index a4e0bd0127a..4da9fd5bf7b 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -5,26 +5,27 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCreateSetGet(t *testing.T) { err := CacheInit(CacheCfg{Name: "test", Size: 100, TTL: 1 * time.Second}) - assert.Empty(t, err) + require.NoError(t, err) //set & get err = SetKey("test", "testkey0", "testvalue1", nil) - assert.Empty(t, err) + require.NoError(t, err) ret, err := GetKey("test", "testkey0") assert.Equal(t, "testvalue1", ret) - assert.Empty(t, err) + require.NoError(t, 
err) //re-set err = SetKey("test", "testkey0", "testvalue2", nil) - assert.Empty(t, err) + require.NoError(t, err) assert.Equal(t, "testvalue1", ret) - assert.Empty(t, err) + require.NoError(t, err) //expire time.Sleep(1500 * time.Millisecond) ret, err = GetKey("test", "testkey0") assert.Equal(t, "", ret) - assert.Empty(t, err) + require.NoError(t, err) } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 5f2f8f9248b..d94d90aaf19 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -271,6 +271,7 @@ type LocalApiServerCfg struct { LogMaxSize int `yaml:"-"` LogMaxAge int `yaml:"-"` LogMaxFiles int `yaml:"-"` + LogFormat string `yaml:"-"` TrustedIPs []string `yaml:"trusted_ips,omitempty"` PapiLogLevel *log.Level `yaml:"papi_log_level"` DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` @@ -351,7 +352,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { log.Printf("push and pull to Central API disabled") } - //Set default values for CAPI push/pull + // Set default values for CAPI push/pull if c.API.Server.OnlineClient != nil { if c.API.Server.OnlineClient.PullConfig.Community == nil { c.API.Server.OnlineClient.PullConfig.Community = ptr.Of(true) @@ -391,6 +392,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { c.API.Server.CompressLogs = c.Common.CompressLogs c.API.Server.LogMaxSize = c.Common.LogMaxSize c.API.Server.LogMaxAge = c.Common.LogMaxAge + c.API.Server.LogFormat = c.Common.LogFormat c.API.Server.LogMaxFiles = c.Common.LogMaxFiles if c.API.Server.UseForwardedForHeaders && c.API.Server.TrustedProxies == nil { diff --git a/pkg/csconfig/common.go b/pkg/csconfig/common.go index 7e1ef6e5c98..e312756ce20 100644 --- a/pkg/csconfig/common.go +++ b/pkg/csconfig/common.go @@ -12,11 +12,12 @@ type CommonCfg struct { Daemonize bool PidDir string `yaml:"pid_dir,omitempty"` // TODO: This is just for backward compat. 
Remove this later LogMedia string `yaml:"log_media"` - LogDir string `yaml:"log_dir,omitempty"` //if LogMedia = file + LogDir string `yaml:"log_dir,omitempty"` // if LogMedia = file LogLevel *log.Level `yaml:"log_level"` WorkingDir string `yaml:"working_dir,omitempty"` // TODO: This is just for backward compat. Remove this later CompressLogs *bool `yaml:"compress_logs,omitempty"` LogMaxSize int `yaml:"log_max_size,omitempty"` + LogFormat string `yaml:"log_format,omitempty"` LogMaxAge int `yaml:"log_max_age,omitempty"` LogMaxFiles int `yaml:"log_max_files,omitempty"` ForceColorLogs bool `yaml:"force_color_logs,omitempty"` @@ -24,6 +25,7 @@ type CommonCfg struct { func (c *Config) loadCommon() error { var err error + if c.Common == nil { c.Common = &CommonCfg{} } @@ -32,13 +34,15 @@ func (c *Config) loadCommon() error { c.Common.LogMedia = "stdout" } - var CommonCleanup = []*string{ + CommonCleanup := []*string{ &c.Common.LogDir, } + for _, k := range CommonCleanup { if *k == "" { continue } + *k, err = filepath.Abs(*k) if err != nil { return fmt.Errorf("failed to get absolute path of '%s': %w", *k, err) diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index 3bbdf607187..b0784e5e6f3 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -30,7 +30,7 @@ var globalConfig = Config{} // Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags type Config struct { // just a path to ourselves :p - FilePath *string `yaml:"-"` + FilePath string `yaml:"-"` Self []byte `yaml:"-"` Common *CommonCfg `yaml:"common,omitempty"` Prometheus *PrometheusCfg `yaml:"prometheus,omitempty"` @@ -45,9 +45,10 @@ type Config struct { Hub *LocalHubCfg `yaml:"-"` } -func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool) (*Config, string, error) { +// NewConfig +func NewConfig(configFile string, disableAgent bool, disableAPI bool, quiet bool) (*Config, string, error) { patcher := 
yamlpatch.NewPatcher(configFile, ".local") - patcher.SetQuiet(inCli) + patcher.SetQuiet(quiet) fcontent, err := patcher.MergedPatchContent() if err != nil { @@ -56,7 +57,7 @@ func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool configData := csstring.StrictExpand(string(fcontent), os.LookupEnv) cfg := Config{ - FilePath: &configFile, + FilePath: configFile, DisableAgent: disableAgent, DisableAPI: disableAPI, } diff --git a/pkg/csconfig/cscli.go b/pkg/csconfig/cscli.go index 9393156c0ed..ad119dc9e13 100644 --- a/pkg/csconfig/cscli.go +++ b/pkg/csconfig/cscli.go @@ -10,6 +10,7 @@ type CscliCfg struct { Color string `yaml:"color,omitempty"` HubBranch string `yaml:"hub_branch"` HubURLTemplate string `yaml:"__hub_url_template__,omitempty"` + HubWithContent bool `yaml:"hub_with_content,omitempty"` SimulationConfig *SimulationConfig `yaml:"-"` DbConfig *DatabaseCfg `yaml:"-"` diff --git a/pkg/csconfig/fflag.go b/pkg/csconfig/fflag.go index c86686889eb..ec1282c5a04 100644 --- a/pkg/csconfig/fflag.go +++ b/pkg/csconfig/fflag.go @@ -38,7 +38,7 @@ func LoadFeatureFlagsFile(configPath string, logger *log.Logger) error { func ListFeatureFlags() string { enabledFeatures := fflag.Crowdsec.GetEnabledFeatures() - msg := "" + msg := "none" if len(enabledFeatures) > 0 { msg = strings.Join(enabledFeatures, ", ") } diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index e996fa9b68c..f53c831e186 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -91,7 +91,6 @@ func (pb *PluginBroker) Init(ctx context.Context, pluginCfg *csconfig.PluginCfg, pb.watcher = PluginWatcher{} pb.watcher.Init(pb.pluginConfigByName, pb.alertsByPluginName) return nil - } func (pb *PluginBroker) Kill() { @@ -166,6 +165,7 @@ func (pb *PluginBroker) addProfileAlert(profileAlert ProfileAlert) { pb.watcher.Inserts <- pluginName } } + func (pb *PluginBroker) profilesContainPlugin(pluginName string) bool { for _, profileCfg := range pb.profileConfigs { for _, name 
:= range profileCfg.Notifications { @@ -176,6 +176,7 @@ func (pb *PluginBroker) profilesContainPlugin(pluginName string) bool { } return false } + func (pb *PluginBroker) loadConfig(path string) error { files, err := listFilesAtPath(path) if err != nil { @@ -277,7 +278,6 @@ func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { } func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (protobufs.NotifierServer, error) { - handshake, err := getHandshake() if err != nil { return nil, err diff --git a/pkg/csplugin/listfiles_test.go b/pkg/csplugin/listfiles_test.go index c476d7a4e4a..32269f3f5f1 100644 --- a/pkg/csplugin/listfiles_test.go +++ b/pkg/csplugin/listfiles_test.go @@ -12,19 +12,22 @@ import ( ) func TestListFilesAtPath(t *testing.T) { - dir, err := os.MkdirTemp("", "test-listfiles") - require.NoError(t, err) - t.Cleanup(func() { - os.RemoveAll(dir) - }) - _, err = os.Create(filepath.Join(dir, "notification-gitter")) + dir := t.TempDir() + + f, err := os.Create(filepath.Join(dir, "notification-gitter")) require.NoError(t, err) - _, err = os.Create(filepath.Join(dir, "slack")) + require.NoError(t, f.Close()) + + f, err = os.Create(filepath.Join(dir, "slack")) require.NoError(t, err) + require.NoError(t, f.Close()) + err = os.Mkdir(filepath.Join(dir, "somedir"), 0o755) require.NoError(t, err) - _, err = os.Create(filepath.Join(dir, "somedir", "inner")) + + f, err = os.Create(filepath.Join(dir, "somedir", "inner")) require.NoError(t, err) + require.NoError(t, f.Close()) tests := []struct { name string diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index 84e63ec6493..9868b8433c3 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -15,13 +15,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -func resetTestTomb(testTomb *tomb.Tomb, pw *PluginWatcher) { +func resetTestTomb(t *testing.T, testTomb *tomb.Tomb, pw *PluginWatcher) { testTomb.Kill(nil) 
<-pw.PluginEvents - if err := testTomb.Wait(); err != nil { - log.Fatal(err) - } + err := testTomb.Wait() + require.NoError(t, err) } func resetWatcherAlertCounter(pw *PluginWatcher) { @@ -72,7 +71,7 @@ func TestPluginWatcherInterval(t *testing.T) { err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) testTomb = tomb.Tomb{} pw.Start(&testTomb) @@ -81,7 +80,7 @@ func TestPluginWatcherInterval(t *testing.T) { err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) // This is to avoid the int complaining } @@ -130,5 +129,5 @@ func TestPluginAlertCountWatcher(t *testing.T) { err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) } diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index 52cda1ed2e1..c509fb448e3 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -96,17 +96,17 @@ func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { return profilesRuntime, nil } -func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*models.Decision, error) { +func (profile *Runtime) GenerateDecisionFromProfile(alert *models.Alert) ([]*models.Decision, error) { var decisions []*models.Decision - for _, refDecision := range Profile.Cfg.Decisions { + for _, refDecision := range profile.Cfg.Decisions { decision := models.Decision{} /*the reference decision from profile is in simulated mode */ if refDecision.Simulated != nil && *refDecision.Simulated { decision.Simulated = new(bool) *decision.Simulated = true /*the event is already in simulation mode */ - } else if Alert.Simulated != nil && *Alert.Simulated { + } else if alert.Simulated != nil && *alert.Simulated { decision.Simulated = new(bool) 
*decision.Simulated = true } @@ -116,7 +116,7 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod if refDecision.Scope != nil && *refDecision.Scope != "" { *decision.Scope = *refDecision.Scope } else { - *decision.Scope = *Alert.Source.Scope + *decision.Scope = *alert.Source.Scope } /*some fields are populated from the reference object : duration, scope, type*/ @@ -125,19 +125,19 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod *decision.Duration = *refDecision.Duration } - if Profile.Cfg.DurationExpr != "" && Profile.RuntimeDurationExpr != nil { + if profile.Cfg.DurationExpr != "" && profile.RuntimeDurationExpr != nil { profileDebug := false - if Profile.Cfg.Debug != nil && *Profile.Cfg.Debug { + if profile.Cfg.Debug != nil && *profile.Cfg.Debug { profileDebug = true } - duration, err := exprhelpers.Run(Profile.RuntimeDurationExpr, map[string]interface{}{"Alert": Alert}, Profile.Logger, profileDebug) + duration, err := exprhelpers.Run(profile.RuntimeDurationExpr, map[string]interface{}{"Alert": alert}, profile.Logger, profileDebug) if err != nil { - Profile.Logger.Warningf("Failed to run duration_expr : %v", err) + profile.Logger.Warningf("Failed to run duration_expr : %v", err) } else { durationStr := fmt.Sprint(duration) if _, err := time.ParseDuration(durationStr); err != nil { - Profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration) + profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration) } else { *decision.Duration = durationStr } @@ -149,7 +149,7 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod /*for the others, let's populate it from the alert and its source*/ decision.Value = new(string) - *decision.Value = *Alert.Source.Value + *decision.Value = *alert.Source.Value decision.Origin = new(string) *decision.Origin = types.CrowdSecOrigin @@ -158,7 +158,7 @@ func (Profile *Runtime) 
GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod } decision.Scenario = new(string) - *decision.Scenario = *Alert.Scenario + *decision.Scenario = *alert.Scenario decisions = append(decisions, &decision) } @@ -166,21 +166,21 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod } // EvaluateProfile is going to evaluate an Alert against a profile to generate Decisions -func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision, bool, error) { +func (profile *Runtime) EvaluateProfile(alert *models.Alert) ([]*models.Decision, bool, error) { var decisions []*models.Decision matched := false - for eIdx, expression := range Profile.RuntimeFilters { + for eIdx, expression := range profile.RuntimeFilters { debugProfile := false - if Profile.Cfg.Debug != nil && *Profile.Cfg.Debug { + if profile.Cfg.Debug != nil && *profile.Cfg.Debug { debugProfile = true } - output, err := exprhelpers.Run(expression, map[string]interface{}{"Alert": Alert}, Profile.Logger, debugProfile) + output, err := exprhelpers.Run(expression, map[string]interface{}{"Alert": alert}, profile.Logger, debugProfile) if err != nil { - Profile.Logger.Warningf("failed to run profile expr for %s: %v", Profile.Cfg.Name, err) - return nil, matched, fmt.Errorf("while running expression %s: %w", Profile.Cfg.Filters[eIdx], err) + profile.Logger.Warningf("failed to run profile expr for %s: %v", profile.Cfg.Name, err) + return nil, matched, fmt.Errorf("while running expression %s: %w", profile.Cfg.Filters[eIdx], err) } switch out := output.(type) { @@ -188,22 +188,22 @@ func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision if out { matched = true /*the expression matched, create the associated decision*/ - subdecisions, err := Profile.GenerateDecisionFromProfile(Alert) + subdecisions, err := profile.GenerateDecisionFromProfile(alert) if err != nil { - return nil, matched, fmt.Errorf("while generating decision from profile %s: %w", 
Profile.Cfg.Name, err) + return nil, matched, fmt.Errorf("while generating decision from profile %s: %w", profile.Cfg.Name, err) } decisions = append(decisions, subdecisions...) } else { - Profile.Logger.Debugf("Profile %s filter is unsuccessful", Profile.Cfg.Name) + profile.Logger.Debugf("Profile %s filter is unsuccessful", profile.Cfg.Name) - if Profile.Cfg.OnFailure == "break" { + if profile.Cfg.OnFailure == "break" { break } } default: - return nil, matched, fmt.Errorf("unexpected type %t (%v) while running '%s'", output, output, Profile.Cfg.Filters[eIdx]) + return nil, matched, fmt.Errorf("unexpected type %t (%v) while running '%s'", output, output, profile.Cfg.Filters[eIdx]) } } diff --git a/pkg/csprofiles/csprofiles_test.go b/pkg/csprofiles/csprofiles_test.go index 0247243ddd3..dc3239fe5c1 100644 --- a/pkg/csprofiles/csprofiles_test.go +++ b/pkg/csprofiles/csprofiles_test.go @@ -119,7 +119,8 @@ func TestEvaluateProfile(t *testing.T) { Alert *models.Alert } - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { name string @@ -132,7 +133,7 @@ func TestEvaluateProfile(t *testing.T) { name: "simple pass single expr", args: args{ profileCfg: &csconfig.ProfileCfg{ - Filters: []string{fmt.Sprintf("Alert.GetScenario() == \"%s\"", scenario)}, + Filters: []string{fmt.Sprintf("Alert.GetScenario() == %q", scenario)}, Debug: &boolFalse, }, Alert: &models.Alert{Remediation: true, Scenario: &scenario}, @@ -199,17 +200,22 @@ func TestEvaluateProfile(t *testing.T) { profilesCfg := []*csconfig.ProfileCfg{ tt.args.profileCfg, } + profile, err := NewProfile(profilesCfg) if err != nil { t.Errorf("failed to get newProfile : %+v", err) } + got, got1, _ := profile[0].EvaluateProfile(tt.args.Alert) + if !reflect.DeepEqual(len(got), tt.expectedDecisionCount) { t.Errorf("EvaluateProfile() got = %+v, want %+v", got, tt.expectedDecisionCount) } + if got1 != tt.expectedMatchStatus { t.Errorf("EvaluateProfile() got1 = %v, want %v", got1, 
tt.expectedMatchStatus) } + if tt.expectedDuration != "" { require.Equal(t, tt.expectedDuration, *got[0].Duration, "The two durations should be the same") } diff --git a/pkg/cticlient/example/fire.go b/pkg/cticlient/example/fire.go index e52922571ef..598175ce02c 100644 --- a/pkg/cticlient/example/fire.go +++ b/pkg/cticlient/example/fire.go @@ -57,6 +57,12 @@ func main() { }) } } - csvWriter.Write(csvHeader) - csvWriter.WriteAll(allItems) + + if err = csvWriter.Write(csvHeader); err != nil { + panic(err) + } + + if err = csvWriter.WriteAll(allItems); err != nil { + panic(err) + } } diff --git a/pkg/cticlient/types.go b/pkg/cticlient/types.go index 2ad0a6eb34e..5ea29d6c5b0 100644 --- a/pkg/cticlient/types.go +++ b/pkg/cticlient/types.go @@ -64,6 +64,9 @@ type CTIReferences struct { type SmokeItem struct { IpRangeScore int `json:"ip_range_score"` Ip string `json:"ip"` + Reputation string `json:"reputation"` + BackgroundNoise string `json:"background_noise"` + Confidence string `json:"confidence"` IpRange *string `json:"ip_range"` AsName *string `json:"as_name"` AsNum *int `json:"as_num"` @@ -77,6 +80,7 @@ type SmokeItem struct { BackgroundNoiseScore *int `json:"background_noise_score"` Scores CTIScores `json:"scores"` References []CTIReferences `json:"references"` + CVEs []string `json:"cves"` IsOk bool `json:"-"` } @@ -120,6 +124,10 @@ type FireItem struct { BackgroundNoiseScore *int `json:"background_noise_score"` Scores CTIScores `json:"scores"` References []CTIReferences `json:"references"` + CVEs []string `json:"cves"` + Reputation string `json:"reputation"` + BackgroundNoise string `json:"background_noise"` + Confidence string `json:"confidence"` State string `json:"state"` Expiration CustomTime `json:"expiration"` } @@ -209,8 +217,19 @@ func (c *SmokeItem) GetFalsePositives() []string { return ret } -func (c *SmokeItem) IsFalsePositive() bool { +func (c *SmokeItem) GetClassifications() []string { + ret := make([]string, 0) + if c.Classifications.Classifications 
!= nil { + for _, b := range c.Classifications.Classifications { + ret = append(ret, b.Name) + } + } + + return ret +} + +func (c *SmokeItem) IsFalsePositive() bool { if c.Classifications.FalsePositives != nil { if len(c.Classifications.FalsePositives) > 0 { return true @@ -283,8 +302,19 @@ func (c *FireItem) GetFalsePositives() []string { return ret } -func (c *FireItem) IsFalsePositive() bool { +func (c *FireItem) GetClassifications() []string { + ret := make([]string, 0) + if c.Classifications.Classifications != nil { + for _, b := range c.Classifications.Classifications { + ret = append(ret, b.Name) + } + } + + return ret +} + +func (c *FireItem) IsFalsePositive() bool { if c.Classifications.FalsePositives != nil { if len(c.Classifications.FalsePositives) > 0 { return true diff --git a/pkg/cticlient/types_test.go b/pkg/cticlient/types_test.go index a7308af35e0..9c7840de324 100644 --- a/pkg/cticlient/types_test.go +++ b/pkg/cticlient/types_test.go @@ -40,8 +40,14 @@ func getSampleSmokeItem() SmokeItem { DaysAge: 1, }, Classifications: CTIClassifications{ - FalsePositives: []CTIClassification{}, - Classifications: []CTIClassification{}, + FalsePositives: []CTIClassification{}, + Classifications: []CTIClassification{ + { + Name: "profile:likely_botnet", + Label: "Likely Botnet", + Description: "IP appears to be a botnet.", + }, + }, }, AttackDetails: []*CTIAttackDetails{ { @@ -101,6 +107,7 @@ func TestBasicSmokeItem(t *testing.T) { assert.Equal(t, 3, item.GetBackgroundNoiseScore()) assert.Equal(t, []string{}, item.GetFalsePositives()) assert.False(t, item.IsFalsePositive()) + assert.Equal(t, []string{"profile:likely_botnet"}, item.GetClassifications()) } func TestEmptySmokeItem(t *testing.T) { @@ -112,4 +119,5 @@ func TestEmptySmokeItem(t *testing.T) { assert.Equal(t, 0, item.GetBackgroundNoiseScore()) assert.Equal(t, []string{}, item.GetFalsePositives()) assert.False(t, item.IsFalsePositive()) + assert.Equal(t, []string{}, item.GetClassifications()) } diff --git 
a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 683f1853b43..b41d1d16312 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -20,14 +20,14 @@ func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { return t.RoundTripper.RoundTrip(req) } -// hubClient is the HTTP client used to communicate with the CrowdSec Hub. -var hubClient = &http.Client{ +// HubClient is the HTTP client used to communicate with the CrowdSec Hub. +var HubClient = &http.Client{ Timeout: 120 * time.Second, Transport: &hubTransport{http.DefaultTransport}, } -// safePath returns a joined path and ensures that it does not escape the base directory. -func safePath(dir, filePath string) (string, error) { +// SafePath returns a joined path and ensures that it does not escape the base directory. +func SafePath(dir, filePath string) (string, error) { absBaseDir, err := filepath.Abs(filepath.Clean(dir)) if err != nil { return "", err diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 17e7a0dc723..befd279ff65 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -29,10 +29,9 @@ const mockURLTemplate = "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s" var responseByPath map[string]string -// testHub initializes a temporary hub with an empty json file, optionally updating it. -func testHub(t *testing.T, update bool) *Hub { - tmpDir, err := os.MkdirTemp("", "testhub") - require.NoError(t, err) +// testHubOld initializes a temporary hub with an empty json file, optionally updating it. 
+func testHubOld(t *testing.T, update bool) *Hub { + tmpDir := t.TempDir() local := &csconfig.LocalHubCfg{ HubDir: filepath.Join(tmpDir, "crowdsec", "hub"), @@ -41,7 +40,7 @@ func testHub(t *testing.T, update bool) *Hub { InstallDataDir: filepath.Join(tmpDir, "installed-data"), } - err = os.MkdirAll(local.HubDir, 0o700) + err := os.MkdirAll(local.HubDir, 0o700) require.NoError(t, err) err = os.MkdirAll(local.InstallDir, 0o700) @@ -53,22 +52,17 @@ func testHub(t *testing.T, update bool) *Hub { err = os.WriteFile(local.HubIndexFile, []byte("{}"), 0o644) require.NoError(t, err) - t.Cleanup(func() { - os.RemoveAll(tmpDir) - }) - - remote := &RemoteHubCfg{ - Branch: "master", - URLTemplate: mockURLTemplate, - IndexPath: ".index.json", - } - - hub, err := NewHub(local, remote, log.StandardLogger()) + hub, err := NewHub(local, log.StandardLogger()) require.NoError(t, err) if update { + indexProvider := &Downloader{ + Branch: "master", + URLTemplate: mockURLTemplate, + } + ctx := context.Background() - err := hub.Update(ctx) + err = hub.Update(ctx, indexProvider, false) require.NoError(t, err) } @@ -83,16 +77,16 @@ func envSetup(t *testing.T) *Hub { setResponseByPath() log.SetLevel(log.DebugLevel) - defaultTransport := hubClient.Transport + defaultTransport := HubClient.Transport t.Cleanup(func() { - hubClient.Transport = defaultTransport + HubClient.Transport = defaultTransport }) // Mock the http client - hubClient.Transport = newMockTransport() + HubClient.Transport = newMockTransport() - hub := testHub(t, true) + hub := testHubOld(t, true) return hub } diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go deleted file mode 100644 index 90bc9e057f9..00000000000 --- a/pkg/cwhub/dataset.go +++ /dev/null @@ -1,72 +0,0 @@ -package cwhub - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/go-cs-lib/downloader" - - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -// The DataSet is 
a list of data sources required by an item (built from the data: section in the yaml). -type DataSet struct { - Data []types.DataSource `yaml:"data,omitempty"` -} - -// downloadDataSet downloads all the data files for an item. -func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { - dec := yaml.NewDecoder(reader) - - for { - data := &DataSet{} - - if err := dec.Decode(data); err != nil { - if errors.Is(err, io.EOF) { - break - } - - return fmt.Errorf("while reading file: %w", err) - } - - for _, dataS := range data.Data { - destPath, err := safePath(dataFolder, dataS.DestPath) - if err != nil { - return err - } - - d := downloader. - New(). - WithHTTPClient(hubClient). - ToFile(destPath). - CompareContent(). - WithLogger(logrus.WithField("url", dataS.SourceURL)) - - if !force { - d = d.WithLastModified(). - WithShelfLife(7 * 24 * time.Hour) - } - - downloaded, err := d.Download(ctx, dataS.SourceURL) - if err != nil { - return fmt.Errorf("while getting data: %w", err) - } - - if downloaded { - logger.Infof("Downloaded %s", destPath) - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", destPath) - } - } - } - - return nil -} diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index f86b95c6454..fb7209b77ae 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -1,4 +1,5 @@ -// Package cwhub is responsible for installing and upgrading the local hub files for CrowdSec. +// Package cwhub is responsible for providing the state of the local hub to the security engine and cscli command. +// Installation, upgrade and removal of items or data files has been moved to pkg/hubops. 
// // # Definitions // @@ -84,31 +85,11 @@ // return fmt.Errorf("collection not found") // } // -// You can also install items if they have already been downloaded: +// Some commands require an object to provide the hub index, or contents: // -// // install a parser -// force := false -// downloadOnly := false -// err := parser.Install(force, downloadOnly) -// if err != nil { -// return fmt.Errorf("unable to install parser: %w", err) -// } -// -// As soon as you try to install an item that is not downloaded or is not up-to-date (meaning its computed hash -// does not correspond to the latest version available in the index), a download will be attempted and you'll -// get the error "remote hub configuration is not provided". -// -// To provide the remote hub configuration, use the second parameter of NewHub(): -// -// remoteHub := cwhub.RemoteHubCfg{ +// indexProvider := cwhub.Downloader{ // URLTemplate: "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s", // Branch: "master", -// IndexPath: ".index.json", -// } -// -// hub, err := cwhub.NewHub(localHub, remoteHub, logger) -// if err != nil { -// return fmt.Errorf("unable to initialize hub: %w", err) // } // // The URLTemplate is a string that will be used to build the URL of the remote hub. 
It must contain two @@ -116,7 +97,7 @@ // // Before calling hub.Load(), you can update the index file by calling the Update() method: // -// err := hub.Update(context.Background()) +// err := hub.Update(context.Background(), indexProvider) // if err != nil { // return fmt.Errorf("unable to update hub index: %w", err) // } diff --git a/pkg/cwhub/download.go b/pkg/cwhub/download.go new file mode 100644 index 00000000000..fa92e9960de --- /dev/null +++ b/pkg/cwhub/download.go @@ -0,0 +1,126 @@ +package cwhub + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/downloader" +) + +// no need to import the lib package to use this +type NotFoundError = downloader.NotFoundError + +// Downloader is used to retrieve index and items from a remote hub, with cache control. +type Downloader struct { + Branch string + URLTemplate string +} + +// IndexProvider retrieves and writes .index.json +type IndexProvider interface { + FetchIndex(ctx context.Context, indexFile string, withContent bool, logger *logrus.Logger) (bool, error) +} + +// ContentProvider retrieves and writes the YAML files with the item content. +type ContentProvider interface { + FetchContent(ctx context.Context, remotePath, destPath, wantHash string, logger *logrus.Logger) (bool, string, error) +} + +// urlTo builds the URL to download a file from the remote hub. +func (d *Downloader) urlTo(remotePath string) (string, error) { + // the template must contain two string placeholders + if fmt.Sprintf(d.URLTemplate, "%s", "%s") != d.URLTemplate { + return "", fmt.Errorf("invalid URL template '%s'", d.URLTemplate) + } + + return fmt.Sprintf(d.URLTemplate, d.Branch, remotePath), nil +} + +// addURLParam adds a parameter with a value (ex. "with_content=true") to the URL if it's not already present. 
+func addURLParam(rawURL string, param string, value string) (string, error) { + parsedURL, err := url.Parse(rawURL) + if err != nil { + return "", fmt.Errorf("failed to parse URL: %w", err) + } + + query := parsedURL.Query() + + if _, exists := query[param]; !exists { + query.Add(param, value) + } + + parsedURL.RawQuery = query.Encode() + + return parsedURL.String(), nil +} + +// FetchIndex downloads the index from the hub and writes it to the filesystem. +// It uses a temporary file to avoid partial downloads, and won't overwrite the original +// if it has not changed. +func (d *Downloader) FetchIndex(ctx context.Context, destPath string, withContent bool, logger *logrus.Logger) (bool, error) { + url, err := d.urlTo(".index.json") + if err != nil { + return false, fmt.Errorf("failed to build hub index request: %w", err) + } + + if withContent { + url, err = addURLParam(url, "with_content", "true") + if err != nil { + return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) + } + } + + downloaded, err := downloader. + New(). + WithHTTPClient(HubClient). + ToFile(destPath). + WithETagFn(downloader.SHA256). + CompareContent(). + WithLogger(logger.WithField("url", url)). + BeforeRequest(func(_ *http.Request) { + fmt.Println("Downloading " + destPath) + }). + Download(ctx, url) + if err != nil { + return false, err + } + + return downloaded, nil +} + +// FetchContent downloads the content to the specified path, through a temporary file +// to avoid partial downloads. +// If the hash does not match, it will not overwrite and log a warning. +func (d *Downloader) FetchContent(ctx context.Context, remotePath, destPath, wantHash string, logger *logrus.Logger) (bool, string, error) { + url, err := d.urlTo(remotePath) + if err != nil { + return false, "", fmt.Errorf("failed to build request: %w", err) + } + + downloaded, err := downloader. + New(). + WithHTTPClient(HubClient). + ToFile(destPath). + WithETagFn(downloader.SHA256). 
+ WithMakeDirs(true). + WithLogger(logger.WithField("url", url)). + CompareContent(). + VerifyHash("sha256", wantHash). + Download(ctx, url) + + var hasherr downloader.HashMismatchError + + switch { + case errors.As(err, &hasherr): + logger.Warnf("%s. The index file is outdated, please run 'cscli hub update' and try again", err.Error()) + case err != nil: + return false, "", err + } + + return downloaded, url, nil +} diff --git a/pkg/cwhub/download_test.go b/pkg/cwhub/download_test.go new file mode 100644 index 00000000000..7b0b99c28d8 --- /dev/null +++ b/pkg/cwhub/download_test.go @@ -0,0 +1,182 @@ +package cwhub + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus" + logtest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/cstest" +) + +func TestFetchIndex(t *testing.T) { + ctx := context.Background() + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + + if r.URL.Query().Get("with_content") == "true" { + _, err := w.Write([]byte(`Hi I'm an index with content`)) + assert.NoError(t, err) + } else { + _, err := w.Write([]byte(`Hi I'm a minified index`)) + assert.NoError(t, err) + } + })) + defer mockServer.Close() + + discard := logrus.New() + discard.Out = io.Discard + + downloader := &Downloader{ + URLTemplate: mockServer.URL + "/%s/%s", + } + + destPath := filepath.Join(t.TempDir(), "index-here") + withContent := true + + var notFoundError NotFoundError + + // bad branch + + downloader.Branch = "dev" + + downloaded, err := downloader.FetchIndex(ctx, destPath, withContent, discard) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // ok + + downloader.Branch = "main" + + downloaded, err = downloader.FetchIndex(ctx, 
destPath, withContent, discard) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err := os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, "Hi I'm an index with content", string(content)) + + // not "downloading" a second time + // since we don't have cache control in the mockServer, + // the file is downloaded to a temporary location but not replaced + + downloaded, err = downloader.FetchIndex(ctx, destPath, withContent, discard) + require.NoError(t, err) + assert.False(t, downloaded) + + // download without item content + + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err = os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, "Hi I'm a minified index", string(content)) + + // bad domain name + + downloader.URLTemplate = "x/%s/%s" + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + cstest.AssertErrorContains(t, err, `Get "x/main/.index.json": unsupported protocol scheme ""`) + assert.False(t, downloaded) + + downloader.URLTemplate = "http://x/%s/%s" + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + // can be no such host, server misbehaving, etc + cstest.AssertErrorContains(t, err, `Get "http://x/main/.index.json": dial tcp: lookup x`) + assert.False(t, downloaded) +} + +func TestFetchContent(t *testing.T) { + ctx := context.Background() + + wantContent := "{'description':'linux'}" + wantHash := "e557cb9e1cb051bc3b6a695e4396c5f8e0eff4b7b0d2cc09f7684e1d52ea2224" + remotePath := "collections/crowdsecurity/linux.yaml" + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/"+remotePath { + w.WriteHeader(http.StatusNotFound) + } + + _, err := w.Write([]byte(wantContent)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + wantURL := mockServer.URL + 
"/main/collections/crowdsecurity/linux.yaml" + + // bad branch + + hubDownloader := &Downloader{ + URLTemplate: mockServer.URL + "/%s/%s", + } + + discard := logrus.New() + discard.Out = io.Discard + + destPath := filepath.Join(t.TempDir(), "content-here") + + var notFoundError NotFoundError + + // bad branch + + hubDownloader.Branch = "dev" + + downloaded, url, err := hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Empty(t, url) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // bad path + + hubDownloader.Branch = "main" + + downloaded, url, err = hubDownloader.FetchContent(ctx, "collections/linux.yaml", destPath, wantHash, discard) + assert.Empty(t, url) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // hash mismatch: the file is not reported as downloaded because it's not replaced + + capture, hook := logtest.NewNullLogger() + capture.SetLevel(logrus.WarnLevel) + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, "1234", capture) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.False(t, downloaded) + cstest.RequireLogContains(t, hook, "hash mismatch: expected 1234, got "+wantHash) + + // ok + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err := os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, wantContent, string(content)) + + // not "downloading" a second time + // since we don't have cache control in the mockServer, + // the file is downloaded to a temporary location but not replaced + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.False(t, downloaded) +} diff --git a/pkg/cwhub/errors.go b/pkg/cwhub/errors.go deleted file mode 100644 index 
b0be444fcba..00000000000 --- a/pkg/cwhub/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package cwhub - -import ( - "errors" - "fmt" -) - -// ErrNilRemoteHub is returned when trying to download with a local-only configuration. -var ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") - -// IndexNotFoundError is returned when the remote hub index is not found. -type IndexNotFoundError struct { - URL string - Branch string -} - -func (e IndexNotFoundError) Error() string { - return fmt.Sprintf("index not found at %s, branch '%s'. Please check the .cscli.hub_branch value if you specified it in config.yaml, or use 'master' if not sure", e.URL, e.Branch) -} diff --git a/pkg/cwhub/fetch.go b/pkg/cwhub/fetch.go new file mode 100644 index 00000000000..e8dacad4a6d --- /dev/null +++ b/pkg/cwhub/fetch.go @@ -0,0 +1,70 @@ +package cwhub + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/hex" + "fmt" + "os" + "path/filepath" +) + +// writeEmbeddedContentTo writes the embedded content to the specified path and checks the hash. +// If the content is base64 encoded, it will be decoded before writing. Call this method only +// if item.Content if not empty. +func (i *Item) writeEmbeddedContentTo(destPath, wantHash string) error { + if i.Content == "" { + return fmt.Errorf("no embedded content for %s", i.Name) + } + + content, err := base64.StdEncoding.DecodeString(i.Content) + if err != nil { + content = []byte(i.Content) + } + + dir := filepath.Dir(destPath) + + if err := os.MkdirAll(dir, 0o755); err != nil { + return fmt.Errorf("while creating %s: %w", dir, err) + } + + // check sha256 + hash := crypto.SHA256.New() + if _, err := hash.Write(content); err != nil { + return fmt.Errorf("while hashing %s: %w", i.Name, err) + } + + gotHash := hex.EncodeToString(hash.Sum(nil)) + if gotHash != wantHash { + return fmt.Errorf("hash mismatch: expected %s, got %s. 
The index file is invalid, please run 'cscli hub update' and try again", wantHash, gotHash) + } + + if err := os.WriteFile(destPath, content, 0o600); err != nil { + return fmt.Errorf("while writing %s: %w", destPath, err) + } + + return nil +} + +// FetchContentTo writes the last version of the item's YAML file to the specified path. +// If the file is embedded in the index file, it will be written directly without downloads. +// Returns whether the file was downloaded (to inform if the security engine needs reloading) +// and the remote url for feedback purposes. +func (i *Item) FetchContentTo(ctx context.Context, contentProvider ContentProvider, destPath string) (bool, string, error) { + wantHash := i.latestHash() + if wantHash == "" { + return false, "", fmt.Errorf("%s: latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again", i.FQName()) + } + + // Use the embedded content if available + if i.Content != "" { + if err := i.writeEmbeddedContentTo(destPath, wantHash); err != nil { + return false, "", err + } + + return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil + } + + return contentProvider.FetchContent(ctx, i.RemotePath, destPath, wantHash, i.hub.logger) +} diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index f74a794a512..aeccb3268f7 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -22,7 +22,6 @@ type Hub struct { items HubItems // Items read from HubDir and InstallDir pathIndex map[string]*Item local *csconfig.LocalHubCfg - remote *RemoteHubCfg logger *logrus.Logger Warnings []string // Warnings encountered during sync } @@ -35,10 +34,9 @@ func (h *Hub) GetDataDir() string { // NewHub returns a new Hub instance with local and (optionally) remote configuration. // The hub is not synced automatically. Load() must be called to read the index, sync the local state, // and check for unmanaged items. 
-// All download operations (including updateIndex) return ErrNilRemoteHub if the remote configuration is not set. -func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, logger *logrus.Logger) (*Hub, error) { +func NewHub(local *csconfig.LocalHubCfg, logger *logrus.Logger) (*Hub, error) { if local == nil { - return nil, errors.New("no hub configuration found") + return nil, errors.New("no hub configuration provided") } if logger == nil { @@ -48,7 +46,6 @@ func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, logger *logrus.Lo hub := &Hub{ local: local, - remote: remote, logger: logger, pathIndex: make(map[string]*Item, 0), } @@ -61,14 +58,10 @@ func (h *Hub) Load() error { h.logger.Debugf("loading hub idx %s", h.local.HubIndexFile) if err := h.parseIndex(); err != nil { - return fmt.Errorf("failed to load hub index: %w", err) + return fmt.Errorf("invalid hub index: %w. Run 'sudo cscli hub update' to download the index again", err) } - if err := h.localSync(); err != nil { - return fmt.Errorf("failed to sync hub items: %w", err) - } - - return nil + return h.localSync() } // parseIndex takes the content of an index file and fills the map of associated parsers/scenarios/collections. 
@@ -82,21 +75,25 @@ func (h *Hub) parseIndex() error { return fmt.Errorf("failed to parse index: %w", err) } - h.logger.Debugf("%d item types in hub index", len(ItemTypes)) - // Iterate over the different types to complete the struct for _, itemType := range ItemTypes { - h.logger.Tracef("%s: %d items", itemType, len(h.GetItemMap(itemType))) - for name, item := range h.GetItemMap(itemType) { - item.hub = h - item.Name = name + if item == nil { + // likely defined as empty object or null in the index file + return fmt.Errorf("%s:%s has no index metadata", itemType, name) + } - // if the item has no (redundant) author, take it from the json key - if item.Author == "" && strings.Contains(name, "/") { - item.Author = strings.Split(name, "/")[0] + if item.RemotePath == "" { + return fmt.Errorf("%s:%s has no download path", itemType, name) } + if (itemType == PARSERS || itemType == POSTOVERFLOWS) && item.Stage == "" { + return fmt.Errorf("%s:%s has no stage", itemType, name) + } + + item.hub = h + item.Name = name + item.Type = itemType item.FileName = path.Base(item.RemotePath) @@ -152,28 +149,29 @@ func (h *Hub) ItemStats() []string { return ret } -// Update downloads the latest version of the index and writes it to disk if it changed. It cannot be called after Load() -// unless the hub is completely empty. -func (h *Hub) Update(ctx context.Context) error { - if len(h.pathIndex) > 0 { +var ErrUpdateAfterSync = errors.New("cannot update hub index after load/sync") + +// Update downloads the latest version of the index and writes it to disk if it changed. +// It cannot be called after Load() unless the index was completely empty. +func (h *Hub) Update(ctx context.Context, indexProvider IndexProvider, withContent bool) error { + if len(h.items) > 0 { // if this happens, it's a bug. 
- return errors.New("cannot update hub after items have been loaded") + return ErrUpdateAfterSync } - downloaded, err := h.remote.fetchIndex(ctx, h.local.HubIndexFile) + downloaded, err := indexProvider.FetchIndex(ctx, h.local.HubIndexFile, withContent, h.logger) if err != nil { return err } - if downloaded { - h.logger.Infof("Wrote index to %s", h.local.HubIndexFile) - } else { - h.logger.Info("hub index is up to date") + if !downloaded { + fmt.Println("Nothing to do, the hub index is up to date.") } return nil } +// addItem adds an item to the hub. It silently replaces an existing item with the same type and name. func (h *Hub) addItem(item *Item) { if h.items[item.Type] == nil { h.items[item.Type] = make(map[string]*Item) @@ -236,6 +234,7 @@ func (h *Hub) GetItemsByType(itemType string, sorted bool) []*Item { } idx := 0 + for _, item := range items { ret[idx] = item idx += 1 @@ -267,6 +266,7 @@ func (h *Hub) GetInstalledListForAPI() []string { ret := make([]string, len(scenarios)+len(appsecRules)) idx := 0 + for _, item := range scenarios { ret[idx] = item.Name idx += 1 diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 1c2c9ccceca..461b59de78b 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -2,90 +2,261 @@ package cwhub import ( "context" - "fmt" + "net/http" + "net/http/httptest" "os" + "path/filepath" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -func TestInitHubUpdate(t *testing.T) { - hub := envSetup(t) - remote := &RemoteHubCfg{ - URLTemplate: mockURLTemplate, - Branch: "master", - IndexPath: ".index.json", +// testHubCfg creates an empty hub structure in a temporary directory +// and returns its configuration object. +// +// This allow the reuse of the hub content for multiple instances +// of the Hub object. 
+func testHubCfg(t *testing.T) *csconfig.LocalHubCfg { + tempDir := t.TempDir() + + local := csconfig.LocalHubCfg{ + HubDir: filepath.Join(tempDir, "crowdsec", "hub"), + HubIndexFile: filepath.Join(tempDir, "crowdsec", "hub", ".index.json"), + InstallDir: filepath.Join(tempDir, "crowdsec"), + InstallDataDir: filepath.Join(tempDir, "installed-data"), } - _, err := NewHub(hub.local, remote, nil) + err := os.MkdirAll(local.HubDir, 0o755) require.NoError(t, err) - ctx := context.Background() + err = os.MkdirAll(local.InstallDir, 0o755) + require.NoError(t, err) - err = hub.Update(ctx) + err = os.MkdirAll(local.InstallDataDir, 0o755) require.NoError(t, err) + return &local +} + +func testHub(t *testing.T, localCfg *csconfig.LocalHubCfg, indexJson string) (*Hub, error) { + if localCfg == nil { + localCfg = testHubCfg(t) + } + + err := os.WriteFile(localCfg.HubIndexFile, []byte(indexJson), 0o644) + require.NoError(t, err) + + hub, err := NewHub(localCfg, nil) + require.NoError(t, err) err = hub.Load() + + return hub, err +} + +func TestIndexEmpty(t *testing.T) { + // an empty hub is valid, and should not have warnings + hub, err := testHub(t, nil, "{}") require.NoError(t, err) + assert.Empty(t, hub.Warnings) } -func TestUpdateIndex(t *testing.T) { - // bad url template - fmt.Println("Test 'bad URL'") +func TestIndexJSON(t *testing.T) { + // but it can't be an empty string + hub, err := testHub(t, nil, "") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: unexpected end of JSON input") + assert.Empty(t, hub.Warnings) + + // it must be valid json + hub, err = testHub(t, nil, "def not json") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: invalid character 'd' looking for beginning of value. 
Run 'sudo cscli hub update' to download the index again") + assert.Empty(t, hub.Warnings) + + hub, err = testHub(t, nil, "{") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: unexpected end of JSON input") + assert.Empty(t, hub.Warnings) - tmpIndex, err := os.CreateTemp("", "index.json") + // and by json we mean an object + hub, err = testHub(t, nil, "[]") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: json: cannot unmarshal array into Go value of type cwhub.HubItems") + assert.Empty(t, hub.Warnings) +} + +func TestIndexUnknownItemType(t *testing.T) { + // Allow unknown fields in the top level object, likely new item types + hub, err := testHub(t, nil, `{"goodies": {}}`) require.NoError(t, err) + assert.Empty(t, hub.Warnings) +} - // close the file to avoid preventing the rename on windows - err = tmpIndex.Close() +func TestHubUpdate(t *testing.T) { + // update an empty hub with a index containing a parser. + hub, err := testHub(t, nil, "{}") require.NoError(t, err) - t.Cleanup(func() { - os.Remove(tmpIndex.Name()) - }) + index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` - hub := envSetup(t) + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } - hub.remote = &RemoteHubCfg{ - URLTemplate: "x", - Branch: "", - IndexPath: "", + _, err = w.Write([]byte(index1)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + ctx := context.Background() + + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", } - hub.local.HubIndexFile = tmpIndex.Name() + err = hub.Update(ctx, downloader, true) + require.NoError(t, err) - 
ctx := context.Background() + err = hub.Load() + require.NoError(t, err) + + item := hub.GetItem("parsers", "author/pars1") + assert.NotEmpty(t, item) + assert.Equal(t, "author/pars1", item.Name) +} - err = hub.Update(ctx) - cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") +func TestHubUpdateInvalidTemplate(t *testing.T) { + hub, err := testHub(t, nil, "{}") + require.NoError(t, err) - // bad domain - fmt.Println("Test 'bad domain'") + ctx := context.Background() - hub.remote = &RemoteHubCfg{ - URLTemplate: "https://baddomain/crowdsecurity/%s/%s", - Branch: "master", - IndexPath: ".index.json", + downloader := &Downloader{ + Branch: "main", + URLTemplate: "x", } - err = hub.Update(ctx) + err = hub.Update(ctx, downloader, true) + cstest.RequireErrorMessage(t, err, "failed to build hub index request: invalid URL template 'x'") +} + +func TestHubUpdateCannotWrite(t *testing.T) { + hub, err := testHub(t, nil, "{}") require.NoError(t, err) - // XXX: this is not failing - // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") - // bad target path - fmt.Println("Test 'bad target path'") + index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + + _, err = w.Write([]byte(index1)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + ctx := context.Background() - hub.remote = &RemoteHubCfg{ - URLTemplate: mockURLTemplate, - Branch: "master", - IndexPath: ".index.json", + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", } - hub.local.HubIndexFile = 
"/does/not/exist/index.json" + hub.local.HubIndexFile = "/proc/foo/bar/baz/.index.json" + + err = hub.Update(ctx, downloader, true) + cstest.RequireErrorContains(t, err, "failed to create temporary download file for /proc/foo/bar/baz/.index.json") +} + +func TestHubUpdateAfterLoad(t *testing.T) { + // Update() can't be called after Load() if the hub is not completely empty. + index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + hub, err := testHub(t, nil, index1) + require.NoError(t, err) + + index2 := ` +{ + "parsers": { + "author/pars2": { + "path": "parsers/s01-parse/pars2.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + + _, err = w.Write([]byte(index2)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + ctx := context.Background() + + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", + } - err = hub.Update(ctx) - cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") + err = hub.Update(ctx, downloader, true) + require.ErrorIs(t, err, ErrUpdateAfterSync) } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 32d1acf94ff..38385d9399d 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -2,13 +2,15 @@ package cwhub import ( "encoding/json" + "errors" "fmt" + "io/fs" + "os" "path/filepath" "slices" "github.com/Masterminds/semver/v3" - - "github.com/crowdsecurity/crowdsec/pkg/emoji" + yaml 
"gopkg.in/yaml.v3" ) const ( @@ -42,59 +44,49 @@ type ItemVersion struct { Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` } -// ItemState is used to keep the local state (i.e. at runtime) of an item. -// This data is not stored in the index, but is displayed with "cscli ... inspect". -type ItemState struct { - LocalPath string `json:"local_path,omitempty" yaml:"local_path,omitempty"` - LocalVersion string `json:"local_version,omitempty" yaml:"local_version,omitempty"` - LocalHash string `json:"local_hash,omitempty" yaml:"local_hash,omitempty"` - Installed bool `json:"installed"` - Downloaded bool `json:"downloaded"` - UpToDate bool `json:"up_to_date"` - Tainted bool `json:"tainted"` - TaintedBy []string `json:"tainted_by,omitempty" yaml:"tainted_by,omitempty"` - BelongsToCollections []string `json:"belongs_to_collections,omitempty" yaml:"belongs_to_collections,omitempty"` +type Dependencies struct { + Parsers []string `json:"parsers,omitempty" yaml:"parsers,omitempty"` + PostOverflows []string `json:"postoverflows,omitempty" yaml:"postoverflows,omitempty"` + Scenarios []string `json:"scenarios,omitempty" yaml:"scenarios,omitempty"` + Collections []string `json:"collections,omitempty" yaml:"collections,omitempty"` + Contexts []string `json:"contexts,omitempty" yaml:"contexts,omitempty"` + AppsecConfigs []string `json:"appsec-configs,omitempty" yaml:"appsec-configs,omitempty"` + AppsecRules []string `json:"appsec-rules,omitempty" yaml:"appsec-rules,omitempty"` } -// IsLocal returns true if the item has been create by a user (not downloaded from the hub). -func (s *ItemState) IsLocal() bool { - return s.Installed && !s.Downloaded +// a group of items of the same type +type itemgroup struct { + typeName string + itemNames []string } -// Text returns the status of the item as a string (eg. "enabled,update-available"). 
-func (s *ItemState) Text() string { - ret := "disabled" - - if s.Installed { - ret = "enabled" +func (d Dependencies) byType() []itemgroup { + return []itemgroup{ + {PARSERS, d.Parsers}, + {POSTOVERFLOWS, d.PostOverflows}, + {SCENARIOS, d.Scenarios}, + {CONTEXTS, d.Contexts}, + {APPSEC_CONFIGS, d.AppsecConfigs}, + {APPSEC_RULES, d.AppsecRules}, + {COLLECTIONS, d.Collections}, } - - if s.IsLocal() { - ret += ",local" - } - - if s.Tainted { - ret += ",tainted" - } else if !s.UpToDate && !s.IsLocal() { - ret += ",update-available" - } - - return ret } -// Emoji returns the status of the item as an emoji (eg. emoji.Warning). -func (s *ItemState) Emoji() string { - switch { - case s.IsLocal(): - return emoji.House - case !s.Installed: - return emoji.Prohibited - case s.Tainted || (!s.UpToDate && !s.IsLocal()): - return emoji.Warning - case s.Installed: - return emoji.CheckMark - default: - return emoji.QuestionMark +// SubItems iterates over the sub-items in the struct, excluding the ones that were not found in the hub. +func (d Dependencies) SubItems(hub *Hub) func(func(*Item) bool) { + return func(yield func(*Item) bool) { + for _, typeGroup := range d.byType() { + for _, name := range typeGroup.itemNames { + s := hub.GetItem(typeGroup.typeName, name) + if s == nil { + continue + } + + if !yield(s) { + return + } + } + } } } @@ -104,46 +96,40 @@ type Item struct { State ItemState `json:"-" yaml:"-"` // local state, not stored in the index - Type string `json:"type,omitempty" yaml:"type,omitempty"` // one of the ItemTypes - Stage string `json:"stage,omitempty" yaml:"stage,omitempty"` // Stage for parser|postoverflow: s00-raw/s01-... - Name string `json:"name,omitempty" yaml:"name,omitempty"` // usually "author/name" - FileName string `json:"file_name,omitempty" yaml:"file_name,omitempty"` // eg. 
apache2-logs.yaml + Type string `json:"type,omitempty" yaml:"type,omitempty"` + Stage string `json:"stage,omitempty" yaml:"stage,omitempty"` // Stage for parser|postoverflow: s00-raw/s01-... + Name string `json:"name,omitempty" yaml:"name,omitempty"` // usually "author/name" + FileName string `json:"file_name,omitempty" yaml:"file_name,omitempty"` // eg. apache2-logs.yaml Description string `json:"description,omitempty" yaml:"description,omitempty"` - Content string `json:"content,omitempty" yaml:"-"` - Author string `json:"author,omitempty" yaml:"author,omitempty"` - References []string `json:"references,omitempty" yaml:"references,omitempty"` + Content string `json:"content,omitempty" yaml:"-"` + References []string `json:"references,omitempty" yaml:"references,omitempty"` + // NOTE: RemotePath could be derived from the other fields RemotePath string `json:"path,omitempty" yaml:"path,omitempty"` // path relative to the base URL eg. /parsers/stage/author/file.yaml Version string `json:"version,omitempty" yaml:"version,omitempty"` // the last available version Versions map[string]ItemVersion `json:"versions,omitempty" yaml:"-"` // all the known versions - // if it's a collection, it can have sub items - Parsers []string `json:"parsers,omitempty" yaml:"parsers,omitempty"` - PostOverflows []string `json:"postoverflows,omitempty" yaml:"postoverflows,omitempty"` - Scenarios []string `json:"scenarios,omitempty" yaml:"scenarios,omitempty"` - Collections []string `json:"collections,omitempty" yaml:"collections,omitempty"` - Contexts []string `json:"contexts,omitempty" yaml:"contexts,omitempty"` - AppsecConfigs []string `json:"appsec-configs,omitempty" yaml:"appsec-configs,omitempty"` - AppsecRules []string `json:"appsec-rules,omitempty" yaml:"appsec-rules,omitempty"` + // The index contains the dependencies of the "latest" version (collections only) + Dependencies } -// installPath returns the location of the symlink to the item in the hub, or the path of the item itself 
if it's local +// InstallPath returns the location of the symlink to the item in the hub, or the path of the item itself if it's local // (eg. /etc/crowdsec/collections/xyz.yaml). // Raises an error if the path goes outside of the install dir. -func (i *Item) installPath() (string, error) { +func (i *Item) InstallPath() (string, error) { p := i.Type if i.Stage != "" { p = filepath.Join(p, i.Stage) } - return safePath(i.hub.local.InstallDir, filepath.Join(p, i.FileName)) + return SafePath(i.hub.local.InstallDir, filepath.Join(p, i.FileName)) } -// downloadPath returns the location of the actual config file in the hub +// DownloadPath returns the location of the actual config file in the hub // (eg. /etc/crowdsec/hub/collections/author/xyz.yaml). // Raises an error if the path goes outside of the hub dir. -func (i *Item) downloadPath() (string, error) { - ret, err := safePath(i.hub.local.HubDir, i.RemotePath) +func (i *Item) DownloadPath() (string, error) { + ret, err := SafePath(i.hub.local.HubDir, i.RemotePath) if err != nil { return "", err } @@ -203,141 +189,128 @@ func (i Item) MarshalYAML() (interface{}, error) { }, nil } -// SubItems returns a slice of sub-items, excluding the ones that were not found. -func (i *Item) SubItems() []*Item { - sub := make([]*Item, 0) - - for _, name := range i.Parsers { - s := i.hub.GetItem(PARSERS, name) - if s == nil { - continue - } +// LatestDependencies returns a slice of sub-items of the "latest" available version of the item, as opposed to the version that is actually installed. The information comes from the index. +func (i *Item) LatestDependencies() Dependencies { + return i.Dependencies +} - sub = append(sub, s) +// CurrentSubItems returns a slice of sub-items of the installed version, excluding the ones that were not found. +// The list comes from the content file if parseable, otherwise from the index (same as LatestDependencies). 
+func (i *Item) CurrentDependencies() Dependencies { + if !i.HasSubItems() { + return Dependencies{} } - for _, name := range i.PostOverflows { - s := i.hub.GetItem(POSTOVERFLOWS, name) - if s == nil { - continue - } - - sub = append(sub, s) + if i.State.UpToDate { + return i.Dependencies } - for _, name := range i.Scenarios { - s := i.hub.GetItem(SCENARIOS, name) - if s == nil { - continue - } - - sub = append(sub, s) + contentPath, err := i.InstallPath() + if err != nil { + i.hub.logger.Warningf("can't access dependencies for %s, using index", i.FQName()) + return i.Dependencies } - for _, name := range i.Contexts { - s := i.hub.GetItem(CONTEXTS, name) - if s == nil { - continue - } + currentContent, err := os.ReadFile(contentPath) + if errors.Is(err, fs.ErrNotExist) { + return i.Dependencies + } - sub = append(sub, s) + if err != nil { + // a file might be corrupted, or in development + i.hub.logger.Warningf("can't read dependencies for %s, using index", i.FQName()) + return i.Dependencies } - for _, name := range i.AppsecConfigs { - s := i.hub.GetItem(APPSEC_CONFIGS, name) - if s == nil { - continue - } + var d Dependencies - sub = append(sub, s) + // XXX: assume collection content never has multiple documents + if err := yaml.Unmarshal(currentContent, &d); err != nil { + i.hub.logger.Warningf("can't parse dependencies for %s, using index", i.FQName()) + return i.Dependencies } - for _, name := range i.AppsecRules { - s := i.hub.GetItem(APPSEC_RULES, name) - if s == nil { - continue - } + return d +} - sub = append(sub, s) +func (i *Item) logMissingSubItems() { + for _, sub := range i.CurrentDependencies().byType() { + for _, subName := range sub.itemNames { + if i.hub.GetItem(sub.typeName, subName) == nil { + i.hub.logger.Errorf("can't find %s:%s, required by %s", sub.typeName, subName, i.Name) + } + } } +} + +// Ancestors returns a slice of items (typically collections) that have this item as a direct or indirect dependency. 
+func (i *Item) Ancestors() []*Item { + ret := make([]*Item, 0) - for _, name := range i.Collections { - s := i.hub.GetItem(COLLECTIONS, name) - if s == nil { + for _, parentName := range i.State.BelongsToCollections { + parent := i.hub.GetItem(COLLECTIONS, parentName) + if parent == nil { continue } - sub = append(sub, s) + ret = append(ret, parent) } - return sub + return ret } -func (i *Item) logMissingSubItems() { - if !i.HasSubItems() { - return - } +// SafeToRemoveDeps returns a slice of dependencies that can be safely removed when this item is removed. +// The returned slice can contain items that are not installed, or not downloaded. +func (i *Item) SafeToRemoveDeps() ([]*Item, error) { + ret := make([]*Item, 0) - for _, subName := range i.Parsers { - if i.hub.GetItem(PARSERS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, PARSERS, i.Name) - } + // can return err for circular dependencies + descendants, err := i.descendants() + if err != nil { + return nil, err } - for _, subName := range i.Scenarios { - if i.hub.GetItem(SCENARIOS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, SCENARIOS, i.Name) - } - } + ancestors := i.Ancestors() - for _, subName := range i.PostOverflows { - if i.hub.GetItem(POSTOVERFLOWS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, POSTOVERFLOWS, i.Name) - } - } + for sub := range i.CurrentDependencies().SubItems(i.hub) { + safe := true - for _, subName := range i.Contexts { - if i.hub.GetItem(CONTEXTS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, CONTEXTS, i.Name) - } - } + // if the sub depends on a collection that is not a direct or indirect dependency + // of the current item, it is not removed + for _, subParent := range sub.Ancestors() { + if !subParent.State.Installed { + continue + } - for _, subName := range i.AppsecConfigs { - if i.hub.GetItem(APPSEC_CONFIGS, 
subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, APPSEC_CONFIGS, i.Name) - } - } + // the ancestor that would block the removal of the sub item is also an ancestor + // of the item we are removing, so we don't want false warnings + // (e.g. crowdsecurity/sshd-logs was not removed because it also belongs to crowdsecurity/linux, + // while we are removing crowdsecurity/sshd) + if slices.Contains(ancestors, subParent) { + continue + } - for _, subName := range i.AppsecRules { - if i.hub.GetItem(APPSEC_RULES, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, APPSEC_RULES, i.Name) - } - } + // the sub-item belongs to the item we are removing, but we already knew that + if subParent == i { + continue + } - for _, subName := range i.Collections { - if i.hub.GetItem(COLLECTIONS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, COLLECTIONS, i.Name) + if !slices.Contains(descendants, subParent) { + // not removing %s because it also belongs to %s", sub.FQName(), subParent.FQName()) + safe = false + break + } } - } -} - -// Ancestors returns a slice of items (typically collections) that have this item as a direct or indirect dependency. -func (i *Item) Ancestors() []*Item { - ret := make([]*Item, 0) - for _, parentName := range i.State.BelongsToCollections { - parent := i.hub.GetItem(COLLECTIONS, parentName) - if parent == nil { - continue + if safe { + ret = append(ret, sub) } - - ret = append(ret, parent) } - return ret + return ret, nil } -// descendants returns a list of all (direct or indirect) dependencies of the item. +// descendants returns a list of all (direct or indirect) dependencies of the item's current version. 
func (i *Item) descendants() ([]*Item, error) { var collectSubItems func(item *Item, visited map[*Item]bool, result *[]*Item) error @@ -352,7 +325,7 @@ func (i *Item) descendants() ([]*Item, error) { visited[item] = true - for _, subItem := range item.SubItems() { + for subItem := range item.CurrentDependencies().SubItems(item.hub) { if subItem == i { return fmt.Errorf("circular dependency detected: %s depends on %s", item.Name, i.Name) } diff --git a/pkg/cwhub/item_test.go b/pkg/cwhub/item_test.go index 703bbb5cb90..350861ff85e 100644 --- a/pkg/cwhub/item_test.go +++ b/pkg/cwhub/item_test.go @@ -6,39 +6,16 @@ import ( "github.com/stretchr/testify/require" ) -func TestItemStatus(t *testing.T) { +func TestItemStats(t *testing.T) { hub := envSetup(t) // get existing map x := hub.GetItemMap(COLLECTIONS) require.NotEmpty(t, x) - // Get item: good and bad - for k := range x { - item := hub.GetItem(COLLECTIONS, k) - require.NotNil(t, item) - - item.State.Installed = true - item.State.UpToDate = false - item.State.Tainted = false - item.State.Downloaded = true - - txt := item.State.Text() - require.Equal(t, "enabled,update-available", txt) - - item.State.Installed = true - item.State.UpToDate = false - item.State.Tainted = false - item.State.Downloaded = false - - txt = item.State.Text() - require.Equal(t, "enabled,local", txt) - } - stats := hub.ItemStats() require.Equal(t, []string{ "Loaded: 2 parsers, 1 scenarios, 3 collections", - "Unmanaged items: 3 local, 0 tainted", }, stats) } diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go deleted file mode 100644 index 912897d0d7e..00000000000 --- a/pkg/cwhub/iteminstall.go +++ /dev/null @@ -1,73 +0,0 @@ -package cwhub - -import ( - "context" - "fmt" -) - -// enable enables the item by creating a symlink to the downloaded content, and also enables sub-items. 
-func (i *Item) enable() error { - if i.State.Installed { - if i.State.Tainted { - return fmt.Errorf("%s is tainted, won't overwrite unless --force", i.Name) - } - - if i.State.IsLocal() { - return fmt.Errorf("%s is local, won't overwrite", i.Name) - } - - // if it's a collection, check sub-items even if the collection file itself is up-to-date - if i.State.UpToDate && !i.HasSubItems() { - i.hub.logger.Tracef("%s is installed and up-to-date, skip.", i.Name) - return nil - } - } - - for _, sub := range i.SubItems() { - if err := sub.enable(); err != nil { - return fmt.Errorf("while installing %s: %w", sub.Name, err) - } - } - - if err := i.createInstallLink(); err != nil { - return err - } - - i.hub.logger.Infof("Enabled %s: %s", i.Type, i.Name) - i.State.Installed = true - - return nil -} - -// Install installs the item from the hub, downloading it if needed. -func (i *Item) Install(ctx context.Context, force bool, downloadOnly bool) error { - if downloadOnly && i.State.Downloaded && i.State.UpToDate { - i.hub.logger.Infof("%s is already downloaded and up-to-date", i.Name) - - if !force { - return nil - } - } - - downloaded, err := i.downloadLatest(ctx, force, true) - if err != nil { - return err - } - - if downloadOnly && downloaded { - return nil - } - - if err := i.enable(); err != nil { - return fmt.Errorf("while enabling %s: %w", i.Name, err) - } - - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("installed %s\n", i.Name) - - i.hub.logger.Infof("Enabled %s", i.Name) - - return nil -} diff --git a/pkg/cwhub/iteminstall_test.go b/pkg/cwhub/iteminstall_test.go index 5bfc7e8148e..ba47f2f4b4a 100644 --- a/pkg/cwhub/iteminstall_test.go +++ b/pkg/cwhub/iteminstall_test.go @@ -1,5 +1,9 @@ package cwhub +// XXX: these tests are obsolete + +/* + import ( "context" "os" @@ -103,7 +107,7 @@ func TestInstallParser(t *testing.T) { - 
force update it - check its status - remove it - */ + * hub := envSetup(t) // map iteration is random by itself @@ -126,7 +130,7 @@ func TestInstallCollection(t *testing.T) { - force update it - check its status - remove it - */ + * hub := envSetup(t) // map iteration is random by itself @@ -139,3 +143,5 @@ func TestInstallCollection(t *testing.T) { break } } + +*/ diff --git a/pkg/cwhub/itemlink.go b/pkg/cwhub/itemlink.go deleted file mode 100644 index 8a78d6805b7..00000000000 --- a/pkg/cwhub/itemlink.go +++ /dev/null @@ -1,78 +0,0 @@ -package cwhub - -import ( - "fmt" - "os" - "path/filepath" -) - -// createInstallLink creates a symlink between the actual config file at hub.HubDir and hub.ConfigDir. -func (i *Item) createInstallLink() error { - dest, err := i.installPath() - if err != nil { - return err - } - - destDir := filepath.Dir(dest) - if err = os.MkdirAll(destDir, os.ModePerm); err != nil { - return fmt.Errorf("while creating %s: %w", destDir, err) - } - - if _, err = os.Lstat(dest); !os.IsNotExist(err) { - i.hub.logger.Infof("%s already exists.", dest) - return nil - } - - src, err := i.downloadPath() - if err != nil { - return err - } - - if err = os.Symlink(src, dest); err != nil { - return fmt.Errorf("while creating symlink from %s to %s: %w", src, dest, err) - } - - return nil -} - -// removeInstallLink removes the symlink to the downloaded content. -func (i *Item) removeInstallLink() error { - syml, err := i.installPath() - if err != nil { - return err - } - - stat, err := os.Lstat(syml) - if err != nil { - return err - } - - // if it's managed by hub, it's a symlink to csconfig.GConfig.hub.HubDir / ... 
- if stat.Mode()&os.ModeSymlink == 0 { - i.hub.logger.Warningf("%s (%s) isn't a symlink, can't disable", i.Name, syml) - return fmt.Errorf("%s isn't managed by hub", i.Name) - } - - hubpath, err := os.Readlink(syml) - if err != nil { - return fmt.Errorf("while reading symlink: %w", err) - } - - src, err := i.downloadPath() - if err != nil { - return err - } - - if hubpath != src { - i.hub.logger.Warningf("%s (%s) isn't a symlink to %s", i.Name, syml, src) - return fmt.Errorf("%s isn't managed by hub", i.Name) - } - - if err := os.Remove(syml); err != nil { - return fmt.Errorf("while removing symlink: %w", err) - } - - i.hub.logger.Infof("Removed symlink [%s]: %s", i.Name, syml) - - return nil -} diff --git a/pkg/cwhub/itemremove.go b/pkg/cwhub/itemremove.go deleted file mode 100644 index eca0c856237..00000000000 --- a/pkg/cwhub/itemremove.go +++ /dev/null @@ -1,138 +0,0 @@ -package cwhub - -import ( - "fmt" - "os" - "slices" -) - -// purge removes the actual config file that was downloaded. -func (i *Item) purge() (bool, error) { - if !i.State.Downloaded { - i.hub.logger.Debugf("removing %s: not downloaded -- no need to remove", i.Name) - return false, nil - } - - src, err := i.downloadPath() - if err != nil { - return false, err - } - - if err := os.Remove(src); err != nil { - if os.IsNotExist(err) { - i.hub.logger.Debugf("%s doesn't exist, no need to remove", src) - return false, nil - } - - return false, fmt.Errorf("while removing file: %w", err) - } - - i.State.Downloaded = false - i.hub.logger.Infof("Removed source file [%s]: %s", i.Name, src) - - return true, nil -} - -// disable removes the install link, and optionally the downloaded content. 
-func (i *Item) disable(purge bool, force bool) (bool, error) { - didRemove := true - - err := i.removeInstallLink() - if os.IsNotExist(err) { - if !purge && !force { - link, _ := i.installPath() - return false, fmt.Errorf("link %s does not exist (override with --force or --purge)", link) - } - - didRemove = false - } else if err != nil { - return false, err - } - - i.State.Installed = false - didPurge := false - - if purge { - if didPurge, err = i.purge(); err != nil { - return didRemove, err - } - } - - ret := didRemove || didPurge - - return ret, nil -} - -// Remove disables the item, optionally removing the downloaded content. -func (i *Item) Remove(purge bool, force bool) (bool, error) { - if i.State.IsLocal() { - i.hub.logger.Warningf("%s is a local item, please delete manually", i.Name) - return false, nil - } - - if i.State.Tainted && !force { - return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) - } - - if !i.State.Installed && !purge { - i.hub.logger.Infof("removing %s: not installed -- no need to remove", i.Name) - return false, nil - } - - removed := false - - descendants, err := i.descendants() - if err != nil { - return false, err - } - - ancestors := i.Ancestors() - - for _, sub := range i.SubItems() { - if !sub.State.Installed { - continue - } - - // if the sub depends on a collection that is not a direct or indirect dependency - // of the current item, it is not removed - for _, subParent := range sub.Ancestors() { - if !purge && !subParent.State.Installed { - continue - } - - // the ancestor that would block the removal of the sub item is also an ancestor - // of the item we are removing, so we don't want false warnings - // (e.g. 
crowdsecurity/sshd-logs was not removed because it also belongs to crowdsecurity/linux, - // while we are removing crowdsecurity/sshd) - if slices.Contains(ancestors, subParent) { - continue - } - - // the sub-item belongs to the item we are removing, but we already knew that - if subParent == i { - continue - } - - if !slices.Contains(descendants, subParent) { - i.hub.logger.Infof("%s was not removed because it also belongs to %s", sub.Name, subParent.Name) - continue - } - } - - subRemoved, err := sub.Remove(purge, force) - if err != nil { - return false, fmt.Errorf("unable to disable %s: %w", i.Name, err) - } - - removed = removed || subRemoved - } - - didDisable, err := i.disable(purge, force) - if err != nil { - return false, fmt.Errorf("while removing %s: %w", i.Name, err) - } - - removed = removed || didDisable - - return removed, nil -} diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go deleted file mode 100644 index 105e5ebec31..00000000000 --- a/pkg/cwhub/itemupgrade.go +++ /dev/null @@ -1,254 +0,0 @@ -package cwhub - -// Install, upgrade and remove items from the hub to the local configuration - -import ( - "context" - "crypto" - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/downloader" - - "github.com/crowdsecurity/crowdsec/pkg/emoji" -) - -// Upgrade downloads and applies the last version of the item from the hub. 
-func (i *Item) Upgrade(ctx context.Context, force bool) (bool, error) { - if i.State.IsLocal() { - i.hub.logger.Infof("not upgrading %s: local item", i.Name) - return false, nil - } - - if !i.State.Downloaded { - return false, fmt.Errorf("can't upgrade %s: not installed", i.Name) - } - - if !i.State.Installed { - return false, fmt.Errorf("can't upgrade %s: downloaded but not installed", i.Name) - } - - if i.State.UpToDate { - i.hub.logger.Infof("%s: up-to-date", i.Name) - - if err := i.DownloadDataIfNeeded(ctx, force); err != nil { - return false, fmt.Errorf("%s: download failed: %w", i.Name, err) - } - - if !force { - // no upgrade needed - return false, nil - } - } - - if _, err := i.downloadLatest(ctx, force, true); err != nil { - return false, fmt.Errorf("%s: download failed: %w", i.Name, err) - } - - if !i.State.UpToDate { - if i.State.Tainted { - i.hub.logger.Warningf("%v %s is tainted, --force to overwrite", emoji.Warning, i.Name) - } - - return false, nil - } - - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", i.Name) - i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) - - return true, nil -} - -// downloadLatest downloads the latest version of the item to the hub directory. 
-func (i *Item) downloadLatest(ctx context.Context, overwrite bool, updateOnly bool) (bool, error) { - i.hub.logger.Debugf("Downloading %s %s", i.Type, i.Name) - - for _, sub := range i.SubItems() { - if !sub.State.Installed && updateOnly && sub.State.Downloaded { - i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name) - continue - } - - i.hub.logger.Debugf("Download %s sub-item: %s %s (%t -> %t)", i.Name, sub.Type, sub.Name, i.State.Installed, updateOnly) - - // recurse as it's a collection - if sub.HasSubItems() { - i.hub.logger.Tracef("collection, recurse") - - if _, err := sub.downloadLatest(ctx, overwrite, updateOnly); err != nil { - return false, err - } - } - - downloaded := sub.State.Downloaded - - if _, err := sub.download(ctx, overwrite); err != nil { - return false, err - } - - // We need to enable an item when it has been added to a collection since latest release of the collection. - // We check if sub.Downloaded is false because maybe the item has been disabled by the user. - if !sub.State.Installed && !downloaded { - if err := sub.enable(); err != nil { - return false, fmt.Errorf("enabling '%s': %w", sub.Name, err) - } - } - } - - if !i.State.Installed && updateOnly && i.State.Downloaded && !overwrite { - i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name) - return false, nil - } - - return i.download(ctx, overwrite) -} - -// FetchContentTo downloads the last version of the item's YAML file to the specified path. -func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { - wantHash := i.latestHash() - if wantHash == "" { - return false, "", errors.New("latest hash missing from index. 
The index file is invalid, please run 'cscli hub update' and try again") - } - - // Use the embedded content if available - if i.Content != "" { - // the content was historically base64 encoded - content, err := base64.StdEncoding.DecodeString(i.Content) - if err != nil { - content = []byte(i.Content) - } - - dir := filepath.Dir(destPath) - - if err := os.MkdirAll(dir, 0o755); err != nil { - return false, "", fmt.Errorf("while creating %s: %w", dir, err) - } - - // check sha256 - hash := crypto.SHA256.New() - if _, err := hash.Write(content); err != nil { - return false, "", fmt.Errorf("while hashing %s: %w", i.Name, err) - } - - gotHash := hex.EncodeToString(hash.Sum(nil)) - if gotHash != wantHash { - return false, "", fmt.Errorf("hash mismatch: expected %s, got %s. The index file is invalid, please run 'cscli hub update' and try again", wantHash, gotHash) - } - - if err := os.WriteFile(destPath, content, 0o600); err != nil { - return false, "", fmt.Errorf("while writing %s: %w", destPath, err) - } - - i.hub.logger.Debugf("Wrote %s content from .index.json to %s", i.Name, destPath) - - return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil - } - - url, err := i.hub.remote.urlTo(i.RemotePath) - if err != nil { - return false, "", fmt.Errorf("failed to build request: %w", err) - } - - d := downloader. - New(). - WithHTTPClient(hubClient). - ToFile(destPath). - WithETagFn(downloader.SHA256). - WithMakeDirs(true). - WithLogger(logrus.WithField("url", url)). - CompareContent(). - VerifyHash("sha256", wantHash) - - // TODO: recommend hub update if hash does not match - - downloaded, err := d.Download(ctx, url) - if err != nil { - return false, "", err - } - - return downloaded, url, nil -} - -// download downloads the item from the hub and writes it to the hub directory. 
-func (i *Item) download(ctx context.Context, overwrite bool) (bool, error) { - // ensure that target file is within target dir - finalPath, err := i.downloadPath() - if err != nil { - return false, err - } - - if i.State.IsLocal() { - i.hub.logger.Warningf("%s is local, can't download", i.Name) - return false, nil - } - - // if user didn't --force, don't overwrite local, tainted, up-to-date files - if !overwrite { - if i.State.Tainted { - i.hub.logger.Debugf("%s: tainted, not updated", i.Name) - return false, nil - } - - if i.State.UpToDate { - // We still have to check if data files are present - i.hub.logger.Debugf("%s: up-to-date, not updated", i.Name) - } - } - - downloaded, _, err := i.FetchContentTo(ctx, finalPath) - if err != nil { - return false, err - } - - if downloaded { - i.hub.logger.Infof("Downloaded %s", i.Name) - } - - i.State.Downloaded = true - i.State.Tainted = false - i.State.UpToDate = true - - // read content to get the list of data files - reader, err := os.Open(finalPath) - if err != nil { - return false, fmt.Errorf("while opening %s: %w", finalPath, err) - } - - defer reader.Close() - - if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { - return false, fmt.Errorf("while downloading data for %s: %w", i.FileName, err) - } - - return true, nil -} - -// DownloadDataIfNeeded downloads the data set for the item. 
-func (i *Item) DownloadDataIfNeeded(ctx context.Context, force bool) error { - itemFilePath, err := i.installPath() - if err != nil { - return err - } - - itemFile, err := os.Open(itemFilePath) - if err != nil { - return fmt.Errorf("while opening %s: %w", itemFilePath, err) - } - - defer itemFile.Close() - - if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, force, itemFile, i.hub.logger); err != nil { - return fmt.Errorf("while downloading data for %s: %w", itemFilePath, err) - } - - return nil -} diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index 5f9e4d1944e..3225d2f013b 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -1,5 +1,7 @@ package cwhub +/* + import ( "context" "testing" @@ -36,10 +38,9 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { // collection receives an update. It now adds new scenario "crowdsecurity/barfoo_scenario" pushUpdateToCollectionInHub() - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub, err := NewHub(hub.local, remote, nil) @@ -96,10 +97,9 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { require.NoError(t, err) require.True(t, didRemove) - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub = getHubOrFail(t, hub.local, remote) @@ -130,7 +130,7 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { } // getHubOrFail refreshes the hub state (load index, sync) and returns the singleton, or fails the test. 
-func getHubOrFail(t *testing.T, local *csconfig.LocalHubCfg, remote *RemoteHubCfg) *Hub { +func getHubOrFail(t *testing.T, local *csconfig.LocalHubCfg, remote *Downloader) *Hub { hub, err := NewHub(local, remote, nil) require.NoError(t, err) @@ -168,10 +168,9 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te require.NoError(t, err) require.True(t, didRemove) - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub = getHubOrFail(t, hub.local, remote) @@ -221,3 +220,5 @@ func pushUpdateToCollectionInHub() { responseByPath["/crowdsecurity/master/.index.json"] = fileToStringX("./testdata/index2.json") responseByPath["/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml"] = fileToStringX("./testdata/collection_v2.yaml") } + +*/ diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go deleted file mode 100644 index 8d2dc2dbb94..00000000000 --- a/pkg/cwhub/remote.go +++ /dev/null @@ -1,84 +0,0 @@ -package cwhub - -import ( - "context" - "fmt" - "net/url" - - "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/downloader" -) - -// RemoteHubCfg is used to retrieve index and items from the remote hub. -type RemoteHubCfg struct { - Branch string - URLTemplate string - IndexPath string - EmbedItemContent bool -} - -// urlTo builds the URL to download a file from the remote hub. -func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { - if r == nil { - return "", ErrNilRemoteHub - } - - // the template must contain two string placeholders - if fmt.Sprintf(r.URLTemplate, "%s", "%s") != r.URLTemplate { - return "", fmt.Errorf("invalid URL template '%s'", r.URLTemplate) - } - - return fmt.Sprintf(r.URLTemplate, r.Branch, remotePath), nil -} - -// addURLParam adds the "with_content=true" parameter to the URL if it's not already present. 
-func addURLParam(rawURL string, param string, value string) (string, error) { - parsedURL, err := url.Parse(rawURL) - if err != nil { - return "", fmt.Errorf("failed to parse URL: %w", err) - } - - query := parsedURL.Query() - - if _, exists := query[param]; !exists { - query.Add(param, value) - } - - parsedURL.RawQuery = query.Encode() - - return parsedURL.String(), nil -} - -// fetchIndex downloads the index from the hub and returns the content. -func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, error) { - if r == nil { - return false, ErrNilRemoteHub - } - - url, err := r.urlTo(r.IndexPath) - if err != nil { - return false, fmt.Errorf("failed to build hub index request: %w", err) - } - - if r.EmbedItemContent { - url, err = addURLParam(url, "with_content", "true") - if err != nil { - return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) - } - } - - downloaded, err := downloader. - New(). - WithHTTPClient(hubClient). - ToFile(destPath). - WithETagFn(downloader.SHA256). - CompareContent(). - WithLogger(logrus.WithField("url", url)). - Download(ctx, url) - if err != nil { - return false, err - } - - return downloaded, nil -} diff --git a/pkg/cwhub/state.go b/pkg/cwhub/state.go new file mode 100644 index 00000000000..63a433151cd --- /dev/null +++ b/pkg/cwhub/state.go @@ -0,0 +1,62 @@ +package cwhub + +import ( + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +// ItemState is used to keep the local state (i.e. at runtime) of an item. +// This data is not stored in the index, but is displayed with "cscli ... inspect". 
+type ItemState struct { + LocalPath string `json:"local_path,omitempty" yaml:"local_path,omitempty"` + LocalVersion string `json:"local_version,omitempty" yaml:"local_version,omitempty"` + LocalHash string `json:"local_hash,omitempty" yaml:"local_hash,omitempty"` + Installed bool `json:"installed"` + local bool + Downloaded bool `json:"downloaded"` + UpToDate bool `json:"up_to_date"` + Tainted bool `json:"tainted"` + TaintedBy []string `json:"tainted_by,omitempty" yaml:"tainted_by,omitempty"` + BelongsToCollections []string `json:"belongs_to_collections,omitempty" yaml:"belongs_to_collections,omitempty"` +} + +// IsLocal returns true if the item has been create by a user (not downloaded from the hub). +func (s *ItemState) IsLocal() bool { + return s.local +} + +// Text returns the status of the item as a string (eg. "enabled,update-available"). +func (s *ItemState) Text() string { + ret := "disabled" + + if s.Installed { + ret = "enabled" + } + + if s.IsLocal() { + ret += ",local" + } + + if s.Tainted { + ret += ",tainted" + } else if !s.UpToDate && !s.IsLocal() { + ret += ",update-available" + } + + return ret +} + +// Emoji returns the status of the item as an emoji (eg. emoji.Warning). +func (s *ItemState) Emoji() string { + switch { + case s.IsLocal(): + return emoji.House + case !s.Installed: + return emoji.Prohibited + case s.Tainted || (!s.UpToDate && !s.IsLocal()): + return emoji.Warning + case s.Installed: + return emoji.CheckMark + default: + return emoji.QuestionMark + } +} diff --git a/pkg/cwhub/state_test.go b/pkg/cwhub/state_test.go new file mode 100644 index 00000000000..20741809ae2 --- /dev/null +++ b/pkg/cwhub/state_test.go @@ -0,0 +1,77 @@ +package cwhub + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +func TestItemStateText(t *testing.T) { + // Test the text representation of an item state. 
+ type test struct { + state ItemState + want string + wantIcon string + } + + tests := []test{ + { + ItemState{ + Installed: true, + UpToDate: false, + Tainted: false, + Downloaded: true, + }, + "enabled,update-available", + emoji.Warning, + }, { + ItemState{ + Installed: true, + UpToDate: true, + Tainted: false, + Downloaded: true, + }, + "enabled", + emoji.CheckMark, + }, { + ItemState{ + Installed: true, + UpToDate: false, + local: true, + Tainted: false, + Downloaded: false, + }, + "enabled,local", + emoji.House, + }, { + ItemState{ + Installed: false, + UpToDate: false, + Tainted: false, + Downloaded: true, + }, + "disabled,update-available", + emoji.Prohibited, + }, { + ItemState{ + Installed: true, + UpToDate: false, + Tainted: true, + Downloaded: true, + }, + "enabled,tainted", + emoji.Warning, + }, + } + + for idx, tc := range tests { + t.Run("Test "+strconv.Itoa(idx), func(t *testing.T) { + got := tc.state.Text() + assert.Equal(t, tc.want, got) + assert.Equal(t, tc.wantIcon, tc.state.Emoji()) + }) + } +} diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index c82822e64ef..ed99f4806d5 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -50,9 +50,8 @@ func resolveSymlink(path string) (string, error) { } // isPathInside checks if a path is inside the given directory -// it can return false negatives if the filesystem is case insensitive func isPathInside(path, dir string) (bool, error) { - absFilePath, err := filepath.Abs(path) + absFile, err := filepath.Abs(path) if err != nil { return false, err } @@ -62,99 +61,145 @@ func isPathInside(path, dir string) (bool, error) { return false, err } - return strings.HasPrefix(absFilePath, absDir), nil -} + rel, err := filepath.Rel(absDir, absFile) + if err != nil { + return false, err + } -// information used to create a new Item, from a file path. 
-type itemFileInfo struct { - fname string - stage string - ftype string - fauthor string - inhub bool + return !strings.HasPrefix(rel, ".."), nil } -func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo, error) { - var ret *itemFileInfo +// itemSpec contains some information needed to complete the items +// after they have been parsed from the index. itemSpecs are created by +// scanning the hub (/etc/crowdsec/hub/*) and install (/etc/crowdsec/*) directories. +// Only directories for the known types are scanned. +type itemSpec struct { + path string // full path to the file (or link) + fname string // name of the item: + // for local item, taken from the file content or defaults to the filename (including extension) + // for non-local items, always {author}/{name} + stage string // stage for parsers and overflows + ftype string // type, plural (collections, contexts etc.) + fauthor string // author - empty for local items + inhub bool // true if the spec comes from the hub dir + target string // the target of path if it's a link, otherwise == path + local bool // is this a spec for a local item? 
+} - hubDir := h.local.HubDir - installDir := h.local.InstallDir +func newHubItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpec, error) { + // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml + // .../hub/scenarios/crowdsecurity/ssh_bf.yaml + // .../hub/profiles/crowdsecurity/linux.yaml + if len(subs) < 3 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + } - subsHub := relativePathComponents(path, hubDir) - subsInstall := relativePathComponents(path, installDir) + ftype := subs[0] + if !slices.Contains(ItemTypes, ftype) { + // this doesn't really happen anymore, because we only scan the {hubtype} directories + return nil, fmt.Errorf("unknown configuration type '%s'", ftype) + } - switch { - case len(subsHub) > 0: - logger.Tracef("in hub dir") + stage := "" + fauthor := subs[1] + fname := subs[2] - // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml - // .../hub/scenarios/crowdsecurity/ssh_bf.yaml - // .../hub/profiles/crowdsecurity/linux.yaml - if len(subsHub) < 3 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) + if ftype == PARSERS || ftype == POSTOVERFLOWS { + if len(subs) < 4 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) } - ftype := subsHub[0] - if !slices.Contains(ItemTypes, ftype) { - // this doesn't really happen anymore, because we only scan the {hubtype} directories - return nil, fmt.Errorf("unknown configuration type '%s'", ftype) - } + stage = subs[1] + fauthor = subs[2] + fname = subs[3] + } - stage := "" - fauthor := subsHub[1] - fname := subsHub[2] + spec := itemSpec{ + path: path, + inhub: true, + ftype: ftype, + stage: stage, + fauthor: fauthor, + fname: fname, + } - if ftype == PARSERS || ftype == POSTOVERFLOWS { - stage = subsHub[1] - fauthor = subsHub[2] - fname = subsHub[3] - } + return &spec, nil +} - ret = &itemFileInfo{ - inhub: true, - ftype: ftype, - stage: stage, - fauthor: fauthor, - fname: fname, - } +func 
newInstallItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpec, error) { + logger.Tracef("%s in install dir", path) - case len(subsInstall) > 0: - logger.Tracef("in install dir") + // .../config/parser/stage/file.yaml + // .../config/postoverflow/stage/file.yaml + // .../config/scenarios/scenar.yaml + // .../config/collections/linux.yaml //file is empty - // .../config/parser/stage/file.yaml - // .../config/postoverflow/stage/file.yaml - // .../config/scenarios/scenar.yaml - // .../config/collections/linux.yaml //file is empty + if len(subs) < 2 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + } - if len(subsInstall) < 2 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsInstall)) - } + // this can be in any number of subdirs, we join them to compose the item name - // this can be in any number of subdirs, we join them to compose the item name + ftype := subs[0] + stage := "" + fname := strings.Join(subs[1:], "/") - ftype := subsInstall[0] - stage := "" - fname := strings.Join(subsInstall[1:], "/") + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subs[1] + fname = strings.Join(subs[2:], "/") + } - if ftype == PARSERS || ftype == POSTOVERFLOWS { - stage = subsInstall[1] - fname = strings.Join(subsInstall[2:], "/") - } + spec := itemSpec{ + path: path, + inhub: false, + ftype: ftype, + stage: stage, + fauthor: "", + fname: fname, + } - ret = &itemFileInfo{ - inhub: false, - ftype: ftype, - stage: stage, - fauthor: "", - fname: fname, + return &spec, nil +} + +func newItemSpec(path, hubDir, installDir string, logger *logrus.Logger) (*itemSpec, error) { + var ( + spec *itemSpec + err error + ) + + if subs := relativePathComponents(path, hubDir); len(subs) > 0 { + spec, err = newHubItemSpec(path, subs, logger) + if err != nil { + return nil, err } - default: + } else if subs := relativePathComponents(path, installDir); len(subs) > 0 { + spec, err = newInstallItemSpec(path, subs, logger) + if err 
!= nil { + return nil, err + } + } + + if spec == nil { return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) } - logger.Tracef("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", ret.fname, ret.fauthor, ret.stage, ret.ftype) + // follow the link to see if it falls in the hub directory + // if it's not a link, target == path + spec.target, err = resolveSymlink(spec.path) + if err != nil { + // target does not exist, the user might have removed the file + // or switched to a hub branch without it; or symlink loop + return nil, err + } - return ret, nil + targetInHub, err := isPathInside(spec.target, hubDir) + if err != nil { + return nil, ErrSkipPath + } + + spec.local = !targetInHub + + return spec, nil } // sortedVersions returns the input data, sorted in reverse order (new, old) by semver. @@ -164,6 +209,7 @@ func sortedVersions(raw []string) ([]string, error) { for idx, r := range raw { v, err := semver.NewVersion(r) if err != nil { + // TODO: should catch this during index parsing return nil, fmt.Errorf("%s: %w", r, err) } @@ -180,7 +226,7 @@ func sortedVersions(raw []string) ([]string, error) { return ret, nil } -func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { +func newLocalItem(h *Hub, path string, spec *itemSpec) (*Item, error) { type localItemName struct { Name string `yaml:"name"` } @@ -189,12 +235,13 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { item := &Item{ hub: h, - Name: info.fname, - Stage: info.stage, - Type: info.ftype, + Name: spec.fname, + Stage: spec.stage, + Type: spec.ftype, FileName: fileName, State: ItemState{ LocalPath: path, + local: true, Installed: true, UpToDate: true, }, @@ -220,22 +267,25 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { return item, nil } -func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { +// A sentinel to skip regular files because 
"nil, nil" is ambiguous. Returning SkipDir with files would skip the rest of the directory. +var ErrSkipPath = errors.New("sentinel") + +func (h *Hub) itemVisit(path string, f os.DirEntry, err error) (*itemSpec, error) { if err != nil { h.logger.Debugf("while syncing hub dir: %s", err) // there is a path error, we ignore the file - return nil + return nil, ErrSkipPath + } + + // permission errors, files removed while reading, etc. + if f == nil { + return nil, ErrSkipPath } // only happens if the current working directory was removed (!) path, err = filepath.Abs(path) if err != nil { - return err - } - - // permission errors, files removed while reading, etc. - if f == nil { - return nil + return nil, err } if f.IsDir() { @@ -244,106 +294,125 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { // - double dot prefix is used by k8s to mount config maps if strings.HasPrefix(f.Name(), ".") { h.logger.Tracef("skipping hidden directory %s", path) - return filepath.SkipDir + return nil, filepath.SkipDir } // keep traversing - return nil + return nil, nil } // we only care about YAML files if !isYAMLFileName(f.Name()) { - return nil + return nil, ErrSkipPath } - info, err := h.getItemFileInfo(path, h.logger) + spec, err := newItemSpec(path, h.local.HubDir, h.local.InstallDir, h.logger) if err != nil { h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil + return nil, ErrSkipPath } - // follow the link to see if it falls in the hub directory - // if it's not a link, target == path - target, err := resolveSymlink(path) - if err != nil { - // target does not exist, the user might have removed the file - // or switched to a hub branch without it; or symlink loop - h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil - } - - targetInHub, err := isPathInside(target, h.local.HubDir) - if err != nil { - h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil - } - - // local (custom) item if the file or link target is not 
inside the hub dir - if !targetInHub { - h.logger.Tracef("%s is a local file, skip", path) - - item, err := newLocalItem(h, path, info) - if err != nil { - return err - } - - h.addItem(item) + return spec, nil +} - return nil +func updateNonLocalItem(h *Hub, path string, spec *itemSpec, symlinkTarget string) (*Item, error) { + // look for the matching index entry + tot := 0 + for range h.GetItemMap(spec.ftype) { + tot++ } - hubpath := target - - // try to find which configuration item it is - h.logger.Tracef("check [%s] of %s", info.fname, info.ftype) - - for _, item := range h.GetItemMap(info.ftype) { - if info.fname != item.FileName { + for _, item := range h.GetItemMap(spec.ftype) { + if spec.fname != item.FileName { continue } - if item.Stage != info.stage { + if item.Stage != spec.stage { continue } - // if we are walking hub dir, just mark present files as downloaded - if info.inhub { - // wrong author - if info.fauthor != item.Author { - continue - } - + // Downloaded item, in the hub dir. + if spec.inhub { // not the item we're looking for - if !item.validPath(info.fauthor, info.fname) { + if !item.validPath(spec.fauthor, spec.fname) { continue } - src, err := item.downloadPath() + src, err := item.DownloadPath() if err != nil { - return err + return nil, err } - if path == src { + if spec.path == src { h.logger.Tracef("marking %s as downloaded", item.Name) item.State.Downloaded = true } - } else if !hasPathSuffix(hubpath, item.RemotePath) { + } else if !hasPathSuffix(symlinkTarget, item.RemotePath) { // wrong file // ///.yaml continue } - err := item.setVersionState(path, info.inhub) + err := item.setVersionState(spec.path, spec.inhub) + if err != nil { + return nil, err + } + + return item, nil + } + + return nil, nil +} + +// addItemFromSpec adds an item to the hub based on the spec, or updates it if already present. 
+// +// When the item is: +// +// Local - an itemSpec instance is created while scanning the install directory +// and an Item instance will be added to the hub.items map. +// +// Not downloaded, not installed - an Item instance is already on hub.items (decoded from index) and left untouched. +// +// Downloaded, not installed - an Item instance is on hub.items (decoded from index) and an itemSpec instance is created +// to complete it (i.e. set version and state flags). +// +// Downloaded, installed - an Item instance is on hub.items and is complemented with two itemSpecs: one from the file +// on the hub directory, one from the link in the install directory. +func (h *Hub) addItemFromSpec(spec *itemSpec) error { + var ( + item *Item + err error + ) + + // Local item: links outside the hub directory. + // We add it, or overwrite the existing one if it happened to have the same name. + if spec.local { + item, err = newLocalItem(h, spec.path, spec) if err != nil { return err } - h.pathIndex[path] = item + // we now have the name declared in the file (for local), + // see if there's another installed item of the same name + theOtherItem := h.GetItem(spec.ftype, item.Name) + if theOtherItem != nil { + if theOtherItem.State.Installed { + h.logger.Warnf("multiple %s named %s: ignoring %s", spec.ftype, item.Name, theOtherItem.State.LocalPath) + } + } + } else { + item, err = updateNonLocalItem(h, spec.path, spec, spec.target) + if err != nil { + return err + } + } + if item == nil { + h.logger.Infof("Ignoring file %s of type %s", spec.path, spec.ftype) return nil } - h.logger.Infof("Ignoring file %s of type %s", path, info.ftype) + h.addItem(item) return nil } @@ -364,7 +433,7 @@ func (i *Item) checkSubItemVersions() []string { // ensure all the sub-items are installed, or tag the parent as tainted i.hub.logger.Tracef("checking submembers of %s installed:%t", i.Name, i.State.Installed) - for _, sub := range i.SubItems() { + for sub := range 
i.CurrentDependencies().SubItems(i.hub) { i.hub.logger.Tracef("check %s installed:%t", sub.Name, sub.State.Installed) if !i.State.Installed { @@ -398,7 +467,7 @@ func (i *Item) checkSubItemVersions() []string { if !sub.State.UpToDate { i.State.UpToDate = false - warn = append(warn, fmt.Sprintf("%s is tainted by outdated %s", i.Name, sub.FQName())) + warn = append(warn, fmt.Sprintf("%s is outdated because of %s", i.Name, sub.FQName())) continue } @@ -411,6 +480,8 @@ func (i *Item) checkSubItemVersions() []string { // syncDir scans a directory for items, and updates the Hub state accordingly. func (h *Hub) syncDir(dir string) error { + specs := []*itemSpec{} + // For each, scan PARSERS, POSTOVERFLOWS... and COLLECTIONS last for _, scan := range ItemTypes { // cpath: top-level item directory, either downloaded or installed items. @@ -423,11 +494,46 @@ func (h *Hub) syncDir(dir string) error { // explicit check for non existing directory, avoid spamming log.Debug if _, err = os.Stat(cpath); os.IsNotExist(err) { - h.logger.Tracef("directory %s doesn't exist, skipping", cpath) continue } - if err = filepath.WalkDir(cpath, h.itemVisit); err != nil { + // wrap itemVisit to collect spec results + specCollector := func(path string, f os.DirEntry, err error) error { + spec, err := h.itemVisit(path, f, err) + if err == nil && spec != nil { + specs = append(specs, spec) + } + + if errors.Is(err, ErrSkipPath) { + return nil + } + + return err + } + + if err = filepath.WalkDir(cpath, specCollector); err != nil { + return err + } + } + + // add non-local items first, so they can find the place in the index + // before it's overridden by local items in case of name collision + for _, spec := range specs { + if spec.local { + continue + } + + if err := h.addItemFromSpec(spec); err != nil { + return err + } + } + + for _, spec := range specs { + if !spec.local { + continue + } + + if err := h.addItemFromSpec(spec); err != nil { return err } } @@ -463,13 +569,14 @@ func 
removeDuplicates(sl []string) []string { // localSync updates the hub state with downloaded, installed and local items. func (h *Hub) localSync() error { - err := h.syncDir(h.local.InstallDir) - if err != nil { - return fmt.Errorf("failed to scan %s: %w", h.local.InstallDir, err) + // add downloaded files first, so they can find the place in the index + // before it's overridden by local items in case of name collision + if err := h.syncDir(h.local.HubDir); err != nil { + return fmt.Errorf("failed to sync %s: %w", h.local.HubDir, err) } - if err = h.syncDir(h.local.HubDir); err != nil { - return fmt.Errorf("failed to scan %s: %w", h.local.HubDir, err) + if err := h.syncDir(h.local.InstallDir); err != nil { + return fmt.Errorf("failed to sync %s: %w", h.local.InstallDir, err) } warnings := make([]string, 0) diff --git a/pkg/cwversion/component/component.go b/pkg/cwversion/component/component.go index 7ed596525e0..2c6374e4bb7 100644 --- a/pkg/cwversion/component/component.go +++ b/pkg/cwversion/component/component.go @@ -8,20 +8,21 @@ package component // Built is a map of all the known components, and whether they are built-in or not. 
// This is populated as soon as possible by the respective init() functions var Built = map[string]bool{ - "datasource_appsec": false, - "datasource_cloudwatch": false, - "datasource_docker": false, - "datasource_file": false, - "datasource_journalctl": false, - "datasource_k8s-audit": false, - "datasource_kafka": false, - "datasource_kinesis": false, - "datasource_loki": false, - "datasource_s3": false, - "datasource_syslog": false, - "datasource_wineventlog": false, - "datasource_http": false, - "cscli_setup": false, + "datasource_appsec": false, + "datasource_cloudwatch": false, + "datasource_docker": false, + "datasource_file": false, + "datasource_journalctl": false, + "datasource_k8s-audit": false, + "datasource_kafka": false, + "datasource_kinesis": false, + "datasource_loki": false, + "datasource_s3": false, + "datasource_syslog": false, + "datasource_wineventlog": false, + "datasource_victorialogs": false, + "datasource_http": false, + "cscli_setup": false, } func Register(name string) { diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 2cb7de13e18..87d855444e7 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -2,6 +2,7 @@ package cwversion import ( "fmt" + "regexp" "strings" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -57,10 +58,19 @@ func FullString() string { return ret } -// VersionStrip remove the tag from the version string, used to match with a hub branch -func VersionStrip() string { - ret := strings.Split(version.Version, "~") - ret = strings.Split(ret[0], "-") +// StripTags removes any tag (-rc, ~foo3, .r1, etc) from a version string +func StripTags(version string) string { + reVersion := regexp.MustCompile(`^v(\d+)\.(\d+)\.(\d+)`) + ret := reVersion.FindStringSubmatch(version) + + if len(ret) == 0 { + return version + } return ret[0] } + +// BaseVersion returns the version number used to match a hub branch. 
+func BaseVersion() string { + return StripTags(version.Version) +} diff --git a/pkg/cwversion/version_test.go b/pkg/cwversion/version_test.go new file mode 100644 index 00000000000..13293d4a479 --- /dev/null +++ b/pkg/cwversion/version_test.go @@ -0,0 +1,68 @@ +package cwversion + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStripTags(t *testing.T) { + tests := []struct { + name string + input string + want string + }{ + { + name: "no tag, valid version v1.2.3", + input: "v1.2.3", + want: "v1.2.3", + }, + { + name: "tag appended with dash", + input: "v1.2.3-rc1", + want: "v1.2.3", + }, + { + name: "tag appended with tilde", + input: "v1.2.3~foo3", + want: "v1.2.3", + }, + { + name: "tag appended with dot", + input: "v1.2.3.r1", + want: "v1.2.3", + }, + { + name: "tag appended directly", + input: "v1.2.3r1", + want: "v1.2.3", + }, + { + name: "multiple digits in version", + input: "v10.20.30-rc2", + want: "v10.20.30", + }, + { + name: "invalid version (no 'v' prefix)", + input: "1.2.3-tag", + want: "1.2.3-tag", + }, + { + name: "random string", + input: "some-random-string", + want: "some-random-string", + }, + { + name: "freebsd pre-release", + input: "v1.6.5.r1", + want: "v1.6.5", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := StripTags(tt.input) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/database/alertfilter.go b/pkg/database/alertfilter.go new file mode 100644 index 00000000000..9e8cf53a450 --- /dev/null +++ b/pkg/database/alertfilter.go @@ -0,0 +1,258 @@ +package database + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + 
"github.com/crowdsecurity/crowdsec/pkg/types" +) + +func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { + /* the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok && v[0] == "false" { + *predicates = append(*predicates, alert.SimulatedEQ(false)) + } +} + +func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { + if _, ok := filter["origin"]; ok { + filter["include_capi"] = []string{"true"} + } +} + +func handleScopeFilter(scope string, predicates *[]predicate.Alert) { + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + + *predicates = append(*predicates, alert.SourceScopeEQ(scope)) +} + +func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { + duration, err := ParseDuration(value) + if err != nil { + return fmt.Errorf("while parsing duration: %w", err) + } + + timePoint := time.Now().UTC().Add(-duration) + if timePoint.IsZero() { + return fmt.Errorf("empty time now() - %s", timePoint.String()) + } + + switch param { + case "since": + *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) + case "created_before": + *predicates = append(*predicates, alert.CreatedAtLTE(timePoint)) + case "until": + *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) + } + + return nil +} + +func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = 
append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } +} + +func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip < query.start_ip + alert.HasDecisionsWith(decision.StartIPLT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix <= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip > query.end_ip + alert.HasDecisionsWith(decision.EndIPGT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix >= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), + ), + ), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip > query.start_ip + alert.HasDecisionsWith(decision.StartIPGT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix >= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip < query.end_ip + alert.HasDecisionsWith(decision.EndIPLT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix <= query.end_suffix + 
alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), + ), + ), + )) + } +} + +func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { + if ip_sz == 4 { + handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz == 16 { + handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz != 0 { + return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + + return nil +} + +func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error { + if value == "false" { + *predicates = append(*predicates, alert.And( + // do not show alerts with active decisions having origin CAPI or lists + alert.And( + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), + ), + alert.Not( + alert.And( + // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI + alert.Not(alert.HasDecisions()), + alert.Or( + alert.SourceScopeHasPrefix(types.ListOrigin+":"), + alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), + ), + ), + ), + )) + } else if value != "true" { + log.Errorf("invalid bool '%s' for include_capi", value) + } + + return nil +} + +func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { + predicates := make([]predicate.Alert, 0) + + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + hasActiveDecision bool + ip_sz int + ) + + contains := true + + /*if contains is true, return bans that *contains* the given value (value is the inner) + else, return bans that are *contained* by the given value (value is the outer)*/ + + handleSimulatedFilter(filter, &predicates) + handleOriginFilter(filter, &predicates) + + for param, value := range filter { + switch param { + case "contains": + contains, err = 
strconv.ParseBool(value[0]) + if err != nil { + return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) + } + case "scope": + handleScopeFilter(value[0], &predicates) + case "value": + predicates = append(predicates, alert.SourceValueEQ(value[0])) + case "scenario": + predicates = append(predicates, alert.HasDecisionsWith(decision.ScenarioEQ(value[0]))) + case "ip", "range": + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) + if err != nil { + return nil, err + } + case "since", "created_before", "until": + if err := handleTimeFilters(param, value[0], &predicates); err != nil { + return nil, err + } + case "decision_type": + predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) + case "origin": + predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) + case "include_capi": // allows to exclude one or more specific origins + if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { + return nil, err + } + case "has_active_decision": + if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { + return nil, errors.Wrapf(ParseType, "'%s' is not a boolean: %s", value[0], err) + } + + if hasActiveDecision { + predicates = append(predicates, alert.HasDecisionsWith(decision.UntilGTE(time.Now().UTC()))) + } else { + predicates = append(predicates, alert.Not(alert.HasDecisions())) + } + case "limit": + continue + case "sort": + continue + case "simulated": + continue + case "with_decisions": + continue + default: + return nil, errors.Wrapf(InvalidFilter, "Filter parameter '%s' is unknown (=%s)", param, value[0]) + } + } + + if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { + return nil, err + } + + return predicates, nil +} + +func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { + preds, err := AlertPredicatesFromFilter(filter) 
+ if err != nil { + return nil, err + } + + return alerts.Where(preds...), nil +} diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index ede9c89fe9a..107abcbb1d0 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -20,7 +20,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" - "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -32,6 +31,14 @@ const ( maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered ) +func rollbackOnError(tx *ent.Tx, err error, msg string) error { + if rbErr := tx.Rollback(); rbErr != nil { + log.Errorf("rollback error: %v", rbErr) + } + + return fmt.Errorf("%s: %w", msg, err) +} + // CreateOrUpdateAlert is specific to PAPI : It checks if alert already exists, otherwise inserts it // if alert already exists, it checks it associated decisions already exists // if some associated decisions are missing (ie. 
previous insert ended up in error) it inserts them @@ -285,12 +292,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models duration, err := time.ParseDuration(*decisionItem.Duration) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, errors.Wrapf(ParseDurationFail, "decision duration '%+v' : %s", *decisionItem.Duration, err) + return 0,0,0, rollbackOnError(txClient, err, "parsing decision duration") } if decisionItem.Scope == nil { @@ -302,12 +304,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" { sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, errors.Wrapf(InvalidIPOrRange, "invalid addr/range %s : %s", *decisionItem.Value, err) + return 0, 0, 0, rollbackOnError(txClient, err, "invalid ip addr/range") } } @@ -349,12 +346,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models decision.ValueIn(deleteChunk...), )).Exec(ctx) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("while deleting older community blocklist decisions: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "deleting older community blocklist decisions") } deleted += deletedDecisions @@ -365,12 +357,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models for _, builderChunk := range builderChunks { insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - 
log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("while bulk creating decisions: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "bulk creating decisions") } inserted += len(insertedDecisions) @@ -380,12 +367,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models err = txClient.Commit() if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("error committing transaction: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "error committing transaction") } return alertRef.ID, inserted, deleted, nil @@ -660,7 +642,8 @@ func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner * break } - if sqliteErr, ok := err.(sqlite3.Error); ok { + var sqliteErr sqlite3.Error + if errors.As(err, &sqliteErr) { if sqliteErr.Code == sqlite3.ErrBusy { // sqlite3.Error{ // Code: 5, @@ -727,247 +710,6 @@ func (c *Client) CreateAlert(ctx context.Context, machineID string, alertList [] return alertIDs, nil } -func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { - /* the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ - if v, ok := filter["simulated"]; ok && v[0] == "false" { - *predicates = append(*predicates, alert.SimulatedEQ(false)) - } -} - -func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { - if _, ok := filter["origin"]; ok { - filter["include_capi"] = []string{"true"} - } -} - -func handleScopeFilter(scope string, predicates *[]predicate.Alert) { - if strings.ToLower(scope) == "ip" { - scope = types.Ip - } else if strings.ToLower(scope) == "range" { - scope = types.Range - } - - *predicates = append(*predicates, alert.SourceScopeEQ(scope)) -} - -func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { 
- duration, err := ParseDuration(value) - if err != nil { - return fmt.Errorf("while parsing duration: %w", err) - } - - timePoint := time.Now().UTC().Add(-duration) - if timePoint.IsZero() { - return fmt.Errorf("empty time now() - %s", timePoint.String()) - } - - switch param { - case "since": - *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) - case "created_before": - *predicates = append(*predicates, alert.CreatedAtLTE(timePoint)) - case "until": - *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) - } - - return nil -} - -func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { - if contains { // decision contains {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } else { // decision is contained within {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } -} - -func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { - if contains { // decision contains {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip < query.start_ip - alert.HasDecisionsWith(decision.StartIPLT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix <= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), - ), - ), - alert.Or( - // decision.end_ip > query.end_ip - 
alert.HasDecisionsWith(decision.EndIPGT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix >= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), - ), - ), - )) - } else { // decision is contained within {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip > query.start_ip - alert.HasDecisionsWith(decision.StartIPGT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix >= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), - ), - ), - alert.Or( - // decision.end_ip < query.end_ip - alert.HasDecisionsWith(decision.EndIPLT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix <= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), - ), - ), - )) - } -} - -func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { - if ip_sz == 4 { - handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) - } else if ip_sz == 16 { - handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) - } else if ip_sz != 0 { - return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) - } - - return nil -} - -func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error { - if value == "false" { - *predicates = append(*predicates, alert.And( - // do not show alerts with active decisions having origin CAPI or lists - alert.And( - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), - ), - 
alert.Not( - alert.And( - // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI - alert.Not(alert.HasDecisions()), - alert.Or( - alert.SourceScopeHasPrefix(types.ListOrigin+":"), - alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), - ), - ), - ), - )) - } else if value != "true" { - log.Errorf("invalid bool '%s' for include_capi", value) - } - - return nil -} - -func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { - predicates := make([]predicate.Alert, 0) - - var ( - err error - start_ip, start_sfx, end_ip, end_sfx int64 - hasActiveDecision bool - ip_sz int - ) - - contains := true - - /*if contains is true, return bans that *contains* the given value (value is the inner) - else, return bans that are *contained* by the given value (value is the outer)*/ - - handleSimulatedFilter(filter, &predicates) - handleOriginFilter(filter, &predicates) - - for param, value := range filter { - switch param { - case "contains": - contains, err = strconv.ParseBool(value[0]) - if err != nil { - return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) - } - case "scope": - handleScopeFilter(value[0], &predicates) - case "value": - predicates = append(predicates, alert.SourceValueEQ(value[0])) - case "scenario": - predicates = append(predicates, alert.HasDecisionsWith(decision.ScenarioEQ(value[0]))) - case "ip", "range": - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) - if err != nil { - return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) - } - case "since", "created_before", "until": - if err := handleTimeFilters(param, value[0], &predicates); err != nil { - return nil, err - } - case "decision_type": - predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) - case "origin": - predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) - case "include_capi": // 
allows to exclude one or more specific origins - if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { - return nil, err - } - case "has_active_decision": - if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { - return nil, errors.Wrapf(ParseType, "'%s' is not a boolean: %s", value[0], err) - } - - if hasActiveDecision { - predicates = append(predicates, alert.HasDecisionsWith(decision.UntilGTE(time.Now().UTC()))) - } else { - predicates = append(predicates, alert.Not(alert.HasDecisions())) - } - case "limit": - continue - case "sort": - continue - case "simulated": - continue - case "with_decisions": - continue - default: - return nil, errors.Wrapf(InvalidFilter, "Filter parameter '%s' is unknown (=%s)", param, value[0]) - } - } - - if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { - return nil, err - } - - return predicates, nil -} - -func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { - preds, err := AlertPredicatesFromFilter(filter) - if err != nil { - return nil, err - } - - return alerts.Where(preds...), nil -} - func (c *Client) AlertsCountPerScenario(ctx context.Context, filters map[string][]string) (map[string]int, error) { var res []struct { Scenario string diff --git a/pkg/database/database.go b/pkg/database/database.go index bb41dd3b645..80479710751 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -68,7 +68,7 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro return nil, err // unsupported database caught here } - if config.Type == "sqlite" { + if config.Type == "sqlite" && config.DbPath != ":memory:" { /*if it's the first startup, we want to touch and chmod file*/ if _, err = os.Stat(config.DbPath); os.IsNotExist(err) { f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0o600) diff --git a/pkg/database/errors.go b/pkg/database/errors.go 
index 77f92707e51..e0223be95b8 100644 --- a/pkg/database/errors.go +++ b/pkg/database/errors.go @@ -14,7 +14,6 @@ var ( ParseTimeFail = errors.New("unable to parse time") ParseDurationFail = errors.New("unable to parse duration") MarshalFail = errors.New("unable to serialize") - UnmarshalFail = errors.New("unable to parse") BulkError = errors.New("unable to insert bulk") ParseType = errors.New("unable to parse type") InvalidIPOrRange = errors.New("invalid ip address / range") diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 8f646ddc961..4a3a93a406c 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -222,7 +222,7 @@ func (c *Client) FlushAgentsAndBouncers(ctx context.Context, agentsCfg *csconfig return nil } -func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) error { +func (c *Client) FlushAlerts(ctx context.Context, maxAge string, maxItems int) error { var ( deletedByAge int deletedByNbItem int @@ -247,22 +247,22 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e c.Log.Debugf("FlushAlerts (Total alerts): %d", totalAlerts) - if MaxAge != "" { + if maxAge != "" { filter := map[string][]string{ - "created_before": {MaxAge}, + "created_before": {maxAge}, } nbDeleted, err := c.DeleteAlertWithFilter(ctx, filter) if err != nil { c.Log.Warningf("FlushAlerts (max age): %s", err) - return fmt.Errorf("unable to flush alerts with filter until=%s: %w", MaxAge, err) + return fmt.Errorf("unable to flush alerts with filter until=%s: %w", maxAge, err) } c.Log.Debugf("FlushAlerts (deleted max age alerts): %d", nbDeleted) deletedByAge = nbDeleted } - if MaxItems > 0 { + if maxItems > 0 { // We get the highest id for the alerts // We subtract MaxItems to avoid deleting alerts that are not old enough // This gives us the oldest alert that we want to keep @@ -282,7 +282,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e } if len(lastAlert) != 0 { - maxid := 
lastAlert[0].ID - MaxItems + maxid := lastAlert[0].ID - maxItems c.Log.Debugf("FlushAlerts (max id): %d", maxid) @@ -299,12 +299,12 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e if deletedByNbItem > 0 { c.Log.Infof("flushed %d/%d alerts because the max number of alerts has been reached (%d max)", - deletedByNbItem, totalAlerts, MaxItems) + deletedByNbItem, totalAlerts, maxItems) } if deletedByAge > 0 { c.Log.Infof("flushed %d/%d alerts because they were created %s ago or more", - deletedByAge, totalAlerts, MaxAge) + deletedByAge, totalAlerts, maxAge) } return nil diff --git a/pkg/database/machines.go b/pkg/database/machines.go index d8c02825312..1293633ed9e 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -34,14 +34,6 @@ func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - var heartbeat time.Time - - if len(baseMetrics.Metrics) == 0 { - heartbeat = time.Now().UTC() - } else { - heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) - } - hubState := map[string][]schema.ItemState{} for itemType, items := range hubItems { hubState[itemType] = []schema.ItemState{} @@ -61,7 +53,6 @@ func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, SetOsname(*os.Name). SetOsversion(*os.Version). SetFeatureflags(features). - SetLastHeartbeat(heartbeat). SetHubstate(hubState). SetDatasources(datasources). 
Save(ctx) diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index bc8f78dc203..bd385bec194 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -145,25 +145,25 @@ func (t *tree) processEvents(parserResults ParserResults) { } func (t *tree) processBuckets(bucketPour BucketPourInfo) { - for bname, evtlist := range bucketPour { - for _, evt := range evtlist { - if evt.Line.Raw == "" { + for bname, events := range bucketPour { + for i := range events { + if events[i].Line.Raw == "" { continue } // it might be bucket overflow being reprocessed, skip this - if _, ok := t.state[evt.Line.Time]; !ok { - t.state[evt.Line.Time] = make(map[string]map[string]ParserResult) - t.assoc[evt.Line.Time] = evt.Line.Raw + if _, ok := t.state[events[i].Line.Time]; !ok { + t.state[events[i].Line.Time] = make(map[string]map[string]ParserResult) + t.assoc[events[i].Line.Time] = events[i].Line.Raw } - // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // there is a trick: to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered - if _, ok := t.state[evt.Line.Time]["buckets"]; !ok { - t.state[evt.Line.Time]["buckets"] = make(map[string]ParserResult) + if _, ok := t.state[events[i].Line.Time]["buckets"]; !ok { + t.state[events[i].Line.Time]["buckets"] = make(map[string]ParserResult) } - t.state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true} + t.state[events[i].Line.Time]["buckets"][bname] = ParserResult{Success: true} } } } diff --git a/pkg/emoji/emoji.go b/pkg/emoji/emoji.go index 51295a85411..9b939249bf0 100644 --- a/pkg/emoji/emoji.go +++ b/pkg/emoji/emoji.go @@ -11,4 +11,8 @@ const ( QuestionMark = "\u2753" // ❓ RedCircle = "\U0001f534" // 🔴 Warning = "\u26a0\ufe0f" // ⚠️ + InboxTray = "\U0001f4e5" // 📥 + DownArrow = "\u2b07" // ⬇️ + Wastebasket = "\U0001f5d1" // 
🗑 + Sync = "\U0001F504" // 🔄 official name is Anticlockwise Downwards and Upwards Open Circle Arrows and I'm not even joking ) diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index ccd67b27a49..900bd7824a8 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -12,42 +12,46 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var CTIUrl = "https://cti.api.crowdsec.net" -var CTIUrlSuffix = "/v2/smoke/" -var CTIApiKey = "" +var ( + CTIUrl = "https://cti.api.crowdsec.net" + CTIUrlSuffix = "/v2/smoke/" + CTIApiKey = "" +) // this is set for non-recoverable errors, such as 403 when querying API or empty API key var CTIApiEnabled = false // when hitting quotas or auth errors, we temporarily disable the API -var CTIBackOffUntil time.Time -var CTIBackOffDuration = 5 * time.Minute +var ( + CTIBackOffUntil time.Time + CTIBackOffDuration = 5 * time.Minute +) var ctiClient *cticlient.CrowdsecCTIClient -func InitCrowdsecCTI(Key *string, TTL *time.Duration, Size *int, LogLevel *log.Level) error { - if Key == nil || *Key == "" { +func InitCrowdsecCTI(key *string, ttl *time.Duration, size *int, logLevel *log.Level) error { + if key == nil || *key == "" { log.Warningf("CTI API key not set or empty, CTI will not be available") return cticlient.ErrDisabled } - CTIApiKey = *Key - if Size == nil { - Size = new(int) - *Size = 1000 + CTIApiKey = *key + if size == nil { + size = new(int) + *size = 1000 } - if TTL == nil { - TTL = new(time.Duration) - *TTL = 5 * time.Minute + if ttl == nil { + ttl = new(time.Duration) + *ttl = 5 * time.Minute } clog := log.New() if err := types.ConfigureLogger(clog); err != nil { return fmt.Errorf("while configuring datasource logger: %w", err) } - if LogLevel != nil { - clog.SetLevel(*LogLevel) + if logLevel != nil { + clog.SetLevel(*logLevel) } subLogger := clog.WithField("type", "crowdsec-cti") - CrowdsecCTIInitCache(*Size, *TTL) + CrowdsecCTIInitCache(*size, *ttl) ctiClient = 
cticlient.NewCrowdsecCTIClient(cticlient.WithAPIKey(CTIApiKey), cticlient.WithLogger(subLogger)) CTIApiEnabled = true return nil @@ -62,8 +66,10 @@ func ShutdownCrowdsecCTI() { } // Cache for responses -var CTICache gcache.Cache -var CacheExpiration time.Duration +var ( + CTICache gcache.Cache + CacheExpiration time.Duration +) func CrowdsecCTIInitCache(size int, ttl time.Duration) { CTICache = gcache.New(size).LRU().Build() diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 2e47af6d1de..d44b8fc97e1 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -21,35 +21,35 @@ var IndentStep = 4 // we use this struct to store the output of the expr runtime type OpOutput struct { - Code string //relevant code part + Code string // relevant code part - CodeDepth int //level of nesting + CodeDepth int // level of nesting BlockStart bool BlockEnd bool - Func bool //true if it's a function call + Func bool // true if it's a function call FuncName string Args []string FuncResults []string // - Comparison bool //true if it's a comparison + Comparison bool // true if it's a comparison Negated bool Left string Right string // - JumpIf bool //true if it's conditional jump + JumpIf bool // true if it's conditional jump IfTrue bool IfFalse bool // - Condition bool //true if it's a condition + Condition bool // true if it's a condition ConditionIn bool ConditionContains bool - //used for comparisons, conditional jumps and conditions + // used for comparisons, conditional jumps and conditions StrConditionResult string - ConditionResult *bool //should always be present for conditions + ConditionResult *bool // should always be present for conditions // - Finalized bool //used when a node is finalized, we already fetched result from next OP + Finalized bool // used when a node is finalized, we already fetched result from next OP } func (o *OpOutput) String() string { @@ -57,6 +57,7 @@ func (o *OpOutput) String() string { if o.Code != "" { 
ret += fmt.Sprintf("[%s]", o.Code) } + ret += " " switch { @@ -68,19 +69,24 @@ func (o *OpOutput) String() string { if indent < 0 { indent = 0 } + ret = fmt.Sprintf("%*cBLOCK_END [%s]", indent, ' ', o.Code) + if o.StrConditionResult != "" { ret += fmt.Sprintf(" -> %s", o.StrConditionResult) } + return ret - //A block end can carry a value, for example if it's a count, any, all etc. XXX + // A block end can carry a value, for example if it's a count, any, all etc. XXX case o.Func: return ret + fmt.Sprintf("%s(%s) = %s", o.FuncName, strings.Join(o.Args, ", "), strings.Join(o.FuncResults, ", ")) case o.Comparison: if o.Negated { ret += "NOT " } + ret += fmt.Sprintf("%s == %s -> %s", o.Left, o.Right, o.StrConditionResult) + return ret case o.ConditionIn: return ret + fmt.Sprintf("%s in %s -> %s", o.Args[0], o.Args[1], o.StrConditionResult) @@ -91,18 +97,23 @@ func (o *OpOutput) String() string { if *o.ConditionResult { return ret + "OR -> false" } + return ret + "OR -> true" } + return ret + "OR(?)" case o.JumpIf && o.IfFalse: if o.ConditionResult != nil { if *o.ConditionResult { return ret + "AND -> true" } + return ret + "AND -> false" } + return ret + "AND(?)" } + return ret + "" } @@ -135,7 +146,7 @@ func (erp ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string { func autoQuote(v any) string { switch x := v.(type) { case string: - //let's avoid printing long strings. it can happen ie. when we are debugging expr with `File()` or similar helpers + // let's avoid printing long strings. it can happen ie. 
when we are debugging expr with `File()` or similar helpers if len(x) > 40 { return fmt.Sprintf("%q", x[:40]+"...") } else { @@ -147,35 +158,40 @@ func autoQuote(v any) string { } func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, parts []string, outputs []OpOutput) ([]OpOutput, error) { - IdxOut := len(outputs) prevIdxOut := 0 currentDepth := 0 - //when there is a function call or comparison, we need to wait for the next instruction to get the result and "finalize" the previous one + // when there is a function call or comparison, we need to wait for the next instruction to get the result and "finalize" the previous one if IdxOut > 0 { prevIdxOut = IdxOut - 1 currentDepth = outputs[prevIdxOut].CodeDepth + if outputs[prevIdxOut].Func && !outputs[prevIdxOut].Finalized { stack := vm.Stack num_items := 1 + for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { outputs[prevIdxOut].FuncResults = append(outputs[prevIdxOut].FuncResults, autoQuote(stack[i])) num_items-- } + outputs[prevIdxOut].Finalized = true } else if (outputs[prevIdxOut].Comparison || outputs[prevIdxOut].Condition) && !outputs[prevIdxOut].Finalized { stack := vm.Stack outputs[prevIdxOut].StrConditionResult = fmt.Sprintf("%+v", stack) + if val, ok := stack[0].(bool); ok { outputs[prevIdxOut].ConditionResult = new(bool) *outputs[prevIdxOut].ConditionResult = val } + outputs[prevIdxOut].Finalized = true } } erp.Logger.Tracef("[STEP %d:%s] (stack:%+v) (parts:%+v) {depth:%d}", ip, parts[1], vm.Stack, parts, currentDepth) + out := OpOutput{} out.CodeDepth = currentDepth out.Code = erp.extractCode(ip, program) @@ -188,27 +204,28 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpEnd": out.CodeDepth -= IndentStep out.BlockEnd = true - //OpEnd can carry value, if it's any/all/count etc. + // OpEnd can carry value, if it's any/all/count etc. 
if len(vm.Stack) > 0 { out.StrConditionResult = fmt.Sprintf("%v", vm.Stack) } + outputs = append(outputs, out) case "OpNot": - //negate the previous condition + // negate the previous condition outputs[prevIdxOut].Negated = true - case "OpTrue": //generated when possible ? (1 == 1) + case "OpTrue": // generated when possible ? (1 == 1) out.Condition = true out.ConditionResult = new(bool) *out.ConditionResult = true out.StrConditionResult = "true" outputs = append(outputs, out) - case "OpFalse": //generated when possible ? (1 != 1) + case "OpFalse": // generated when possible ? (1 != 1) out.Condition = true out.ConditionResult = new(bool) *out.ConditionResult = false out.StrConditionResult = "false" outputs = append(outputs, out) - case "OpJumpIfTrue": //OR + case "OpJumpIfTrue": // OR stack := vm.Stack out.JumpIf = true out.IfTrue = true @@ -218,78 +235,88 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.ConditionResult = new(bool) *out.ConditionResult = val } + outputs = append(outputs, out) - case "OpJumpIfFalse": //AND + case "OpJumpIfFalse": // AND stack := vm.Stack out.JumpIf = true out.IfFalse = true out.StrConditionResult = fmt.Sprintf("%v", stack[0]) + if val, ok := stack[0].(bool); ok { out.ConditionResult = new(bool) *out.ConditionResult = val } + outputs = append(outputs, out) - case "OpCall1": //Op for function calls + case "OpCall1": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 1 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - case "OpCall2": //Op for function calls + case "OpCall2": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 2 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - case "OpCall3": //Op for 
function calls + case "OpCall3": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 3 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - //double check OpCallFast and OpCallTyped + // double check OpCallFast and OpCallTyped case "OpCallFast", "OpCallTyped": // - case "OpCallN": //Op for function calls with more than 3 args + case "OpCallN": // Op for function calls with more than 3 args out.Func = true out.FuncName = parts[1] stack := vm.Stack - //for OpCallN, we get the number of args + // for OpCallN, we get the number of args if len(program.Arguments) >= ip { nb_args := program.Arguments[ip] if nb_args > 0 { - //we need to skip the top item on stack + // we need to skip the top item on stack for i := len(stack) - 2; i >= 0 && nb_args > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) nb_args-- } } - } else { //let's blindly take the items on stack + } else { // let's blindly take the items on stack for _, val := range vm.Stack { out.Args = append(out.Args, autoQuote(val)) } } + outputs = append(outputs, out) - case "OpEqualString", "OpEqual", "OpEqualInt": //comparisons + case "OpEqualString", "OpEqual", "OpEqualInt": // comparisons stack := vm.Stack out.Comparison = true out.Left = autoQuote(stack[0]) out.Right = autoQuote(stack[1]) outputs = append(outputs, out) - case "OpIn": //in operator + case "OpIn": // in operator stack := vm.Stack out.Condition = true out.ConditionIn = true @@ -299,7 +326,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.Args = append(out.Args, autoQuote(stack[0])) out.Args = append(out.Args, autoQuote(stack[1])) outputs = append(outputs, out) - case "OpContains": //kind OpIn , but reverse + case "OpContains": // kind OpIn , but reverse stack := vm.Stack out.Condition = true out.ConditionContains = true @@ -310,6 +337,7 @@ func (erp 
ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.Args = append(out.Args, autoQuote(stack[1])) outputs = append(outputs, out) } + return outputs, nil } @@ -319,10 +347,12 @@ func (erp ExprRuntimeDebug) ipSeek(ip int) []string { if len(parts) == 0 { continue } + if parts[0] == strconv.Itoa(ip) { return parts } } + return nil } @@ -330,19 +360,23 @@ func Run(program *vm.Program, env interface{}, logger *log.Entry, debug bool) (a if debug { dbgInfo, ret, err := RunWithDebug(program, env, logger) DisplayExprDebug(program, dbgInfo, logger, ret) + return ret, err } + return expr.Run(program, env) } func cleanTextForDebug(text string) string { text = strings.Join(strings.Fields(text), " ") text = strings.Trim(text, " \t\n") + return text } func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry, ret any) { logger.Debugf("dbg(result=%v): %s", ret, cleanTextForDebug(string(program.Source()))) + for _, output := range outputs { logger.Debugf("%s", output.String()) } @@ -360,46 +394,55 @@ func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]Op erp.Lines = lines go func() { - //We must never return until the execution of the program is done + // We must never return until the execution of the program is done var err error + erp.Logger.Tracef("[START] ip 0") + ops := erp.ipSeek(0) if ops == nil { log.Warningf("error while debugging expr: failed getting ops for ip 0") } + if outputs, err = erp.ipDebug(0, vm, program, ops, outputs); err != nil { log.Warningf("error while debugging expr: error while debugging at ip 0") } + vm.Step() + for ip := range vm.Position() { ops := erp.ipSeek(ip) if ops == nil { erp.Logger.Tracef("[DONE] ip %d", ip) break } + if outputs, err = erp.ipDebug(ip, vm, program, ops, outputs); err != nil { log.Warningf("error while debugging expr: error while debugging at ip %d", ip) } + vm.Step() } }() var return_error error + ret, err := vm.Run(program, env) - //if the expr runtime 
failed, we don't need to wait for the debug to finish + // if the expr runtime failed, we don't need to wait for the debug to finish if err != nil { return_error = err } - //the overall result of expression is the result of last op ? + // the overall result of expression is the result of last op ? if len(outputs) > 0 { lastOutIdx := len(outputs) if lastOutIdx > 0 { lastOutIdx -= 1 } + switch val := ret.(type) { case bool: log.Tracef("completing with bool %t", ret) - //if outputs[lastOutIdx].Comparison { + // if outputs[lastOutIdx].Comparison { outputs[lastOutIdx].StrConditionResult = fmt.Sprintf("%v", ret) outputs[lastOutIdx].ConditionResult = new(bool) *outputs[lastOutIdx].ConditionResult = val @@ -412,5 +455,6 @@ func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]Op } else { log.Tracef("no output from expr runtime") } + return outputs, ret, return_error } diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 32144454084..0852d7ab2de 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -1,3 +1,4 @@ +//go:build expr_debug package exprhelpers import ( diff --git a/pkg/exprhelpers/debuggerstub_test.go b/pkg/exprhelpers/debuggerstub_test.go new file mode 100644 index 00000000000..cc41c793b47 --- /dev/null +++ b/pkg/exprhelpers/debuggerstub_test.go @@ -0,0 +1,10 @@ +//go:build !expr_debug +package exprhelpers + +import ( + "testing" +) + +func TestFailWithoutExprDebug(t *testing.T) { + t.Fatal("To test pkg/exprhelpers, you need the expr_debug build tag") +} diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index f2eb208ebfa..932db4b7da4 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -3,7 +3,6 @@ package exprhelpers import ( "context" "errors" - "os" "testing" "time" @@ -26,15 +25,12 @@ const TestFolder = "tests" func getDBClient(t *testing.T) *database.Client { t.Helper() - dbPath, err := os.CreateTemp("", 
"*sqlite") - require.NoError(t, err) - ctx := context.Background() testDBClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ Type: "sqlite", DbName: "crowdsec", - DbPath: dbPath.Name(), + DbPath: ":memory:", }) require.NoError(t, err) diff --git a/pkg/exprhelpers/geoip.go b/pkg/exprhelpers/geoip.go index fb0c344d884..6d8813dc0ad 100644 --- a/pkg/exprhelpers/geoip.go +++ b/pkg/exprhelpers/geoip.go @@ -14,7 +14,6 @@ func GeoIPEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) city, err := geoIPCityReader.City(parsedIP) - if err != nil { return nil, err } @@ -31,7 +30,6 @@ func GeoIPASNEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) asn, err := geoIPASNReader.ASN(parsedIP) - if err != nil { return nil, err } @@ -50,7 +48,6 @@ func GeoIPRangeEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) rangeIP, ok, err := geoIPRangeReader.LookupNetwork(parsedIP, &dummy) - if err != nil { return nil, err } diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 9bc991a8f2d..d0f6f2cfe22 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -29,8 +29,6 @@ import ( "github.com/umahmood/haversine" "github.com/wasilibs/go-re2" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cache" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -129,32 +127,36 @@ func Init(databaseClient *database.Client) error { dataFileRegex = make(map[string][]*regexp.Regexp) dataFileRe2 = make(map[string][]*re2.Regexp) dbClient = databaseClient + XMLCacheInit() + return nil } -func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { +func RegexpCacheInit(filename string, cacheCfg types.DataSource) error { // cache is explicitly disabled - if CacheCfg.Cache != nil && !*CacheCfg.Cache { + if cacheCfg.Cache != nil && !*cacheCfg.Cache { return nil } // cache is implicitly disabled if no cache config is provided - if 
CacheCfg.Strategy == nil && CacheCfg.TTL == nil && CacheCfg.Size == nil { + if cacheCfg.Strategy == nil && cacheCfg.TTL == nil && cacheCfg.Size == nil { return nil } // cache is enabled - if CacheCfg.Size == nil { - CacheCfg.Size = ptr.Of(50) + size := 50 + if cacheCfg.Size != nil { + size = *cacheCfg.Size } - gc := gcache.New(*CacheCfg.Size) + gc := gcache.New(size) - if CacheCfg.Strategy == nil { - CacheCfg.Strategy = ptr.Of("LRU") + strategy := "LRU" + if cacheCfg.Strategy != nil { + strategy = *cacheCfg.Strategy } - switch *CacheCfg.Strategy { + switch strategy { case "LRU": gc = gc.LRU() case "LFU": @@ -162,11 +164,11 @@ func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { case "ARC": gc = gc.ARC() default: - return fmt.Errorf("unknown cache strategy '%s'", *CacheCfg.Strategy) + return fmt.Errorf("unknown cache strategy '%s'", strategy) } - if CacheCfg.TTL != nil { - gc.Expiration(*CacheCfg.TTL) + if cacheCfg.TTL != nil { + gc.Expiration(*cacheCfg.TTL) } cache := gc.Build() @@ -240,6 +242,7 @@ func Distinct(params ...any) (any, error) { if rt := reflect.TypeOf(params[0]).Kind(); rt != reflect.Slice && rt != reflect.Array { return nil, nil } + array := params[0].([]interface{}) if array == nil { return []interface{}{}, nil @@ -254,6 +257,7 @@ func Distinct(params ...any) (any, error) { ret = append(ret, val) } } + return ret, nil } @@ -282,8 +286,10 @@ func flatten(args []interface{}, v reflect.Value) []interface{} { } func existsInFileMaps(filename string, ftype string) (bool, error) { - ok := false var err error + + ok := false + switch ftype { case "regex", "regexp": if fflag.Re2RegexpInfileSupport.IsEnabled() { @@ -296,10 +302,11 @@ func existsInFileMaps(filename string, ftype string) (bool, error) { default: err = fmt.Errorf("unknown data type '%s' for : '%s'", ftype, filename) } + return ok, err } -//Expr helpers +// Expr helpers // func Get(arr []string, index int) string { func Get(params ...any) (any, error) { @@ -315,10 +322,12 @@ 
func Get(params ...any) (any, error) { func Atof(params ...any) (any, error) { x := params[0].(string) log.Debugf("debug atof %s", x) + ret, err := strconv.ParseFloat(x, 64) if err != nil { log.Warningf("Atof : can't convert float '%s' : %v", x, err) } + return ret, nil } @@ -340,22 +349,28 @@ func Distance(params ...any) (any, error) { long1 := params[1].(string) lat2 := params[2].(string) long2 := params[3].(string) + lat1f, err := strconv.ParseFloat(lat1, 64) if err != nil { log.Warningf("lat1 is not a float : %v", err) + return 0.0, fmt.Errorf("lat1 is not a float : %v", err) } + long1f, err := strconv.ParseFloat(long1, 64) if err != nil { log.Warningf("long1 is not a float : %v", err) + return 0.0, fmt.Errorf("long1 is not a float : %v", err) } + lat2f, err := strconv.ParseFloat(lat2, 64) if err != nil { log.Warningf("lat2 is not a float : %v", err) return 0.0, fmt.Errorf("lat2 is not a float : %v", err) } + long2f, err := strconv.ParseFloat(long2, 64) if err != nil { log.Warningf("long2 is not a float : %v", err) @@ -363,7 +378,7 @@ func Distance(params ...any) (any, error) { return 0.0, fmt.Errorf("long2 is not a float : %v", err) } - //either set of coordinates is 0,0, return 0 to avoid FPs + // either set of coordinates is 0,0, return 0 to avoid FPs if (lat1f == 0.0 && long1f == 0.0) || (lat2f == 0.0 && long2f == 0.0) { log.Warningf("one of the coordinates is 0,0, returning 0") return 0.0, nil @@ -373,6 +388,7 @@ func Distance(params ...any) (any, error) { second := haversine.Coord{Lat: lat2f, Lon: long2f} _, km := haversine.Distance(first, second) + return km, nil } diff --git a/pkg/fflag/crowdsec.go b/pkg/fflag/crowdsec.go index d42d6a05ef6..ea397bfe5bc 100644 --- a/pkg/fflag/crowdsec.go +++ b/pkg/fflag/crowdsec.go @@ -2,12 +2,14 @@ package fflag var Crowdsec = FeatureRegister{EnvPrefix: "CROWDSEC_FEATURE_"} -var CscliSetup = &Feature{Name: "cscli_setup", Description: "Enable cscli setup command (service detection)"} -var DisableHttpRetryBackoff = 
&Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"} -var ChunkedDecisionsStream = &Feature{Name: "chunked_decisions_stream", Description: "Enable chunked decisions stream"} -var PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client", State: DeprecatedState} -var Re2GrokSupport = &Feature{Name: "re2_grok_support", Description: "Enable RE2 support for GROK patterns"} -var Re2RegexpInfileSupport = &Feature{Name: "re2_regexp_in_file_support", Description: "Enable RE2 support for RegexpInFile expr helper"} +var ( + CscliSetup = &Feature{Name: "cscli_setup", Description: "Enable cscli setup command (service detection)"} + DisableHttpRetryBackoff = &Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"} + ChunkedDecisionsStream = &Feature{Name: "chunked_decisions_stream", Description: "Enable chunked decisions stream"} + PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client", State: DeprecatedState} + Re2GrokSupport = &Feature{Name: "re2_grok_support", Description: "Enable RE2 support for GROK patterns"} + Re2RegexpInfileSupport = &Feature{Name: "re2_regexp_in_file_support", Description: "Enable RE2 support for RegexpInFile expr helper"} +) func RegisterAllFeatures() error { err := Crowdsec.RegisterFeature(CscliSetup) diff --git a/pkg/fflag/features_test.go b/pkg/fflag/features_test.go index 481e86573e8..bf8ddeca8fd 100644 --- a/pkg/fflag/features_test.go +++ b/pkg/fflag/features_test.go @@ -351,11 +351,9 @@ func TestSetFromYaml(t *testing.T) { } func TestSetFromYamlFile(t *testing.T) { - tmpfile, err := os.CreateTemp("", "test") + tmpfile, err := os.CreateTemp(t.TempDir(), "test") require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - // write the config file _, err = tmpfile.WriteString("- experimental1") require.NoError(t, err) @@ -376,11 +374,13 @@ func TestGetEnabledFeatures(t *testing.T) { feat1, err := fr.GetFeature("new_standard") 
require.NoError(t, err) - feat1.Set(true) + err = feat1.Set(true) + require.Error(t, err, "the flag is deprecated") feat2, err := fr.GetFeature("experimental1") require.NoError(t, err) - feat2.Set(true) + err = feat2.Set(true) + require.NoError(t, err) expected := []string{ "experimental1", diff --git a/pkg/hubops/colorize.go b/pkg/hubops/colorize.go new file mode 100644 index 00000000000..3af2aecab93 --- /dev/null +++ b/pkg/hubops/colorize.go @@ -0,0 +1,38 @@ +package hubops + +import ( + "strings" + + "github.com/fatih/color" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +// colorizeItemName splits the input string on "/" and colorizes the second part. +func colorizeItemName(fullname string) string { + parts := strings.SplitN(fullname, "/", 2) + if len(parts) == 2 { + bold := color.New(color.Bold) + author := parts[0] + name := parts[1] + return author + "/" + bold.Sprint(name) + } + return fullname +} + +func colorizeOpType(opType string) string { + switch opType { + case (&DownloadCommand{}).OperationType(): + return emoji.InboxTray + " " + color.BlueString(opType) + case (&EnableCommand{}).OperationType(): + return emoji.CheckMarkButton + " " + color.GreenString(opType) + case (&DisableCommand{}).OperationType(): + return emoji.CrossMark + " " + color.RedString(opType) + case (&PurgeCommand{}).OperationType(): + return emoji.Wastebasket + " " + color.RedString(opType) + case (&DataRefreshCommand{}).OperationType(): + return emoji.Sync + " " + opType + } + + return opType +} diff --git a/pkg/hubops/datarefresh.go b/pkg/hubops/datarefresh.go new file mode 100644 index 00000000000..985db8c1a11 --- /dev/null +++ b/pkg/hubops/datarefresh.go @@ -0,0 +1,75 @@ +package hubops + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// XXX: TODO: temporary for hubtests, but will have to go. +// DownloadDataIfNeeded downloads the data set for the item. 
+func DownloadDataIfNeeded(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, force bool) (bool, error) { + itemFilePath, err := item.InstallPath() + if err != nil { + return false, err + } + + itemFile, err := os.Open(itemFilePath) + if err != nil { + return false, fmt.Errorf("while opening %s: %w", itemFilePath, err) + } + + defer itemFile.Close() + + needReload, err := downloadDataSet(ctx, hub.GetDataDir(), force, itemFile) + if err != nil { + return needReload, fmt.Errorf("while downloading data for %s: %w", itemFilePath, err) + } + + return needReload, nil +} + +// DataRefreshCommand updates the data files associated with the installed hub items. +type DataRefreshCommand struct { + Force bool +} + +func NewDataRefreshCommand(force bool) *DataRefreshCommand { + return &DataRefreshCommand{Force: force} +} + +func (c *DataRefreshCommand) Prepare(plan *ActionPlan) (bool, error) { + // we can't prepare much at this point because we don't know which data files yet, + // and items needs to be downloaded/updated + // evertyhing will be done in Run() + return true, nil +} + +func (c *DataRefreshCommand) Run(ctx context.Context, plan *ActionPlan) error { + for _, itemType := range cwhub.ItemTypes { + for _, item := range plan.hub.GetInstalledByType(itemType, true) { + needReload, err := DownloadDataIfNeeded(ctx, plan.hub, item, c.Force) + if err != nil { + return err + } + + plan.ReloadNeeded = plan.ReloadNeeded || needReload + } + } + + return nil +} + +func (c *DataRefreshCommand) OperationType() string { + return "check & update data files" +} + +func (c *DataRefreshCommand) ItemType() string { + return "" +} + +func (c *DataRefreshCommand) Detail() string { + return "" +} diff --git a/pkg/hubops/disable.go b/pkg/hubops/disable.go new file mode 100644 index 00000000000..b6368e85036 --- /dev/null +++ b/pkg/hubops/disable.go @@ -0,0 +1,121 @@ +package hubops + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// 
RemoveInstallLink removes the item's symlink between the installation directory and the local hub. +func RemoveInstallLink(i *cwhub.Item) error { + syml, err := i.InstallPath() + if err != nil { + return err + } + + stat, err := os.Lstat(syml) + if err != nil { + return err + } + + // if it's managed by hub, it's a symlink to csconfig.GConfig.hub.HubDir / ... + if stat.Mode()&os.ModeSymlink == 0 { + return fmt.Errorf("%s isn't managed by hub", i.Name) + } + + hubpath, err := os.Readlink(syml) + if err != nil { + return fmt.Errorf("while reading symlink: %w", err) + } + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if hubpath != src { + return fmt.Errorf("%s isn't managed by hub", i.Name) + } + + if err := os.Remove(syml); err != nil { + return fmt.Errorf("while removing symlink: %w", err) + } + + return nil +} + +// DisableCommand uninstalls an item and its dependencies, ensuring that no +// sub-item is left in an inconsistent state. +type DisableCommand struct { + Item *cwhub.Item + Force bool +} + +func NewDisableCommand(item *cwhub.Item, force bool) *DisableCommand { + return &DisableCommand{Item: item, Force: force} +} + +func (c *DisableCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + plan.Warning(i.FQName() + " is a local item, please delete manually") + return false, nil + } + + if i.State.Tainted && !c.Force { + return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) + } + + if !i.State.Installed { + return false, nil + } + + subsToRemove, err := i.SafeToRemoveDeps() + if err != nil { + return false, err + } + + for _, sub := range subsToRemove { + if !sub.State.Installed { + continue + } + + if err := plan.AddCommand(NewDisableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + return true, nil +} + +func (c *DisableCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("disabling " + colorizeItemName(i.FQName())) + + 
if err := RemoveInstallLink(i); err != nil { + return fmt.Errorf("while disabling %s: %w", i.FQName(), err) + } + + plan.ReloadNeeded = true + + i.State.Installed = false + i.State.Tainted = false + + return nil +} + +func (c *DisableCommand) OperationType() string { + return "disable" +} + +func (c *DisableCommand) ItemType() string { + return c.Item.Type +} + +func (c *DisableCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubops/doc.go b/pkg/hubops/doc.go new file mode 100644 index 00000000000..b87a42653bc --- /dev/null +++ b/pkg/hubops/doc.go @@ -0,0 +1,45 @@ +/* +Package hubops is responsible for managing the local hub (items and data files) for CrowdSec. + +The index file itself (.index.json) is still managed by pkg/cwhub, which also provides the Hub +and Item structs. + +The hubops package is mostly used by cscli for the "cscli install/remove/upgrade ..." commands. + +It adopts a command-based pattern: a Plan contains a sequence of Commands. Both Plan and Command +have separate preparation and execution methods. + + - Command Interface: + The Command interface defines the contract for all operations that can be + performed on hub items. Each operation implements the Prepare and Run + methods, allowing for pre-execution setup and actual execution logic. + + - ActionPlan: + ActionPlan serves as a container for a sequence of Commands. It manages the + addition of commands, handles dependencies between them, and orchestrates their + execution. ActionPlan also provides a mechanism for interactive confirmation and dry-run. + +To perform operations on hub items, create an ActionPlan and add the desired +Commands to it. Once all commands are added, execute the ActionPlan to perform +the operations in the correct order, handling dependencies and user confirmations. + +Example: + + hub := cwhub.NewHub(...) 
+ plan := hubops.NewActionPlan(hub) + + downloadCmd := hubops.NewDownloadCommand(item, force) + if err := plan.AddCommand(downloadCmd); err != nil { + logrus.Fatalf("Failed to add download command: %v", err) + } + + enableCmd := hubops.NewEnableCommand(item, force) + if err := plan.AddCommand(enableCmd); err != nil { + logrus.Fatalf("Failed to add enable command: %v", err) + } + + if err := plan.Execute(ctx, confirm, dryRun, verbose); err != nil { + logrus.Fatalf("Failed to execute action plan: %v", err) + } +*/ +package hubops diff --git a/pkg/hubops/download.go b/pkg/hubops/download.go new file mode 100644 index 00000000000..72aed542115 --- /dev/null +++ b/pkg/hubops/download.go @@ -0,0 +1,212 @@ +package hubops + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/fatih/color" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/downloader" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// DownloadCommand handles the downloading of hub items. +// It ensures that items are fetched from the hub (or from the index file if it also has content) +// managing dependencies and verifying the integrity of downloaded content. +// This is used by "cscli install" and "cscli upgrade". +// Tainted items require the force parameter, local items are skipped. +type DownloadCommand struct { + Item *cwhub.Item + Force bool + contentProvider cwhub.ContentProvider +} + +func NewDownloadCommand(item *cwhub.Item, contentProvider cwhub.ContentProvider, force bool) *DownloadCommand { + return &DownloadCommand{Item: item, Force: force, contentProvider: contentProvider} +} + +func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + plan.Info(i.FQName() + " - not downloading local item") + return false, nil + } + + // XXX: if it's tainted do we upgrade the dependencies anyway? 
+ if i.State.Tainted && !c.Force { + plan.Warning(i.FQName() + " is tainted, use '--force' to overwrite") + return false, nil + } + + toDisable := make(map[*cwhub.Item]struct{}) + + var disableKeys []*cwhub.Item + + if i.State.Installed { + for sub := range i.CurrentDependencies().SubItems(plan.hub) { + disableKeys = append(disableKeys, sub) + toDisable[sub] = struct{}{} + } + } + + for sub := range i.LatestDependencies().SubItems(plan.hub) { + if err := plan.AddCommand(NewDownloadCommand(sub, c.contentProvider, c.Force)); err != nil { + return false, err + } + + if i.State.Installed { + // ensure the _new_ dependencies are installed too + if err := plan.AddCommand(NewEnableCommand(sub, c.Force)); err != nil { + return false, err + } + + for _, sub2 := range disableKeys { + if sub2 == sub { + delete(toDisable, sub) + } + } + } + } + + for sub := range toDisable { + if err := plan.AddCommand(NewDisableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if i.State.Downloaded && i.State.UpToDate { + return false, nil + } + + return true, nil +} + +// The DataSet is a list of data sources required by an item (built from the data: section in the yaml). +type DataSet struct { + Data []types.DataSource `yaml:"data,omitempty"` +} + +// downloadDataSet downloads all the data files for an item. +func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader io.Reader) (bool, error) { + needReload := false + + dec := yaml.NewDecoder(reader) + + for { + data := &DataSet{} + + if err := dec.Decode(data); err != nil { + if errors.Is(err, io.EOF) { + break + } + + return needReload, fmt.Errorf("while reading file: %w", err) + } + + for _, dataS := range data.Data { + // XXX: check context cancellation + destPath, err := cwhub.SafePath(dataFolder, dataS.DestPath) + if err != nil { + return needReload, err + } + + d := downloader. + New(). + WithHTTPClient(cwhub.HubClient). + ToFile(destPath). + CompareContent(). 
+ BeforeRequest(func(req *http.Request) { + fmt.Printf("downloading %s\n", req.URL) + }). + WithLogger(logrus.WithField("url", dataS.SourceURL)) + + if !force { + d = d.WithLastModified(). + WithShelfLife(7 * 24 * time.Hour) + } + + downloaded, err := d.Download(ctx, dataS.SourceURL) + if err != nil { + return needReload, fmt.Errorf("while getting data: %w", err) + } + + needReload = needReload || downloaded + } + } + + return needReload, nil +} + +func (c *DownloadCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Printf("downloading %s\n", colorizeItemName(i.FQName())) + + // ensure that target file is within target dir + finalPath, err := i.DownloadPath() + if err != nil { + return err + } + + downloaded, _, err := i.FetchContentTo(ctx, c.contentProvider, finalPath) + if err != nil { + return fmt.Errorf("%s: %w", i.FQName(), err) + } + + if downloaded { + plan.ReloadNeeded = true + } + + i.State.Downloaded = true + i.State.Tainted = false + i.State.UpToDate = true + + // read content to get the list of data files + reader, err := os.Open(finalPath) + if err != nil { + return fmt.Errorf("while opening %s: %w", finalPath, err) + } + + defer reader.Close() + + needReload, err := downloadDataSet(ctx, plan.hub.GetDataDir(), c.Force, reader) + if err != nil { + return fmt.Errorf("while downloading data for %s: %w", i.FileName, err) + } + + if needReload { + plan.ReloadNeeded = true + } + + return nil +} + +func (c *DownloadCommand) OperationType() string { + return "download" +} + +func (c *DownloadCommand) ItemType() string { + return c.Item.Type +} + +func (c *DownloadCommand) Detail() string { + i := c.Item + + version := color.YellowString(i.Version) + + if i.State.Downloaded { + version = c.Item.State.LocalVersion + " -> " + color.YellowString(i.Version) + } + + return colorizeItemName(c.Item.Name) + " (" + version + ")" +} diff --git a/pkg/hubops/enable.go b/pkg/hubops/enable.go new file mode 100644 index 00000000000..40de40c8662 
--- /dev/null +++ b/pkg/hubops/enable.go @@ -0,0 +1,113 @@ +package hubops + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// EnableCommand installs a hub item and its dependencies. +// In case this command is called during an upgrade, the sub-items list it taken from the +// latest version in the index, otherwise from the version that is currently installed. +type EnableCommand struct { + Item *cwhub.Item + Force bool + FromLatest bool +} + +func NewEnableCommand(item *cwhub.Item, force bool) *EnableCommand { + return &EnableCommand{Item: item, Force: force} +} + +func (c *EnableCommand) Prepare(plan *ActionPlan) (bool, error) { + var dependencies cwhub.Dependencies + + i := c.Item + + if c.FromLatest { + // we are upgrading + dependencies = i.LatestDependencies() + } else { + dependencies = i.CurrentDependencies() + } + + for sub := range dependencies.SubItems(plan.hub) { + if err := plan.AddCommand(NewEnableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if i.State.Installed { + return false, nil + } + + return true, nil +} + +// CreateInstallLink creates a symlink between the actual config file at hub.HubDir and hub.ConfigDir. 
+func CreateInstallLink(i *cwhub.Item) error { + dest, err := i.InstallPath() + if err != nil { + return err + } + + destDir := filepath.Dir(dest) + if err = os.MkdirAll(destDir, os.ModePerm); err != nil { + return fmt.Errorf("while creating %s: %w", destDir, err) + } + + if _, err = os.Lstat(dest); err == nil { + // already exists + return nil + } else if !os.IsNotExist(err) { + return fmt.Errorf("failed to stat %s: %w", dest, err) + } + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if err = os.Symlink(src, dest); err != nil { + return fmt.Errorf("while creating symlink from %s to %s: %w", src, dest, err) + } + + return nil +} + +func (c *EnableCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("enabling " + colorizeItemName(i.FQName())) + + if !i.State.Downloaded { + // XXX: this a warning? + return fmt.Errorf("can't enable %s: not downloaded", i.FQName()) + } + + if err := CreateInstallLink(i); err != nil { + return fmt.Errorf("while enabling %s: %w", i.FQName(), err) + } + + plan.ReloadNeeded = true + + i.State.Installed = true + i.State.Tainted = false + + return nil +} + +func (c *EnableCommand) OperationType() string { + return "enable" +} + +func (c *EnableCommand) ItemType() string { + return c.Item.Type +} + +func (c *EnableCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubops/plan.go b/pkg/hubops/plan.go new file mode 100644 index 00000000000..eb99056fab3 --- /dev/null +++ b/pkg/hubops/plan.go @@ -0,0 +1,250 @@ +package hubops + +import ( + "context" + "fmt" + "os" + "slices" + "strings" + + "github.com/AlecAivazis/survey/v2" + "github.com/fatih/color" + isatty "github.com/mattn/go-isatty" + + "github.com/crowdsecurity/go-cs-lib/slicetools" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// Command represents an operation that can be performed on a CrowdSec hub item. 
+// +// Each concrete implementation defines a Prepare() method to check for errors and preconditions, +// decide which sub-commands are required (like installing dependencies) and add them to the action plan. +type Command interface { + // Prepare sets up the command for execution within the given + // ActionPlan. It may add additional commands to the ActionPlan based + // on dependencies or prerequisites. Returns a boolean indicating + // whether the command execution should be skipped (it can be + // redundant, like installing something that is already installed) and + // an error if the preparation failed. + // NOTE: Returning an error will bubble up from the plan.AddCommand() method, + // but Prepare() might already have modified the plan's command slice. + Prepare(*ActionPlan) (bool, error) + + // Run executes the command within the provided context and ActionPlan. + // It performs the actual operation and returns an error if execution fails. + // NOTE: Returning an error will currently stop the execution of the action plan. + Run(ctx context.Context, plan *ActionPlan) error + + // OperationType returns a unique string representing the type of operation to perform + // (e.g., "download", "enable"). + OperationType() string + + // ItemType returns the type of item the operation is performed on + // (e.g., "collections"). Used in confirmation prompt and dry-run. + ItemType() string + + // Detail provides further details on the operation, + // such as the item's name and version. + Detail() string +} + +// UniqueKey generates a unique string key for a Command based on its operation type, item type, and detail. +// Is is used to avoid adding duplicate commands to the action plan. +func UniqueKey(c Command) string { + return fmt.Sprintf("%s:%s:%s", c.OperationType(), c.ItemType(), c.Detail()) +} + +// ActionPlan orchestrates the sequence of operations (Commands) to manage CrowdSec hub items. 
+type ActionPlan struct { + // hold the list of Commands to be executed as part of the action plan. + // If a command is skipped (i.e. calling Prepare() returned false), it won't be included in the slice. + commands []Command + + // Tracks unique commands + commandsTracker map[string]struct{} + + // A reference to the Hub instance, required for dependency lookup. + hub *cwhub.Hub + + // Indicates whether a reload of the CrowdSec service is required after executing the action plan. + ReloadNeeded bool +} + +func NewActionPlan(hub *cwhub.Hub) *ActionPlan { + return &ActionPlan{ + hub: hub, + commandsTracker: make(map[string]struct{}), + } +} + +func (p *ActionPlan) AddCommand(c Command) error { + ok, err := c.Prepare(p) + if err != nil { + return err + } + + if ok { + key := UniqueKey(c) + if _, exists := p.commandsTracker[key]; !exists { + p.commands = append(p.commands, c) + p.commandsTracker[key] = struct{}{} + } + } + + return nil +} + +func (p *ActionPlan) Info(msg string) { + fmt.Println(msg) +} + +func (p *ActionPlan) Warning(msg string) { + fmt.Printf("%s %s\n", color.YellowString("WARN"), msg) +} + +// Description returns a string representation of the action plan. +// If verbose is false, the operations are grouped by item type and operation type. +// If verbose is true, they are listed as they appear in the command slice. +func (p *ActionPlan) Description(verbose bool) string { + if verbose { + return p.verboseDescription() + } + + return p.compactDescription() +} + +func (p *ActionPlan) verboseDescription() string { + sb := strings.Builder{} + + // Here we display the commands in the order they will be executed. + for _, cmd := range p.commands { + sb.WriteString(colorizeOpType(cmd.OperationType()) + " " + cmd.ItemType() + ":" + cmd.Detail() + "\n") + } + + return sb.String() +} + +// describe the operations of a given type in a compact way. 
+func describe(opType string, desc map[string]map[string][]string, sb *strings.Builder) { + if _, ok := desc[opType]; !ok { + return + } + + sb.WriteString(colorizeOpType(opType) + "\n") + + // iterate cwhub.ItemTypes in reverse order, so we have collections first + for _, itemType := range slicetools.Backward(cwhub.ItemTypes) { + if desc[opType][itemType] == nil { + continue + } + + details := desc[opType][itemType] + // Sorting for user convenience, but it's not the same order the commands will be carried out. + slices.Sort(details) + + if itemType != "" { + sb.WriteString(" " + itemType + ": ") + } + + if len(details) != 0 { + sb.WriteString(strings.Join(details, ", ")) + sb.WriteString("\n") + } + } +} + +func (p *ActionPlan) compactDescription() string { + desc := make(map[string]map[string][]string) + + for _, cmd := range p.commands { + opType := cmd.OperationType() + itemType := cmd.ItemType() + detail := cmd.Detail() + + if _, ok := desc[opType]; !ok { + desc[opType] = make(map[string][]string) + } + + desc[opType][itemType] = append(desc[opType][itemType], detail) + } + + sb := strings.Builder{} + + // Enforce presentation order. 
+ + describe("download", desc, &sb) + delete(desc, "download") + describe("enable", desc, &sb) + delete(desc, "enable") + describe("disable", desc, &sb) + delete(desc, "disable") + describe("remove", desc, &sb) + delete(desc, "remove") + + for optype := range desc { + describe(optype, desc, &sb) + } + + return sb.String() +} + +func (p *ActionPlan) Confirm(verbose bool) (bool, error) { + if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) { + return true, nil + } + + fmt.Println("The following actions will be performed:\n" + p.Description(verbose)) + + var answer bool + + prompt := &survey.Confirm{ + Message: "Do you want to continue?", + Default: true, + } + + if err := survey.AskOne(prompt, &answer); err != nil { + return false, err + } + + fmt.Println() + + return answer, nil +} + +func (p *ActionPlan) Execute(ctx context.Context, confirm bool, dryRun bool, verbose bool) error { + var err error + + if len(p.commands) == 0 { + // XXX: show skipped commands, warnings? + fmt.Println("Nothing to do.") + return nil + } + + if dryRun { + fmt.Println("Action plan:\n" + p.Description(verbose)) + fmt.Println("Dry run, no action taken.") + + return nil + } + + if !confirm { + confirm, err = p.Confirm(verbose) + if err != nil { + return err + } + } + + if !confirm { + fmt.Println("Operation canceled.") + return nil + } + + for _, c := range p.commands { + if err := c.Run(ctx, p); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/hubops/purge.go b/pkg/hubops/purge.go new file mode 100644 index 00000000000..3b415b27428 --- /dev/null +++ b/pkg/hubops/purge.go @@ -0,0 +1,88 @@ +package hubops + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// PurgeCommand removes the downloaded content of a hub item, effectively +// removing it from the local system. This command also removes the sub-items +// but not the associated data files. 
+type PurgeCommand struct { + Item *cwhub.Item + Force bool +} + +func NewPurgeCommand(item *cwhub.Item, force bool) *PurgeCommand { + return &PurgeCommand{Item: item, Force: force} +} + +func (c *PurgeCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + // not downloaded, by definition + return false, nil + } + + if i.State.Tainted && !c.Force { + return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) + } + + subsToRemove, err := i.SafeToRemoveDeps() + if err != nil { + return false, err + } + + for _, sub := range subsToRemove { + if err := plan.AddCommand(NewPurgeCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if !i.State.Downloaded { + return false, nil + } + + return true, nil +} + +func (c *PurgeCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("purging " + colorizeItemName(i.FQName())) + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if err := os.Remove(src); err != nil { + if os.IsNotExist(err) { + return nil + } + + return fmt.Errorf("while removing file: %w", err) + } + + i.State.Downloaded = false + i.State.Tainted = false + i.State.UpToDate = false + + return nil +} + +func (c *PurgeCommand) OperationType() string { + return "purge (delete source)" +} + +func (c *PurgeCommand) ItemType() string { + return c.Item.Type +} + +func (c *PurgeCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index 93f5abaa879..6e5a11fff10 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -14,8 +14,8 @@ type HubTest struct { CrowdSecPath string CscliPath string HubPath string - HubTestPath string //generic parser/scenario tests .tests - HubAppsecTestPath string //dir specific to appsec tests .appsec-tests + HubTestPath string // generic parser/scenario tests .tests + HubAppsecTestPath string // dir specific to appsec tests .appsec-tests 
HubIndexFile string TemplateConfigPath string TemplateProfilePath string @@ -25,8 +25,8 @@ type HubTest struct { NucleiTargetHost string AppSecHost string - HubIndex *cwhub.Hub - Tests []*HubTestItem + HubIndex *cwhub.Hub + Tests []*HubTestItem } const ( @@ -93,7 +93,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, nil) + hub, err := cwhub.NewHub(local, nil) if err != nil { return HubTest{}, err } @@ -130,7 +130,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, nil) + hub, err := cwhub.NewHub(local, nil) if err != nil { return HubTest{}, err } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index bc9c8955d0d..75895dc729b 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" "github.com/crowdsecurity/crowdsec/pkg/parser" ) @@ -211,7 +212,7 @@ func (t *HubTestItem) InstallHub() error { } // load installed hub - hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, nil) + hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil) if err != nil { return err } @@ -224,7 +225,7 @@ func (t *HubTestItem) InstallHub() error { // install data for parsers if needed for _, item := range hub.GetInstalledByType(cwhub.PARSERS, true) { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } @@ -233,7 +234,7 @@ func (t *HubTestItem) InstallHub() error { // install data for scenarios if needed for _, item := range hub.GetInstalledByType(cwhub.SCENARIOS, true) { - if err := item.DownloadDataIfNeeded(ctx, 
true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } @@ -242,7 +243,7 @@ func (t *HubTestItem) InstallHub() error { // install data for postoverflows if needed for _, item := range hub.GetInstalledByType(cwhub.POSTOVERFLOWS, true) { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } @@ -299,7 +300,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { crowdsecDaemon.Start() // wait for the appsec port to be available - if _, err := IsAlive(t.AppSecHost); err != nil { + if _, err = IsAlive(t.AppSecHost); err != nil { crowdsecLog, err2 := os.ReadFile(crowdsecLogFile) if err2 != nil { log.Errorf("unable to read crowdsec log file '%s': %s", crowdsecLogFile, err) @@ -318,7 +319,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } nucleiTargetHost := nucleiTargetParsedURL.Host - if _, err := IsAlive(nucleiTargetHost); err != nil { + if _, err = IsAlive(nucleiTargetHost); err != nil { return fmt.Errorf("target is down: %w", err) } @@ -381,7 +382,7 @@ func createDirs(dirs []string) error { return nil } -func (t *HubTestItem) RunWithLogFile() error { +func (t *HubTestItem) RunWithLogFile(patternDir string) error { testPath := filepath.Join(t.HubTestPath, t.Name) if _, err := os.Stat(testPath); os.IsNotExist(err) { return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) @@ -416,11 +417,9 @@ func (t *HubTestItem) RunWithLogFile() error { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) } - crowdsecPatternsFolder := csconfig.DefaultConfigPath("patterns") - // copy template patterns folder to runtime folder - if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); 
err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + if err = CopyDir(patternDir, t.RuntimePatternsPath); err != nil { + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", patternDir, t.RuntimePatternsPath, err) } // install the hub in the runtime folder @@ -565,7 +564,7 @@ func (t *HubTestItem) RunWithLogFile() error { return nil } -func (t *HubTestItem) Run() error { +func (t *HubTestItem) Run(patternDir string) error { var err error t.Success = false @@ -595,11 +594,9 @@ func (t *HubTestItem) Run() error { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) } - crowdsecPatternsFolder := csconfig.DefaultConfigPath("patterns") - // copy template patterns folder to runtime folder - if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + if err = CopyDir(patternDir, t.RuntimePatternsPath); err != nil { + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", patternDir, t.RuntimePatternsPath, err) } // create the appsec-configs dir @@ -633,9 +630,12 @@ func (t *HubTestItem) Run() error { } if t.Config.LogFile != "" { - return t.RunWithLogFile() - } else if t.Config.NucleiTemplate != "" { + return t.RunWithLogFile(patternDir) + } + + if t.Config.NucleiTemplate != "" { return t.RunWithNucleiTemplate() } + return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name) } diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index be4fdbdb5e6..90d952506d1 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -270,7 +270,7 @@ func (p *ParserAssert) AutoGenParserAssert() string { continue } - base := fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Unmarshaled["%s"]`, stage, parser, pidx, ukey) + base := 
fmt.Sprintf("results[%q][%q][%d].Evt.Unmarshaled[%q]", stage, parser, pidx, ukey) for _, line := range p.buildUnmarshaledAssert(base, uval) { ret += line @@ -295,11 +295,11 @@ func (p *ParserAssert) buildUnmarshaledAssert(ekey string, eval interface{}) []s switch val := eval.(type) { case map[string]interface{}: for k, v := range val { - ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf(`%s["%s"]`, ekey, k), v)...) + ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf("%s[%q]", ekey, k), v)...) } case map[interface{}]interface{}: for k, v := range val { - ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf(`%s["%s"]`, ekey, k), v)...) + ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf("%s[%q]", ekey, k), v)...) } case []interface{}: case string: diff --git a/pkg/leakybucket/bayesian.go b/pkg/leakybucket/bayesian.go index 357d51f597b..30e1b396ef8 100644 --- a/pkg/leakybucket/bayesian.go +++ b/pkg/leakybucket/bayesian.go @@ -31,9 +31,9 @@ type BayesianBucket struct { DumbProcessor } -func updateProbability(prior, probGivenEvil, ProbGivenBenign float32) float32 { +func updateProbability(prior, probGivenEvil, probGivenBenign float32) float32 { numerator := probGivenEvil * prior - denominator := numerator + ProbGivenBenign*(1-prior) + denominator := numerator + probGivenBenign*(1-prior) return numerator / denominator } diff --git a/pkg/leakybucket/blackhole.go b/pkg/leakybucket/blackhole.go index b12f169acd9..95ea18f723b 100644 --- a/pkg/leakybucket/blackhole.go +++ b/pkg/leakybucket/blackhole.go @@ -21,7 +21,6 @@ type Blackhole struct { func NewBlackhole(bucketFactory *BucketFactory) (*Blackhole, error) { duration, err := time.ParseDuration(bucketFactory.Blackhole) if err != nil { - bucketFactory.logger.Warning("Blackhole duration not valid, using 1h") return nil, fmt.Errorf("blackhole duration not valid '%s'", bucketFactory.Blackhole) } return &Blackhole{ @@ -49,7 +48,6 @@ func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) 
func(*Leaky, tmp = append(tmp, element) } else { leaky.logger.Debugf("%s left blackhole %s ago", element.key, leaky.Ovflw_ts.Sub(element.expiration)) - } } bl.hiddenKeys = tmp @@ -64,5 +62,4 @@ func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, leaky.logger.Debugf("Adding overflow to blackhole (%s)", leaky.First_ts) return alert, queue } - } diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go index e981551af8f..e7ea6e3e240 100644 --- a/pkg/leakybucket/bucket.go +++ b/pkg/leakybucket/bucket.go @@ -204,7 +204,6 @@ func FromFactory(bucketFactory BucketFactory) *Leaky { /* for now mimic a leak routine */ //LeakRoutine us the life of a bucket. It dies when the bucket underflows or overflows func LeakRoutine(leaky *Leaky) error { - var ( durationTickerChan = make(<-chan time.Time) durationTicker *time.Ticker @@ -317,7 +316,7 @@ func LeakRoutine(leaky *Leaky) error { alert, err = NewAlert(leaky, ofw) if err != nil { - log.Errorf("%s", err) + log.Error(err) } for _, f := range leaky.BucketConfig.processors { alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw) diff --git a/pkg/leakybucket/buckets.go b/pkg/leakybucket/buckets.go index cfe8d7c302e..72948da1ad7 100644 --- a/pkg/leakybucket/buckets.go +++ b/pkg/leakybucket/buckets.go @@ -25,5 +25,4 @@ func NewBuckets() *Buckets { func GetKey(bucketCfg BucketFactory, stackkey string) string { return fmt.Sprintf("%x", sha1.Sum([]byte(bucketCfg.Filter+stackkey+bucketCfg.Name))) - } diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 1da906cb555..90a751160cb 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -46,7 +46,7 @@ func TestBucket(t *testing.T) { InstallDataDir: testdata, } - hub, err := cwhub.NewHub(hubCfg, nil, nil) + hub, err := cwhub.NewHub(hubCfg, nil) require.NoError(t, err) err = hub.Load() @@ -139,14 +139,25 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb 
*tomb.Tomb) er t.Fatalf("failed to parse %s : %s", stagecfg, err) } - files := []string{} + scenarios := []*cwhub.Item{} + for _, x := range stages { - files = append(files, x.Filename) + // XXX: LoadBuckets should take an interface, BucketProvider ScenarioProvider or w/e + item := &cwhub.Item{ + Name: x.Filename, + State: cwhub.ItemState{ + LocalVersion: "", + LocalPath: x.Filename, + LocalHash: "", + }, + } + + scenarios = append(scenarios, item) } cscfg := &csconfig.CrowdsecServiceCfg{} - holders, response, err := LoadBuckets(cscfg, hub, files, tomb, buckets, false) + holders, response, err := LoadBuckets(cscfg, hub, scenarios, tomb, buckets, false) if err != nil { t.Fatalf("failed loading bucket : %s", err) } @@ -184,7 +195,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res } dec := json.NewDecoder(yamlFile) dec.DisallowUnknownFields() - //dec.SetStrict(true) + // dec.SetStrict(true) tf := TestFile{} err = dec.Decode(&tf) if err != nil { @@ -196,7 +207,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res } var latest_ts time.Time for _, in := range tf.Lines { - //just to avoid any race during ingestion of funny scenarios + // just to avoid any race during ingestion of funny scenarios time.Sleep(50 * time.Millisecond) var ts time.Time @@ -226,7 +237,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res time.Sleep(1 * time.Second) - //Read results from chan + // Read results from chan POLL_AGAIN: fails := 0 for fails < 2 { @@ -287,37 +298,37 @@ POLL_AGAIN: log.Tracef("Checking next expected result.") - //empty overflow + // empty overflow if out.Overflow.Alert == nil && expected.Overflow.Alert == nil { - //match stuff + // match stuff } else { if out.Overflow.Alert == nil || expected.Overflow.Alert == nil { log.Printf("Here ?") continue } - //Scenario + // Scenario if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario { log.Errorf("(scenario) %v != 
%v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) continue } log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) - //EventsCount + // EventsCount if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount { log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) continue } log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) - //Sources + // Sources if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) { log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) continue } log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) } - //Events + // Events // if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) { // log.Errorf("(Events %s != %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) // valid = false @@ -326,10 +337,10 @@ POLL_AGAIN: // log.Infof("(Events: %s == %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) // } - //CheckFailed: + // CheckFailed: log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx) - //don't do this at home : delete current element from list and redo + // don't do this at home : delete current element from list and redo results[eidx] = results[len(results)-1] results = results[:len(results)-1] tf.Results[ridx] = tf.Results[len(tf.Results)-1] diff --git a/pkg/leakybucket/conditional.go b/pkg/leakybucket/conditional.go index a203a639743..b3a84b07c21 100644 --- a/pkg/leakybucket/conditional.go +++ b/pkg/leakybucket/conditional.go @@ -11,8 +11,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var conditionalExprCache map[string]vm.Program -var conditionalExprCacheLock sync.Mutex +var ( 
+ conditionalExprCache map[string]vm.Program + conditionalExprCacheLock sync.Mutex +) type ConditionalOverflow struct { ConditionalFilter string diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index b8310b8cb17..13ce1df75ae 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -7,7 +7,6 @@ import ( "io" "os" "path/filepath" - "strings" "sync" "time" @@ -201,44 +200,41 @@ func ValidateFactory(bucketFactory *BucketFactory) error { return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) } - switch bucketFactory.ScopeType.Scope { - case types.Undefined: + return compileScopeFilter(bucketFactory) +} + +func compileScopeFilter(bucketFactory *BucketFactory) error { + if bucketFactory.ScopeType.Scope == types.Undefined { bucketFactory.ScopeType.Scope = types.Ip - case types.Ip: - case types.Range: - var ( - runTimeFilter *vm.Program - err error - ) + } + if bucketFactory.ScopeType.Scope == types.Ip { if bucketFactory.ScopeType.Filter != "" { - if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("error compiling the scope filter: %w", err) - } - - bucketFactory.ScopeType.RunTimeFilter = runTimeFilter + return errors.New("filter is not allowed for IP scope") } - default: - // Compile the scope filter - var ( - runTimeFilter *vm.Program - err error - ) + return nil + } - if bucketFactory.ScopeType.Filter != "" { - if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("error compiling the scope filter: %w", err) - } + if bucketFactory.ScopeType.Scope == types.Range && bucketFactory.ScopeType.Filter == "" { + return nil + } - bucketFactory.ScopeType.RunTimeFilter = runTimeFilter - } + if bucketFactory.ScopeType.Filter == "" { + return errors.New("filter 
is mandatory for non-IP, non-Range scope") + } + + runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + if err != nil { + return fmt.Errorf("error compiling the scope filter: %w", err) } + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter + return nil } -func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []string, tomb *tomb.Tomb, buckets *Buckets, orderEvent bool) ([]BucketFactory, chan types.Event, error) { +func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, scenarios []*cwhub.Item, tomb *tomb.Tomb, buckets *Buckets, orderEvent bool) ([]BucketFactory, chan types.Event, error) { var ( ret = []BucketFactory{} response chan types.Event @@ -246,18 +242,15 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str response = make(chan types.Event, 1) - for _, f := range files { - log.Debugf("Loading '%s'", f) + for _, item := range scenarios { + log.Debugf("Loading '%s'", item.State.LocalPath) - if !strings.HasSuffix(f, ".yaml") && !strings.HasSuffix(f, ".yml") { - log.Debugf("Skipping %s : not a yaml file", f) - continue - } + itemPath := item.State.LocalPath // process the yaml - bucketConfigurationFile, err := os.Open(f) + bucketConfigurationFile, err := os.Open(itemPath) if err != nil { - log.Errorf("Can't access leaky configuration file %s", f) + log.Errorf("Can't access leaky configuration file %s", itemPath) return nil, nil, err } @@ -271,8 +264,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = dec.Decode(&bucketFactory) if err != nil { if !errors.Is(err, io.EOF) { - log.Errorf("Bad yaml in %s: %v", f, err) - return nil, nil, fmt.Errorf("bad yaml in %s: %w", f, err) + log.Errorf("Bad yaml in %s: %v", itemPath, err) + return nil, nil, fmt.Errorf("bad yaml in %s: %w", itemPath, err) } log.Tracef("End of yaml file") @@ -288,7 +281,7 @@ func LoadBuckets(cscfg 
*csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str } // check compat if bucketFactory.FormatVersion == "" { - log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, f) + log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, itemPath) bucketFactory.FormatVersion = "1.0" } @@ -302,22 +295,17 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str continue } - bucketFactory.Filename = filepath.Clean(f) + bucketFactory.Filename = filepath.Clean(itemPath) bucketFactory.BucketName = seed.Generate() bucketFactory.ret = response - hubItem := hub.GetItemByPath(bucketFactory.Filename) - if hubItem == nil { - log.Errorf("scenario %s (%s) could not be found in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) - } else { - if cscfg.SimulationConfig != nil { - bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name) - } - - bucketFactory.ScenarioVersion = hubItem.State.LocalVersion - bucketFactory.hash = hubItem.State.LocalHash + if cscfg.SimulationConfig != nil { + bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(bucketFactory.Name) } + bucketFactory.ScenarioVersion = item.State.LocalVersion + bucketFactory.hash = item.State.LocalHash + bucketFactory.wgDumpState = buckets.wgDumpState bucketFactory.wgPour = buckets.wgPour @@ -348,7 +336,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Debug { clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err = types.ConfigureLogger(clog); err != nil { return fmt.Errorf("while creating bucket-specific logger: %w", err) } @@ -417,11 +405,22 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Distinct != "" { bucketFactory.logger.Tracef("Adding a non duplicate filter") bucketFactory.processors = append(bucketFactory.processors, &Uniq{}) + bucketFactory.logger.Infof("Compiling distinct '%s'", 
bucketFactory.Distinct) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + if err != nil { + return fmt.Errorf("invalid distinct '%s' in %s: %w", bucketFactory.Distinct, bucketFactory.Filename, err) + } } if bucketFactory.CancelOnFilter != "" { bucketFactory.logger.Tracef("Adding a cancel_on filter") bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{}) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + if err != nil { + return fmt.Errorf("invalid cancel_on '%s' in %s: %w", bucketFactory.CancelOnFilter, bucketFactory.Filename, err) + } } if bucketFactory.OverflowFilter != "" { @@ -451,6 +450,11 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.ConditionalOverflow != "" { bucketFactory.logger.Tracef("Adding conditional overflow") bucketFactory.processors = append(bucketFactory.processors, &ConditionalOverflow{}) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]interface{}{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...) 
+ if err != nil { + return fmt.Errorf("invalid condition '%s' in %s: %w", bucketFactory.ConditionalOverflow, bucketFactory.Filename, err) + } } if bucketFactory.BayesianThreshold != 0 { @@ -470,7 +474,9 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { } if data.Type == "regexp" { // cache only makes sense for regexp - exprhelpers.RegexpCacheInit(data.DestPath, *data) + if err := exprhelpers.RegexpCacheInit(data.DestPath, *data); err != nil { + bucketFactory.logger.Error(err.Error()) + } } } @@ -496,7 +502,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac return fmt.Errorf("can't parse state file %s: %w", file, err) } - for k, v := range state { + for k := range state { var tbucket *Leaky log.Debugf("Reloading bucket %s", k) @@ -509,30 +515,30 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac found := false for _, h := range bucketFactories { - if h.Name != v.Name { + if h.Name != state[k].Name { continue } log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) // check in which mode the bucket was - if v.Mode == types.TIMEMACHINE { + if state[k].Mode == types.TIMEMACHINE { tbucket = NewTimeMachine(h) - } else if v.Mode == types.LIVE { + } else if state[k].Mode == types.LIVE { tbucket = NewLeaky(h) } else { - log.Errorf("Unknown bucket type : %d", v.Mode) + log.Errorf("Unknown bucket type : %d", state[k].Mode) } /*Trying to restore queue state*/ - tbucket.Queue = v.Queue + tbucket.Queue = state[k].Queue /*Trying to set the limiter to the saved values*/ - tbucket.Limiter.Load(v.SerializedState) + tbucket.Limiter.Load(state[k].SerializedState) tbucket.In = make(chan *types.Event) tbucket.Mapkey = k tbucket.Signal = make(chan bool, 1) - tbucket.First_ts = v.First_ts - tbucket.Last_ts = v.Last_ts - tbucket.Ovflw_ts = v.Ovflw_ts - tbucket.Total_count = v.Total_count + tbucket.First_ts = state[k].First_ts + tbucket.Last_ts = state[k].Last_ts + tbucket.Ovflw_ts 
= state[k].Ovflw_ts + tbucket.Total_count = state[k].Total_count buckets.Bucket_map.Store(k, tbucket) h.tomb.Go(func() error { return LeakRoutine(tbucket) @@ -545,7 +551,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac } if !found { - return fmt.Errorf("unable to find holder for bucket %s: %s", k, spew.Sdump(v)) + return fmt.Errorf("unable to find holder for bucket %s: %s", k, spew.Sdump(state[k])) } } diff --git a/pkg/leakybucket/manager_load_test.go b/pkg/leakybucket/manager_load_test.go index 513f11ff373..6b40deb8c9e 100644 --- a/pkg/leakybucket/manager_load_test.go +++ b/pkg/leakybucket/manager_load_test.go @@ -51,93 +51,100 @@ func TestBadBucketsConfig(t *testing.T) { } func TestLeakyBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //leaky with bad capacity + CfgTests := []cfgTest{ + // leaky with bad capacity {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 0}, false, false}, - //leaky with empty leakspeed + // leaky with empty leakspeed {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1}, false, false}, - //leaky with missing filter + // leaky with missing filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s"}, false, true}, - //leaky with invalid leakspeed + // leaky with invalid leakspeed {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "abs", Filter: "true"}, false, false}, - //leaky with valid filter + // leaky with valid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, - //leaky with invalid filter + // leaky with invalid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "xu"}, false, true}, - //leaky with valid filter + // leaky with invalid uniq + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 
1, LeakSpeed: "1s", Filter: "true", Distinct: "foo"}, false, true}, + // leaky with valid uniq + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", Distinct: "evt.Parsed.foobar"}, true, true}, + // leaky with valid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, - //leaky with bad overflow filter + // leaky with bad overflow filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", OverflowFilter: "xu"}, false, true}, + // leaky with valid overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", OverflowFilter: "true"}, true, true}, + // leaky with invalid cancel_on filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", CancelOnFilter: "xu"}, false, true}, + // leaky with valid cancel_on filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", CancelOnFilter: "true"}, true, true}, + // leaky with invalid conditional overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", ConditionalOverflow: "xu"}, false, true}, + // leaky with valid conditional overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", ConditionalOverflow: "true"}, true, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestBlackholeConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //basic bh + CfgTests := []cfgTest{ + // basic bh {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true", Blackhole: "15s"}, true, true}, - //bad bh + // bad bh {BucketFactory{Name: "test", Description: "test1", 
Type: "trigger", Filter: "true", Blackhole: "abc"}, false, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestTriggerBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true"}, true, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestCounterBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Duration: "5s", Filter: "true"}, true, true}, - //missing duration + // missing duration {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Filter: "true"}, false, false}, - //bad duration + // bad duration {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Duration: "abc", Filter: "true"}, false, false}, - //capacity must be -1 + // capacity must be -1 {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: 0, Duration: "5s", Filter: "true"}, false, false}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestBayesianBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, true, true}, - //bad capacity + // bad capacity {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: 1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, 
ProbGivenBenign: 0.5}}}, false, false}, - //missing prior + // missing prior {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //missing threshold + // missing threshold {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //bad prior + // bad prior {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 1.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //bad threshold + // bad threshold {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 1.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 2858d8b5635..e6712e6e47e 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -17,9 +17,11 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var serialized map[string]Leaky -var BucketPourCache map[string][]types.Event -var BucketPourTrack bool +var ( + serialized map[string]Leaky + BucketPourCache map[string][]types.Event + BucketPourTrack bool +) /* The leaky routines lifecycle are based on "real" time. 
@@ -243,7 +245,6 @@ func PourItemToBucket(bucket *Leaky, holder BucketFactory, buckets *Buckets, par } func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder BucketFactory, expectMode int) (*Leaky, error) { - biface, ok := buckets.Bucket_map.Load(partitionKey) /* the bucket doesn't exist, create it !*/ @@ -283,9 +284,7 @@ func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder B var orderEvent map[string]*sync.WaitGroup func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buckets) (bool, error) { - var ( - ok, condition, poured bool - ) + var ok, condition, poured bool if BucketPourTrack { if BucketPourCache == nil { diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go index 01dd491ed41..b37e431fadf 100644 --- a/pkg/leakybucket/overflow_filter.go +++ b/pkg/leakybucket/overflow_filter.go @@ -36,10 +36,10 @@ func NewOverflowFilter(g *BucketFactory) (*OverflowFilter, error) { return &u, nil } -func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *types.Queue) (types.RuntimeAlert, *types.Queue) { +func (u *OverflowFilter) OnBucketOverflow(bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *types.Queue) (types.RuntimeAlert, *types.Queue) { return func(l *Leaky, s types.RuntimeAlert, q *types.Queue) (types.RuntimeAlert, *types.Queue) { el, err := exprhelpers.Run(u.FilterRuntime, map[string]interface{}{ - "queue": q, "signal": s, "leaky": l}, l.logger, Bucket.Debug) + "queue": q, "signal": s, "leaky": l}, l.logger, bucket.Debug) if err != nil { l.logger.Errorf("Failed running overflow filter: %s", err) return s, q diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 39b0e6a0ec4..9357caefaff 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -149,6 +149,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro leaky.logger.Tracef("Valid 
range from %s : %s", src.IP, src.Range) } } + if leaky.scopeType.Scope == types.Ip { src.Value = &src.IP } else if leaky.scopeType.Scope == types.Range { @@ -198,22 +199,24 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro func EventsFromQueue(queue *types.Queue) []*models.Event { events := []*models.Event{} - for _, evt := range queue.Queue { - if evt.Meta == nil { + qEvents := queue.GetQueue() + + for idx := range qEvents { + if qEvents[idx].Meta == nil { continue } meta := models.Meta{} // we want consistence - skeys := make([]string, 0, len(evt.Meta)) - for k := range evt.Meta { + skeys := make([]string, 0, len(qEvents[idx].Meta)) + for k := range qEvents[idx].Meta { skeys = append(skeys, k) } sort.Strings(skeys) for _, k := range skeys { - v := evt.Meta[k] + v := qEvents[idx].Meta[k] subMeta := models.MetaItems0{Key: k, Value: v} meta = append(meta, &subMeta) } @@ -223,15 +226,15 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { Meta: meta, } // either MarshaledTime is present and is extracted from log - if evt.MarshaledTime != "" { - tmpTimeStamp := evt.MarshaledTime + if qEvents[idx].MarshaledTime != "" { + tmpTimeStamp := qEvents[idx].MarshaledTime ovflwEvent.Timestamp = &tmpTimeStamp - } else if !evt.Time.IsZero() { // or .Time has been set during parse as time.Now().UTC() + } else if !qEvents[idx].Time.IsZero() { // or .Time has been set during parse as time.Now().UTC() ovflwEvent.Timestamp = new(string) - raw, err := evt.Time.MarshalText() + raw, err := qEvents[idx].Time.MarshalText() if err != nil { - log.Warningf("while serializing time '%s' : %s", evt.Time.String(), err) + log.Warningf("while serializing time '%s' : %s", qEvents[idx].Time.String(), err) } else { *ovflwEvent.Timestamp = string(raw) } @@ -253,8 +256,9 @@ func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Sour log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, 
leaky.scopeType.Scope, leaky.scopeType.Filter) - for _, evt := range queue.Queue { - srcs, err := SourceFromEvent(evt, leaky) + qEvents := queue.GetQueue() + for idx := range qEvents { + srcs, err := SourceFromEvent(qEvents[idx], leaky) if err != nil { return nil, "", fmt.Errorf("while extracting scope from bucket %s: %w", leaky.Name, err) } @@ -359,9 +363,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { } if err := newApiAlert.Validate(strfmt.Default); err != nil { - log.Errorf("Generated alerts isn't valid") - log.Errorf("->%s", spew.Sdump(newApiAlert)) - log.Fatalf("error : %s", err) + return runtimeAlert, fmt.Errorf("invalid generated alert: %w: %s", err, spew.Sdump(newApiAlert)) } runtimeAlert.APIAlerts = append(runtimeAlert.APIAlerts, newApiAlert) diff --git a/pkg/leakybucket/processor.go b/pkg/leakybucket/processor.go index 81af3000c1c..dc5330a612e 100644 --- a/pkg/leakybucket/processor.go +++ b/pkg/leakybucket/processor.go @@ -10,8 +10,7 @@ type Processor interface { AfterBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event } -type DumbProcessor struct { -} +type DumbProcessor struct{} func (d *DumbProcessor) OnBucketInit(bucketFactory *BucketFactory) error { return nil diff --git a/pkg/leakybucket/reset_filter.go b/pkg/leakybucket/reset_filter.go index 452ccc085b1..3b9b876aff4 100644 --- a/pkg/leakybucket/reset_filter.go +++ b/pkg/leakybucket/reset_filter.go @@ -23,10 +23,12 @@ type CancelOnFilter struct { Debug bool } -var cancelExprCacheLock sync.Mutex -var cancelExprCache map[string]struct { - CancelOnFilter *vm.Program -} +var ( + cancelExprCacheLock sync.Mutex + cancelExprCache map[string]struct { + CancelOnFilter *vm.Program + } +) func (u *CancelOnFilter) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event { return func(msg types.Event, leaky *Leaky) *types.Event { diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index 0cc0583390b..8a97f30b092 100644 
--- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -16,8 +16,10 @@ import ( // on overflow // on leak -var uniqExprCache map[string]vm.Program -var uniqExprCacheLock sync.Mutex +var ( + uniqExprCache map[string]vm.Program + uniqExprCacheLock sync.Mutex +) type Uniq struct { DistinctCompiled *vm.Program @@ -58,9 +60,6 @@ func (u *Uniq) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, * } func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error { - var err error - var compiledExpr *vm.Program - if uniqExprCache == nil { uniqExprCache = make(map[string]vm.Program) } @@ -72,14 +71,17 @@ func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error { } else { uniqExprCacheLock.Unlock() //release the lock during compile - compiledExpr, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + compiledExpr, err := expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
+ if err != nil { + return err + } u.DistinctCompiled = compiledExpr uniqExprCacheLock.Lock() uniqExprCache[bucketFactory.Distinct] = *compiledExpr uniqExprCacheLock.Unlock() } u.KeyCache = make(map[string]bool) - return err + return nil } // getElement computes a string from an event and a filter diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 5c395185b20..6a668e07d84 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - "github.com/gofrs/uuid" + "github.com/google/uuid" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" ) diff --git a/pkg/metabase/container.go b/pkg/metabase/container.go index 8b3dd4084c0..9787e535e86 100644 --- a/pkg/metabase/container.go +++ b/pkg/metabase/container.go @@ -5,8 +5,8 @@ import ( "context" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" + typesImage "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/client" "github.com/docker/go-connections/nat" @@ -16,38 +16,40 @@ import ( ) type Container struct { - ListenAddr string - ListenPort string - SharedFolder string - Image string - Name string - ID string - CLI *client.Client - MBDBUri string - DockerGroupID string + ListenAddr string + ListenPort string + SharedFolder string + Image string + Name string + ID string + CLI *client.Client + MBDBUri string + DockerGroupID string + EnvironmentVariables []string } -func NewContainer(listenAddr string, listenPort string, sharedFolder string, containerName string, image string, mbDBURI string, dockerGroupID string) (*Container, error) { +func NewContainer(listenAddr string, listenPort string, sharedFolder string, containerName string, image string, mbDBURI string, dockerGroupID string, environmentVariables []string) (*Container, error) { cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { 
return nil, fmt.Errorf("failed to create docker client : %s", err) } return &Container{ - ListenAddr: listenAddr, - ListenPort: listenPort, - SharedFolder: sharedFolder, - Image: image, - Name: containerName, - CLI: cli, - MBDBUri: mbDBURI, - DockerGroupID: dockerGroupID, + ListenAddr: listenAddr, + ListenPort: listenPort, + SharedFolder: sharedFolder, + Image: image, + Name: containerName, + CLI: cli, + MBDBUri: mbDBURI, + DockerGroupID: dockerGroupID, + EnvironmentVariables: environmentVariables, }, nil } func (c *Container) Create() error { ctx := context.Background() log.Printf("Pulling docker image %s", c.Image) - reader, err := c.CLI.ImagePull(ctx, c.Image, types.ImagePullOptions{}) + reader, err := c.CLI.ImagePull(ctx, c.Image, typesImage.PullOptions{}) if err != nil { return fmt.Errorf("failed to pull docker image : %s", err) } @@ -79,9 +81,9 @@ func (c *Container) Create() error { }, } - env := []string{ - fmt.Sprintf("MB_DB_FILE=%s/metabase.db", containerSharedFolder), - } + env := c.EnvironmentVariables + + env = append(env, fmt.Sprintf("MB_DB_FILE=%s/metabase.db", containerSharedFolder)) if c.MBDBUri != "" { env = append(env, c.MBDBUri) } @@ -105,7 +107,7 @@ func (c *Container) Create() error { func (c *Container) Start() error { ctx := context.Background() - if err := c.CLI.ContainerStart(ctx, c.Name, types.ContainerStartOptions{}); err != nil { + if err := c.CLI.ContainerStart(ctx, c.Name, container.StartOptions{}); err != nil { return fmt.Errorf("failed while starting %s : %s", c.ID, err) } @@ -118,7 +120,7 @@ func StartContainer(name string) error { return fmt.Errorf("failed to create docker client : %s", err) } ctx := context.Background() - if err := cli.ContainerStart(ctx, name, types.ContainerStartOptions{}); err != nil { + if err := cli.ContainerStart(ctx, name, container.StartOptions{}); err != nil { return fmt.Errorf("failed while starting %s : %s", name, err) } @@ -146,7 +148,7 @@ func RemoveContainer(name string) error { } ctx := 
context.Background() log.Printf("Removing docker metabase %s", name) - if err := cli.ContainerRemove(ctx, name, types.ContainerRemoveOptions{}); err != nil { + if err := cli.ContainerRemove(ctx, name, container.RemoveOptions{}); err != nil { return fmt.Errorf("failed to remove container %s : %s", name, err) } return nil @@ -159,7 +161,7 @@ func RemoveImageContainer(image string) error { } ctx := context.Background() log.Printf("Removing docker image '%s'", image) - if _, err := cli.ImageRemove(ctx, image, types.ImageRemoveOptions{}); err != nil { + if _, err := cli.ImageRemove(ctx, image, typesImage.RemoveOptions{}); err != nil { return fmt.Errorf("failed to remove image container %s : %s", image, err) } return nil diff --git a/pkg/metabase/metabase.go b/pkg/metabase/metabase.go index 324a05666a1..0ebb219d211 100644 --- a/pkg/metabase/metabase.go +++ b/pkg/metabase/metabase.go @@ -30,15 +30,16 @@ type Metabase struct { } type Config struct { - Database *csconfig.DatabaseCfg `yaml:"database"` - ListenAddr string `yaml:"listen_addr"` - ListenPort string `yaml:"listen_port"` - ListenURL string `yaml:"listen_url"` - Username string `yaml:"username"` - Password string `yaml:"password"` - DBPath string `yaml:"metabase_db_path"` - DockerGroupID string `yaml:"-"` - Image string `yaml:"image"` + Database *csconfig.DatabaseCfg `yaml:"database"` + ListenAddr string `yaml:"listen_addr"` + ListenPort string `yaml:"listen_port"` + ListenURL string `yaml:"listen_url"` + Username string `yaml:"username"` + Password string `yaml:"password"` + DBPath string `yaml:"metabase_db_path"` + DockerGroupID string `yaml:"-"` + Image string `yaml:"image"` + EnvironmentVariables []string `yaml:"environment_variables"` } var ( @@ -88,7 +89,7 @@ func (m *Metabase) Init(containerName string, image string) error { if err != nil { return err } - m.Container, err = NewContainer(m.Config.ListenAddr, m.Config.ListenPort, m.Config.DBPath, containerName, image, DBConnectionURI, m.Config.DockerGroupID) + 
m.Container, err = NewContainer(m.Config.ListenAddr, m.Config.ListenPort, m.Config.DBPath, containerName, image, DBConnectionURI, m.Config.DockerGroupID, m.Config.EnvironmentVariables) if err != nil { return fmt.Errorf("container init: %w", err) } @@ -137,21 +138,21 @@ func (m *Metabase) LoadConfig(configPath string) error { m.Config = config return nil - } -func SetupMetabase(dbConfig *csconfig.DatabaseCfg, listenAddr string, listenPort string, username string, password string, mbDBPath string, dockerGroupID string, containerName string, image string) (*Metabase, error) { +func SetupMetabase(dbConfig *csconfig.DatabaseCfg, listenAddr string, listenPort string, username string, password string, mbDBPath string, dockerGroupID string, containerName string, image string, environmentVariables []string) (*Metabase, error) { metabase := &Metabase{ Config: &Config{ - Database: dbConfig, - ListenAddr: listenAddr, - ListenPort: listenPort, - Username: username, - Password: password, - ListenURL: fmt.Sprintf("http://%s:%s", listenAddr, listenPort), - DBPath: mbDBPath, - DockerGroupID: dockerGroupID, - Image: image, + Database: dbConfig, + ListenAddr: listenAddr, + ListenPort: listenPort, + Username: username, + Password: password, + ListenURL: fmt.Sprintf("http://%s:%s", listenAddr, listenPort), + DBPath: mbDBPath, + DockerGroupID: dockerGroupID, + Image: image, + EnvironmentVariables: environmentVariables, }, } if err := metabase.Init(containerName, image); err != nil { diff --git a/pkg/parser/enrich.go b/pkg/parser/enrich.go index 661410d20d3..a69cd963813 100644 --- a/pkg/parser/enrich.go +++ b/pkg/parser/enrich.go @@ -7,8 +7,10 @@ import ( ) /* should be part of a package shared with enrich/geoip.go */ -type EnrichFunc func(string, *types.Event, *log.Entry) (map[string]string, error) -type InitFunc func(map[string]string) (interface{}, error) +type ( + EnrichFunc func(string, *types.Event, *log.Entry) (map[string]string, error) + InitFunc func(map[string]string) 
(interface{}, error) +) type EnricherCtx struct { Registered map[string]*Enricher diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go index 40c8de39da5..0a4bc51b862 100644 --- a/pkg/parser/enrich_date.go +++ b/pkg/parser/enrich_date.go @@ -44,6 +44,7 @@ func GenDateParse(date string) (string, time.Time) { "2006-01-02 15:04", "2006/01/02 15:04:05", "2006-01-02 15:04:05", + "2006-01-02T15:04:05", } ) diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 930633feb35..13e106f3049 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -40,6 +40,38 @@ func TestDateParse(t *testing.T) { }, expected: "2011-12-17T08:17:43Z", }, + { + name: "ISO 8601, no timezone", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32Z", + }, + { + name: "ISO 8601, no timezone, milliseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123Z", + }, + { + name: "ISO 8601, no timezone, microseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123456", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123456Z", + }, + { + name: "ISO 8601, no timezone, nanoseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123456789", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123456789Z", + }, } logger := log.WithField("test", "test") diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 1756927bc4b..79a70077283 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -18,7 +18,6 @@ func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string } r, err := exprhelpers.GeoIPRangeEnrich(field) - if err != nil { plog.Errorf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr @@ -47,7 +46,6 @@ func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, } r, err := 
exprhelpers.GeoIPASNEnrich(field) - if err != nil { plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr @@ -81,7 +79,6 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string } r, err := exprhelpers.GeoIPEnrich(field) - if err != nil { plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 26046ae4fd6..1229a0f4470 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -3,6 +3,7 @@ package parser import ( "errors" "fmt" + "strconv" "strings" "time" @@ -236,7 +237,7 @@ func (n *Node) processGrok(p *types.Event, cachedExprEnv map[string]any) (bool, case string: gstr = out case int: - gstr = fmt.Sprintf("%d", out) + gstr = strconv.Itoa(out) case float64, float32: gstr = fmt.Sprintf("%f", out) default: @@ -352,21 +353,24 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Key) continue } - cache.SetKey(stash.Name, key, value, &stash.TTLVal) + if err = cache.SetKey(stash.Name, key, value, &stash.TTLVal); err != nil { + clog.Warningf("failed to store data in cache: %s", err.Error()) + } } } // Iterate on leafs - for _, leaf := range n.LeavesNodes { - ret, err := leaf.process(p, ctx, cachedExprEnv) + leaves := n.LeavesNodes + for idx := range leaves { + ret, err := leaves[idx].process(p, ctx, cachedExprEnv) if err != nil { - clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) + clog.Tracef("\tNode (%s) failed : %v", leaves[idx].rn, err) clog.Debugf("Event leaving node : ko") return false, err } - clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) + clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaves[idx].rn, ret, n.OnSuccess) if ret { NodeState = true @@ -593,7 +597,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* compile leafs if present */ for idx := 
range n.LeavesNodes { if n.LeavesNodes[idx].Name == "" { - n.LeavesNodes[idx].Name = fmt.Sprintf("child-%s", n.Name) + n.LeavesNodes[idx].Name = "child-" + n.Name } /*propagate debug/stats to child nodes*/ if !n.LeavesNodes[idx].Debug && n.Debug { diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 269d51a1ba2..84d5f4db743 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -13,6 +13,8 @@ import ( "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -33,14 +35,11 @@ func TestParser(t *testing.T) { envSetting := os.Getenv("TEST_ONLY") - pctx, ectx, err := prepTests() - if err != nil { - t.Fatalf("failed to load env : %s", err) - } + pctx, ectx := prepTests(t) // Init the enricher if envSetting != "" { - if err := testOneParser(pctx, ectx, envSetting, nil); err != nil { + if err := testOneParser(t, pctx, ectx, envSetting, nil); err != nil { t.Fatalf("Test '%s' failed : %s", envSetting, err) } } else { @@ -57,7 +56,7 @@ func TestParser(t *testing.T) { fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) - if err := testOneParser(pctx, ectx, fname, nil); err != nil { + if err := testOneParser(t, pctx, ectx, fname, nil); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } } @@ -71,22 +70,16 @@ func BenchmarkParser(t *testing.B) { log.SetLevel(log.ErrorLevel) - pctx, ectx, err := prepTests() - if err != nil { - t.Fatalf("failed to load env : %s", err) - } + pctx, ectx := prepTests(t) envSetting := os.Getenv("TEST_ONLY") if envSetting != "" { - if err := testOneParser(pctx, ectx, envSetting, t); err != nil { - t.Fatalf("Test '%s' failed : %s", envSetting, err) - } + err := testOneParser(t, pctx, ectx, envSetting, t) + require.NoError(t, err, "Test '%s' failed", envSetting) } else { fds, err := os.ReadDir("./tests/") - if err != nil { - 
t.Fatalf("Unable to read test directory : %s", err) - } + require.NoError(t, err, "Unable to read test directory") for _, fd := range fds { if !fd.IsDir() { @@ -96,14 +89,13 @@ func BenchmarkParser(t *testing.B) { fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) - if err := testOneParser(pctx, ectx, fname, t); err != nil { - t.Fatalf("Test '%s' failed : %s", fname, err) - } + err := testOneParser(t, pctx, ectx, fname, t) + require.NoError(t, err, "Test '%s' failed", fname) } } } -func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error { +func testOneParser(t require.TestingT, pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error { var ( err error pnodes []Node @@ -143,7 +135,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing // TBD: Load post overflows // func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { parser_test_file := fmt.Sprintf("%s/test.yaml", dir) - tests := loadTestFile(parser_test_file) + tests := loadTestFile(t, parser_test_file) count := 1 if b != nil { @@ -151,8 +143,8 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing b.ResetTimer() } - for range(count) { - if !testFile(tests, *pctx, pnodes) { + for range count { + if !testFile(t, tests, *pctx, pnodes) { return errors.New("test failed") } } @@ -161,7 +153,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } // prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. 
This is done here so that we don't redo it for each test -func prepTests() (*UnixParserCtx, EnricherCtx, error) { +func prepTests(t require.TestingT) (*UnixParserCtx, EnricherCtx) { var ( err error pctx *UnixParserCtx @@ -169,22 +161,16 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { ) err = exprhelpers.Init(nil) - if err != nil { - return nil, ectx, fmt.Errorf("exprhelpers init failed: %w", err) - } + require.NoError(t, err, "exprhelpers init failed") // Load enrichment datadir := "./test_data/" err = exprhelpers.GeoIPInit(datadir) - if err != nil { - log.Fatalf("unable to initialize GeoIP: %s", err) - } + require.NoError(t, err, "geoip init failed") ectx, err = Loadplugin() - if err != nil { - return nil, ectx, fmt.Errorf("failed to load plugin geoip: %v", err) - } + require.NoError(t, err, "load plugin failed") log.Printf("Loaded -> %+v", ectx) @@ -194,18 +180,14 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { /* this should be refactored to 2 lines :p */ // Init the parser pctx, err = Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) - if err != nil { - return nil, ectx, fmt.Errorf("failed to initialize parser: %v", err) - } + require.NoError(t, err, "parser init failed") - return pctx, ectx, nil + return pctx, ectx } -func loadTestFile(file string) []TestFile { +func loadTestFile(t require.TestingT, file string) []TestFile { yamlFile, err := os.Open(file) - if err != nil { - log.Fatalf("yamlFile.Get err #%v ", err) - } + require.NoError(t, err, "failed to open test file") dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) @@ -221,7 +203,7 @@ func loadTestFile(file string) []TestFile { break } - log.Fatalf("Failed to load testfile '%s' yaml error : %v", file, err) + require.NoError(t, err, "failed to load testfile '%s'", file) return nil } @@ -285,7 +267,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo valid = true - for mapIdx := range(len(expectMaps)) { + 
for mapIdx := range len(expectMaps) { for expKey, expVal := range expectMaps[mapIdx] { outVal, ok := outMaps[mapIdx][expKey] if !ok { @@ -391,19 +373,14 @@ reCheck: return true, nil } -func testFile(testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { +func testFile(t require.TestingT, testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { log.Warning("Going to process one test set") for _, tf := range testSet { // func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { testOk, err := testSubSet(tf, pctx, nodes) - if err != nil { - log.Fatalf("test failed : %s", err) - } - - if !testOk { - log.Fatalf("failed test : %+v", tf) - } + require.NoError(t, err, "test failed") + assert.True(t, testOk, "failed test: %+v", tf) } return true @@ -427,9 +404,7 @@ func TestGeneratePatternsDoc(t *testing.T) { } pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) - if err != nil { - t.Fatalf("unable to load patterns : %s", err) - } + require.NoError(t, err, "unable to load patterns") log.Infof("-> %s", spew.Sdump(pctx)) /*don't judge me, we do it for the users*/ diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 8068690b68f..7af82a71535 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -29,10 +29,11 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { return false } - //it's a hack, we do it for the user + // it's a hack, we do it for the user target = strings.TrimPrefix(target, "evt.") log.Debugf("setting target %s to %s", target, value) + defer func() { if r := recover(); r != nil { log.Errorf("Runtime error while trying to set '%s': %+v", target, r) @@ -46,6 +47,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { //event is nil return false } + for _, f := range strings.Split(target, ".") { /* ** According to current Event layout we only have to handle struct and map @@ -57,7 +59,9 @@ func 
SetTargetByName(target string, value string, evt *types.Event) bool { if (tmp == reflect.Value{}) || tmp.IsZero() { log.Debugf("map entry is zero in '%s'", target) } + iter.SetMapIndex(reflect.ValueOf(f), reflect.ValueOf(value)) + return true case reflect.Struct: tmp := iter.FieldByName(f) @@ -65,9 +69,11 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { log.Debugf("'%s' is not a valid target because '%s' is not valid", target, f) return false } + if tmp.Kind() == reflect.Ptr { tmp = reflect.Indirect(tmp) } + iter = tmp case reflect.Ptr: tmp := iter.Elem() @@ -82,11 +88,14 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { log.Errorf("'%s' can't be set", target) return false } + if iter.Kind() != reflect.String { log.Errorf("Expected string, got %v when handling '%s'", iter.Kind(), target) return false } + iter.Set(reflect.ValueOf(value)) + return true } @@ -248,14 +257,18 @@ func stageidx(stage string, stages []string) int { return -1 } -var ParseDump bool -var DumpFolder string +var ( + ParseDump bool + DumpFolder string +) -var StageParseCache dumps.ParserResults -var StageParseMutex sync.Mutex +var ( + StageParseCache dumps.ParserResults + StageParseMutex sync.Mutex +) func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) { - var event = xp + event := xp /* the stage is undefined, probably line is freshly acquired, set to first stage !*/ if event.Stage == "" && len(ctx.Stages) > 0 { @@ -317,46 +330,46 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) } isStageOK := false - for idx, node := range nodes { + for idx := range nodes { //Only process current stage's nodes - if event.Stage != node.Stage { + if event.Stage != nodes[idx].Stage { continue } clog := log.WithFields(log.Fields{ - "node-name": node.rn, + "node-name": nodes[idx].rn, "stage": event.Stage, }) - clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), node.rn) + 
clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), nodes[idx].rn) if ctx.Profiling { - node.Profiling = true + nodes[idx].Profiling = true } - ret, err := node.process(&event, ctx, map[string]interface{}{"evt": &event}) + ret, err := nodes[idx].process(&event, ctx, map[string]interface{}{"evt": &event}) if err != nil { clog.Errorf("Error while processing node : %v", err) return event, err } - clog.Tracef("node (%s) ret : %v", node.rn, ret) + clog.Tracef("node (%s) ret : %v", nodes[idx].rn, ret) if ParseDump { var parserIdxInStage int StageParseMutex.Lock() - if len(StageParseCache[stage][node.Name]) == 0 { - StageParseCache[stage][node.Name] = make([]dumps.ParserResult, 0) + if len(StageParseCache[stage][nodes[idx].Name]) == 0 { + StageParseCache[stage][nodes[idx].Name] = make([]dumps.ParserResult, 0) parserIdxInStage = len(StageParseCache[stage]) } else { - parserIdxInStage = StageParseCache[stage][node.Name][0].Idx + parserIdxInStage = StageParseCache[stage][nodes[idx].Name][0].Idx } StageParseMutex.Unlock() evtcopy := deepcopy.Copy(event) parserInfo := dumps.ParserResult{Evt: evtcopy.(types.Event), Success: ret, Idx: parserIdxInStage} StageParseMutex.Lock() - StageParseCache[stage][node.Name] = append(StageParseCache[stage][node.Name], parserInfo) + StageParseCache[stage][nodes[idx].Name] = append(StageParseCache[stage][nodes[idx].Name], parserInfo) StageParseMutex.Unlock() } if ret { isStageOK = true } - if ret && node.OnSuccess == "next_stage" { + if ret && nodes[idx].OnSuccess == "next_stage" { clog.Debugf("node successful, stop end stage %s", stage) break } diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index b98db350254..ddc07ca7f1d 100644 --- a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -114,10 +114,12 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ( for _, data := range node.Data { err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type) if err != nil { - log.Error(err) + 
log.Error(err.Error()) } if data.Type == "regexp" { //cache only makes sense for regexp - exprhelpers.RegexpCacheInit(data.DestPath, *data) + if err = exprhelpers.RegexpCacheInit(data.DestPath, *data); err != nil { + log.Error(err.Error()) + } } } diff --git a/pkg/parser/whitelist_test.go b/pkg/parser/whitelist_test.go index 02846f17fc1..a3b95b2fa3f 100644 --- a/pkg/parser/whitelist_test.go +++ b/pkg/parser/whitelist_test.go @@ -284,9 +284,9 @@ func TestWhitelistCheck(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var err error node.Whitelist = tt.whitelist - node.CompileWLs() + _, err := node.CompileWLs() + require.NoError(t, err) isWhitelisted := node.CheckIPsWL(tt.event) if !isWhitelisted { isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event}, tt.event) diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 588e74dab54..72356bc1924 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -54,15 +54,20 @@ func TestSetupHelperProcess(t *testing.T) { } fmt.Fprint(os.Stdout, fakeSystemctlOutput) - os.Exit(0) + os.Exit(0) //nolint:revive } func tempYAML(t *testing.T, content string) os.File { t.Helper() require := require.New(t) - file, err := os.CreateTemp("", "") + file, err := os.CreateTemp(t.TempDir(), "") require.NoError(err) + t.Cleanup(func() { + require.NoError(file.Close()) + require.NoError(os.Remove(file.Name())) + }) + _, err = file.WriteString(dedent.Dedent(content)) require.NoError(err) @@ -249,7 +254,6 @@ func TestListSupported(t *testing.T) { t.Parallel() f := tempYAML(t, tc.yml) - defer os.Remove(f.Name()) supported, err := setup.ListSupported(&f) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -375,7 +379,6 @@ func TestDetectSimpleRule(t *testing.T) { - false ugly: `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) require.NoError(err) @@ -421,7 +424,6 @@ detect: for _, tc := range tests { t.Run(tc.name, func(t 
*testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -514,7 +516,6 @@ detect: for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -542,7 +543,6 @@ func TestDetectForcedUnit(t *testing.T) { journalctl_filter: - _SYSTEMD_UNIT=crowdsec-setup-forced.service `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedUnits: []string{"crowdsec-setup-forced.service"}}) require.NoError(err) @@ -580,7 +580,6 @@ func TestDetectForcedProcess(t *testing.T) { when: - ProcessRunning("foobar") `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedProcesses: []string{"foobar"}}) require.NoError(err) @@ -610,7 +609,6 @@ func TestDetectSkipService(t *testing.T) { when: - ProcessRunning("foobar") `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedProcesses: []string{"foobar"}, SkipServices: []string{"wizard"}}) require.NoError(err) @@ -825,7 +823,6 @@ func TestDetectForcedOS(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedOS: tc.forced}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -1009,7 +1006,6 @@ func TestDetectDatasourceValidation(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) require.Equal(tc.expected, detected) diff --git a/pkg/setup/install.go b/pkg/setup/install.go index d63a1ee1775..556ddab4c9a 100644 --- 
a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -13,6 +13,7 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" ) // AcquisDocument is created from a SetupItem. It represents a single YAML document, and can be part of a multi-document file. @@ -47,12 +48,14 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { } // InstallHubItems installs the objects recommended in a setup file. -func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, dryRun bool) error { +func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, input []byte, yes, dryRun, verbose bool) error { setupEnvelope, err := decodeSetup(input, false) if err != nil { return err } + plan := hubops.NewActionPlan(hub) + for _, setupItem := range setupEnvelope.Setup { forceAction := false downloadOnly := false @@ -68,70 +71,70 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, dryRun b return fmt.Errorf("collection %s not found", collection) } - if dryRun { - fmt.Println("dry-run: would install collection", collection) - - continue + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing collection %s: %w", item.Name, err) + if !downloadOnly { + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } } for _, parser := range setupItem.Install.Parsers { - if dryRun { - fmt.Println("dry-run: would install parser", parser) - - continue - } - item := hub.GetItem(cwhub.PARSERS, parser) if item == nil { return fmt.Errorf("parser %s not found", parser) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing parser %s: %w", item.Name, err) + if err := 
plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err } - } - - for _, scenario := range setupItem.Install.Scenarios { - if dryRun { - fmt.Println("dry-run: would install scenario", scenario) - continue + if !downloadOnly { + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } + } + for _, scenario := range setupItem.Install.Scenarios { item := hub.GetItem(cwhub.SCENARIOS, scenario) if item == nil { return fmt.Errorf("scenario %s not found", scenario) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing scenario %s: %w", item.Name, err) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err } - } - - for _, postoverflow := range setupItem.Install.PostOverflows { - if dryRun { - fmt.Println("dry-run: would install postoverflow", postoverflow) - continue + if !downloadOnly { + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } + } + for _, postoverflow := range setupItem.Install.PostOverflows { item := hub.GetItem(cwhub.POSTOVERFLOWS, postoverflow) if item == nil { return fmt.Errorf("postoverflow %s not found", postoverflow) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err + } + + if !downloadOnly { + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } } } - return nil + return plan.Execute(ctx, yes, dryRun, verbose) } // marshalAcquisDocuments creates the monolithic file, or itemized files (if a directory is provided) with the acquisition documents. 
@@ -189,7 +192,9 @@ func marshalAcquisDocuments(ads []AcquisDocument, toDir string) (string, error) return "", fmt.Errorf("while writing to %s: %w", ad.AcquisFilename, err) } - f.Sync() + if err = f.Sync(); err != nil { + return "", fmt.Errorf("while syncing %s: %w", ad.AcquisFilename, err) + } continue } diff --git a/pkg/types/appsec_event.go b/pkg/types/appsec_event.go index 11d70ad368d..54163f53fef 100644 --- a/pkg/types/appsec_event.go +++ b/pkg/types/appsec_event.go @@ -60,7 +60,6 @@ func (w AppsecEvent) GetVar(varName string) string { } log.Infof("var %s not found. Available variables: %+v", varName, w.Vars) return "" - } // getters diff --git a/pkg/types/constants.go b/pkg/types/constants.go index acb5b5bfacf..2421b076b97 100644 --- a/pkg/types/constants.go +++ b/pkg/types/constants.go @@ -1,23 +1,29 @@ package types -const ApiKeyAuthType = "api-key" -const TlsAuthType = "tls" -const PasswordAuthType = "password" +const ( + ApiKeyAuthType = "api-key" + TlsAuthType = "tls" + PasswordAuthType = "password" +) -const PAPIBaseURL = "https://papi.api.crowdsec.net/" -const PAPIVersion = "v1" -const PAPIPollUrl = "/decisions/stream/poll" -const PAPIPermissionsUrl = "/permissions" +const ( + PAPIBaseURL = "https://papi.api.crowdsec.net/" + PAPIVersion = "v1" + PAPIPollUrl = "/decisions/stream/poll" + PAPIPermissionsUrl = "/permissions" +) const CAPIBaseURL = "https://api.crowdsec.net/" -const CscliOrigin = "cscli" -const CrowdSecOrigin = "crowdsec" -const ConsoleOrigin = "console" -const CscliImportOrigin = "cscli-import" -const ListOrigin = "lists" -const CAPIOrigin = "CAPI" -const CommunityBlocklistPullSourceScope = "crowdsecurity/community-blocklist" +const ( + CscliOrigin = "cscli" + CrowdSecOrigin = "crowdsec" + ConsoleOrigin = "console" + CscliImportOrigin = "cscli-import" + ListOrigin = "lists" + CAPIOrigin = "CAPI" + CommunityBlocklistPullSourceScope = "crowdsecurity/community-blocklist" +) const DecisionTypeBan = "ban" diff --git a/pkg/types/event.go 
b/pkg/types/event.go index 9300626b927..0b09bf7cbdf 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -60,6 +60,7 @@ func MakeEvent(timeMachine bool, evtType int, process bool) Event { if timeMachine { evt.ExpectMode = TIMEMACHINE } + return evt } @@ -97,8 +98,9 @@ func (e *Event) GetType() string { func (e *Event) GetMeta(key string) string { if e.Type == OVFLW { - for _, alert := range e.Overflow.APIAlerts { - for _, event := range alert.Events { + alerts := e.Overflow.APIAlerts + for idx := range alerts { + for _, event := range alerts[idx].Events { if event.GetMeta(key) != "" { return event.GetMeta(key) } diff --git a/pkg/types/event_test.go b/pkg/types/event_test.go index 97b13f96d9a..638e42fe757 100644 --- a/pkg/types/event_test.go +++ b/pkg/types/event_test.go @@ -46,7 +46,6 @@ func TestSetParsed(t *testing.T) { assert.Equal(t, tt.value, tt.evt.Parsed[tt.key]) }) } - } func TestSetMeta(t *testing.T) { @@ -86,7 +85,6 @@ func TestSetMeta(t *testing.T) { assert.Equal(t, tt.value, tt.evt.GetMeta(tt.key)) }) } - } func TestParseIPSources(t *testing.T) { diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 728e986bed0..c16fe86ec9c 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -100,7 +100,6 @@ func GetFSType(path string) (string, error) { var buf unix.Statfs_t err := unix.Statfs(path, &buf) - if err != nil { return "", err } diff --git a/pkg/types/ip.go b/pkg/types/ip.go index 9d08afd8809..3f52a7ccf18 100644 --- a/pkg/types/ip.go +++ b/pkg/types/ip.go @@ -2,7 +2,6 @@ package types import ( "encoding/binary" - "errors" "fmt" "math" "net" @@ -23,7 +22,8 @@ func LastAddress(n net.IPNet) net.IP { ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], - ip[15] | ^n.Mask[15]} + ip[15] | ^n.Mask[15], + } } return net.IPv4( @@ -38,7 +38,7 @@ func Addr2Ints(anyIP string) (int, int64, int64, 
int64, int64, error) { if strings.Contains(anyIP, "/") { _, net, err := net.ParseCIDR(anyIP) if err != nil { - return -1, 0, 0, 0, 0, fmt.Errorf("while parsing range %s: %w", anyIP, err) + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip range '%s': %w", anyIP, err) } return Range2Ints(*net) @@ -46,12 +46,12 @@ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { ip := net.ParseIP(anyIP) if ip == nil { - return -1, 0, 0, 0, 0, errors.New("invalid address") + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip address '%s'", anyIP) } sz, start, end, err := IP2Ints(ip) if err != nil { - return -1, 0, 0, 0, 0, fmt.Errorf("while parsing ip %s: %w", anyIP, err) + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip address '%s': %w", anyIP, err) } return sz, start, end, start, end, nil diff --git a/pkg/types/ip_test.go b/pkg/types/ip_test.go index f8c14b12e3c..571163761d4 100644 --- a/pkg/types/ip_test.go +++ b/pkg/types/ip_test.go @@ -8,21 +8,21 @@ import ( ) func TestIP2Int(t *testing.T) { - tEmpty := net.IP{} + _, _, _, err := IP2Ints(tEmpty) if !strings.Contains(err.Error(), "unexpected len 0 for ") { t.Fatalf("unexpected: %s", err) } } + func TestRange2Int(t *testing.T) { tEmpty := net.IPNet{} - //empty item + // empty item _, _, _, _, _, err := Range2Ints(tEmpty) if !strings.Contains(err.Error(), "converting first ip in range") { t.Fatalf("unexpected: %s", err) } - } func TestAdd2Int(t *testing.T) { @@ -181,7 +181,7 @@ func TestAdd2Int(t *testing.T) { }, { in_addr: "xxx2", - exp_error: "invalid address", + exp_error: "invalid ip address 'xxx2'", }, } @@ -190,31 +190,37 @@ func TestAdd2Int(t *testing.T) { if err != nil && test.exp_error == "" { t.Fatalf("%d unexpected error : %s", idx, err) } + if test.exp_error != "" { if !strings.Contains(err.Error(), test.exp_error) { t.Fatalf("%d unmatched error : %s != %s", idx, err, test.exp_error) } - continue //we can skip this one + + continue // we can skip this one } + if sz != test.exp_sz { t.Fatalf("%d unexpected 
size %d != %d", idx, sz, test.exp_sz) } + if start_ip != test.exp_start_ip { t.Fatalf("%d unexpected start_ip %d != %d", idx, start_ip, test.exp_start_ip) } + if sz == 16 { if start_sfx != test.exp_start_sfx { t.Fatalf("%d unexpected start sfx %d != %d", idx, start_sfx, test.exp_start_sfx) } } + if end_ip != test.exp_end_ip { t.Fatalf("%d unexpected end ip %d != %d", idx, end_ip, test.exp_end_ip) } + if sz == 16 { if end_sfx != test.exp_end_sfx { t.Fatalf("%d unexpected end sfx %d != %d", idx, end_sfx, test.exp_end_sfx) } } - } } diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 712d44ba12d..d5e4ac6f986 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -10,25 +10,46 @@ import ( "gopkg.in/natefinch/lumberjack.v2" ) -var logFormatter log.Formatter -var LogOutput *lumberjack.Logger //io.Writer -var logLevel log.Level +var ( + logFormatter log.Formatter + LogOutput *lumberjack.Logger // io.Writer + logLevel log.Level +) + +func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, format string, compress *bool, forceColors bool) error { + if format == "" { + format = "text" + } + + switch format { + case "text": + logFormatter = &log.TextFormatter{ + TimestampFormat: time.RFC3339, + FullTimestamp: true, + ForceColors: forceColors, + } + case "json": + logFormatter = &log.JSONFormatter{TimestampFormat: time.RFC3339} + default: + return fmt.Errorf("unknown log_format '%s'", format) + } -func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, compress *bool, forceColors bool) error { - /*Configure logs*/ if cfgMode == "file" { _maxsize := 500 if maxSize != 0 { _maxsize = maxSize } + _maxfiles := 3 if maxFiles != 0 { _maxfiles = maxFiles } + _maxage := 28 if maxAge != 0 { _maxage = maxAge } + _compress := true if compress != nil { _compress = *compress @@ -45,10 +66,11 @@ func SetDefaultLoggerConfig(cfgMode string, cfgFolder 
string, cfgLevel log.Level } else if cfgMode != "stdout" { return fmt.Errorf("log mode '%s' unknown", cfgMode) } + logLevel = cfgLevel log.SetLevel(logLevel) - logFormatter = &log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true, ForceColors: forceColors} log.SetFormatter(logFormatter) + return nil } @@ -61,7 +83,9 @@ func ConfigureLogger(clog *log.Logger) error { if logFormatter != nil { clog.SetFormatter(logFormatter) } + clog.SetLevel(logLevel) + return nil } @@ -74,6 +98,8 @@ func IsNetworkFS(path string) (bool, string, error) { if err != nil { return false, "", err } + fsType = strings.ToLower(fsType) + return fsType == "nfs" || fsType == "cifs" || fsType == "smb" || fsType == "smb2", fsType, nil } diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index ac438ad0c14..ca912d58e49 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -143,18 +143,15 @@ rm -rf %{buildroot} #systemctl stop crowdsec || true -if [ $1 == 2 ];then - if [[ ! -d /var/lib/crowdsec/backup ]]; then - cscli config backup /var/lib/crowdsec/backup - fi -fi +#if [ $1 == 2 ]; then +# upgrade pre-install here +#fi %post -p /bin/bash #install if [ $1 == 1 ]; then - if [ ! -f "/var/lib/crowdsec/data/crowdsec.db" ] ; then touch /var/lib/crowdsec/data/crowdsec.db fi @@ -179,27 +176,21 @@ if [ $1 == 1 ]; then fi cscli hub update + cscli hub upgrade CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection - echo "Get started with CrowdSec:" - echo " * Detailed guides are available in our documentation: https://docs.crowdsec.net" - echo " * Configuration items created by the community can be found at the Hub: https://hub.crowdsec.net" - echo " * Gain insights into your use of CrowdSec with the help of the console https://app.crowdsec.net" - -#upgrade -elif [ $1 == 2 ] && [ -d /var/lib/crowdsec/backup ]; then - cscli config restore /var/lib/crowdsec/backup - if [ $? 
== 0 ]; then - rm -rf /var/lib/crowdsec/backup - fi - - if [[ -f %{_sysconfdir}/crowdsec/online_api_credentials.yaml ]] ; then - chmod 600 %{_sysconfdir}/crowdsec/online_api_credentials.yaml - fi - - if [[ -f %{_sysconfdir}/crowdsec/local_api_credentials.yaml ]] ; then - chmod 600 %{_sysconfdir}/crowdsec/local_api_credentials.yaml - fi + GREEN='\033[0;32m' + BOLD='\033[1m' + RESET='\033[0m' + + echo -e "${BOLD}Get started with CrowdSec:${RESET}" + echo -e " * Go further by following our ${BOLD}post installation steps${RESET} : ${GREEN}${BOLD}https://docs.crowdsec.net/u/getting_started/next_steps${RESET}" + echo -e "====================================================================================================================" + echo -e " * Install a ${BOLD}remediation component${RESET} to block attackers: ${GREEN}${BOLD}https://docs.crowdsec.net/u/bouncers/intro${RESET}" + echo -e "====================================================================================================================" + echo -e " * Find more ${BOLD}collections${RESET}, ${BOLD}parsers${RESET} and ${BOLD}scenarios${RESET} created by the community with the Hub: ${GREEN}${BOLD}https://hub.crowdsec.net${RESET}" + echo -e "====================================================================================================================" + echo -e " * Subscribe to ${BOLD}additional blocklists${RESET}, ${BOLD}visualize${RESET} your alerts and more with the console: ${GREEN}${BOLD}https://app.crowdsec.net${RESET}" fi %systemd_post %{name}.service diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index aa5830a6bae..3df0b42a0f2 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u @@ -68,6 +67,40 @@ teardown() { refute_output } +@test "crowdsec - log format" { + # fail early + config_disable_lapi + config_disable_agent + + config_set '.common.log_media="stdout"' + 
+ config_set '.common.log_format=""' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --partial 'level=fatal msg="you must run at least the API Server or crowdsec"' + + config_set '.common.log_format="text"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --partial 'level=fatal msg="you must run at least the API Server or crowdsec"' + + config_set '.common.log_format="json"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + rune -0 jq -c 'select(.msg=="you must run at least the API Server or crowdsec") | .level' <(stderr | grep "^{") + assert_output '"fatal"' + + # If log_media='file', a hook to stderr is added only for fatal messages, + # with a predefined formatter (level + msg, no timestamp, ignore log_format) + + config_set '.common.log_media="file"' + + config_set '.common.log_format="text"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --regexp 'FATAL.* you must run at least the API Server or crowdsec$' + + config_set '.common.log_format="json"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --regexp 'FATAL.* you must run at least the API Server or crowdsec$' +} + @test "CS_LAPI_SECRET not strong enough" { CS_LAPI_SECRET=foo rune -1 wait-for "$CROWDSEC" assert_stderr --partial "api server init: unable to run local API: controller init: CS_LAPI_SECRET not strong enough" @@ -138,6 +171,8 @@ teardown() { rune -0 ./instance-crowdsec stop } +# TODO: move acquisition tests to test/bats/crowdsec-acquisition.bats + @test "crowdsec (error if the acquisition_path file is defined but missing)" { ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') rm -f "$ACQUIS_YAML" @@ -278,7 +313,7 @@ teardown() { # if filenames are missing, it won't be able to detect source type config_set "$ACQUIS_YAML" 
'.source="file"' rune -1 wait-for "$CROWDSEC" - assert_stderr --partial "failed to configure datasource file: no filename or filenames configuration provided" + assert_stderr --partial "while configuring datasource of type file from $ACQUIS_YAML (position 0): no filename or filenames configuration provided" config_set "$ACQUIS_YAML" '.filenames=["file.log"]' config_set "$ACQUIS_YAML" '.meh=3' diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 264870501a5..9af3c841759 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -33,9 +33,9 @@ teardown() { # no "usage" output after every error rune -1 cscli blahblah - # error is displayed as log entry, not with print - assert_stderr --partial 'level=fatal msg="unknown command \"blahblah\" for \"cscli\""' - refute_stderr --partial 'unknown command "blahblah" for "cscli"' + # error is displayed with print, not as a log entry + assert_stderr --partial 'unknown command "blahblah" for "cscli"' + refute_stderr --partial 'level=fatal' } @test "cscli version" { @@ -172,41 +172,13 @@ teardown() { } @test "cscli config backup / restore" { - # test that we need a valid path - # disabled because in CI, the empty string is not passed as a parameter - #rune -1 cscli config backup "" - #assert_stderr --partial "failed to backup config: directory path can't be empty" + CONFIG_DIR=$(config_get '.config_paths.config_dir') rune -1 cscli config backup "/dev/null/blah" - assert_stderr --partial "failed to backup config: while creating /dev/null/blah: mkdir /dev/null/blah: not a directory" + assert_stderr --partial "'cscli config backup' has been removed, you can manually backup/restore $CONFIG_DIR instead" - # pick a dirpath - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - - # succeed the first time - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - - # don't overwrite an existing backup - rune -1 cscli config backup "$backupdir" - assert_stderr --partial 
"failed to backup config" - assert_stderr --partial "file exists" - - SIMULATION_YAML="$(config_get '.config_paths.simulation_path')" - - # restore - rm "$SIMULATION_YAML" - rune -0 cscli config restore "$backupdir" - assert_file_exists "$SIMULATION_YAML" - - # cleanup - rm -rf -- "${backupdir:?}" - - # backup: detect missing files - rm "$SIMULATION_YAML" - rune -1 cscli config backup "$backupdir" - assert_stderr --regexp "failed to backup config: failed copy .* to .*: stat .*: no such file or directory" - rm -rf -- "${backupdir:?}" + rune -1 cscli config restore "/dev/null/blah" + assert_stderr --partial "'cscli config restore' has been removed, you can manually backup/restore $CONFIG_DIR instead" } @test "'cscli completion' with or without configuration file" { @@ -294,7 +266,7 @@ teardown() { # it is possible to enable subcommands with feature flags defined in feature.yaml rune -1 cscli setup - assert_stderr --partial 'unknown command \"setup\" for \"cscli\"' + assert_stderr --partial 'unknown command "setup" for "cscli"' CONFIG_DIR=$(dirname "$CONFIG_YAML") echo ' - cscli_setup' >> "$CONFIG_DIR"/feature.yaml rune -0 cscli setup diff --git a/test/bats/01_cscli_lapi.bats b/test/bats/01_cscli_lapi.bats index 6e876576a6e..005eb15e141 100644 --- a/test/bats/01_cscli_lapi.bats +++ b/test/bats/01_cscli_lapi.bats @@ -113,9 +113,8 @@ teardown() { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' + rune -1 cscli lapi status + assert_stderr 'Error: failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' } @test "cscli - bad LAPI password" { @@ -123,9 +122,8 @@ teardown() { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') config_set 
"$LOCAL_API_CREDENTIALS" '.password="meh"' - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' + rune -1 cscli lapi status + assert_stderr 'Error: failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' } @test "cscli lapi register / machines validate" { @@ -189,8 +187,10 @@ teardown() { rune -1 cscli lapi register --machine malicious --token 123456789012345678901234badtoken assert_stderr --partial "401 Unauthorized: API error: invalid token for auto registration" - rune -1 cscli machines inspect malicious -o json - assert_stderr --partial "unable to read machine data 'malicious': user 'malicious': user doesn't exist" + rune -1 cscli machines inspect malicious + # XXX: we may want to remove this warning + assert_stderr --partial 'QueryMachineByID : ent: machine not found' + assert_stderr --partial "Error: unable to read machine data 'malicious': user 'malicious': user doesn't exist" rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 assert_stderr --partial "Successfully registered to Local API" diff --git a/test/bats/02_nolapi.bats b/test/bats/02_nolapi.bats index cefa6d798b4..70495a0ed91 100644 --- a/test/bats/02_nolapi.bats +++ b/test/bats/02_nolapi.bats @@ -66,18 +66,6 @@ teardown() { refute_output --partial "Local API Server" } -@test "cscli config backup" { - config_disable_lapi - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "$backupdir" - rm -rf -- "${backupdir:?}" - - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" -} - @test "lapi status shouldn't be ok without api.server" { config_disable_lapi rune -1 cscli machines list diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index 
6be5101cee2..972b84977ad 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats @@ -60,18 +60,6 @@ teardown() { refute_output --partial "Crowdsec" } -@test "no agent: cscli config backup" { - config_disable_agent - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "$backupdir" - - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" - rm -rf -- "${backupdir:?}" -} - @test "no agent: lapi status should be ok" { config_disable_agent ./instance-crowdsec start diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index d22a6f0a953..eaeb0939112 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -51,17 +51,6 @@ teardown() { assert_output --regexp "Global:.*Crowdsec.*cscli:.*Local API Server:" } -@test "no agent: cscli config backup" { - config_disable_capi - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "$backupdir" - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" - rm -rf -- "${backupdir:?}" -} - @test "without capi: cscli lapi status -> success" { config_disable_capi ./instance-crowdsec start @@ -76,5 +65,5 @@ teardown() { rune -0 cscli metrics assert_output --partial "Route" assert_output --partial '/v1/watchers/login' - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" } diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index f832ac572d2..72a8b64a57a 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -511,8 +511,9 @@ update-notifier-motd.timer enabled enabled rune -0 jq -e '.installed == false' <(output) # we install it - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< 
'{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/apache2' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' + assert_line --regexp 'download collections:crowdsecurity/apache2' + assert_line --regexp 'enable collections:crowdsecurity/apache2' # still not installed rune -0 cscli collections inspect crowdsecurity/apache2 -o json @@ -520,8 +521,8 @@ update-notifier-motd.timer enabled enabled # same with dependencies rune -0 cscli collections remove --all - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/linux"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/linux' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/linux"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/linux' } @test "cscli setup install-hub (dry run: install multiple collections)" { @@ -530,8 +531,8 @@ update-notifier-motd.timer enabled enabled rune -0 jq -e '.installed == false' <(output) # we install it - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/apache2' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/apache2' # still not installed rune -0 cscli collections inspect crowdsecurity/apache2 -o json @@ -539,15 +540,15 @@ update-notifier-motd.timer enabled enabled } @test "cscli setup install-hub (dry run: install multiple collections, parsers, scenarios, postoverflows)" { - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< 
'{"setup":[{"install":{"collections":["crowdsecurity/aws-console","crowdsecurity/caddy"],"parsers":["crowdsecurity/asterisk-logs"],"scenarios":["crowdsecurity/smb-fs"],"postoverflows":["crowdsecurity/cdn-whitelist","crowdsecurity/rdns"]}}]}' - assert_line 'dry-run: would install collection crowdsecurity/aws-console' - assert_line 'dry-run: would install collection crowdsecurity/caddy' - assert_line 'dry-run: would install parser crowdsecurity/asterisk-logs' - assert_line 'dry-run: would install scenario crowdsecurity/smb-fs' - assert_line 'dry-run: would install postoverflow crowdsecurity/cdn-whitelist' - assert_line 'dry-run: would install postoverflow crowdsecurity/rdns' - - rune -1 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/foo"]}}]}' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/aws-console","crowdsecurity/caddy"],"parsers":["crowdsecurity/asterisk-logs"],"scenarios":["crowdsecurity/smb-bf"],"postoverflows":["crowdsecurity/cdn-whitelist","crowdsecurity/rdns"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/aws-console' + assert_line --regexp 'enable collections:crowdsecurity/caddy' + assert_line --regexp 'enable parsers:crowdsecurity/asterisk-logs' + assert_line --regexp 'enable scenarios:crowdsecurity/smb-bf' + assert_line --regexp 'enable postoverflows:crowdsecurity/cdn-whitelist' + assert_line --regexp 'enable postoverflows:crowdsecurity/rdns' + + rune -1 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/foo"]}}]}' assert_stderr --partial 'collection crowdsecurity/foo not found' } diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index e260e667524..f3be9c60a95 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -66,7 +66,7 @@ teardown() { rune -0 cscli metrics assert_output --partial "Route" assert_output 
--partial '/v1/watchers/login' - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" rune -0 cscli metrics -o json rune -0 jq 'keys' <(output) @@ -93,7 +93,7 @@ teardown() { assert_stderr --partial "unknown metrics type: foobar" rune -0 cscli metrics show lapi - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" assert_output --regexp "Route.*Method.*Hits" assert_output --regexp "/v1/watchers/login.*POST" diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index c4dfebbab1d..5fb2c543bda 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -136,7 +136,10 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +--------------------------+ + | Bouncer Metrics (testbou | + | ncer) since 2024-02-08 1 | + | 3:35:16 +0000 UTC | +--------+-----------------+ | Origin | foo | | | dogyear | pound | @@ -226,7 +229,8 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +-------------------------------------------------------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC | +----------------------------------+------------------+-------------------+-----------------+ | Origin | active_decisions | dropped | foo | | | IPs | bytes | packets | dogyear | pound | @@ -309,7 +313,8 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +-------------------------------------------------------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC | +----------------------------------+------------------+-------------------+-----------------+ | Origin | 
active_decisions | dropped | foo | | | IPs | bytes | packets | dogyear | pound | @@ -365,7 +370,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +-----------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-0 | + | 9 03:40:00 +0000 UTC | +--------------------------+--------+-----------+ | Origin | ima | notagauge | | | second | inch | @@ -417,7 +424,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +---------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02 | + | -09 03:40:00 +0000 UTC | +--------------------------+------------------+ | Origin | active_decisions | | | IPs | @@ -502,7 +511,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: + +--------------------------------------------------------------+ + | Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 U | + | TC | +----------------------------+---------+-----------------------+ | Origin | dropped | processed | | | bytes | bytes | packets | @@ -512,8 +523,9 @@ teardown() { +----------------------------+---------+-----------+-----------+ | Total | 1.80k | 12.34k | 100 | +----------------------------+---------+-----------+-----------+ - - Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: + +------------------------------------------------+ + | Bouncer Metrics (bouncer2) since 2024-02-08 10 | + | :48:36 +0000 UTC | +----------------------------+-------------------+ | Origin | dropped | | | bytes | packets | diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index b1c90116dd2..c9ee1b0cd0c 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -117,12 +117,9 @@ teardown() { @test "we can't add the 
same bouncer twice" { rune -0 cscli bouncers add ciTestBouncer - rune -1 cscli bouncers add ciTestBouncer -o json + rune -1 cscli bouncers add ciTestBouncer - # XXX temporary hack to filter out unwanted log lines that may appear before - # log configuration (= not json) - rune -0 jq -c '[.level,.msg]' <(stderr | grep "^{") - assert_output '["fatal","unable to create bouncer: bouncer ciTestBouncer already exists"]' + assert_stderr 'Error: unable to create bouncer: bouncer ciTestBouncer already exists' rune -0 cscli bouncers list -o json rune -0 jq '. | length' <(output) diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index b8fa1e9efca..b03b58732fa 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -20,7 +20,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -76,15 +75,15 @@ teardown() { assert_stderr --partial "invalid hub item appsec-rules:crowdsecurity/vpatch-laravel-debug-mode: latest version missing from index" rune -1 cscli appsec-rules install crowdsecurity/vpatch-laravel-debug-mode --force - assert_stderr --partial "error while installing 'crowdsecurity/vpatch-laravel-debug-mode': latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again" + assert_stderr --partial "appsec-rules:crowdsecurity/vpatch-laravel-debug-mode: latest hash missing from index. 
The index file is invalid, please run 'cscli hub update' and try again" } @test "missing reference in hub index" { new_hub=$(jq <"$INDEX_PATH" 'del(.parsers."crowdsecurity/smb-logs") | del (.scenarios."crowdsecurity/mysql-bf")') echo "$new_hub" >"$INDEX_PATH" rune -0 cscli hub list --error - assert_stderr --partial "can't find crowdsecurity/smb-logs in parsers, required by crowdsecurity/smb" - assert_stderr --partial "can't find crowdsecurity/mysql-bf in scenarios, required by crowdsecurity/mysql" + assert_stderr --partial "can't find parsers:crowdsecurity/smb-logs, required by crowdsecurity/smb" + assert_stderr --partial "can't find scenarios:crowdsecurity/mysql-bf, required by crowdsecurity/mysql" } @test "loading hub reports tainted items (subitem is tainted)" { @@ -108,47 +107,28 @@ teardown() { @test "cscli hub update" { rm -f "$INDEX_PATH" rune -0 cscli hub update - assert_stderr --partial "Wrote index to $INDEX_PATH" + assert_output "Downloading $INDEX_PATH" rune -0 cscli hub update - assert_stderr --partial "hub index is up to date" + assert_output "Nothing to do, the hub index is up to date." 
} -@test "cscli hub upgrade" { +@test "cscli hub upgrade (up to date)" { rune -0 cscli hub upgrade - assert_stderr --partial "Upgrading parsers" - assert_stderr --partial "Upgraded 0 parsers" - assert_stderr --partial "Upgrading postoverflows" - assert_stderr --partial "Upgraded 0 postoverflows" - assert_stderr --partial "Upgrading scenarios" - assert_stderr --partial "Upgraded 0 scenarios" - assert_stderr --partial "Upgrading contexts" - assert_stderr --partial "Upgraded 0 contexts" - assert_stderr --partial "Upgrading collections" - assert_stderr --partial "Upgraded 0 collections" - assert_stderr --partial "Upgrading appsec-configs" - assert_stderr --partial "Upgraded 0 appsec-configs" - assert_stderr --partial "Upgrading appsec-rules" - assert_stderr --partial "Upgraded 0 appsec-rules" - assert_stderr --partial "Upgrading collections" - assert_stderr --partial "Upgraded 0 collections" + refute_output rune -0 cscli parsers install crowdsecurity/syslog-logs - rune -0 cscli hub upgrade - assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" - rune -0 cscli hub upgrade --force - assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" - assert_stderr --partial "crowdsecurity/syslog-logs: updated" - assert_stderr --partial "Upgraded 1 parsers" - # this is used by the cron script to know if the hub was updated - assert_output --partial "updated crowdsecurity/syslog-logs" + refute_output + skip "todo: data files are re-downloaded with --force" } @test "cscli hub upgrade (with local items)" { mkdir -p "$CONFIG_DIR/collections" touch "$CONFIG_DIR/collections/foo.yaml" rune -0 cscli hub upgrade - assert_stderr --partial "not upgrading foo.yaml: local item" + assert_output - <<-EOT + collections:foo.yaml - not downloading local item + EOT } @test "cscli hub types" { diff --git a/test/bats/20_hub_collections.bats b/test/bats/20_hub_collections.bats deleted file mode 100644 index 6822339ae40..00000000000 --- a/test/bats/20_hub_collections.bats +++ /dev/null 
@@ -1,381 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli collections list" { - hub_purge_all - - # no items - rune -0 cscli collections list - assert_output --partial "COLLECTIONS" - rune -0 cscli collections list -o json - assert_json '{collections:[]}' - rune -0 cscli collections list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - rune -0 cscli collections list - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli collections list -o json - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 jq '.collections | length' <(output) - assert_output "2" - - rune -0 cscli collections list -o raw - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli collections list -a" { - expected=$(jq <"$INDEX_PATH" -r '.collections | length') - - rune -0 cscli collections list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli collections list -o json -a - rune -0 jq '.collections | length' <(output) - assert_output "$expected" - - rune -0 cscli collections list -o raw -a - rune -0 grep -vc 
'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli collections list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli collections list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli collections list -o json -a | jq -r '.collections[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli collections list [collection]..." { - # non-existent - rune -1 cscli collections install foo/bar - assert_stderr --partial "can't find 'foo/bar' in collections" - - # not installed - rune -0 cscli collections list crowdsecurity/smb - assert_output --regexp 'crowdsecurity/smb.*disabled' - - # install two items - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - # list an installed item - rune -0 cscli collections list crowdsecurity/sshd - assert_output --regexp "crowdsecurity/sshd" - refute_output --partial "crowdsecurity/smb" - - # list multiple installed and non installed items - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb crowdsecurity/nginx - assert_output --partial "crowdsecurity/sshd" - assert_output --partial "crowdsecurity/smb" - assert_output --partial "crowdsecurity/nginx" - - rune -0 cscli collections list crowdsecurity/sshd -o json - rune -0 jq '.collections | length' <(output) - assert_output "1" - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb crowdsecurity/nginx -o json - rune -0 jq '.collections | length' <(output) - assert_output "3" - - rune -0 cscli collections list crowdsecurity/sshd -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - 
assert_output "2" -} - -@test "cscli collections install" { - rune -1 cscli collections install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli collections install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in collections" - - # simple install - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli collections install crowdsecurity/ssshd - assert_stderr --partial "can't find 'crowdsecurity/ssshd' in collections, did you mean 'crowdsecurity/sshd'?" - - # install multiple - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'installed: true' - rune -0 cscli collections inspect crowdsecurity/smb --no-metrics - assert_output --partial 'crowdsecurity/smb' - assert_output --partial 'installed: true' -} - -@test "cscli collections install (file location and download-only)" { - rune -0 cscli collections install crowdsecurity/linux --download-only - rune -0 cscli collections inspect crowdsecurity/linux --no-metrics - assert_output --partial 'crowdsecurity/linux' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/collections/crowdsecurity/linux.yaml" - assert_file_not_exists "$CONFIG_DIR/collections/linux.yaml" - - rune -0 cscli collections install crowdsecurity/linux - rune -0 cscli collections inspect crowdsecurity/linux --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/collections/linux.yaml" -} - -@test "cscli collections install --force (tainted)" { - rune -0 cscli collections install crowdsecurity/sshd - echo "dirty" >"$CONFIG_DIR/collections/sshd.yaml" - - 
rune -1 cscli collections install crowdsecurity/sshd - assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is tainted, won't overwrite unless --force" - - rune -0 cscli collections install crowdsecurity/sshd --force - assert_stderr --partial "Enabled crowdsecurity/sshd" -} - -@test "cscli collections install --ignore (skip on errors)" { - rune -1 cscli collections install foo/bar crowdsecurity/sshd - assert_stderr --partial "can't find 'foo/bar' in collections" - refute_stderr --partial "Enabled collections: crowdsecurity/sshd" - - rune -0 cscli collections install foo/bar crowdsecurity/sshd --ignore - assert_stderr --partial "can't find 'foo/bar' in collections" - assert_stderr --partial "Enabled collections: crowdsecurity/sshd" -} - -@test "cscli collections inspect" { - rune -1 cscli collections inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli collections inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - - # one item - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_line 'type: collections' - assert_line 'name: crowdsecurity/sshd' - assert_line 'author: crowdsecurity' - assert_line 'path: collections/crowdsecurity/sshd.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli collections inspect crowdsecurity/sshd - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -c '[.type, .name, .author, .path, .installed]' <(output) - assert_json '["collections","crowdsecurity/sshd","crowdsecurity","collections/crowdsecurity/sshd.yaml",false]' - - # one item, raw - rune -0 cscli collections inspect crowdsecurity/sshd -o raw - assert_line 'type: collections' - assert_line 
'name: crowdsecurity/sshd' - assert_line 'author: crowdsecurity' - assert_line 'path: collections/crowdsecurity/sshd.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'crowdsecurity/smb' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb -o json - rune -0 jq -sc '[.[] | [.type, .name, .author, .path, .installed]]' <(output) - assert_json '[["collections","crowdsecurity/sshd","crowdsecurity","collections/crowdsecurity/sshd.yaml",false],["collections","crowdsecurity/smb","crowdsecurity","collections/crowdsecurity/smb.yaml",false]]' - - # multiple items, raw - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb -o raw - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'crowdsecurity/smb' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli collections remove" { - rune -1 cscli collections remove - assert_stderr --partial "specify at least one collection to remove or '--all'" - rune -1 cscli collections remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - - rune -0 cscli collections install crowdsecurity/sshd --download-only - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'removing crowdsecurity/sshd: not installed -- no need to remove' - - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'Removed crowdsecurity/sshd' - - rune -0 cscli 
collections remove crowdsecurity/sshd --purge - assert_stderr --partial 'Removed source file [crowdsecurity/sshd]' - - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'removing crowdsecurity/sshd: not installed -- no need to remove' - - rune -0 cscli collections remove crowdsecurity/sshd --purge --debug - assert_stderr --partial 'removing crowdsecurity/sshd: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/sshd]' - - # install, then remove, check files - rune -0 cscli collections install crowdsecurity/sshd - assert_file_exists "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections remove crowdsecurity/sshd - assert_file_not_exists "$CONFIG_DIR/collections/sshd.yaml" - - # delete is an alias for remove - rune -0 cscli collections install crowdsecurity/sshd - assert_file_exists "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections delete crowdsecurity/sshd - assert_file_not_exists "$CONFIG_DIR/collections/sshd.yaml" - - # purge - assert_file_exists "$HUB_DIR/collections/crowdsecurity/sshd.yaml" - rune -0 cscli collections remove crowdsecurity/sshd --purge - assert_file_not_exists "$HUB_DIR/collections/crowdsecurity/sshd.yaml" - - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - # --all - rune -0 cscli collections list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli collections remove --all - - rune -0 cscli collections list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli collections remove --force" { - # remove a collections that belongs to a collection - rune -0 cscli collections install crowdsecurity/linux - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial "crowdsecurity/sshd belongs to collections: [crowdsecurity/linux]" - assert_stderr --partial "Run 'sudo cscli collections remove 
crowdsecurity/sshd --force' if you want to force remove this collection" -} - -@test "cscli collections upgrade" { - rune -1 cscli collections upgrade - assert_stderr --partial "specify at least one collection to upgrade or '--all'" - rune -1 cscli collections upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - rune -0 cscli collections remove crowdsecurity/exim --purge - rune -1 cscli collections upgrade crowdsecurity/exim - assert_stderr --partial "can't upgrade crowdsecurity/exim: not installed" - rune -0 cscli collections install crowdsecurity/exim --download-only - rune -1 cscli collections upgrade crowdsecurity/exim - assert_stderr --partial "can't upgrade crowdsecurity/exim: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all collections - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.collections |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli collections install crowdsecurity/sshd - - echo "v0.0" > "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli collections upgrade crowdsecurity/sshd - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/collections/sshd.yaml" - # XXX: should return error - rune -0 cscli collections upgrade crowdsecurity/sshd - assert_stderr --partial "crowdsecurity/sshd is tainted, --force to overwrite" - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli collections upgrade crowdsecurity/sshd --force - rune -0 cscli collections inspect 
crowdsecurity/sshd -o json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli collections install crowdsecurity/smb - echo "v0.0" >"$CONFIG_DIR/collections/sshd.yaml" - echo "v0.0" >"$CONFIG_DIR/collections/smb.yaml" - rune -0 cscli collections list -o json - rune -0 jq -e '[.collections[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli collections upgrade crowdsecurity/sshd crowdsecurity/smb - rune -0 cscli collections list -o json - rune -0 jq -e 'any(.collections[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/collections/sshd.yaml" - echo "v0.0" >"$CONFIG_DIR/collections/smb.yaml" - rune -0 cscli collections list -o json - rune -0 jq -e '[.collections[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli collections upgrade --all - rune -0 cscli collections list -o json - rune -0 jq -e 'any(.collections[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_collections_dep.bats b/test/bats/20_hub_collections_dep.bats index 673b812dc0d..94a984709a8 100644 --- a/test/bats/20_hub_collections_dep.bats +++ b/test/bats/20_hub_collections_dep.bats @@ -20,7 +20,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -84,18 +83,32 @@ teardown() { assert_stderr --partial "crowdsecurity/smb is tainted, use '--force' to remove" } +@test "cscli collections inspect (dependencies)" { + rune -0 cscli collections install crowdsecurity/smb + + # The inspect command must show the dependencies of the local or older version. 
+ echo "{'collections': ['crowdsecurity/sshd']}" >"$CONFIG_DIR/collections/smb.yaml" + + rune -0 cscli collections inspect crowdsecurity/smb --no-metrics -o json + rune -0 jq -e '.collections' <(output) + assert_json '["crowdsecurity/sshd"]' +} + @test "cscli collections (dependencies II: the revenge)" { rune -0 cscli collections install crowdsecurity/wireguard baudneo/gotify rune -0 cscli collections remove crowdsecurity/wireguard - assert_stderr --partial "crowdsecurity/syslog-logs was not removed because it also belongs to baudneo/gotify" + assert_output --regexp 'disabling collections:crowdsecurity/wireguard' + refute_output --regexp 'disabling parsers:crowdsecurity/syslog-logs' rune -0 cscli collections inspect crowdsecurity/wireguard -o json rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/syslog-logs -o json + rune -0 jq -e '.installed==true' <(output) } @test "cscli collections (dependencies III: origins)" { # it is perfectly fine to remove an item belonging to a collection that we are removing anyway - # inject a dependency: sshd requires the syslog-logs parsers, but linux does too + # inject a direct dependency: sshd requires the syslog-logs parsers, but linux does too hub_dep=$(jq <"$INDEX_PATH" '. 
* {collections:{"crowdsecurity/sshd":{parsers:["crowdsecurity/syslog-logs"]}}}') echo "$hub_dep" >"$INDEX_PATH" @@ -108,11 +121,8 @@ teardown() { # removing linux should remove syslog-logs even though sshd depends on it rune -0 cscli collections remove crowdsecurity/linux - refute_stderr --partial "crowdsecurity/syslog-logs was not removed" - # we must also consider indirect dependencies - refute_stderr --partial "crowdsecurity/ssh-bf was not removed" - rune -0 cscli parsers list -o json - rune -0 jq -e '.parsers | length == 0' <(output) + rune -0 cscli hub list -o json + rune -0 jq -e 'add | length == 0' <(output) } @test "cscli collections (dependencies IV: looper)" { diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 4b390c90ed4..4ddaf387488 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -22,7 +22,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -80,10 +79,9 @@ teardown() { echo "$new_hub" >"$INDEX_PATH" rune -0 cscli collections install crowdsecurity/sshd - rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics -o json - # XXX: we are on the verbose side here... - rune -0 jq -r ".msg" <(stderr) - assert_output --regexp "failed to read Hub index: failed to sync hub items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. 
Run 'sudo cscli hub update' to download the index again" + rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics + # XXX: it would be better to trigger this during parse, not sync + assert_stderr "Error: failed to sync $HUB_DIR: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version" } @test "removing or purging an item already removed by hand" { @@ -92,20 +90,15 @@ teardown() { rune -0 jq -r '.local_path' <(output) rune -0 rm "$(output)" - rune -0 cscli parsers remove crowdsecurity/syslog-logs --debug - assert_stderr --partial "removing crowdsecurity/syslog-logs: not installed -- no need to remove" + rune -0 cscli parsers remove crowdsecurity/syslog-logs + assert_output "Nothing to do." rune -0 cscli parsers inspect crowdsecurity/syslog-logs -o json rune -0 jq -r '.path' <(output) rune -0 rm "$HUB_DIR/$(output)" - rune -0 cscli parsers remove crowdsecurity/syslog-logs --purge --debug - assert_stderr --partial "removing crowdsecurity/syslog-logs: not downloaded -- no need to remove" - - rune -0 cscli parsers remove crowdsecurity/linux --all --error --purge --force - rune -0 cscli collections remove crowdsecurity/linux --all --error --purge --force - refute_output - refute_stderr + rune -0 cscli parsers remove crowdsecurity/syslog-logs --purge + assert_output "Nothing to do." 
} @test "a local item is not tainted" { @@ -122,7 +115,7 @@ teardown() { # and not from hub update rune -0 cscli hub update - assert_stderr --partial "collection crowdsecurity/sshd is tainted" + assert_stderr --partial "collection crowdsecurity/sshd is tainted by local changes" refute_stderr --partial "collection foobar.yaml is tainted" } @@ -151,25 +144,42 @@ teardown() { @test "a local item cannot be downloaded by cscli" { rune -0 mkdir -p "$CONFIG_DIR/collections" rune -0 touch "$CONFIG_DIR/collections/foobar.yaml" - rune -1 cscli collections install foobar.yaml - assert_stderr --partial "foobar.yaml is local, can't download" - rune -1 cscli collections install foobar.yaml --force - assert_stderr --partial "foobar.yaml is local, can't download" + rune -0 cscli collections install foobar.yaml + assert_output --partial "Nothing to do." + rune -0 cscli collections install foobar.yaml --force + assert_output --partial "Nothing to do." + rune -0 cscli collections install --download-only foobar.yaml + assert_output --partial "Nothing to do." 
} @test "a local item cannot be removed by cscli" { - rune -0 mkdir -p "$CONFIG_DIR/collections" - rune -0 touch "$CONFIG_DIR/collections/foobar.yaml" - rune -0 cscli collections remove foobar.yaml - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove foobar.yaml --purge - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove foobar.yaml --force - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove --all - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove --all --purge - assert_stderr --partial "foobar.yaml is a local item, please delete manually" + rune -0 mkdir -p "$CONFIG_DIR/scenarios" + rune -0 touch "$CONFIG_DIR/scenarios/foobar.yaml" + rune -0 cscli scenarios remove foobar.yaml + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. + EOT + rune -0 cscli scenarios remove foobar.yaml --purge + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. + EOT + rune -0 cscli scenarios remove foobar.yaml --force + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. 
+ EOT + + rune -0 cscli scenarios install crowdsecurity/ssh-bf + + rune -0 cscli scenarios remove --all + assert_line "WARN scenarios:foobar.yaml is a local item, please delete manually" + assert_line "disabling scenarios:crowdsecurity/ssh-bf" + + rune -0 cscli scenarios remove --all --purge + assert_line "WARN scenarios:foobar.yaml is a local item, please delete manually" + assert_line "purging scenarios:crowdsecurity/ssh-bf" } @test "a dangling link is reported with a warning" { @@ -182,6 +192,16 @@ teardown() { assert_json '[]' } +@test "replacing a symlink with a regular file makes a local item" { + rune -0 cscli parsers install crowdsecurity/caddy-logs + rune -0 rm "$CONFIG_DIR/parsers/s01-parse/caddy-logs.yaml" + rune -0 cp "$HUB_DIR/parsers/s01-parse/crowdsecurity/caddy-logs.yaml" "$CONFIG_DIR/parsers/s01-parse/caddy-logs.yaml" + rune -0 cscli hub list + rune -0 cscli parsers inspect crowdsecurity/caddy-logs -o json + rune -0 jq -e '[.tainted,.local,.local_version==false,true,"?"]' <(output) + refute_stderr +} + @test "tainted hub file, not enabled, install --force should repair" { rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json diff --git a/test/bats/20_hub_parsers.bats b/test/bats/20_hub_parsers.bats deleted file mode 100644 index 791b1a2177f..00000000000 --- a/test/bats/20_hub_parsers.bats +++ /dev/null @@ -1,383 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec 
stop -} - -#---------- - -@test "cscli parsers list" { - hub_purge_all - - # no items - rune -0 cscli parsers list - assert_output --partial "PARSERS" - rune -0 cscli parsers list -o json - assert_json '{parsers:[]}' - rune -0 cscli parsers list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - rune -0 cscli parsers list - assert_output --partial crowdsecurity/whitelists - assert_output --partial crowdsecurity/windows-auth - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli parsers list -o json - assert_output --partial crowdsecurity/whitelists - assert_output --partial crowdsecurity/windows-auth - rune -0 jq '.parsers | length' <(output) - assert_output "2" - - rune -0 cscli parsers list -o raw - assert_output --partial crowdsecurity/whitelists - assert_output --partial crowdsecurity/windows-auth - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli parsers list -a" { - expected=$(jq <"$INDEX_PATH" -r '.parsers | length') - - rune -0 cscli parsers list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli parsers list -o json -a - rune -0 jq '.parsers | length' <(output) - assert_output "$expected" - - rune -0 cscli parsers list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli parsers list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli parsers list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli parsers list -o json -a | jq -r '.parsers[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli parsers list [parser]..." 
{ - # non-existent - rune -1 cscli parsers install foo/bar - assert_stderr --partial "can't find 'foo/bar' in parsers" - - # not installed - rune -0 cscli parsers list crowdsecurity/whitelists - assert_output --regexp 'crowdsecurity/whitelists.*disabled' - - # install two items - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - # list an installed item - rune -0 cscli parsers list crowdsecurity/whitelists - assert_output --regexp "crowdsecurity/whitelists.*enabled" - refute_output --partial "crowdsecurity/windows-auth" - - # list multiple installed and non installed items - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs - assert_output --partial "crowdsecurity/whitelists" - assert_output --partial "crowdsecurity/windows-auth" - assert_output --partial "crowdsecurity/traefik-logs" - - rune -0 cscli parsers list crowdsecurity/whitelists -o json - rune -0 jq '.parsers | length' <(output) - assert_output "1" - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o json - rune -0 jq '.parsers | length' <(output) - assert_output "3" - - rune -0 cscli parsers list crowdsecurity/whitelists -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli parsers install" { - rune -1 cscli parsers install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli parsers install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in parsers" - - # simple install - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 
'crowdsecurity/whitelists' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli parsers install crowdsecurity/sshd-logz - assert_stderr --partial "can't find 'crowdsecurity/sshd-logz' in parsers, did you mean 'crowdsecurity/sshd-logs'?" - - # install multiple - rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs - rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics - assert_output --partial 'crowdsecurity/pgsql-logs' - assert_output --partial 'installed: true' - rune -0 cscli parsers inspect crowdsecurity/postfix-logs --no-metrics - assert_output --partial 'crowdsecurity/postfix-logs' - assert_output --partial 'installed: true' -} - -@test "cscli parsers install (file location and download-only)" { - rune -0 cscli parsers install crowdsecurity/whitelists --download-only - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 'crowdsecurity/whitelists' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" -} - -@test "cscli parsers install --force (tainted)" { - rune -0 cscli parsers install crowdsecurity/whitelists - echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - rune -1 cscli parsers install crowdsecurity/whitelists - assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't overwrite unless --force" - - rune -0 cscli parsers install crowdsecurity/whitelists --force - assert_stderr --partial "Enabled crowdsecurity/whitelists" -} - -@test "cscli parsers 
install --ignore (skip on errors)" { - rune -1 cscli parsers install foo/bar crowdsecurity/whitelists - assert_stderr --partial "can't find 'foo/bar' in parsers" - refute_stderr --partial "Enabled parsers: crowdsecurity/whitelists" - - rune -0 cscli parsers install foo/bar crowdsecurity/whitelists --ignore - assert_stderr --partial "can't find 'foo/bar' in parsers" - assert_stderr --partial "Enabled parsers: crowdsecurity/whitelists" -} - -@test "cscli parsers inspect" { - rune -1 cscli parsers inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli parsers inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - - # one item - rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics - assert_line 'type: parsers' - assert_line 'stage: s01-parse' - assert_line 'name: crowdsecurity/sshd-logs' - assert_line 'author: crowdsecurity' - assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli parsers inspect crowdsecurity/sshd-logs - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json - rune -0 jq -c '[.type, .stage, .name, .author, .path, .installed]' <(output) - assert_json '["parsers","s01-parse","crowdsecurity/sshd-logs","crowdsecurity","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false]' - - # one item, raw - rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o raw - assert_line 'type: parsers' - assert_line 'name: crowdsecurity/sshd-logs' - assert_line 'stage: s01-parse' - assert_line 'author: crowdsecurity' - assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli parsers inspect 
crowdsecurity/sshd-logs crowdsecurity/whitelists --no-metrics - assert_output --partial 'crowdsecurity/sshd-logs' - assert_output --partial 'crowdsecurity/whitelists' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o json - rune -0 jq -sc '[.[] | [.type, .stage, .name, .author, .path, .installed]]' <(output) - assert_json '[["parsers","s01-parse","crowdsecurity/sshd-logs","crowdsecurity","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false],["parsers","s02-enrich","crowdsecurity/whitelists","crowdsecurity","parsers/s02-enrich/crowdsecurity/whitelists.yaml",false]]' - - # multiple items, raw - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o raw - assert_output --partial 'crowdsecurity/sshd-logs' - assert_output --partial 'crowdsecurity/whitelists' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli parsers remove" { - rune -1 cscli parsers remove - assert_stderr --partial "specify at least one parser to remove or '--all'" - rune -1 cscli parsers remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - - rune -0 cscli parsers install crowdsecurity/whitelists --download-only - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_stderr --partial "removing crowdsecurity/whitelists: not installed -- no need to remove" - - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_stderr --partial "Removed crowdsecurity/whitelists" - - rune -0 cscli parsers remove crowdsecurity/whitelists --purge - assert_stderr --partial 'Removed source file [crowdsecurity/whitelists]' - - rune -0 cscli 
parsers remove crowdsecurity/whitelists - assert_stderr --partial "removing crowdsecurity/whitelists: not installed -- no need to remove" - - rune -0 cscli parsers remove crowdsecurity/whitelists --purge --debug - assert_stderr --partial 'removing crowdsecurity/whitelists: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/whitelists]' - - # install, then remove, check files - rune -0 cscli parsers install crowdsecurity/whitelists - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - # delete is an alias for remove - rune -0 cscli parsers install crowdsecurity/whitelists - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers delete crowdsecurity/whitelists - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - # purge - assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - rune -0 cscli parsers remove crowdsecurity/whitelists --purge - assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - # --all - rune -0 cscli parsers list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli parsers remove --all - - rune -0 cscli parsers list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli parsers remove --force" { - # remove a parser that belongs to a collection - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli parsers remove crowdsecurity/sshd-logs - assert_stderr --partial "crowdsecurity/sshd-logs belongs to collections: [crowdsecurity/sshd]" - assert_stderr --partial "Run 'sudo cscli parsers remove crowdsecurity/sshd-logs --force' if you want 
to force remove this parser" -} - -@test "cscli parsers upgrade" { - rune -1 cscli parsers upgrade - assert_stderr --partial "specify at least one parser to upgrade or '--all'" - rune -1 cscli parsers upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - rune -0 cscli parsers remove crowdsecurity/pam-logs --purge - rune -1 cscli parsers upgrade crowdsecurity/pam-logs - assert_stderr --partial "can't upgrade crowdsecurity/pam-logs: not installed" - rune -0 cscli parsers install crowdsecurity/pam-logs --download-only - rune -1 cscli parsers upgrade crowdsecurity/pam-logs - assert_stderr --partial "can't upgrade crowdsecurity/pam-logs: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all parsers - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.parsers |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli parsers install crowdsecurity/whitelists - - echo "v0.0" > "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli parsers upgrade crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - # XXX: should return error - rune -0 cscli parsers upgrade crowdsecurity/whitelists - assert_stderr --partial "crowdsecurity/whitelists is tainted, --force to overwrite" - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli parsers upgrade crowdsecurity/whitelists --force - rune -0 cscli parsers inspect crowdsecurity/whitelists -o 
json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli parsers install crowdsecurity/windows-auth - echo "v0.0" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - echo "v0.0" >"$CONFIG_DIR/parsers/s01-parse/windows-auth.yaml" - rune -0 cscli parsers list -o json - rune -0 jq -e '[.parsers[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/windows-auth - rune -0 cscli parsers list -o json - rune -0 jq -e 'any(.parsers[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - echo "v0.0" >"$CONFIG_DIR/parsers/s01-parse/windows-auth.yaml" - rune -0 cscli parsers list -o json - rune -0 jq -e '[.parsers[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli parsers upgrade --all - rune -0 cscli parsers list -o json - rune -0 jq -e 'any(.parsers[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_postoverflows.bats b/test/bats/20_hub_postoverflows.bats deleted file mode 100644 index 37337b08caa..00000000000 --- a/test/bats/20_hub_postoverflows.bats +++ /dev/null @@ -1,383 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli postoverflows list" { - hub_purge_all - - # no items - rune -0 cscli postoverflows list - assert_output --partial "POSTOVERFLOWS" - rune -0 cscli postoverflows list -o json - assert_json 
'{postoverflows:[]}' - rune -0 cscli postoverflows list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - rune -0 cscli postoverflows list - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli postoverflows list -o json - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 jq '.postoverflows | length' <(output) - assert_output "2" - - rune -0 cscli postoverflows list -o raw - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli postoverflows list -a" { - expected=$(jq <"$INDEX_PATH" -r '.postoverflows | length') - - rune -0 cscli postoverflows list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli postoverflows list -o json -a - rune -0 jq '.postoverflows | length' <(output) - assert_output "$expected" - - rune -0 cscli postoverflows list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli postoverflows list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli postoverflows list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli postoverflows list -o json -a | jq -r '.postoverflows[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli postoverflows list [postoverflow]..." 
{ - # non-existent - rune -1 cscli postoverflows install foo/bar - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - - # not installed - rune -0 cscli postoverflows list crowdsecurity/rdns - assert_output --regexp 'crowdsecurity/rdns.*disabled' - - # install two items - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - # list an installed item - rune -0 cscli postoverflows list crowdsecurity/rdns - assert_output --regexp "crowdsecurity/rdns.*enabled" - refute_output --partial "crowdsecurity/cdn-whitelist" - - # list multiple installed and non installed items - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range - assert_output --partial "crowdsecurity/rdns" - assert_output --partial "crowdsecurity/cdn-whitelist" - assert_output --partial "crowdsecurity/ipv6_to_range" - - rune -0 cscli postoverflows list crowdsecurity/rdns -o json - rune -0 jq '.postoverflows | length' <(output) - assert_output "1" - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range -o json - rune -0 jq '.postoverflows | length' <(output) - assert_output "3" - - rune -0 cscli postoverflows list crowdsecurity/rdns -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli postoverflows install" { - rune -1 cscli postoverflows install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli postoverflows install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in postoverflows" - - # simple install - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows inspect 
crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli postoverflows install crowdsecurity/rdnf - assert_stderr --partial "can't find 'crowdsecurity/rdnf' in postoverflows, did you mean 'crowdsecurity/rdns'?" - - # install multiple - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: true' - rune -0 cscli postoverflows inspect crowdsecurity/cdn-whitelist --no-metrics - assert_output --partial 'crowdsecurity/cdn-whitelist' - assert_output --partial 'installed: true' -} - -@test "cscli postoverflows install (file location and download-only)" { - rune -0 cscli postoverflows install crowdsecurity/rdns --download-only - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" -} - -@test "cscli postoverflows install --force (tainted)" { - rune -0 cscli postoverflows install crowdsecurity/rdns - echo "dirty" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - rune -1 cscli postoverflows install crowdsecurity/rdns - assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't overwrite unless --force" - - rune -0 cscli postoverflows install crowdsecurity/rdns --force - assert_stderr --partial "Enabled 
crowdsecurity/rdns" -} - -@test "cscli postoverflow install --ignore (skip on errors)" { - rune -1 cscli postoverflows install foo/bar crowdsecurity/rdns - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - refute_stderr --partial "Enabled postoverflows: crowdsecurity/rdns" - - rune -0 cscli postoverflows install foo/bar crowdsecurity/rdns --ignore - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - assert_stderr --partial "Enabled postoverflows: crowdsecurity/rdns" -} - -@test "cscli postoverflows inspect" { - rune -1 cscli postoverflows inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli postoverflows inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - - # one item - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_line 'type: postoverflows' - assert_line 'stage: s00-enrich' - assert_line 'name: crowdsecurity/rdns' - assert_line 'author: crowdsecurity' - assert_line 'path: postoverflows/s00-enrich/crowdsecurity/rdns.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli postoverflows inspect crowdsecurity/rdns - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -c '[.type, .stage, .name, .author, .path, .installed]' <(output) - assert_json '["postoverflows","s00-enrich","crowdsecurity/rdns","crowdsecurity","postoverflows/s00-enrich/crowdsecurity/rdns.yaml",false]' - - # one item, raw - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o raw - assert_line 'type: postoverflows' - assert_line 'name: crowdsecurity/rdns' - assert_line 'stage: s00-enrich' - assert_line 'author: crowdsecurity' - assert_line 'path: postoverflows/s00-enrich/crowdsecurity/rdns.yaml' - assert_line 'installed: false' - 
refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'crowdsecurity/cdn-whitelist' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist -o json - rune -0 jq -sc '[.[] | [.type, .stage, .name, .author, .path, .installed]]' <(output) - assert_json '[["postoverflows","s00-enrich","crowdsecurity/rdns","crowdsecurity","postoverflows/s00-enrich/crowdsecurity/rdns.yaml",false],["postoverflows","s01-whitelist","crowdsecurity/cdn-whitelist","crowdsecurity","postoverflows/s01-whitelist/crowdsecurity/cdn-whitelist.yaml",false]]' - - # multiple items, raw - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist -o raw - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'crowdsecurity/cdn-whitelist' - run -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli postoverflows remove" { - rune -1 cscli postoverflows remove - assert_stderr --partial "specify at least one postoverflow to remove or '--all'" - rune -1 cscli postoverflows remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - - rune -0 cscli postoverflows install crowdsecurity/rdns --download-only - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial "removing crowdsecurity/rdns: not installed -- no need to remove" - - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial 'Removed crowdsecurity/rdns' - - rune 
-0 cscli postoverflows remove crowdsecurity/rdns --purge - assert_stderr --partial 'Removed source file [crowdsecurity/rdns]' - - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial 'removing crowdsecurity/rdns: not installed -- no need to remove' - - rune -0 cscli postoverflows remove crowdsecurity/rdns --purge --debug - assert_stderr --partial 'removing crowdsecurity/rdns: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/rdns]' - - # install, then remove, check files - rune -0 cscli postoverflows install crowdsecurity/rdns - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - # delete is an alias for remove - rune -0 cscli postoverflows install crowdsecurity/rdns - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows delete crowdsecurity/rdns - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - # purge - assert_file_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - rune -0 cscli postoverflows remove crowdsecurity/rdns --purge - assert_file_not_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - # --all - rune -0 cscli postoverflows list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli postoverflows remove --all - - rune -0 cscli postoverflows list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli postoverflows remove --force" { - # remove a postoverflow that belongs to a collection - rune -0 cscli collections install crowdsecurity/auditd - rune -0 cscli postoverflows remove crowdsecurity/auditd-whitelisted-process - assert_stderr 
--partial "crowdsecurity/auditd-whitelisted-process belongs to collections: [crowdsecurity/auditd]" - assert_stderr --partial "Run 'sudo cscli postoverflows remove crowdsecurity/auditd-whitelisted-process --force' if you want to force remove this postoverflow" -} - -@test "cscli postoverflows upgrade" { - rune -1 cscli postoverflows upgrade - assert_stderr --partial "specify at least one postoverflow to upgrade or '--all'" - rune -1 cscli postoverflows upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - rune -0 cscli postoverflows remove crowdsecurity/discord-crawler-whitelist --purge - rune -1 cscli postoverflows upgrade crowdsecurity/discord-crawler-whitelist - assert_stderr --partial "can't upgrade crowdsecurity/discord-crawler-whitelist: not installed" - rune -0 cscli postoverflows install crowdsecurity/discord-crawler-whitelist --download-only - rune -1 cscli postoverflows upgrade crowdsecurity/discord-crawler-whitelist - assert_stderr --partial "can't upgrade crowdsecurity/discord-crawler-whitelist: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all postoverflows - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.postoverflows |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli postoverflows install crowdsecurity/rdns - - echo "v0.0" > "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli postoverflows upgrade crowdsecurity/rdns - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - # XXX: should return error - rune -0 
cscli postoverflows upgrade crowdsecurity/rdns - assert_stderr --partial "crowdsecurity/rdns is tainted, --force to overwrite" - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli postoverflows upgrade crowdsecurity/rdns --force - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli postoverflows install crowdsecurity/cdn-whitelist - echo "v0.0" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - echo "v0.0" >"$CONFIG_DIR/postoverflows/s01-whitelist/cdn-whitelist.yaml" - rune -0 cscli postoverflows list -o json - rune -0 jq -e '[.postoverflows[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli postoverflows upgrade crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 cscli postoverflows list -o json - rune -0 jq -e 'any(.postoverflows[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - echo "v0.0" >"$CONFIG_DIR/postoverflows/s01-whitelist/cdn-whitelist.yaml" - rune -0 cscli postoverflows list -o json - rune -0 jq -e '[.postoverflows[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli postoverflows upgrade --all - rune -0 cscli postoverflows list -o json - rune -0 jq -e 'any(.postoverflows[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats deleted file mode 100644 index 3ab3d944c93..00000000000 --- a/test/bats/20_hub_scenarios.bats +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get 
'.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli scenarios list" { - hub_purge_all - - # no items - rune -0 cscli scenarios list - assert_output --partial "SCENARIOS" - rune -0 cscli scenarios list -o json - assert_json '{scenarios:[]}' - rune -0 cscli scenarios list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - rune -0 cscli scenarios list - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli scenarios list -o json - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 jq '.scenarios | length' <(output) - assert_output "2" - - rune -0 cscli scenarios list -o raw - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli scenarios list -a" { - expected=$(jq <"$INDEX_PATH" -r '.scenarios | length') - - rune -0 cscli scenarios list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli scenarios list -o json -a - rune -0 jq '.scenarios | length' <(output) - assert_output "$expected" - - rune -0 cscli scenarios list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli scenarios list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli scenarios list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli 
scenarios list -o json -a | jq -r '.scenarios[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli scenarios list [scenario]..." { - # non-existent - rune -1 cscli scenario install foo/bar - assert_stderr --partial "can't find 'foo/bar' in scenarios" - - # not installed - rune -0 cscli scenarios list crowdsecurity/ssh-bf - assert_output --regexp 'crowdsecurity/ssh-bf.*disabled' - - # install two items - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - # list an installed item - rune -0 cscli scenarios list crowdsecurity/ssh-bf - assert_output --regexp "crowdsecurity/ssh-bf.*enabled" - refute_output --partial "crowdsecurity/telnet-bf" - - # list multiple installed and non installed items - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf crowdsecurity/aws-bf - assert_output --partial "crowdsecurity/ssh-bf" - assert_output --partial "crowdsecurity/telnet-bf" - assert_output --partial "crowdsecurity/aws-bf" - - rune -0 cscli scenarios list crowdsecurity/ssh-bf -o json - rune -0 jq '.scenarios | length' <(output) - assert_output "1" - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf -o json - rune -0 jq '.scenarios | length' <(output) - assert_output "3" - - rune -0 cscli scenarios list crowdsecurity/ssh-bf -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli scenarios install" { - rune -1 cscli scenarios install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli scenarios install crowdsecurity/blahblah - assert_stderr --partial "can't 
find 'crowdsecurity/blahblah' in scenarios" - - # simple install - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli scenarios install crowdsecurity/ssh-tf - assert_stderr --partial "can't find 'crowdsecurity/ssh-tf' in scenarios, did you mean 'crowdsecurity/ssh-bf'?" - - # install multiple - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: true' - rune -0 cscli scenarios inspect crowdsecurity/telnet-bf --no-metrics - assert_output --partial 'crowdsecurity/telnet-bf' - assert_output --partial 'installed: true' -} - -@test "cscli scenarios install (file location and download-only)" { - # simple install - rune -0 cscli scenarios install crowdsecurity/ssh-bf --download-only - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" -} - -@test "cscli scenarios install --force (tainted)" { - rune -0 cscli scenarios install crowdsecurity/ssh-bf - echo "dirty" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - - rune -1 cscli scenarios install crowdsecurity/ssh-bf - assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't overwrite unless --force" - - rune -0 cscli 
scenarios install crowdsecurity/ssh-bf --force - assert_stderr --partial "Enabled crowdsecurity/ssh-bf" -} - -@test "cscli scenarios install --ignore (skip on errors)" { - rune -1 cscli scenarios install foo/bar crowdsecurity/ssh-bf - assert_stderr --partial "can't find 'foo/bar' in scenarios" - refute_stderr --partial "Enabled scenarios: crowdsecurity/ssh-bf" - - rune -0 cscli scenarios install foo/bar crowdsecurity/ssh-bf --ignore - assert_stderr --partial "can't find 'foo/bar' in scenarios" - assert_stderr --partial "Enabled scenarios: crowdsecurity/ssh-bf" -} - -@test "cscli scenarios inspect" { - rune -1 cscli scenarios inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli scenarios inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - - # one item - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_line 'type: scenarios' - assert_line 'name: crowdsecurity/ssh-bf' - assert_line 'author: crowdsecurity' - assert_line 'path: scenarios/crowdsecurity/ssh-bf.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -c '[.type, .name, .author, .path, .installed]' <(output) - assert_json '["scenarios","crowdsecurity/ssh-bf","crowdsecurity","scenarios/crowdsecurity/ssh-bf.yaml",false]' - - # one item, raw - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o raw - assert_line 'type: scenarios' - assert_line 'name: crowdsecurity/ssh-bf' - assert_line 'author: crowdsecurity' - assert_line 'path: scenarios/crowdsecurity/ssh-bf.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli scenarios 
inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'crowdsecurity/telnet-bf' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf -o json - rune -0 jq -sc '[.[] | [.type, .name, .author, .path, .installed]]' <(output) - assert_json '[["scenarios","crowdsecurity/ssh-bf","crowdsecurity","scenarios/crowdsecurity/ssh-bf.yaml",false],["scenarios","crowdsecurity/telnet-bf","crowdsecurity","scenarios/crowdsecurity/telnet-bf.yaml",false]]' - - # multiple items, raw - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf -o raw - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'crowdsecurity/telnet-bf' - run -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli scenarios remove" { - rune -1 cscli scenarios remove - assert_stderr --partial "specify at least one scenario to remove or '--all'" - rune -1 cscli scenarios remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf --download-only - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "removing crowdsecurity/ssh-bf: not installed -- no need to remove" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "Removed crowdsecurity/ssh-bf" - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge - assert_stderr --partial 'Removed source file [crowdsecurity/ssh-bf]' - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "removing 
crowdsecurity/ssh-bf: not installed -- no need to remove" - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge --debug - assert_stderr --partial 'removing crowdsecurity/ssh-bf: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/ssh-bf]' - - # install, then remove, check files - rune -0 cscli scenarios install crowdsecurity/ssh-bf - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - # delete is an alias for remove - rune -0 cscli scenarios install crowdsecurity/ssh-bf - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios delete crowdsecurity/ssh-bf - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - # purge - assert_file_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge - assert_file_not_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - # --all - rune -0 cscli scenarios list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli scenarios remove --all - - rune -0 cscli scenarios list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli scenarios remove --force" { - # remove a scenario that belongs to a collection - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "crowdsecurity/ssh-bf belongs to collections: [crowdsecurity/sshd]" - assert_stderr --partial "Run 'sudo cscli scenarios remove crowdsecurity/ssh-bf --force' if you want to force remove this scenario" -} - -@test "cscli scenarios upgrade" { - rune -1 cscli scenarios upgrade - assert_stderr --partial "specify at least one scenario to upgrade or 
'--all'" - rune -1 cscli scenarios upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - rune -0 cscli scenarios remove crowdsecurity/vsftpd-bf --purge - rune -1 cscli scenarios upgrade crowdsecurity/vsftpd-bf - assert_stderr --partial "can't upgrade crowdsecurity/vsftpd-bf: not installed" - rune -0 cscli scenarios install crowdsecurity/vsftpd-bf --download-only - rune -1 cscli scenarios upgrade crowdsecurity/vsftpd-bf - assert_stderr --partial "can't upgrade crowdsecurity/vsftpd-bf: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all scenarios - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.scenarios |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - - echo "v0.0" > "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - # XXX: should return error - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf - assert_stderr --partial "crowdsecurity/ssh-bf is tainted, --force to overwrite" - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf --force - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli scenarios install crowdsecurity/telnet-bf - echo "v0.0" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - echo 
"v0.0" >"$CONFIG_DIR/scenarios/telnet-bf.yaml" - rune -0 cscli scenarios list -o json - rune -0 jq -e '[.scenarios[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 cscli scenarios list -o json - rune -0 jq -e 'any(.scenarios[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - echo "v0.0" >"$CONFIG_DIR/scenarios/telnet-bf.yaml" - rune -0 cscli scenarios list -o json - rune -0 jq -e '[.scenarios[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli scenarios upgrade --all - rune -0 cscli scenarios list -o json - rune -0 jq -e 'any(.scenarios[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index d4cce67d0b0..3d73bd096ae 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -30,9 +30,8 @@ teardown() { } @test "don't overwrite local credentials by default" { - rune -1 cscli machines add local -a -o json - rune -0 jq -r '.msg' <(stderr) - assert_output --partial 'already exists: please remove it, use "--force" or specify a different file with "-f"' + rune -1 cscli machines add local -a + assert_stderr --partial 'already exists: please remove it, use "--force" or specify a different file with "-f"' rune -0 cscli machines add local -a --force assert_stderr --partial "Machine 'local' successfully added to the local API." 
} diff --git a/test/bats/80_alerts.bats b/test/bats/80_alerts.bats index 6d84c1a1fce..f01e918925c 100644 --- a/test/bats/80_alerts.bats +++ b/test/bats/80_alerts.bats @@ -89,7 +89,7 @@ teardown() { assert_line --regexp "^ - AS *: *$" assert_line --regexp "^ - Begin *: .*$" assert_line --regexp "^ - End *: .*$" - assert_line --regexp "^ - Active Decisions *:$" + assert_line --regexp "^\| Active Decisions *\|$" assert_line --regexp "^.* ID .* scope:value .* action .* expiration .* created_at .*$" assert_line --regexp "^.* Ip:10.20.30.40 .* ban .*$" diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index 8601414db48..3c3ab9987ca 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -31,11 +31,7 @@ teardown() { @test "'decisions add' requires parameters" { rune -1 cscli decisions add - assert_stderr --partial "missing arguments, a value is required (--ip, --range or --scope and --value)" - - rune -1 cscli decisions add -o json - rune -0 jq -c '[ .level, .msg]' <(stderr | grep "^{") - assert_output '["fatal","missing arguments, a value is required (--ip, --range or --scope and --value)"]' + assert_stderr "Error: missing arguments, a value is required (--ip, --range or --scope and --value)" } @test "cscli decisions list, with and without --machine" { @@ -61,16 +57,13 @@ teardown() { @test "cscli decisions list, incorrect parameters" { rune -1 cscli decisions list --until toto - assert_stderr --partial 'unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration \"toto\"' - rune -1 cscli decisions list --until toto -o json - rune -0 jq -c '[.level, .msg]' <(stderr | grep "^{") - assert_output '["fatal","unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration \"toto\""]' + assert_stderr 'Error: unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration "toto"' } @test "cscli 
decisions import" { # required input rune -1 cscli decisions import - assert_stderr --partial 'required flag(s) \"input\" not set"' + assert_stderr 'Error: required flag(s) "input" not set' # unsupported format rune -1 cscli decisions import -i - <<<'value\n5.6.7.8' --format xml @@ -172,7 +165,7 @@ teardown() { EOT assert_stderr --partial 'Parsing values' assert_stderr --partial 'Imported 1 decisions' - assert_file_contains "$LOGFILE" "invalid addr/range 'whatever': invalid address" + assert_file_contains "$LOGFILE" "invalid addr/range 'whatever': invalid ip address 'whatever'" rune -0 cscli decisions list -a -o json assert_json '[]' @@ -189,7 +182,7 @@ teardown() { EOT assert_stderr --partial 'Parsing values' assert_stderr --partial 'Imported 3 decisions' - assert_file_contains "$LOGFILE" "invalid addr/range 'bad-apple': invalid address" + assert_file_contains "$LOGFILE" "invalid addr/range 'bad-apple': invalid ip address 'bad-apple'" rune -0 cscli decisions list -a -o json rune -0 jq -r '.[0].decisions | length' <(output) diff --git a/test/bats/crowdsec-acquisition.bats b/test/bats/crowdsec-acquisition.bats new file mode 100644 index 00000000000..1a92624b4c4 --- /dev/null +++ b/test/bats/crowdsec-acquisition.bats @@ -0,0 +1,78 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + mkdir -p "$ACQUIS_DIR" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "malformed acqusition file" { + cat >"$ACQUIS_DIR/file.yaml" <<-EOT + filename: + - /path/to/file.log + labels: + type: syslog + EOT + + rune -1 "$CROWDSEC" -t + assert_stderr --partial "crowdsec init: while loading acquisition config: while configuring datasource of type file from $ACQUIS_DIR/file.yaml (position 0): cannot parse FileAcquisition 
configuration: yaml: unmarshal errors:" +} + +@test "datasource type detection" { + config_set '.common.log_level="debug" | .common.log_media="stdout"' + + # for backward compatibility, a missing source type is not a problem if it can be detected by the presence of other fields + + cat >"$ACQUIS_DIR/file.yaml" <<-EOT + filename: /path/to/file.log + labels: + type: syslog + --- + filenames: + - /path/to/file.log + labels: + type: syslog + EOT + + cat >"$ACQUIS_DIR"/journal.yaml <<-EOT + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + # However, a wrong source type will raise a brow. + # This is currently not a fatal error because it has been tolerated in the past. + + cat >"$ACQUIS_DIR"/bad.yaml <<-EOT + source: docker + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + rune -0 "$CROWDSEC" -t + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/file.yaml (position 0): detected 'source=file'" + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/file.yaml (position 1): detected 'source=file'" + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/journal.yaml (position 0): detected 'source=journalctl'" + assert_stderr --partial "datasource type mismatch in $ACQUIS_DIR/bad.yaml (position 0): found 'docker' but should probably be 'journalctl'" +} diff --git a/test/bats/cscli-hubtype-inspect.bats b/test/bats/cscli-hubtype-inspect.bats new file mode 100644 index 00000000000..9c96aadb3ad --- /dev/null +++ b/test/bats/cscli-hubtype-inspect.bats @@ -0,0 +1,93 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli inspect". +# +# Behavior that is specific to a hubtype should be tested in a separate file. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers inspect" { + rune -1 cscli parsers inspect + assert_stderr --partial 'requires at least 1 arg(s), only received 0' + # required for metrics + ./instance-crowdsec start + + rune -1 cscli parsers inspect blahblah/blahblah + assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" + + # one item + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics + assert_line 'type: parsers' + assert_line 'name: crowdsecurity/sshd-logs' + assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' + assert_line 'installed: false' + refute_line --partial 'Current metrics:' + + # one item, with metrics + rune -0 cscli parsers inspect crowdsecurity/sshd-logs + assert_line --partial 'Current metrics:' + + # one item, json + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -c '[.type, .name, .path, .installed]' <(output) + assert_json '["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false]' + + # one item, raw + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o raw + assert_line 'type: parsers' + assert_line 'name: crowdsecurity/sshd-logs' + assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' + assert_line 'installed: false' + refute_line --partial 'Current metrics:' + + # multiple items + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists --no-metrics + assert_output --partial 'crowdsecurity/sshd-logs' + 
assert_output --partial 'crowdsecurity/whitelists' + rune -1 grep -c 'Current metrics:' <(output) + assert_output "0" + + # multiple items, with metrics + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists + rune -0 grep -c 'Current metrics:' <(output) + assert_output "2" + + # multiple items, json + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o json + rune -0 jq -sc '[.[] | [.type, .name, .path, .installed]]' <(output) + assert_json '[["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false],["parsers","crowdsecurity/whitelists","parsers/s02-enrich/crowdsecurity/whitelists.yaml",false]]' + + # multiple items, raw + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o raw + assert_output --partial 'crowdsecurity/sshd-logs' + assert_output --partial 'crowdsecurity/whitelists' + rune -1 grep -c 'Current metrics:' <(output) + assert_output "0" +} diff --git a/test/bats/cscli-hubtype-install.bats b/test/bats/cscli-hubtype-install.bats new file mode 100644 index 00000000000..58c16dd968d --- /dev/null +++ b/test/bats/cscli-hubtype-install.bats @@ -0,0 +1,301 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli install". +# +# Behavior that is specific to a hubtype should be tested in a separate file. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR +# INDEX_PATH=$(config_get '.config_paths.index_path') +# export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +#---------- + +@test "cscli install (no argument)" { + rune -1 cscli parsers install + refute_output + assert_stderr --partial 'requires at least 1 arg(s), only received 0' +} + +@test "cscli install (aliased)" { + rune -1 cscli parser install + refute_output + assert_stderr --partial 'requires at least 1 arg(s), only received 0' +} + +@test "install an item (non-existent)" { + rune -1 cscli parsers install foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "install an item (dry run)" { + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output - --regexp <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists \([0-9]+.[0-9]+\) + ✅ enable + parsers: crowdsecurity/whitelists + + Dry run, no action taken. + EOT + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "install an item (dry-run, de-duplicate commands)" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/whitelists --dry-run --output raw + assert_output - --regexp <<-EOT + Action plan: + 📥 download parsers:crowdsecurity/whitelists \([0-9]+.[0-9]+\) + ✅ enable parsers:crowdsecurity/whitelists + + Dry run, no action taken. 
+ EOT + refute_stderr +} + +@test "install an item" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + enabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "install an item (autocorrect)" { + rune -1 cscli parsers install crowdsecurity/whatelists + assert_stderr --partial "can't find 'crowdsecurity/whatelists' in parsers, did you mean 'crowdsecurity/whitelists'?" + refute_output +} + +@test "install an item (download only)" { + assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "install an item (already installed)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr +} + +@test "install an item (force is no-op if not tainted)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers install crowdsecurity/whitelists --force + assert_output "Nothing to do." 
+ refute_stderr +} + +@test "install an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output - --stderr <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + # XXX should this fail with status 1 instead? + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + rune -0 cscli parsers install crowdsecurity/whitelists --force + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "install multiple items" { + rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs + rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + rune -0 cscli parsers inspect crowdsecurity/postfix-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "install multiple items (some already installed)" { + rune -0 cscli parsers install crowdsecurity/pgsql-logs + rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs --dry-run + assert_output - --regexp <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/postfix-logs \([0-9]+.[0-9]+\) + ✅ enable + parsers: crowdsecurity/postfix-logs + + Dry run, no action taken. + EOT + refute_stderr +} + +@test "install one or multiple items (ignore errors)" { + rune -0 cscli parsers install foo/bar --ignore + assert_stderr --partial "can't find 'foo/bar' in parsers" + assert_output "Nothing to do." 
+ + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + # XXX: this is not testing '--ignore' anymore; TODO find a better error to ignore + # and maybe re-evaluate the --ignore flag + rune -0 cscli parsers install crowdsecurity/whitelists --ignore + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + # error on one item, should still install the others + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/pgsql-logs --ignore + refute_stderr + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + downloading parsers:crowdsecurity/pgsql-logs + enabling parsers:crowdsecurity/pgsql-logs + + $RELOAD_MESSAGE + EOT + rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "override part of a collection with local items" { + # A collection will use a local item to fulfil a dependency provided it has + # the correct name field. 
+ + mkdir -p "$CONFIG_DIR/parsers/s01-parse" + echo "name: crowdsecurity/sshd-logs" > "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from hub + rune -0 cscli parsers install crowdsecurity/sshd-logs + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from a collection + rune -0 cscli collections install crowdsecurity/sshd + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # verify it installed the rest of the collection + assert_line 'enabling contexts:crowdsecurity/bf_base' + assert_line 'enabling collections:crowdsecurity/sshd' + + # remove them + rune -0 cscli collections delete crowdsecurity/sshd --force --purge + rune -0 rm "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + + # do the same with a different file name + echo "name: crowdsecurity/sshd-logs" > "$CONFIG_DIR/parsers/s01-parse/something.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from hub + rune -0 cscli parsers install crowdsecurity/sshd-logs + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # attempt to install from a collection + rune -0 cscli collections install crowdsecurity/sshd + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # verify it installed the rest of the collection + assert_line 'enabling contexts:crowdsecurity/bf_base' + assert_line 'enabling collections:crowdsecurity/sshd' +} + +@test "a local item can override an official one, if it's not installed" { + mkdir -p 
"$CONFIG_DIR/parsers/s02-enrich" + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/hi.yaml" + # no warning + rune -0 cscli parsers list + refute_stderr + rune -0 cscli parsers list -o json + rune -0 jq -e '.installed,.local==true,true' <(output) +} + +@test "conflicting item names: local and non local - the local one has priority" { + mkdir -p "$CONFIG_DIR/parsers/s02-enrich" + rune -0 cscli parsers install crowdsecurity/whitelists + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/hi.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -e '.installed,.local==true,true' <(output) + rune -0 cscli parsers list + assert_stderr --partial "multiple parsers named crowdsecurity/whitelists: ignoring $CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "conflicting item names: both local, the last one wins" { + mkdir -p "$CONFIG_DIR/parsers/s02-enrich" + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/one.yaml" + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/two.yaml" + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -r '.local_path' <(output) + assert_output --partial "/parsers/s02-enrich/two.yaml" + rune -0 cscli parsers list + assert_stderr --partial "multiple parsers named crowdsecurity/whitelists: ignoring $CONFIG_DIR/parsers/s02-enrich/one.yaml" +} diff --git a/test/bats/cscli-hubtype-list.bats b/test/bats/cscli-hubtype-list.bats new file mode 100644 index 00000000000..14113650c74 --- /dev/null +++ b/test/bats/cscli-hubtype-list.bats @@ -0,0 +1,130 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} 
+ +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers list" { + hub_purge_all + + # no items + rune -0 cscli parsers list + assert_output --partial "PARSERS" + rune -0 cscli parsers list -o json + assert_json '{parsers:[]}' + rune -0 cscli parsers list -o raw + assert_output 'name,status,version,description' + + # some items + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + rune -0 cscli parsers list + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 grep -c enabled <(output) + assert_output "2" + + rune -0 cscli parsers list -o json + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 jq '.parsers | length' <(output) + assert_output "2" + + rune -0 cscli parsers list -o raw + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "2" +} + +@test "cscli parsers list -a" { + expected=$(jq <"$INDEX_PATH" -r '.parsers | length') + + rune -0 cscli parsers list -a + rune -0 grep -c disabled <(output) + assert_output "$expected" + + rune -0 cscli parsers list -o json -a + rune -0 jq '.parsers | length' <(output) + assert_output "$expected" + + rune -0 cscli parsers list -o raw -a + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "$expected" + + # the list should be the same in all formats, and sorted (not case sensitive) + + list_raw=$(cscli parsers list -o raw -a | tail -n +2 | cut -d, -f1) + list_human=$(cscli parsers list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) + list_json=$(cscli parsers list -o json -a | jq -r '.parsers[].name') + + # use python to sort 
because it handles "_" like go + rune -0 python3 -c 'import sys; print("".join(sorted(sys.stdin.readlines(), key=str.casefold)), end="")' <<<"$list_raw" + assert_output "$list_raw" + + assert_equal "$list_raw" "$list_json" + assert_equal "$list_raw" "$list_human" +} + +@test "cscli parsers list [parser]..." { + # non-existent + rune -1 cscli parsers install foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" + + # not installed + rune -0 cscli parsers list crowdsecurity/whitelists + assert_output --regexp 'crowdsecurity/whitelists.*disabled' + + # install two items + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + # list an installed item + rune -0 cscli parsers list crowdsecurity/whitelists + assert_output --regexp "crowdsecurity/whitelists.*enabled" + refute_output --partial "crowdsecurity/windows-auth" + + # list multiple installed and non installed items + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs + assert_output --partial "crowdsecurity/whitelists" + assert_output --partial "crowdsecurity/windows-auth" + assert_output --partial "crowdsecurity/traefik-logs" + + rune -0 cscli parsers list crowdsecurity/whitelists -o json + rune -0 jq '.parsers | length' <(output) + assert_output "1" + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o json + rune -0 jq '.parsers | length' <(output) + assert_output "3" + + rune -0 cscli parsers list crowdsecurity/whitelists -o raw + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "1" + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o raw + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "3" +} diff --git a/test/bats/cscli-hubtype-remove.bats b/test/bats/cscli-hubtype-remove.bats new file mode 100644 index 00000000000..32db8efe788 --- 
/dev/null +++ b/test/bats/cscli-hubtype-remove.bats @@ -0,0 +1,245 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli remove". +# +# Behavior that is specific to a hubtype should be tested in a separate file. + + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR +# INDEX_PATH=$(config_get '.config_paths.index_path') +# export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +#---------- + +@test "cscli remove (no argument)" { + rune -1 cscli parsers remove + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "cscli remove (aliased)" { + rune -1 cscli parser remove + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "cscli delete (alias of remove)" { + rune -1 cscli parsers delete + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "remove an item (non-existent)" { + rune -1 cscli parsers remove foo/bar + refute_output + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "remove an item (not downloaded)" { + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.downloaded==false' <(output) + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output "Nothing to do." 
+ refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output "Nothing to do." + refute_stderr +} + +@test "remove an item (not installed)" { + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output --partial "purging parsers:crowdsecurity/whitelists" +} + +@test "remove an item (dry run)" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists + + Dry run, no action taken. 
+ EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "remove an item" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove an item (purge)" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + purging parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.downloaded==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove multiple items" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + rune -0 cscli parsers remove crowdsecurity/whitelists crowdsecurity/windows-auth --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists, crowdsecurity/windows-auth + + Dry run, no action taken. 
+ EOT + refute_stderr + + rune -0 cscli parsers remove crowdsecurity/whitelists crowdsecurity/windows-auth + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/windows-auth --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) +} + +@test "remove all items of a same type" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + rune -1 cscli parsers remove crowdsecurity/whitelists --all + assert_stderr "Error: can't specify items and '--all' at the same time" + + rune -0 cscli parsers remove --all --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists, crowdsecurity/windows-auth + + Dry run, no action taken. + EOT + refute_stderr + + rune -0 cscli parsers remove --all + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/windows-auth --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) +} + +@test "remove an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -1 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_stderr --partial "crowdsecurity/whitelists is tainted, use '--force' to remove" + refute_output + + rune -1 cscli parsers remove crowdsecurity/whitelists + assert_stderr --partial "crowdsecurity/whitelists is tainted, use '--force' to remove" + refute_output + + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists 
"$CONFIG_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove an item that belongs to a collection (requires --force)" { + rune -0 cscli collections install crowdsecurity/sshd + # XXX: should exit with 1? + rune -0 cscli parsers remove crowdsecurity/sshd-logs + assert_output "Nothing to do." + assert_stderr --partial "crowdsecurity/sshd-logs belongs to collections: [crowdsecurity/sshd]" + assert_stderr --partial "Run 'sudo cscli parsers remove crowdsecurity/sshd-logs --force' if you want to force remove this parser" + assert_file_exists "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + + rune -0 cscli parsers remove crowdsecurity/sshd-logs --force + assert_output - <<-EOT + disabling parsers:crowdsecurity/sshd-logs + + $RELOAD_MESSAGE + EOT + refute_stderr + assert_file_not_exists "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" +} + +@test "remove an item (autocomplete)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli __complete parsers remove crowd + assert_stderr --partial '[Debug] parsers: [crowdsecurity/whitelists]' + assert_output --partial 'crowdsecurity/whitelists' +} diff --git a/test/bats/cscli-hubtype-upgrade.bats b/test/bats/cscli-hubtype-upgrade.bats new file mode 100644 index 00000000000..4244e611cf6 --- /dev/null +++ b/test/bats/cscli-hubtype-upgrade.bats @@ -0,0 +1,253 @@ +#!/usr/bin/env bats + +# Generic tests for the upgrade of hub items and data files. +# +# Commands under test: +# cscli upgrade +# +# This file should test behavior that can be applied to all types. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +hub_inject_v0() { + # add a version 0.0 to all parsers + + # hash of the string "v0.0" + sha256_0_0="daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0" + + new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.parsers |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') + echo "$new_hub" >"$INDEX_PATH" +} + +install_v0() { + local hubtype=$1 + shift + local item_name=$1 + shift + + cscli "$hubtype" install "$item_name" + printf "%s" "v0.0" > "$(jq -r '.local_path' <(cscli "$hubtype" inspect "$item_name" --no-metrics -o json))" +} + +#---------- + +@test "cscli upgrade (no argument)" { + rune -1 cscli parsers upgrade + refute_output + assert_stderr --partial "specify at least one parser to upgrade or '--all'" +} + +@test "cscli upgrade (aliased)" { + rune -1 cscli parser upgrade + refute_output + assert_stderr --partial "specify at least one parser to upgrade or '--all'" +} + +@test "upgrade an item (non-existent)" { + rune -1 cscli parsers upgrade foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "upgrade an item (non installed)" { + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + rune -0 
cscli parsers upgrade crowdsecurity/whitelists + assert_output 'Nothing to do.' + refute_stderr +} + +@test "upgrade an item (up-to-date)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output 'Nothing to do.' + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output 'Nothing to do.' +} + +@test "upgrade an item (dry run)" { + hub_inject_v0 + install_v0 parsers crowdsecurity/whitelists + latest=$(get_latest_version parsers crowdsecurity/whitelists) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists (0.0 -> $latest) + + Dry run, no action taken. + EOT + refute_stderr +} + +get_latest_version() { + local hubtype=$1 + shift + local item_name=$1 + shift + + cscli "$hubtype" inspect "$item_name" -o json | jq -r '.version' +} + +@test "upgrade an item" { + hub_inject_v0 + install_v0 parsers crowdsecurity/whitelists + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + + # the version is now the latest + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="?"' <(output) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. 
+ EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --force + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade multiple items" { + hub_inject_v0 + + install_v0 parsers crowdsecurity/whitelists + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) + + install_v0 parsers crowdsecurity/sshd-logs + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + latest_sshd=$(get_latest_version parsers crowdsecurity/sshd-logs) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/sshd-logs --dry-run + assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/sshd-logs (0.0 -> $latest_sshd), crowdsecurity/whitelists (0.0 -> $latest_whitelists) + + Dry run, no action taken. 
+ EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/sshd-logs + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + downloading parsers:crowdsecurity/sshd-logs + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade all items of the same type" { + hub_inject_v0 + + install_v0 parsers crowdsecurity/whitelists + install_v0 parsers crowdsecurity/sshd-logs + install_v0 parsers crowdsecurity/windows-auth + + rune -0 cscli parsers upgrade --all + assert_output - <<-EOT + downloading parsers:crowdsecurity/sshd-logs + downloading parsers:crowdsecurity/whitelists + downloading parsers:crowdsecurity/windows-auth + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/windows-auth -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade an item (autocomplete)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli __complete parsers upgrade crowd + assert_stderr --partial '[Debug] parsers: [crowdsecurity/whitelists]' + assert_output --partial 'crowdsecurity/whitelists' +} + diff --git a/test/bats/cscli-parsers.bats b/test/bats/cscli-parsers.bats new file mode 100644 index 00000000000..6ff138e9fd8 --- /dev/null +++ b/test/bats/cscli-parsers.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +# Tests for the "cscli parsers" behavior that is not covered by cscli-hubtype-*.bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + 
HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers inspect (includes the stage attribute)" { + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o human + assert_line 'stage: s01-parse' + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o raw + assert_line 'stage: s01-parse' + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o json + rune -0 jq -r '.stage' <(output) + assert_output 's01-parse' +} diff --git a/test/bats/cscli-postoverflows.bats b/test/bats/cscli-postoverflows.bats new file mode 100644 index 00000000000..979ee81defb --- /dev/null +++ b/test/bats/cscli-postoverflows.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +# Tests for the "cscli postoverflows" behavior that is not covered by cscli-hubtype-*.bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli postoverflows inspect (includes the stage attribute)" { + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o human + assert_line 'stage: s00-enrich' + + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o raw + assert_line 'stage: 
s00-enrich' + + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o json + rune -0 jq -r '.stage' <(output) + assert_output 's00-enrich' +} diff --git a/test/bats/hub-index.bats b/test/bats/hub-index.bats new file mode 100644 index 00000000000..a609974d67a --- /dev/null +++ b/test/bats/hub-index.bats @@ -0,0 +1,357 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "malformed index - null item" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + EOF + + rune -1 cscli hub list + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no index metadata." +} + +@test "malformed index - no download path" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + version: "0.0" + versions: + 0.0: + digest: daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0 + EOF + + rune -1 cscli hub list + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no download path." +} + +@test "malformed parser - no stage" { + # Installing a parser requires a stage directory + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -1 cscli hub list -o raw + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no stage." 
+} + +@test "malformed parser - short path" { + # Installing a parser requires a stage directory + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli parsers install author/pars1 + rune -0 cscli hub list + # XXX here the item is installed but won't work, we only have a warning + assert_stderr --partial 'Ignoring file' + assert_stderr --partial 'path is too short' +} + +@test "malformed item - not yaml" { + # Installing an item requires reading the list of data files + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0 + content: "v0.0" + EOF + + rune -0 cscli hub list -o raw + rune -1 cscli parsers install author/pars1 + assert_stderr --partial 'unmarshal errors' +} + +@test "malformed item - hash mismatch" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: "0000000000000000000000000000000000000000000000000000000000000000" + content: "v0.0" + EOF + + rune -0 cscli hub list -o raw + rune -1 cscli parsers install author/pars1 + assert_stderr --partial 'parsers:author/pars1: hash mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0.' 
+} + +@test "install minimal item" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli parsers install author/pars1 + assert_line "downloading parsers:author/pars1" + assert_line "enabling parsers:author/pars1" + rune -0 cscli hub list +} + +@test "replace an item in a collection update" { + # A new version of coll1 will uninstall pars1 and install pars2. + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.0" + versions: + 0.0: + digest: 801e11865f8fdf82a348e70fe3f568af190715c40a176e058da2ad21ff5e20be + content: "{'parsers': ['author/pars1']}" + parsers: + - author/pars1 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list + rune -0 cscli collections install author/coll1 + + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.1" + versions: + 0.0: + digest: 801e11865f8fdf82a348e70fe3f568af190715c40a176e058da2ad21ff5e20be + 0.1: + digest: f3c535c2d01abec5aadbb5ce03c357a478d91b116410c9fee288e073cd34c0dd + content: "{'parsers': ['author/pars2']}" + parsers: + - author/pars2 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + 
author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli collections upgrade author/coll1 + assert_output - <<-EOT + downloading parsers:author/pars2 + enabling parsers:author/pars2 + disabling parsers:author/pars1 + downloading collections:author/coll1 + + $RELOAD_MESSAGE + EOT + + rune -0 cscli hub list -o raw + assert_output - <<-EOT + name,status,version,description,type + author/pars2,enabled,0.0,,parsers + author/coll1,enabled,0.1,,collections + EOT +} + +@test "replace an outdated item only if it's not used elsewhere" { + # XXX + skip "not implemented" + # A new version of coll1 will uninstall pars1 and install pars2. + # Pars3 will not be uninstalled because it's still required by coll2. + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.0" + versions: + 0.0: + digest: 0c397c7b3e19d730578932fdc260c53f39bd2488fad87207ab6b7e4dc315b067 + content: "{'parsers': ['author/pars1', 'author/pars3']}" + parsers: + - author/pars1 + - author/pars3 + author/coll2: + path: collections/author/coll2.yaml + version: "0.0" + versions: + 0.0: + digest: 96df483ff697d4d214792b135a3ba5ddaca0ebfd856e7da89215926394ac4001 + content: "{'parsers': ['author/pars3']}" + parsers: + - author/pars3 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars3: + path: parsers/s01-parse/author/pars3.yaml + stage: s01-parse + version: "0.0" + versions: + 
0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list + rune -0 cscli collections install author/coll1 author/coll2 + + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.1" + versions: + 0.0: + digest: 0c397c7b3e19d730578932fdc260c53f39bd2488fad87207ab6b7e4dc315b067 + 0.1: + digest: f3c535c2d01abec5aadbb5ce03c357a478d91b116410c9fee288e073cd34c0dd + content: "{'parsers': ['author/pars2']}" + parsers: + - author/pars2 + author/coll2: + path: collections/author/coll2.yaml + version: "0.0" + versions: + 0.0: + digest: 96df483ff697d4d214792b135a3ba5ddaca0ebfd856e7da89215926394ac4001 + content: "{'parsers': ['author/pars3']}" + parsers: + - author/pars3 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars3: + path: parsers/s01-parse/author/pars3.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli collections upgrade author/coll1 + assert_output - <<-EOT + downloading parsers:author/pars2 + enabling parsers:author/pars2 + disabling parsers:author/pars1 + downloading collections:author/coll1 + + $RELOAD_MESSAGE + EOT + + rune -0 cscli hub list -o raw + assert_output - <<-EOT + name,status,version,description,type + author/pars2,enabled,0.0,,parsers + author/pars3,enabled,0.0,,parsers + author/coll1,enabled,0.1,,collections + EOT +} diff --git a/test/bin/remove-all-hub-items 
b/test/bin/remove-all-hub-items index 981602b775a..b5d611782ff 100755 --- a/test/bin/remove-all-hub-items +++ b/test/bin/remove-all-hub-items @@ -14,7 +14,7 @@ echo "Pre-downloading Hub content..." types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - "$CSCLI" "$itemtype" remove --all --force + "$CSCLI" "$itemtype" remove --all --force --purge --yes done echo " done." diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 3e3c806b616..4f3ec7cc2ae 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -117,7 +117,7 @@ make_init_data() { "$CSCLI" --warning hub update --with-content # preload some content and data files - "$CSCLI" collections install crowdsecurity/linux --download-only + "$CSCLI" collections install crowdsecurity/linux --download-only --yes # sub-items did not respect --download-only ./bin/remove-all-hub-items diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 39a084596e2..902edc5de82 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -260,16 +260,6 @@ hub_purge_all() { } export -f hub_purge_all -# remove unused data from the index, to make sure we don't rely on it in any way -hub_strip_index() { - local INDEX - INDEX=$(config_get .config_paths.index_path) - local hub_min - hub_min=$(jq <"$INDEX" 'del(..|.long_description?) | del(..|.deprecated?) | del (..|.labels?)') - echo "$hub_min" >"$INDEX" -} -export -f hub_strip_index - # remove color and style sequences from stdin plaintext() { sed -E 's/\x1B\[[0-9;]*[JKmsu]//g' @@ -340,3 +330,17 @@ lp-get-token() { echo "$resp" | yq -r '.token' } export -f lp-get-token + +case $(uname) in + "Linux") + # shellcheck disable=SC2089 + RELOAD_MESSAGE="Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." + ;; + *) + # shellcheck disable=SC2089 + RELOAD_MESSAGE="Run 'sudo service crowdsec reload' for the new configuration to be effective." 
+ ;; +esac + +# shellcheck disable=SC2090 +export RELOAD_MESSAGE diff --git a/test/localstack/docker-compose.yml b/test/localstack/docker-compose.yml index f58f3c7f263..9f0a690353b 100644 --- a/test/localstack/docker-compose.yml +++ b/test/localstack/docker-compose.yml @@ -15,7 +15,6 @@ services: AWS_HOST: localstack DEBUG: "" KINESYS_ERROR_PROBABILITY: "" - DOCKER_HOST: "unix://var/run/docker.sock" LOCALSTACK_HOST: "localstack" AWS_REGION: "us-east-1" diff --git a/wizard.sh b/wizard.sh index 6e215365f6c..2d3260fc22f 100755 --- a/wizard.sh +++ b/wizard.sh @@ -21,11 +21,8 @@ DOCKER_MODE="false" CROWDSEC_LIB_DIR="/var/lib/crowdsec" CROWDSEC_USR_DIR="/usr/local/lib/crowdsec" CROWDSEC_DATA_DIR="${CROWDSEC_LIB_DIR}/data" -CROWDSEC_DB_PATH="${CROWDSEC_DATA_DIR}/crowdsec.db" CROWDSEC_PATH="/etc/crowdsec" CROWDSEC_CONFIG_PATH="${CROWDSEC_PATH}" -CROWDSEC_LOG_FILE="/var/log/crowdsec.log" -LAPI_LOG_FILE="/var/log/crowdsec_api.log" CROWDSEC_PLUGIN_DIR="${CROWDSEC_USR_DIR}/plugins" CROWDSEC_CONSOLE_DIR="${CROWDSEC_PATH}/console" @@ -35,8 +32,6 @@ CSCLI_BIN="./cmd/crowdsec-cli/cscli" CLIENT_SECRETS="local_api_credentials.yaml" LAPI_SECRETS="online_api_credentials.yaml" -CONSOLE_FILE="console.yaml" - BIN_INSTALL_PATH="/usr/local/bin" CROWDSEC_BIN_INSTALLED="${BIN_INSTALL_PATH}/crowdsec" @@ -91,9 +86,6 @@ SENTINEL_PLUGIN_CONFIG="./cmd/notification-sentinel/sentinel.yaml" FILE_PLUGIN_CONFIG="./cmd/notification-file/file.yaml" -BACKUP_DIR=$(mktemp -d) -rm -rf -- "$BACKUP_DIR" - log_info() { msg=$1 date=$(date "+%Y-%m-%d %H:%M:%S") @@ -262,20 +254,26 @@ install_collection() { fi done + local YES="" + if [[ ${SILENT} == "false" ]]; then COLLECTION_TO_INSTALL=($(whiptail --separate-output --ok-button Continue --title "Crowdsec collections" --checklist "Available collections in crowdsec, try to pick one that fits your profile. Collections contains parsers and scenarios to protect your system." 20 120 10 "${HMENU[@]}" 3>&1 1>&2 2>&3)) if [ $? 
-eq 1 ]; then log_err "user bailed out at collection selection" exit 1; fi; + else + YES="--yes" fi; for collection in "${COLLECTION_TO_INSTALL[@]}"; do log_info "Installing collection '${collection}'" - ${CSCLI_BIN_INSTALLED} collections install "${collection}" --error + # shellcheck disable=SC2248 + ${CSCLI_BIN_INSTALLED} collections install "${collection}" --error ${YES} done - ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" --error + # shellcheck disable=SC2248 + ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" --error ${YES} if [[ ${SILENT} == "false" ]]; then whiptail --msgbox "Out of safety, I installed a parser called 'crowdsecurity/whitelists'. This one will prevent private IP addresses from being banned, feel free to remove it any time." 20 50 fi @@ -420,22 +418,19 @@ install_crowdsec() { mkdir -p "${CROWDSEC_CONFIG_PATH}/contexts" || exit mkdir -p "${CROWDSEC_CONSOLE_DIR}" || exit - # tmp - mkdir -p /tmp/data mkdir -p /etc/crowdsec/hub/ - install -v -m 600 -D "./config/${CLIENT_SECRETS}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 600 -D "./config/${LAPI_SECRETS}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - - ## end tmp - install -v -m 600 -D ./config/config.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/user.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/"${CONSOLE_FILE}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/context.yaml "${CROWDSEC_CONSOLE_DIR}" 1> /dev/null || exit + # Don't overwrite existing files + [[ ! 
-f "${CROWDSEC_CONFIG_PATH}/${CLIENT_SECRETS}" ]] && install -v -m 600 -D "./config/${CLIENT_SECRETS}" "${CROWDSEC_CONFIG_PATH}" >/dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/${LAPI_SECRETS}" ]] && install -v -m 600 -D "./config/${LAPI_SECRETS}" "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/config.yaml" ]] && install -v -m 600 -D ./config/config.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/dev.yaml" ]] && install -v -m 644 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/user.yaml" ]] && install -v -m 644 -D ./config/user.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/acquis.yaml" ]] && install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/profiles.yaml" ]] && install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/simulation.yaml" ]] && install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/console.yaml" ]] && install -v -m 644 -D ./config/console.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/context.yaml" ]] && install -v -m 644 -D ./config/context.yaml "${CROWDSEC_CONSOLE_DIR}" > /dev/null || exit DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $DATA' < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml" || log_fatal "unable to generate user configuration file" if [[ ${DOCKER_MODE} == "false" ]]; then @@ -465,23 +460,12 @@ update_full() { log_err "Cscli binary '$CSCLI_BIN' not found. 
Please build it with 'make build'" && exit fi - log_info "Backing up existing configuration" - ${CSCLI_BIN_INSTALLED} config backup ${BACKUP_DIR} - log_info "Saving default database content if exist" - if [[ -f "/var/lib/crowdsec/data/crowdsec.db" ]]; then - cp /var/lib/crowdsec/data/crowdsec.db ${BACKUP_DIR}/crowdsec.db - fi - log_info "Cleanup existing crowdsec configuration" + log_info "Removing old binaries" uninstall_crowdsec log_info "Installing crowdsec" install_crowdsec - log_info "Restoring configuration" + log_info "Updating hub" ${CSCLI_BIN_INSTALLED} hub update - ${CSCLI_BIN_INSTALLED} config restore ${BACKUP_DIR} - log_info "Restoring saved database if exist" - if [[ -f "${BACKUP_DIR}/crowdsec.db" ]]; then - cp ${BACKUP_DIR}/crowdsec.db /var/lib/crowdsec/data/crowdsec.db - fi log_info "Finished, restarting" systemctl restart crowdsec || log_fatal "Failed to restart crowdsec" } @@ -559,15 +543,6 @@ uninstall_crowdsec() { ${CSCLI_BIN} dashboard remove -f -y >/dev/null delete_bins - # tmp - rm -rf /tmp/data/ - ## end tmp - - find /etc/crowdsec -maxdepth 1 -mindepth 1 | grep -v "bouncer" | xargs rm -rf || echo "" - rm -f ${CROWDSEC_LOG_FILE} || echo "" - rm -f ${LAPI_LOG_FILE} || echo "" - rm -f ${CROWDSEC_DB_PATH} || echo "" - rm -rf ${CROWDSEC_LIB_DIR} || echo "" rm -rf ${CROWDSEC_USR_DIR} || echo "" rm -f ${SYSTEMD_PATH_FILE} || echo "" log_info "crowdsec successfully uninstalled" @@ -759,12 +734,11 @@ usage() { echo " ./wizard.sh --unattended Install in unattended mode, no question will be asked and defaults will be followed" echo " ./wizard.sh --docker-mode Will install crowdsec without systemd and generate random machine-id" echo " ./wizard.sh -n|--noop Do nothing" - - exit 0 } if [[ $# -eq 0 ]]; then -usage + usage + exit 0 fi while [[ $# -gt 0 ]] From 362d5dc068849be5415677187f922df0b7c222a5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Feb 2025 16:50:14 +0100 Subject: [PATCH 420/581] CI: use alpine 
3.21 for docker build (#3445) --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index d368f0f6ede..383578c48b4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/golang:1.23-alpine3.20 AS build +FROM docker.io/golang:1.23-alpine3.21 AS build ARG BUILD_VERSION @@ -30,7 +30,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ # In case we need to remove agents here.. # cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM docker.io/alpine:latest AS slim +FROM docker.io/alpine:3.21 AS slim RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata bash rsync && \ mkdir -p /staging/etc/crowdsec && \ From bf0a1cc470591c069dd9134f8f1e2035f9d2bff6 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Feb 2025 16:51:34 +0100 Subject: [PATCH 421/581] cscli: when prompting, use default in case of EOF instead of going for "no" (#3447) --- pkg/hubops/plan.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/hubops/plan.go b/pkg/hubops/plan.go index eb99056fab3..b87145d9583 100644 --- a/pkg/hubops/plan.go +++ b/pkg/hubops/plan.go @@ -2,7 +2,9 @@ package hubops import ( "context" + "errors" "fmt" + "io" "os" "slices" "strings" @@ -203,7 +205,11 @@ func (p *ActionPlan) Confirm(verbose bool) (bool, error) { Default: true, } + // in case of EOF, it's likely been closed by the package manager (freebsd?), ignore it if err := survey.AskOne(prompt, &answer); err != nil { + if errors.Is(err, io.EOF) { + return prompt.Default, nil + } return false, err } From a001e1d760ed65976cd681f6d35c4f45a4b0de82 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 6 Feb 2025 11:43:29 +0100 Subject: [PATCH 422/581] cscli: replace '--yes' option with '--interactive' (#3448) --- cmd/crowdsec-cli/clihub/hub.go | 22 ++++++----- 
cmd/crowdsec-cli/cliitem/cmdinstall.go | 12 +++--- cmd/crowdsec-cli/cliitem/cmdremove.go | 20 +++++----- cmd/crowdsec-cli/cliitem/cmdupgrade.go | 18 ++++----- cmd/crowdsec-cli/cliitem/hubappsec.go | 42 +++++++++------------ cmd/crowdsec-cli/cliitem/hubcollection.go | 21 +++++------ cmd/crowdsec-cli/cliitem/hubcontext.go | 21 +++++------ cmd/crowdsec-cli/cliitem/hubparser.go | 21 +++++------ cmd/crowdsec-cli/cliitem/hubpostoverflow.go | 21 +++++------ cmd/crowdsec-cli/cliitem/hubscenario.go | 21 +++++------ go.mod | 2 +- pkg/hubops/plan.go | 21 ++++++----- test/bin/remove-all-hub-items | 2 +- test/lib/config/config-local | 2 +- 14 files changed, 115 insertions(+), 131 deletions(-) diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 66fbe7c405a..4dce6295c2d 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -163,7 +163,7 @@ cscli hub update --with-content`, return cmd } -func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force bool) error { +func (cli *cliHub) upgrade(ctx context.Context, interactive bool, dryRun bool, force bool) error { cfg := cli.cfg() hub, err := require.Hub(cfg, log.StandardLogger()) @@ -189,7 +189,7 @@ func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force boo verbose := (cfg.Cscli.Output == "raw") - if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + if err := plan.Execute(ctx, interactive, dryRun, verbose); err != nil { return err } @@ -202,9 +202,9 @@ func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force boo func (cli *cliHub) newUpgradeCmd() *cobra.Command { var ( - yes bool - dryRun bool - force bool + interactive bool + dryRun bool + force bool ) cmd := &cobra.Command{ @@ -217,19 +217,23 @@ Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if cscli hub upgrade # Upgrade tainted items as well; force re-download of data files. 
-cscli hub upgrade --force`, +cscli hub upgrade --force + +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli hub upgrade --interactive +cscli hub upgrade -i`, Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.upgrade(cmd.Context(), yes, dryRun, force) + return cli.upgrade(cmd.Context(), interactive, dryRun, force) }, } flags := cmd.Flags() - flags.BoolVar(&yes, "yes", false, "Confirm execution without prompt") + flags.BoolVarP(&interactive, "interactive", "i", false, "Ask for confirmation before proceeding") flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated items; always update data files") - cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + cmd.MarkFlagsMutuallyExclusive("interactive", "dry-run") return cmd } diff --git a/cmd/crowdsec-cli/cliitem/cmdinstall.go b/cmd/crowdsec-cli/cliitem/cmdinstall.go index 74ffbe727f4..ab09507995e 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinstall.go +++ b/cmd/crowdsec-cli/cliitem/cmdinstall.go @@ -42,7 +42,7 @@ func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) str return msg } -func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { +func (cli cliItem) install(ctx context.Context, args []string, interactive bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { cfg := cli.cfg() hub, err := require.Hub(cfg, log.StandardLogger()) @@ -80,7 +80,7 @@ func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun verbose := (cfg.Cscli.Output == "raw") - if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + if err := plan.Execute(ctx, interactive, dryRun, verbose); err != nil { if !ignoreError { return err } @@ -116,7 +116,7 @@ 
func compAllItems(itemType string, args []string, toComplete string, cfg configG func (cli cliItem) newInstallCmd() *cobra.Command { var ( - yes bool + interactive bool dryRun bool downloadOnly bool force bool @@ -134,17 +134,17 @@ func (cli cliItem) newInstallCmd() *cobra.Command { return compAllItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), args, yes, dryRun, downloadOnly, force, ignoreError) + return cli.install(cmd.Context(), args, interactive, dryRun, downloadOnly, force, ignoreError) }, } flags := cmd.Flags() - flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVarP(&interactive, "interactive", "i", false, "Ask for confirmation before proceeding") flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) - cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + cmd.MarkFlagsMutuallyExclusive("interactive", "dry-run") return cmd } diff --git a/cmd/crowdsec-cli/cliitem/cmdremove.go b/cmd/crowdsec-cli/cliitem/cmdremove.go index c8ea041acbf..5472e4aebbb 100644 --- a/cmd/crowdsec-cli/cliitem/cmdremove.go +++ b/cmd/crowdsec-cli/cliitem/cmdremove.go @@ -85,7 +85,7 @@ func installedParentNames(item *cwhub.Item) []string { return ret } -func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun bool, purge bool, force bool, all bool) error { +func (cli cliItem) remove(ctx context.Context, args []string, interactive bool, dryRun bool, purge bool, force bool, all bool) error { cfg := cli.cfg() hub, err := require.Hub(cli.cfg(), log.StandardLogger()) @@ -100,7 +100,7 @@ func (cli cliItem) remove(ctx 
context.Context, args []string, yes bool, dryRun b verbose := (cfg.Cscli.Output == "raw") - if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + if err := plan.Execute(ctx, interactive, dryRun, verbose); err != nil { return err } @@ -113,11 +113,11 @@ func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun b func (cli cliItem) newRemoveCmd() *cobra.Command { var ( - yes bool - dryRun bool - purge bool - force bool - all bool + interactive bool + dryRun bool + purge bool + force bool + all bool ) cmd := &cobra.Command{ @@ -135,17 +135,17 @@ func (cli cliItem) newRemoveCmd() *cobra.Command { return errors.New("can't specify items and '--all' at the same time") } - return cli.remove(cmd.Context(), args, yes, dryRun, purge, force, all) + return cli.remove(cmd.Context(), args, interactive, dryRun, purge, force, all) }, } flags := cmd.Flags() - flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVarP(&interactive, "interactive", "i", false, "Ask for confirmation before proceeding") flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") flags.BoolVar(&purge, "purge", false, "Delete source file too") flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) - cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + cmd.MarkFlagsMutuallyExclusive("interactive", "dry-run") return cmd } diff --git a/cmd/crowdsec-cli/cliitem/cmdupgrade.go b/cmd/crowdsec-cli/cliitem/cmdupgrade.go index 5320bc04bc6..f9c85a1dcd1 100644 --- a/cmd/crowdsec-cli/cliitem/cmdupgrade.go +++ b/cmd/crowdsec-cli/cliitem/cmdupgrade.go @@ -45,7 +45,7 @@ func (cli cliItem) upgradePlan(hub *cwhub.Hub, contentProvider cwhub.ContentProv return plan, nil } -func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun bool, force bool, all bool) error { +func (cli cliItem) upgrade(ctx 
context.Context, args []string, interactive bool, dryRun bool, force bool, all bool) error { cfg := cli.cfg() hub, err := require.Hub(cfg, log.StandardLogger()) @@ -62,7 +62,7 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun verbose := (cfg.Cscli.Output == "raw") - if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + if err := plan.Execute(ctx, interactive, dryRun, verbose); err != nil { return err } @@ -75,10 +75,10 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun func (cli cliItem) newUpgradeCmd() *cobra.Command { var ( - yes bool - dryRun bool - all bool - force bool + interactive bool + dryRun bool + all bool + force bool ) cmd := &cobra.Command{ @@ -91,16 +91,16 @@ func (cli cliItem) newUpgradeCmd() *cobra.Command { return compInstalledItems(cli.name, args, toComplete, cli.cfg) }, RunE: func(cmd *cobra.Command, args []string) error { - return cli.upgrade(cmd.Context(), args, yes, dryRun, force, all) + return cli.upgrade(cmd.Context(), args, interactive, dryRun, force, all) }, } flags := cmd.Flags() - flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVarP(&interactive, "interactive", "i", false, "Ask for confirmation before proceeding") flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") - cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + cmd.MarkFlagsMutuallyExclusive("interactive", "dry-run") return cmd } diff --git a/cmd/crowdsec-cli/cliitem/hubappsec.go b/cmd/crowdsec-cli/cliitem/hubappsec.go index 7f9143d35b8..1c1875d2f2c 100644 --- a/cmd/crowdsec-cli/cliitem/hubappsec.go +++ b/cmd/crowdsec-cli/cliitem/hubappsec.go @@ -43,10 +43,9 @@ cscli appsec-configs install crowdsecurity/virtual-patching --download-only # Install over 
tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli appsec-configs install crowdsecurity/virtual-patching --force -# Proceed without prompting. -cscli appsec-configs install crowdsecurity/virtual-patching --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli appsec-configs install crowdsecurity/virtual-patching -i +cscli appsec-configs install crowdsecurity/virtual-patching --interactive`, }, removeHelp: cliHelp{ example: `# Uninstall some appsec-configs. @@ -64,10 +63,9 @@ cscli appsec-configs remove crowdsecurity/virtual-patching --purge # Remove tainted items. cscli appsec-configs remove crowdsecurity/virtual-patching --force -# Proceed without prompting. -cscli appsec-configs remove crowdsecurity/virtual-patching --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli appsec-configs remove crowdsecurity/virtual-patching -i +cscli appsec-configs remove crowdsecurity/virtual-patching --interactive`, }, upgradeHelp: cliHelp{ example: `# Upgrade some appsec-configs. If they are not currently installed, they are downloaded but not installed. @@ -82,10 +80,9 @@ cscli appsec-configs upgrade crowdsecurity/virtual-patching --dry-run -o raw # Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli appsec-configs upgrade crowdsecurity/virtual-patching --force -# Proceed without prompting. 
-cscli appsec-configs upgrade crowdsecurity/virtual-patching --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli appsec-configs upgrade crowdsecurity/virtual-patching -i +cscli appsec-configs upgrade crowdsecurity/virtual-patching --interactive`, }, inspectHelp: cliHelp{ example: `# Display metadata, state, metrics and ancestor collections of appsec-configs (installed or not). @@ -183,10 +180,9 @@ cscli appsec-rules install crowdsecurity/crs --download-only # Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli appsec-rules install crowdsecurity/crs --force -# Proceed without prompting. -cscli appsec-rules install crowdsecurity/crs --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli appsec-rules install crowdsecurity/crs -i +cscli appsec-rules install crowdsecurity/crs --interactive`, }, removeHelp: cliHelp{ example: `# Uninstall some appsec-rules. @@ -204,10 +200,9 @@ cscli appsec-rules remove crowdsecurity/crs --purge # Remove tainted items. cscli appsec-rules remove crowdsecurity/crs --force -# Proceed without prompting. -cscli appsec-rules remove crowdsecurity/crs --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli appsec-rules remove crowdsecurity/crs -i +cscli appsec-rules remove crowdsecurity/crs --interactive`, }, upgradeHelp: cliHelp{ example: `# Upgrade some appsec-rules. If they are not currently installed, they are downloaded but not installed. 
@@ -222,10 +217,9 @@ cscli appsec-rules upgrade crowdsecurity/crs --dry-run -o raw # Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli appsec-rules upgrade crowdsecurity/crs --force -# Proceed without prompting. -cscli appsec-rules upgrade crowdsecurity/crs --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli appsec-rules upgrade crowdsecurity/crs -i +cscli appsec-rules upgrade crowdsecurity/crs --interactive`, }, inspectHelp: cliHelp{ example: `# Display metadata, state, metrics and ancestor collections of appsec-rules (installed or not). diff --git a/cmd/crowdsec-cli/cliitem/hubcollection.go b/cmd/crowdsec-cli/cliitem/hubcollection.go index b45f956e0ac..c0e590ce5dd 100644 --- a/cmd/crowdsec-cli/cliitem/hubcollection.go +++ b/cmd/crowdsec-cli/cliitem/hubcollection.go @@ -34,10 +34,9 @@ cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --downlo # Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --force -# Proceed without prompting. -cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables -i +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --interactive`, }, removeHelp: cliHelp{ example: `# Uninstall some collections. @@ -55,10 +54,9 @@ cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --purge # Remove tainted items. 
cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --force -# Proceed without prompting. -cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables -i +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --interactive`, }, upgradeHelp: cliHelp{ example: `# Upgrade some collections. If they are not currently installed, they are downloaded but not installed. @@ -73,10 +71,9 @@ cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --dry-ru # Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --force -# Proceed without prompting. -cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables -i +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --interactive`, }, inspectHelp: cliHelp{ example: `# Display metadata, state, metrics and dependencies of collections (installed or not). diff --git a/cmd/crowdsec-cli/cliitem/hubcontext.go b/cmd/crowdsec-cli/cliitem/hubcontext.go index 3a94687843d..167cca37054 100644 --- a/cmd/crowdsec-cli/cliitem/hubcontext.go +++ b/cmd/crowdsec-cli/cliitem/hubcontext.go @@ -34,10 +34,9 @@ cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --download-o # Install over tainted items. 
Can be used to restore or repair after local modifications or missing dependencies. cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --force -# Proceed without prompting. -cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet -i +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --interactive`, }, removeHelp: cliHelp{ example: `# Uninstall some contexts. @@ -55,10 +54,9 @@ cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --purge # Remove tainted items. cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --force -# Proceed without prompting. -cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet -i +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --interactive`, }, upgradeHelp: cliHelp{ example: `# Upgrade some contexts. If they are not currently installed, they are downloaded but not installed. @@ -73,10 +71,9 @@ cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o # Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --force -# Proceed without prompting. 
-cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet -i +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --interactive`, }, inspectHelp: cliHelp{ example: `# Display metadata, state and ancestor collections of contexts (installed or not). diff --git a/cmd/crowdsec-cli/cliitem/hubparser.go b/cmd/crowdsec-cli/cliitem/hubparser.go index 440cb61204f..79491baa705 100644 --- a/cmd/crowdsec-cli/cliitem/hubparser.go +++ b/cmd/crowdsec-cli/cliitem/hubparser.go @@ -34,10 +34,9 @@ cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --downloa # Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force -# Proceed without prompting. -cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs -i +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --interactive`, }, removeHelp: cliHelp{ example: `# Uninstall some parsers. @@ -55,10 +54,9 @@ cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --purge # Remove tainted items. cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force -# Proceed without prompting. 
-cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs -i +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --interactive`, }, upgradeHelp: cliHelp{ example: `# Upgrade some parsers. If they are not currently installed, they are downloaded but not installed. @@ -73,10 +71,9 @@ cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run # Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force -# Proceed without prompting. -cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs -i +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --interactive`, }, inspectHelp: cliHelp{ example: `# Display metadata, state, metrics and ancestor collections of parsers (installed or not). diff --git a/cmd/crowdsec-cli/cliitem/hubpostoverflow.go b/cmd/crowdsec-cli/cliitem/hubpostoverflow.go index cfd5f7c95aa..4d346fd545d 100644 --- a/cmd/crowdsec-cli/cliitem/hubpostoverflow.go +++ b/cmd/crowdsec-cli/cliitem/hubpostoverflow.go @@ -34,10 +34,9 @@ cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --dow # Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. 
cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --force -# Proceed without prompting. -cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns -i +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --interactive`, }, removeHelp: cliHelp{ example: `# Uninstall some postoverflows. @@ -55,10 +54,9 @@ cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --purg # Remove tainted items. cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --force -# Proceed without prompting. -cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns -i +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --interactive`, }, upgradeHelp: cliHelp{ example: `# Upgrade some postoverflows. If they are not currently installed, they are downloaded but not installed. @@ -73,10 +71,9 @@ cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss --dr # Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss --force -# Proceed without prompting. 
-cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss -i +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdnss --interactive`, }, inspectHelp: cliHelp{ example: `# Display metadata, state and ancestor collections of postoverflows (installed or not). diff --git a/cmd/crowdsec-cli/cliitem/hubscenario.go b/cmd/crowdsec-cli/cliitem/hubscenario.go index 5dee3323f6f..ae56e16ccff 100644 --- a/cmd/crowdsec-cli/cliitem/hubscenario.go +++ b/cmd/crowdsec-cli/cliitem/hubscenario.go @@ -34,10 +34,9 @@ cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --downlo # Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --force -# Proceed without prompting. -cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing -i +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --interactive`, }, removeHelp: cliHelp{ example: `# Uninstall some scenarios. @@ -55,10 +54,9 @@ cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --purge # Remove tainted items. cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --force -# Proceed without prompting. 
-cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing -i +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --interactive`, }, upgradeHelp: cliHelp{ example: `# Upgrade some scenarios. If they are not currently installed, they are downloaded but not installed. @@ -73,10 +71,9 @@ cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-ru # Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --force -# Proceed without prompting. -cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --yes - -# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, +# Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing -i +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --interactive`, }, inspectHelp: cliHelp{ example: `# Display metadata, state, metrics and ancestor collections of scenarios (installed or not). diff --git a/go.mod b/go.mod index ed406e4aedc..58ee77586aa 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.23.5 +go 1.23.6 // Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). 
diff --git a/pkg/hubops/plan.go b/pkg/hubops/plan.go index b87145d9583..6fb11da7215 100644 --- a/pkg/hubops/plan.go +++ b/pkg/hubops/plan.go @@ -192,6 +192,7 @@ func (p *ActionPlan) compactDescription() string { } func (p *ActionPlan) Confirm(verbose bool) (bool, error) { + // user provided an --interactive flag, but we go with the defaults if it's not a tty if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) { return true, nil } @@ -205,11 +206,13 @@ func (p *ActionPlan) Confirm(verbose bool) (bool, error) { Default: true, } - // in case of EOF, it's likely been closed by the package manager (freebsd?), ignore it + // in case of EOF, it's likely stdin has been closed in a script or package manager, + // we can't do anything but go with the default if err := survey.AskOne(prompt, &answer); err != nil { if errors.Is(err, io.EOF) { return prompt.Default, nil } + return false, err } @@ -218,9 +221,7 @@ func (p *ActionPlan) Confirm(verbose bool) (bool, error) { return answer, nil } -func (p *ActionPlan) Execute(ctx context.Context, confirm bool, dryRun bool, verbose bool) error { - var err error - +func (p *ActionPlan) Execute(ctx context.Context, interactive bool, dryRun bool, verbose bool) error { if len(p.commands) == 0 { // XXX: show skipped commands, warnings? 
fmt.Println("Nothing to do.") @@ -234,16 +235,16 @@ func (p *ActionPlan) Execute(ctx context.Context, confirm bool, dryRun bool, ver return nil } - if !confirm { - confirm, err = p.Confirm(verbose) + if interactive { + answer, err := p.Confirm(verbose) if err != nil { return err } - } - if !confirm { - fmt.Println("Operation canceled.") - return nil + if !answer { + fmt.Println("Operation canceled.") + return nil + } } for _, c := range p.commands { diff --git a/test/bin/remove-all-hub-items b/test/bin/remove-all-hub-items index b5d611782ff..c68bf6e586d 100755 --- a/test/bin/remove-all-hub-items +++ b/test/bin/remove-all-hub-items @@ -14,7 +14,7 @@ echo "Pre-downloading Hub content..." types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - "$CSCLI" "$itemtype" remove --all --force --purge --yes + "$CSCLI" "$itemtype" remove --all --force --purge done echo " done." diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 4f3ec7cc2ae..3e3c806b616 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -117,7 +117,7 @@ make_init_data() { "$CSCLI" --warning hub update --with-content # preload some content and data files - "$CSCLI" collections install crowdsecurity/linux --download-only --yes + "$CSCLI" collections install crowdsecurity/linux --download-only # sub-items did not respect --download-only ./bin/remove-all-hub-items From a9cc23e446221540fd5c33bf9642ef533db991fc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Feb 2025 10:52:14 +0100 Subject: [PATCH 423/581] cscli: exclude removed commands from generated docs (#3449) --- cmd/crowdsec-cli/cliconfig/backup.go | 2 ++ cmd/crowdsec-cli/cliconfig/restore.go | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/crowdsec-cli/cliconfig/backup.go b/cmd/crowdsec-cli/cliconfig/backup.go index 5cd34fcf07f..ef326fd6886 100644 --- a/cmd/crowdsec-cli/cliconfig/backup.go +++ 
b/cmd/crowdsec-cli/cliconfig/backup.go @@ -9,11 +9,13 @@ import ( func (cli *cliConfig) newBackupCmd() *cobra.Command { cmd := &cobra.Command{ Use: "backup", + Short: "This command has been removed. You can backup/restore the configuration by other means.", DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { configDir := cli.cfg().ConfigPaths.ConfigDir return fmt.Errorf("'cscli config backup' has been removed, you can manually backup/restore %s instead", configDir) }, + Hidden: true, } return cmd diff --git a/cmd/crowdsec-cli/cliconfig/restore.go b/cmd/crowdsec-cli/cliconfig/restore.go index d368b27ea30..adb3ec2d3aa 100644 --- a/cmd/crowdsec-cli/cliconfig/restore.go +++ b/cmd/crowdsec-cli/cliconfig/restore.go @@ -9,11 +9,13 @@ import ( func (cli *cliConfig) newRestoreCmd() *cobra.Command { cmd := &cobra.Command{ Use: "restore", + Short: "This command has been removed. You can backup/restore the configuration by other means.", DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, _ []string) error { + RunE: func(_ *cobra.Command, _ []string) error { configDir := cli.cfg().ConfigPaths.ConfigDir return fmt.Errorf("'cscli config restore' has been removed, you can manually backup/restore %s instead", configDir) }, + Hidden: true, } return cmd From 037bac86ad4ec19f34cb38dd9fc57d8ed7cb8313 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Feb 2025 12:14:13 +0100 Subject: [PATCH 424/581] wizard.sh: remove obsolete --yes option (#3450) --- wizard.sh | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/wizard.sh b/wizard.sh index 2d3260fc22f..bb03af5a9ac 100755 --- a/wizard.sh +++ b/wizard.sh @@ -254,26 +254,22 @@ install_collection() { fi done - local YES="" - if [[ ${SILENT} == "false" ]]; then COLLECTION_TO_INSTALL=($(whiptail --separate-output --ok-button Continue --title "Crowdsec collections" --checklist "Available collections in crowdsec, try to pick one that fits your profile. 
Collections contains parsers and scenarios to protect your system." 20 120 10 "${HMENU[@]}" 3>&1 1>&2 2>&3)) if [ $? -eq 1 ]; then log_err "user bailed out at collection selection" exit 1; fi; - else - YES="--yes" fi; for collection in "${COLLECTION_TO_INSTALL[@]}"; do log_info "Installing collection '${collection}'" # shellcheck disable=SC2248 - ${CSCLI_BIN_INSTALLED} collections install "${collection}" --error ${YES} + ${CSCLI_BIN_INSTALLED} collections install "${collection}" --error done # shellcheck disable=SC2248 - ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" --error ${YES} + ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" --error if [[ ${SILENT} == "false" ]]; then whiptail --msgbox "Out of safety, I installed a parser called 'crowdsecurity/whitelists'. This one will prevent private IP addresses from being banned, feel free to remove it any time." 20 50 fi From 9f2d6425126828b19232201928b49d5ed9a8306e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Sat, 8 Feb 2025 23:47:57 +0100 Subject: [PATCH 425/581] cscli: don't attempt to download data files when url="" (#3454) --- pkg/hubops/download.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/hubops/download.go b/pkg/hubops/download.go index 72aed542115..552fddc775c 100644 --- a/pkg/hubops/download.go +++ b/pkg/hubops/download.go @@ -114,6 +114,10 @@ func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader } for _, dataS := range data.Data { + if dataS.SourceURL == "" { + continue + } + // XXX: check context cancellation destPath, err := cwhub.SafePath(dataFolder, dataS.DestPath) if err != nil { From c5e0003b5923654b755051c49763b1a7eebef15e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 10 Feb 2025 11:24:59 +0100 Subject: [PATCH 426/581] silence "cscli hub update" if noop in cron jobs (#3460) --- pkg/cwhub/hub.go | 4 +++- test/bats/20_hub.bats | 3 ++- 2 files 
changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index aeccb3268f7..2d432df67c3 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -165,7 +165,9 @@ func (h *Hub) Update(ctx context.Context, indexProvider IndexProvider, withConte } if !downloaded { - fmt.Println("Nothing to do, the hub index is up to date.") + // use logger and the message will be silenced in the cron job + // (no mail if nothing happened) + h.logger.Info("Nothing to do, the hub index is up to date.") } return nil diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index b03b58732fa..07b8be626df 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -109,7 +109,8 @@ teardown() { rune -0 cscli hub update assert_output "Downloading $INDEX_PATH" rune -0 cscli hub update - assert_output "Nothing to do, the hub index is up to date." + refute_output + assert_stderr 'level=info msg="Nothing to do, the hub index is up to date."' } @test "cscli hub upgrade (up to date)" { From b7d0ccc95ac0a99faf23c88835a24cd559176058 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 10 Feb 2025 13:32:41 +0100 Subject: [PATCH 427/581] CI: skip unit tests with dynamic build (#3461) --- .github/workflows/go-tests.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 5a8148c473e..8629f58cf61 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -184,11 +184,9 @@ jobs: run: | make build BUILD_PROFILE=minimal - - name: Run tests again, dynamic + - name: Ensure we can do a dynamic build, without tests run: | make clean build - set -o pipefail - make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 From ae9da48d039c1ba979673499e0fdcedb6e9ae237 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> 
Date: Mon, 17 Feb 2025 10:16:09 +0100 Subject: [PATCH 428/581] deps: update gin-jwt (#3430) --- go.mod | 25 +++++++++++++------------ go.sum | 59 +++++++++++++++++++++++++++------------------------------- 2 files changed, 40 insertions(+), 44 deletions(-) diff --git a/go.mod b/go.mod index 58ee77586aa..0d3293664a3 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 - github.com/appleboy/gin-jwt/v2 v2.9.2 + github.com/appleboy/gin-jwt/v2 v2.10.1 github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go v1.52.0 github.com/beevik/etree v1.4.1 @@ -41,7 +41,7 @@ require ( github.com/expr-lang/expr v1.16.9 github.com/fatih/color v1.16.0 github.com/fsnotify/fsnotify v1.7.0 - github.com/gin-gonic/gin v1.9.1 + github.com/gin-gonic/gin v1.10.0 github.com/go-co-op/gocron v1.37.0 github.com/go-openapi/errors v0.20.1 github.com/go-openapi/strfmt v0.19.11 @@ -128,13 +128,14 @@ require ( github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bytedance/sonic v1.10.2 // indirect - github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect - github.com/chenzhuoyu/iasm v0.9.1 // indirect + github.com/bytedance/sonic v1.12.6 // indirect + github.com/bytedance/sonic/loader v0.2.1 // indirect + github.com/cloudwego/base64x v0.1.4 // indirect + github.com/cloudwego/iasm v0.2.0 // indirect github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/gabriel-vasile/mimetype v1.4.3 // indirect + github.com/gabriel-vasile/mimetype v1.4.7 // indirect github.com/gin-contrib/sse v0.1.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr 
v1.2.2 // indirect @@ -148,9 +149,9 @@ require ( github.com/go-openapi/spec v0.20.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.17.0 // indirect + github.com/go-playground/validator/v10 v10.23.0 // indirect github.com/go-stack/stack v1.8.0 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/golang/glog v1.2.2 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // indirect @@ -170,8 +171,8 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect - github.com/leodido/go-urn v1.3.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -186,7 +187,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oklog/run v1.0.0 // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -218,7 +219,7 @@ require ( go.mongodb.org/mongo-driver v1.9.4 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect - golang.org/x/arch v0.7.0 // indirect + golang.org/x/arch v0.12.0 // indirect golang.org/x/term v0.28.0 // indirect 
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.8 // indirect diff --git a/go.sum b/go.sum index e9873f0d46f..e4c35f463e1 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ github.com/alexliesenfeld/health v0.8.0/go.mod h1:TfNP0f+9WQVWMQRzvMUjlws4ceXKEL github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/appleboy/gin-jwt/v2 v2.9.2 h1:GeS3lm9mb9HMmj7+GNjYUtpp3V1DAQ1TkUFa5poiZ7Y= -github.com/appleboy/gin-jwt/v2 v2.9.2/go.mod h1:mxGjKt9Lrx9Xusy1SrnmsCJMZG6UJwmdHN9bN27/QDw= +github.com/appleboy/gin-jwt/v2 v2.10.1 h1:I68+9qGsgHDx8omd65MKhYXF7Qz5LtdFFTsB/kSU4z0= +github.com/appleboy/gin-jwt/v2 v2.10.1/go.mod h1:xuzn4aNUwqwR3+j+jbL6MhryiRKinUL1SJ7WUfB33vU= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= github.com/appleboy/gofight/v2 v2.1.2/go.mod h1:frW+U1QZEdDgixycTj4CygQ48yLTUhplt43+Wczp3rw= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -68,23 +68,21 @@ github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= -github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM= -github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZFE= -github.com/bytedance/sonic v1.10.2/go.mod 
h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= +github.com/bytedance/sonic v1.12.6 h1:/isNmCUF2x3Sh8RAp/4mh4ZGkcFAX/hLrzrK3AvpRzk= +github.com/bytedance/sonic v1.12.6/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E= +github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= -github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= -github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= -github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0= -github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= +github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y= +github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0 
h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -145,12 +143,12 @@ github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= -github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= +github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= -github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= +github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-co-op/gocron v1.37.0 h1:ZYDJGtQ4OMhTLKOKMIch+/CY70Brbb1dGdooLEhh7b0= @@ -262,8 +260,8 @@ 
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.17.0 h1:SmVVlfAOtlZncTxRuinDPomC2DkXJ4E5T9gDA0AIH74= -github.com/go-playground/validator/v10 v10.17.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o= +github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -295,8 +293,8 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54= github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng= 
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -454,8 +452,8 @@ github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHU github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -473,8 +471,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.3.0 h1:jX8FDLfW4ThVXctBNZ+3cIWnCSnrACDV73r76dy0aQQ= -github.com/leodido/go-urn v1.3.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -573,8 +571,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= -github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 h1:1Kw2vDBXmjop+LclnzCb/fFy+sgb3gYARwfmoUcQe6o= github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -681,7 +679,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= @@ -780,9 +777,8 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap 
v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= -golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= -golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg= +golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1002,7 +998,6 @@ k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= From a73bed902b65737bf99aec1a4193e4675811e456 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 17 Feb 2025 10:32:15 +0100 Subject: [PATCH 429/581] file acquisition: remove redundant logging info (#3468) * file acquisition: remove redundant logging info * lint --- 
pkg/acquisition/modules/file/file.go | 13 +++++++------ pkg/acquisition/modules/file/file_test.go | 7 +++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 697a3d35dc2..4e673f87571 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -353,7 +353,7 @@ func (f *FileSource) StreamingAcquisition(ctx context.Context, out chan types.Ev continue } - if err := fd.Close(); err != nil { + if err = fd.Close(); err != nil { f.logger.Errorf("unable to close %s : %s", file, err) continue } @@ -381,6 +381,7 @@ func (f *FileSource) StreamingAcquisition(ctx context.Context, out chan types.Ev if networkFS { f.logger.Warnf("Disabling inotify polling on %s as it is on a network share. You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + pollFile = true } } @@ -562,12 +563,12 @@ func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tail) error { logger := f.logger.WithField("tail", tail.Filename) - logger.Debugf("-> Starting tail of %s", tail.Filename) + logger.Debug("-> start tailing") for { select { case <-t.Dying(): - logger.Infof("File datasource %s stopping", tail.Filename) + logger.Info("File datasource stopping") if err := tail.Stop(); err != nil { f.logger.Errorf("error in stop : %s", err) @@ -576,7 +577,7 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai return nil case <-tail.Dying(): // our tailer is dying - errMsg := fmt.Sprintf("file reader of %s died", tail.Filename) + errMsg := "file reader died" err := tail.Err() if err != nil { @@ -588,7 +589,7 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai return nil case line := <-tail.Lines: if line == nil { - logger.Warningf("tail for %s is 
empty", tail.Filename) + logger.Warning("tail is empty") continue } @@ -663,7 +664,7 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom for scanner.Scan() { select { case <-t.Dying(): - logger.Infof("File datasource %s stopping", filename) + logger.Info("File datasource stopping") return nil default: if scanner.Text() == "" { diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index b9c6e65d8ce..1f0ab0d98c2 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -219,6 +219,7 @@ filename: test_files/test_delete.log`, err := f.Configure([]byte(tc.config), subLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedConfigErr) + if tc.expectedConfigErr != "" { return } @@ -226,18 +227,19 @@ filename: test_files/test_delete.log`, if tc.afterConfigure != nil { tc.afterConfigure() } + err = f.OneShotAcquisition(ctx, out, &tomb) - actualLines := len(out) cstest.RequireErrorContains(t, err, tc.expectedErr) if tc.expectedLines != 0 { - assert.Equal(t, tc.expectedLines, actualLines) + assert.Len(t, out, tc.expectedLines) } if tc.expectedOutput != "" { assert.Contains(t, hook.LastEntry().Message, tc.expectedOutput) hook.Reset() } + if tc.teardown != nil { tc.teardown() } @@ -391,6 +393,7 @@ force_inotify: true`, testPattern), } actualLines := 0 + if tc.expectedLines != 0 { go func() { for { From 6f737a71f159d1e5d70be1d2a0a6a5ee6c5487d9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Feb 2025 10:40:46 +0100 Subject: [PATCH 430/581] build(deps): bump github.com/golang/glog from 1.2.2 to 1.2.4 (#3431) Bumps [github.com/golang/glog](https://github.com/golang/glog) from 1.2.2 to 1.2.4. 
- [Release notes](https://github.com/golang/glog/releases) - [Commits](https://github.com/golang/glog/compare/v1.2.2...v1.2.4) --- updated-dependencies: - dependency-name: github.com/golang/glog dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0d3293664a3..25b673d7707 100644 --- a/go.mod +++ b/go.mod @@ -152,7 +152,7 @@ require ( github.com/go-playground/validator/v10 v10.23.0 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.4 // indirect - github.com/golang/glog v1.2.2 // indirect + github.com/golang/glog v1.2.4 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect diff --git a/go.sum b/go.sum index e4c35f463e1..82e1b1acc22 100644 --- a/go.sum +++ b/go.sum @@ -305,8 +305,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= -github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= +github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= From 5136d928ed27b91f45e394cd4c306dc1d0bc7380 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 17 Feb 2025 10:55:18 +0100 Subject: [PATCH 431/581] lint: gocritic/typeDefFirst (ensure type definitions come before methods) (#3404) * lint: gocritic/typeDefFirst (ensure type definitions come before methods) * lint --- .golangci.yml | 1 - cmd/crowdsec-cli/clialert/alerts.go | 14 ++--- cmd/crowdsec-cli/clidecision/decisions.go | 22 +++---- cmd/crowdsec-cli/clisupport/support.go | 12 ++-- .../configuration/configuration.go | 2 +- pkg/acquisition/modules/kafka/kafka.go | 14 ++++- pkg/apiserver/papi.go | 2 +- pkg/appsec/appsec_rules_collection.go | 6 +- pkg/csconfig/api.go | 60 +++++++++---------- pkg/exprhelpers/debugger.go | 2 +- pkg/leakybucket/manager_load.go | 8 +-- pkg/setup/detect_test.go | 2 +- 12 files changed, 80 insertions(+), 65 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index b3be5adb687..3afa4571b10 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -213,7 +213,6 @@ linters-settings: gocritic: enable-all: true disabled-checks: - - typeDefFirst - paramTypeCombine - httpNoBody - ifElseChain diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 4ae72919a9e..891a720d169 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -29,6 +29,13 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter func() *csconfig.Config + +type cliAlerts struct { + client *apiclient.ApiClient + cfg configGetter +} + func decisionsFromAlert(alert *models.Alert) string { ret := "" decMap := make(map[string]int) @@ -183,13 +190,6 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro return nil } -type configGetter func() *csconfig.Config - -type cliAlerts struct { - client *apiclient.ApiClient - cfg configGetter -} - func New(getconfig configGetter) *cliAlerts 
{ return &cliAlerts{ cfg: getconfig, diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index b5865bab6e0..da45b5f0bfb 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -24,6 +24,13 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +type configGetter func() *csconfig.Config + +type cliDecisions struct { + client *apiclient.ApiClient + cfg configGetter +} + func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, printMachine bool) error { /*here we cheat a bit : to make it more readable for the user, we dedup some entries*/ spamLimit := make(map[string]bool) @@ -65,17 +72,17 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin for _, alertItem := range *alerts { for _, decisionItem := range alertItem.Decisions { raw := []string{ - fmt.Sprintf("%d", decisionItem.ID), + strconv.FormatInt(decisionItem.ID, 10), *decisionItem.Origin, *decisionItem.Scope + ":" + *decisionItem.Value, *decisionItem.Scenario, *decisionItem.Type, alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), - fmt.Sprintf("%d", *alertItem.EventsCount), + strconv.FormatInt(int64(*alertItem.EventsCount), 10), *decisionItem.Duration, - fmt.Sprintf("%t", *decisionItem.Simulated), - fmt.Sprintf("%d", alertItem.ID), + strconv.FormatBool(*decisionItem.Simulated), + strconv.FormatInt(alertItem.ID, 10), } if printMachine { raw = append(raw, alertItem.MachineID) @@ -115,13 +122,6 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin return nil } -type configGetter func() *csconfig.Config - -type cliDecisions struct { - client *apiclient.ApiClient - cfg configGetter -} - func New(cfg configGetter) *cliDecisions { return &cliDecisions{ cfg: cfg, diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index eb3e03df253..ed52e3792f0 100644 --- a/cmd/crowdsec-cli/clisupport/support.go 
+++ b/cmd/crowdsec-cli/clisupport/support.go @@ -85,6 +85,12 @@ func stripAnsiString(str string) string { return reStripAnsi.ReplaceAllString(str, "") } +type configGetter func() *csconfig.Config + +type cliSupport struct { + cfg configGetter +} + func (cli *cliSupport) dumpMetrics(ctx context.Context, db *database.Client, zw *zip.Writer) error { log.Info("Collecting prometheus metrics") @@ -393,12 +399,6 @@ func (cli *cliSupport) dumpCrash(zw *zip.Writer) error { return nil } -type configGetter func() *csconfig.Config - -type cliSupport struct { - cfg configGetter -} - func New(cfg configGetter) *cliSupport { return &cliSupport{ cfg: cfg, diff --git a/pkg/acquisition/configuration/configuration.go b/pkg/acquisition/configuration/configuration.go index a9d570d2788..9ea1e5d2137 100644 --- a/pkg/acquisition/configuration/configuration.go +++ b/pkg/acquisition/configuration/configuration.go @@ -16,7 +16,7 @@ type DataSourceCommonCfg struct { Config map[string]interface{} `yaml:",inline"` // to keep the datasource-specific configuration directives } -var ( +const ( TAIL_MODE = "tail" CAT_MODE = "cat" SERVER_MODE = "server" // No difference with tail, just a bit more verbose diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index f213b85814c..94806f5d66b 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -23,7 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var dataSourceName = "kafka" +const dataSourceName = "kafka" var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -150,14 +150,18 @@ func (k *KafkaSource) Dump() interface{} { func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) error { // Start processing from latest Offset k.Reader.SetOffsetAt(ctx, time.Now()) + for { k.logger.Tracef("reading message from topic '%s'", k.Config.Topic) + m, err := k.Reader.ReadMessage(ctx) if err != nil { if errors.Is(err, io.EOF) { 
return nil } + k.logger.Errorln(fmt.Errorf("while reading %s message: %w", dataSourceName, err)) + continue } @@ -220,19 +224,23 @@ func (kc *KafkaConfiguration) NewTLSConfig() (*tls.Config, error) { if err != nil { return &tlsConfig, err } + tlsConfig.Certificates = []tls.Certificate{cert} caCert, err := os.ReadFile(kc.TLS.CaCert) if err != nil { return &tlsConfig, err } + caCertPool, err := x509.SystemCertPool() if err != nil { return &tlsConfig, fmt.Errorf("unable to load system CA certificates: %w", err) } + if caCertPool == nil { caCertPool = x509.NewCertPool() } + caCertPool.AppendCertsFromPEM(caCert) tlsConfig.RootCAs = caCertPool @@ -273,9 +281,11 @@ func (kc *KafkaConfiguration) NewReader(dialer *kafka.Dialer, logger *log.Entry) Logger: kafka.LoggerFunc(logger.Debugf), ErrorLogger: kafka.LoggerFunc(logger.Errorf), } + if kc.GroupID != "" && kc.Partition != 0 { return &kafka.Reader{}, errors.New("cannot specify both group_id and partition") } + if kc.GroupID != "" { rConf.GroupID = kc.GroupID } else if kc.Partition != 0 { @@ -283,8 +293,10 @@ func (kc *KafkaConfiguration) NewReader(dialer *kafka.Dialer, logger *log.Entry) } else { logger.Warnf("no group_id specified, crowdsec will only read from the 1st partition of the topic") } + if err := rConf.Validate(); err != nil { return &kafka.Reader{}, fmt.Errorf("while validating reader configuration: %w", err) } + return kafka.NewReader(rConf), nil } diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 83ba13843b9..7f494c98bf4 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var SyncInterval = time.Second * 10 +const SyncInterval = time.Second * 10 const PapiPullKey = "papi:last_pull" diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index 33e442e7f5b..9e015f5b2b6 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -18,7 +18,7 
@@ type AppsecCollection struct { NativeRules []string } -var APPSEC_RULE = "appsec-rule" +const APPSEC_RULE = "appsec-rule" // to be filled w/ seb update type AppsecCollectionConfig struct { @@ -77,18 +77,22 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro for _, rulesFile := range appsecRule.SecLangFilesRules { logger.Debugf("Adding rules from %s", rulesFile) fullPath := filepath.Join(hub.GetDataDir(), rulesFile) + c, err := os.ReadFile(fullPath) if err != nil { logger.Errorf("unable to read file %s : %s", rulesFile, err) continue } + for _, line := range strings.Split(string(c), "\n") { if strings.HasPrefix(line, "#") { continue } + if strings.TrimSpace(line) == "" { continue } + appsecCol.NativeRules = append(appsecCol.NativeRules, line) } } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index d94d90aaf19..8aa99686c2a 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -209,6 +209,36 @@ func (l *LocalApiClientCfg) Load() error { return nil } +/*local api service configuration*/ +type LocalApiServerCfg struct { + Enable *bool `yaml:"enable"` + ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 + ListenSocket string `yaml:"listen_socket,omitempty"` + TLS *TLSCfg `yaml:"tls"` + DbConfig *DatabaseCfg `yaml:"-"` + LogDir string `yaml:"-"` + LogMedia string `yaml:"-"` + OnlineClient *OnlineApiClientCfg `yaml:"online_client"` + ProfilesPath string `yaml:"profiles_path,omitempty"` + ConsoleConfigPath string `yaml:"console_path,omitempty"` + ConsoleConfig *ConsoleConfig `yaml:"-"` + Profiles []*ProfileCfg `yaml:"-"` + LogLevel *log.Level `yaml:"log_level"` + UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` + TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` + CompressLogs *bool `yaml:"-"` + LogMaxSize int `yaml:"-"` + LogMaxAge int `yaml:"-"` + LogMaxFiles int `yaml:"-"` + LogFormat string `yaml:"-"` + TrustedIPs []string `yaml:"trusted_ips,omitempty"` + PapiLogLevel 
*log.Level `yaml:"papi_log_level"` + DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` + CapiWhitelistsPath string `yaml:"capi_whitelists_path,omitempty"` + CapiWhitelists *CapiWhitelist `yaml:"-"` + AutoRegister *LocalAPIAutoRegisterCfg `yaml:"auto_registration,omitempty"` +} + func (c *LocalApiServerCfg) GetTrustedIPs() ([]net.IPNet, error) { trustedIPs := make([]net.IPNet, 0) @@ -250,36 +280,6 @@ type LocalAPIAutoRegisterCfg struct { AllowedRangesParsed []*net.IPNet `yaml:"-"` } -/*local api service configuration*/ -type LocalApiServerCfg struct { - Enable *bool `yaml:"enable"` - ListenURI string `yaml:"listen_uri,omitempty"` // 127.0.0.1:8080 - ListenSocket string `yaml:"listen_socket,omitempty"` - TLS *TLSCfg `yaml:"tls"` - DbConfig *DatabaseCfg `yaml:"-"` - LogDir string `yaml:"-"` - LogMedia string `yaml:"-"` - OnlineClient *OnlineApiClientCfg `yaml:"online_client"` - ProfilesPath string `yaml:"profiles_path,omitempty"` - ConsoleConfigPath string `yaml:"console_path,omitempty"` - ConsoleConfig *ConsoleConfig `yaml:"-"` - Profiles []*ProfileCfg `yaml:"-"` - LogLevel *log.Level `yaml:"log_level"` - UseForwardedForHeaders bool `yaml:"use_forwarded_for_headers,omitempty"` - TrustedProxies *[]string `yaml:"trusted_proxies,omitempty"` - CompressLogs *bool `yaml:"-"` - LogMaxSize int `yaml:"-"` - LogMaxAge int `yaml:"-"` - LogMaxFiles int `yaml:"-"` - LogFormat string `yaml:"-"` - TrustedIPs []string `yaml:"trusted_ips,omitempty"` - PapiLogLevel *log.Level `yaml:"papi_log_level"` - DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` - CapiWhitelistsPath string `yaml:"capi_whitelists_path,omitempty"` - CapiWhitelists *CapiWhitelist `yaml:"-"` - AutoRegister *LocalAPIAutoRegisterCfg `yaml:"auto_registration,omitempty"` -} - func (c *LocalApiServerCfg) ClientURL() string { if c == nil { return "" diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index d44b8fc97e1..d2c76e05a22 
100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -17,7 +17,7 @@ type ExprRuntimeDebug struct { Outputs []OpOutput } -var IndentStep = 4 +const IndentStep = 4 // we use this struct to store the output of the expr runtime type OpOutput struct { diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 13ce1df75ae..fbcbafcc285 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -76,7 +76,7 @@ type BucketFactory struct { } // we use one NameGenerator for all the future buckets -var seed namegenerator.Generator = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) +var seed = namegenerator.NewNameGenerator(time.Now().UTC().UnixNano()) func validateLeakyType(bucketFactory *BucketFactory) error { if bucketFactory.Capacity <= 0 { // capacity must be a positive int @@ -406,7 +406,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.logger.Tracef("Adding a non duplicate filter") bucketFactory.processors = append(bucketFactory.processors, &Uniq{}) bucketFactory.logger.Infof("Compiling distinct '%s'", bucketFactory.Distinct) - //we're compiling and discarding the expression to be able to detect it during loading + // we're compiling and discarding the expression to be able to detect it during loading _, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { return fmt.Errorf("invalid distinct '%s' in %s: %w", bucketFactory.Distinct, bucketFactory.Filename, err) @@ -416,7 +416,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.CancelOnFilter != "" { bucketFactory.logger.Tracef("Adding a cancel_on filter") bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{}) - //we're compiling and discarding the expression to be able to detect it during loading + // we're compiling and discarding the expression to be able to detect it during loading _, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { return fmt.Errorf("invalid cancel_on '%s' in %s: %w", bucketFactory.CancelOnFilter, bucketFactory.Filename, err) @@ -450,7 +450,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.ConditionalOverflow != "" { bucketFactory.logger.Tracef("Adding conditional overflow") bucketFactory.processors = append(bucketFactory.processors, &ConditionalOverflow{}) - //we're compiling and discarding the expression to be able to detect it during loading + // we're compiling and discarding the expression to be able to detect it during loading _, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]interface{}{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...) 
if err != nil { return fmt.Errorf("invalid condition '%s' in %s: %w", bucketFactory.ConditionalOverflow, bucketFactory.Filename, err) diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 72356bc1924..00b6e356bd7 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -16,7 +16,7 @@ import ( ) //nolint:dupword -var fakeSystemctlOutput = `UNIT FILE STATE VENDOR PRESET +const fakeSystemctlOutput = `UNIT FILE STATE VENDOR PRESET crowdsec-setup-detect.service enabled enabled apache2.service enabled enabled apparmor.service enabled enabled From b3da6e03ffadfc3dd385642ba93d3e42ac108444 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 17 Feb 2025 11:04:26 +0100 Subject: [PATCH 432/581] CI: lint docker tests (#3443) --- .github/workflows/docker-tests.yml | 1 + docker/test/pyproject.toml | 37 ++++++--- docker/test/tests/__init__.py | 0 docker/test/tests/conftest.py | 4 +- docker/test/tests/test_agent.py | 6 +- docker/test/tests/test_agent_only.py | 8 +- docker/test/tests/test_bouncer.py | 10 +-- docker/test/tests/test_capi.py | 6 +- docker/test/tests/test_capi_whitelists.py | 10 +-- docker/test/tests/test_cold_logs.py | 12 ++- docker/test/tests/test_flavors.py | 6 +- docker/test/tests/test_hello.py | 13 ++- docker/test/tests/test_hub.py | 4 +- docker/test/tests/test_hub_collections.py | 10 +-- docker/test/tests/test_hub_parsers.py | 8 +- docker/test/tests/test_hub_postoverflows.py | 8 +- docker/test/tests/test_hub_scenarios.py | 8 +- docker/test/tests/test_local_api_url.py | 8 +- docker/test/tests/test_local_item.py | 8 +- docker/test/tests/test_metrics.py | 10 +-- docker/test/tests/test_nolapi.py | 4 +- docker/test/tests/test_simple.py | 6 +- docker/test/tests/test_tls.py | 22 +++--- docker/test/tests/test_version.py | 4 +- docker/test/tests/test_wal.py | 8 +- docker/test/uv.lock | 88 ++++++++++++++------- 26 files changed, 156 insertions(+), 153 deletions(-) create mode 100644 
docker/test/tests/__init__.py diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 647f3e55cdb..796dd916f02 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -70,6 +70,7 @@ jobs: cd docker/test uv sync --all-extras --dev --locked uv run ruff check + uv run basedpyright uv run pytest tests -n 1 --durations=0 --color=yes env: CROWDSEC_TEST_VERSION: test diff --git a/docker/test/pyproject.toml b/docker/test/pyproject.toml index d32d184424f..5ec0c5a7f01 100644 --- a/docker/test/pyproject.toml +++ b/docker/test/pyproject.toml @@ -13,6 +13,7 @@ dependencies = [ [dependency-groups] dev = [ + "basedpyright>=1.26.0", "ipdb>=0.13.13", "ruff>=0.9.3", ] @@ -26,16 +27,34 @@ line-length = 120 [tool.ruff.lint] select = [ - "E", # pycodestyle errors - "W", # pycodestyle warnings - "F", # pyflakes - "I", # isort - "C", # flake8-comprehensions - "B", # flake8-bugbear - "UP", # pyupgrade - "C90", # macabe + "ALL" ] ignore = [ - "B008", # do not perform function calls in argument defaults + "ANN", # Missing type annotations + "COM", # flake8-commas + "D", # pydocstyle + "ERA", # eradicate + "FIX", # flake8-fixme + "TD", # flake8-todos + + "INP001", # File `...` is part of an implicit namespace package. Add an `__init__.py`. 
+ "E501", # line too long + # ^ does not ignore comments that can't be moved to their own line, line noqa, pyright + # so we take care of line lenghts only with "ruff format" + "PLR2004", # Magic value used in comparison, consider replacing `...` with a constant variable + "S101", # Use of 'assert' detected + "S603", # `subprocess` call: check for execution of untrusted input + "S607", # Starting a process with a partial executable path ] + +[tool.basedpyright] +reportUnknownArgumentType = "none" +reportUnknownParameterType = "none" +reportMissingParameterType = "none" +reportMissingTypeStubs = "none" +reportUnknownVariableType = "none" +reportUnknownMemberType = "none" +reportUnreachable = "none" +reportAny = "none" + diff --git a/docker/test/tests/__init__.py b/docker/test/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docker/test/tests/conftest.py b/docker/test/tests/conftest.py index d32ffa28c37..5c9eaded2a3 100644 --- a/docker/test/tests/conftest.py +++ b/docker/test/tests/conftest.py @@ -1,6 +1,8 @@ +from _pytest.config import Config + pytest_plugins = ("cs",) -def pytest_configure(config): +def pytest_configure(config: Config) -> None: config.addinivalue_line("markers", "docker: mark tests for lone or manually orchestrated containers") config.addinivalue_line("markers", "compose: mark tests for docker compose projects") diff --git a/docker/test/tests/test_agent.py b/docker/test/tests/test_agent.py index aec1bbdaae8..4518c9ea408 100644 --- a/docker/test/tests/test_agent.py +++ b/docker/test/tests/test_agent.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - from http import HTTPStatus import pytest @@ -7,7 +5,7 @@ pytestmark = pytest.mark.docker -def test_no_agent(crowdsec, flavor): +def test_no_agent(crowdsec, flavor: str) -> None: """Test DISABLE_AGENT=true""" env = { "DISABLE_AGENT": "true", @@ -21,7 +19,7 @@ def test_no_agent(crowdsec, flavor): assert "You can successfully interact with Local API (LAPI)" in stdout -def 
test_machine_register(crowdsec, flavor, tmp_path_factory): +def test_machine_register(crowdsec, flavor: str, tmp_path_factory: pytest.TempPathFactory) -> None: """A local agent is always registered for use by cscli""" data_dir = tmp_path_factory.mktemp("data") diff --git a/docker/test/tests/test_agent_only.py b/docker/test/tests/test_agent_only.py index 4e1689e0b9b..1da659d7ee7 100644 --- a/docker/test/tests/test_agent_only.py +++ b/docker/test/tests/test_agent_only.py @@ -1,6 +1,4 @@ -#!/usr/bin/env python - -import random +import secrets from http import HTTPStatus import pytest @@ -8,8 +6,8 @@ pytestmark = pytest.mark.docker -def test_split_lapi_agent(crowdsec, flavor): - rand = str(random.randint(0, 10000)) +def test_split_lapi_agent(crowdsec, flavor: str) -> None: + rand = str(secrets.randbelow(10000)) lapiname = f"lapi-{rand}" agentname = f"agent-{rand}" diff --git a/docker/test/tests/test_bouncer.py b/docker/test/tests/test_bouncer.py index d87aff734c5..b186c4579a8 100644 --- a/docker/test/tests/test_bouncer.py +++ b/docker/test/tests/test_bouncer.py @@ -1,10 +1,7 @@ -#!/usr/bin/env python - """ Test bouncer management: pre-installed, run-time installation and removal. 
""" -import hashlib import json from http import HTTPStatus @@ -13,12 +10,7 @@ pytestmark = pytest.mark.docker -def hex512(s): - """Return the sha512 hash of a string as a hex string""" - return hashlib.sha512(s.encode()).hexdigest() - - -def test_register_bouncer_env(crowdsec, flavor): +def test_register_bouncer_env(crowdsec, flavor: str) -> None: """Test installing bouncers at startup, from envvar""" env = {"BOUNCER_KEY_bouncer1name": "bouncer1key", "BOUNCER_KEY_bouncer2name": "bouncer2key"} diff --git a/docker/test/tests/test_capi.py b/docker/test/tests/test_capi.py index ad25f7a766f..bc748b02ef7 100644 --- a/docker/test/tests/test_capi.py +++ b/docker/test/tests/test_capi.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - from http import HTTPStatus import pytest @@ -7,7 +5,7 @@ pytestmark = pytest.mark.docker -def test_no_capi(crowdsec, flavor): +def test_no_capi(crowdsec, flavor: str) -> None: """Test no CAPI (disabled by default in tests)""" env = { @@ -26,7 +24,7 @@ def test_no_capi(crowdsec, flavor): assert not any("Registration to online API done" in line for line in logs) -def test_capi(crowdsec, flavor): +def test_capi(crowdsec, flavor: str) -> None: """Test CAPI""" env = { diff --git a/docker/test/tests/test_capi_whitelists.py b/docker/test/tests/test_capi_whitelists.py index 6cdd5f401f5..8ed0c531e98 100644 --- a/docker/test/tests/test_capi_whitelists.py +++ b/docker/test/tests/test_capi_whitelists.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - from http import HTTPStatus import pytest @@ -8,16 +6,12 @@ pytestmark = pytest.mark.docker -def test_capi_whitelists( - crowdsec, - tmp_path_factory, - flavor, -): +def test_capi_whitelists(crowdsec, tmp_path_factory: pytest.TempPathFactory, flavor: str) -> None: """Test CAPI_WHITELISTS_PATH""" env = {"CAPI_WHITELISTS_PATH": "/path/to/whitelists.yaml"} whitelists = tmp_path_factory.mktemp("whitelists") - with open(whitelists / "whitelists.yaml", "w") as f: + with (whitelists / "whitelists.yaml").open("w") as f: 
yaml.dump({"ips": ["1.2.3.4", "2.3.4.5"], "cidrs": ["1.2.3.0/24"]}, f) volumes = {whitelists / "whitelists.yaml": {"bind": "/path/to/whitelists.yaml", "mode": "ro"}} diff --git a/docker/test/tests/test_cold_logs.py b/docker/test/tests/test_cold_logs.py index 2eb3248ffd7..11c332cb302 100644 --- a/docker/test/tests/test_cold_logs.py +++ b/docker/test/tests/test_cold_logs.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - import datetime import pytest @@ -8,19 +6,19 @@ pytestmark = pytest.mark.docker -def test_cold_logs(crowdsec, tmp_path_factory, flavor): +def test_cold_logs(crowdsec, tmp_path_factory: pytest.TempPathFactory, flavor: str) -> None: env = { "DSN": "file:///var/log/toto.log", } logs = tmp_path_factory.mktemp("logs") - now = datetime.datetime.now() - datetime.timedelta(minutes=1) - with open(logs / "toto.log", "w") as f: + now = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(minutes=1) + with (logs / "toto.log").open("w") as f: # like date '+%b %d %H:%M:%S' but in python for i in range(10): ts = (now + datetime.timedelta(seconds=i)).strftime("%b %d %H:%M:%S") - f.write(ts + " sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424\n") + _ = f.write(ts + " sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424\n") volumes = { logs / "toto.log": {"bind": "/var/log/toto.log", "mode": "ro"}, @@ -44,7 +42,7 @@ def test_cold_logs(crowdsec, tmp_path_factory, flavor): ) -def test_cold_logs_missing_dsn(crowdsec, flavor): +def test_cold_logs_missing_dsn(crowdsec, flavor: str) -> None: env = { "TYPE": "syslog", } diff --git a/docker/test/tests/test_flavors.py b/docker/test/tests/test_flavors.py index a48fe428c7b..682213f0c6c 100644 --- a/docker/test/tests/test_flavors.py +++ b/docker/test/tests/test_flavors.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - """ Test basic behavior of all the image variants """ @@ -11,7 +9,7 @@ pytestmark = pytest.mark.docker -def test_cscli_lapi(crowdsec, flavor): +def test_cscli_lapi(crowdsec, flavor: 
str) -> None: """Test if cscli can talk to lapi""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") @@ -23,7 +21,7 @@ def test_cscli_lapi(crowdsec, flavor): @pytest.mark.skip(reason="currently broken by hub upgrade") -def test_flavor_content(crowdsec, flavor): +def test_flavor_content(crowdsec, flavor: str) -> None: """Test flavor contents""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") diff --git a/docker/test/tests/test_hello.py b/docker/test/tests/test_hello.py index a3ff4f07a93..bf6f5cb2e95 100644 --- a/docker/test/tests/test_hello.py +++ b/docker/test/tests/test_hello.py @@ -1,31 +1,30 @@ -#!/usr/bin/env python - """ Smoke tests in case docker is not set up correctly or has connection issues. """ import subprocess +import docker import pytest pytestmark = pytest.mark.docker -def test_docker_cli_run(): +def test_docker_cli_run() -> None: """Test if docker run works from the command line. Capture stdout too""" - res = subprocess.run(["docker", "run", "--rm", "hello-world"], capture_output=True, text=True) - assert 0 == res.returncode + res = subprocess.run(["docker", "run", "--rm", "hello-world"], capture_output=True, text=True, check=True) + assert res.returncode == 0 assert "Hello from Docker!" in res.stdout -def test_docker_run(docker_client): +def test_docker_run(docker_client: docker.DockerClient) -> None: """Test if docker run works from the python SDK.""" output = docker_client.containers.run("hello-world", remove=True) lines = output.decode().splitlines() assert "Hello from Docker!" 
in lines -def test_docker_run_detach(docker_client): +def test_docker_run_detach(docker_client: docker.DockerClient) -> None: """Test with python SDK (async).""" cont = docker_client.containers.run("hello-world", detach=True) assert cont.status == "created" diff --git a/docker/test/tests/test_hub.py b/docker/test/tests/test_hub.py index a7134fcb5c8..e64ab4a4175 100644 --- a/docker/test/tests/test_hub.py +++ b/docker/test/tests/test_hub.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - """ Test pre-installed hub items. """ @@ -12,7 +10,7 @@ pytestmark = pytest.mark.docker -def test_preinstalled_hub(crowdsec, flavor): +def test_preinstalled_hub(crowdsec, flavor: str) -> None: """Test hub objects installed in the entrypoint""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") diff --git a/docker/test/tests/test_hub_collections.py b/docker/test/tests/test_hub_collections.py index 71fa698af06..929d77361e6 100644 --- a/docker/test/tests/test_hub_collections.py +++ b/docker/test/tests/test_hub_collections.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - """ Test collection management """ @@ -12,7 +10,7 @@ pytestmark = pytest.mark.docker -def test_install_two_collections(crowdsec, flavor): +def test_install_two_collections(crowdsec, flavor: str) -> None: """Test installing collections at startup""" it1 = "crowdsecurity/apache2" it2 = "crowdsecurity/asterisk" @@ -33,7 +31,7 @@ def test_install_two_collections(crowdsec, flavor): ) -def test_disable_collection(crowdsec, flavor): +def test_disable_collection(crowdsec, flavor: str) -> None: """Test removing a pre-installed collection at startup""" it = "crowdsecurity/linux" env = {"DISABLE_COLLECTIONS": it} @@ -52,7 +50,7 @@ def test_disable_collection(crowdsec, flavor): ) -def test_install_and_disable_collection(crowdsec, flavor): +def test_install_and_disable_collection(crowdsec, flavor: str) -> None: """Declare a collection to install AND disable: disable wins""" it = "crowdsecurity/apache2" env = { @@ 
-73,7 +71,7 @@ def test_install_and_disable_collection(crowdsec, flavor): # already done in bats, prividing here as example of a somewhat complex test -def test_taint_bubble_up(crowdsec, tmp_path_factory, flavor): +def test_taint_bubble_up(crowdsec, flavor: str) -> None: coll = "crowdsecurity/nginx" env = {"COLLECTIONS": f"{coll}"} diff --git a/docker/test/tests/test_hub_parsers.py b/docker/test/tests/test_hub_parsers.py index 42794d20b42..33414b54a40 100644 --- a/docker/test/tests/test_hub_parsers.py +++ b/docker/test/tests/test_hub_parsers.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - """ Test parser management """ @@ -12,7 +10,7 @@ pytestmark = pytest.mark.docker -def test_install_two_parsers(crowdsec, flavor): +def test_install_two_parsers(crowdsec, flavor: str) -> None: """Test installing parsers at startup""" it1 = "crowdsecurity/cpanel-logs" it2 = "crowdsecurity/cowrie-logs" @@ -29,7 +27,7 @@ def test_install_two_parsers(crowdsec, flavor): # XXX check that the parser is preinstalled by default -def test_disable_parser(crowdsec, flavor): +def test_disable_parser(crowdsec, flavor: str) -> None: """Test removing a pre-installed parser at startup""" it = "crowdsecurity/whitelists" env = {"DISABLE_PARSERS": it} @@ -48,7 +46,7 @@ def test_disable_parser(crowdsec, flavor): assert it not in items -def test_install_and_disable_parser(crowdsec, flavor): +def test_install_and_disable_parser(crowdsec, flavor: str) -> None: """Declare a parser to install AND disable: disable wins""" it = "crowdsecurity/cpanel-logs" env = { diff --git a/docker/test/tests/test_hub_postoverflows.py b/docker/test/tests/test_hub_postoverflows.py index 69f383cda24..b082102d5e5 100644 --- a/docker/test/tests/test_hub_postoverflows.py +++ b/docker/test/tests/test_hub_postoverflows.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - """ Test postoverflow management """ @@ -12,7 +10,7 @@ pytestmark = pytest.mark.docker -def test_install_two_postoverflows(crowdsec, flavor): +def 
test_install_two_postoverflows(crowdsec, flavor: str) -> None: """Test installing postoverflows at startup""" it1 = "crowdsecurity/cdn-whitelist" it2 = "crowdsecurity/ipv6_to_range" @@ -30,12 +28,12 @@ def test_install_two_postoverflows(crowdsec, flavor): assert items[it2]["status"] == "enabled" -def test_disable_postoverflow(): +def test_disable_postoverflow() -> None: """Test removing a pre-installed postoverflow at startup""" pytest.skip("we don't preinstall postoverflows") -def test_install_and_disable_postoverflow(crowdsec, flavor): +def test_install_and_disable_postoverflow(crowdsec, flavor: str) -> None: """Declare a postoverflow to install AND disable: disable wins""" it = "crowdsecurity/cdn-whitelist" env = { diff --git a/docker/test/tests/test_hub_scenarios.py b/docker/test/tests/test_hub_scenarios.py index 4376a3ce64a..207461b48f8 100644 --- a/docker/test/tests/test_hub_scenarios.py +++ b/docker/test/tests/test_hub_scenarios.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - """ Test scenario management """ @@ -12,7 +10,7 @@ pytestmark = pytest.mark.docker -def test_install_two_scenarios(crowdsec, flavor): +def test_install_two_scenarios(crowdsec, flavor: str) -> None: """Test installing scenarios at startup""" it1 = "crowdsecurity/cpanel-bf-attempt" it2 = "crowdsecurity/asterisk_bf" @@ -28,7 +26,7 @@ def test_install_two_scenarios(crowdsec, flavor): assert items[it2]["status"] == "enabled" -def test_disable_scenario(crowdsec, flavor): +def test_disable_scenario(crowdsec, flavor: str) -> None: """Test removing a pre-installed scenario at startup""" it = "crowdsecurity/ssh-bf" env = {"DISABLE_SCENARIOS": it} @@ -42,7 +40,7 @@ def test_disable_scenario(crowdsec, flavor): assert it not in items -def test_install_and_disable_scenario(crowdsec, flavor): +def test_install_and_disable_scenario(crowdsec, flavor: str) -> None: """Declare a scenario to install AND disable: disable wins""" it = "crowdsecurity/asterisk_bf" env = { diff --git 
a/docker/test/tests/test_local_api_url.py b/docker/test/tests/test_local_api_url.py index e38af3fedbe..72cf120e4cd 100644 --- a/docker/test/tests/test_local_api_url.py +++ b/docker/test/tests/test_local_api_url.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - from http import HTTPStatus import pytest @@ -7,7 +5,7 @@ pytestmark = pytest.mark.docker -def test_local_api_url_default(crowdsec, flavor): +def test_local_api_url_default(crowdsec, flavor: str) -> None: """Test LOCAL_API_URL (default)""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log(["*CrowdSec Local API listening on *:8080*", "*Starting processing data*"]) @@ -19,7 +17,7 @@ def test_local_api_url_default(crowdsec, flavor): assert "You can successfully interact with Local API (LAPI)" in stdout -def test_local_api_url(crowdsec, flavor): +def test_local_api_url(crowdsec, flavor: str) -> None: """Test LOCAL_API_URL (custom)""" env = {"LOCAL_API_URL": "http://127.0.0.1:8080"} with crowdsec(flavor=flavor, environment=env) as cs: @@ -32,7 +30,7 @@ def test_local_api_url(crowdsec, flavor): assert "You can successfully interact with Local API (LAPI)" in stdout -def test_local_api_url_ipv6(crowdsec, flavor): +def test_local_api_url_ipv6(crowdsec, flavor: str) -> None: """Test LOCAL_API_URL (custom with ipv6)""" pytest.skip("ipv6 not supported yet") diff --git a/docker/test/tests/test_local_item.py b/docker/test/tests/test_local_item.py index e4c8e3c165a..4309ad57868 100644 --- a/docker/test/tests/test_local_item.py +++ b/docker/test/tests/test_local_item.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - """ Test bind-mounting local items """ @@ -12,14 +10,14 @@ pytestmark = pytest.mark.docker -def test_inject_local_item(crowdsec, tmp_path_factory, flavor): +def test_inject_local_item(crowdsec, tmp_path_factory: pytest.TempPathFactory, flavor: str) -> None: """Test mounting a custom whitelist at startup""" localitems = tmp_path_factory.mktemp("localitems") custom_whitelists = localitems / "custom_whitelists.yaml" - with 
open(custom_whitelists, "w") as f: - f.write('{"whitelist":{"reason":"Good IPs","ip":["1.2.3.4"]}}') + with custom_whitelists.open("w") as f: + _ = f.write('{"whitelist":{"reason":"Good IPs","ip":["1.2.3.4"]}}') volumes = {custom_whitelists: {"bind": "/etc/crowdsec/parsers/s02-enrich/custom_whitelists.yaml"}} diff --git a/docker/test/tests/test_metrics.py b/docker/test/tests/test_metrics.py index bd41bdcea41..34261349cc0 100644 --- a/docker/test/tests/test_metrics.py +++ b/docker/test/tests/test_metrics.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - from http import HTTPStatus import pytest @@ -7,7 +5,7 @@ pytestmark = pytest.mark.docker -def test_metrics_port_default(crowdsec, flavor): +def test_metrics_port_default(crowdsec, flavor: str) -> None: """Test metrics""" metrics_port = 6060 with crowdsec(flavor=flavor) as cs: @@ -23,7 +21,7 @@ def test_metrics_port_default(crowdsec, flavor): assert "# HELP cs_info Information about Crowdsec." in stdout -def test_metrics_port_default_ipv6(crowdsec, flavor): +def test_metrics_port_default_ipv6(crowdsec, flavor: str) -> None: """Test metrics (ipv6)""" pytest.skip("ipv6 not supported yet") port = 6060 @@ -39,7 +37,7 @@ def test_metrics_port_default_ipv6(crowdsec, flavor): assert "# HELP cs_info Information about Crowdsec." in stdout -def test_metrics_port(crowdsec, flavor): +def test_metrics_port(crowdsec, flavor: str) -> None: """Test metrics (custom METRICS_PORT)""" port = 7070 env = {"METRICS_PORT": port} @@ -55,7 +53,7 @@ def test_metrics_port(crowdsec, flavor): assert "# HELP cs_info Information about Crowdsec." 
in stdout -def test_metrics_port_ipv6(crowdsec, flavor): +def test_metrics_port_ipv6(crowdsec, flavor: str) -> None: """Test metrics (custom METRICS_PORT, ipv6)""" pytest.skip("ipv6 not supported yet") port = 7070 diff --git a/docker/test/tests/test_nolapi.py b/docker/test/tests/test_nolapi.py index e5dbc3c2624..da8849139d3 100644 --- a/docker/test/tests/test_nolapi.py +++ b/docker/test/tests/test_nolapi.py @@ -1,12 +1,10 @@ -#!/usr/bin/env python - import pytest from pytest_cs import Status pytestmark = pytest.mark.docker -def test_no_agent(crowdsec, flavor): +def test_no_agent(crowdsec, flavor: str) -> None: """Test DISABLE_LOCAL_API=true (failing stand-alone container)""" env = { "DISABLE_LOCAL_API": "true", diff --git a/docker/test/tests/test_simple.py b/docker/test/tests/test_simple.py index b5c8425b371..1ae7e5c7030 100644 --- a/docker/test/tests/test_simple.py +++ b/docker/test/tests/test_simple.py @@ -1,16 +1,14 @@ -#!/usr/bin/env python - import pytest pytestmark = pytest.mark.docker # XXX this is redundant, already tested in pytest_cs -def test_crowdsec(crowdsec, flavor): +def test_crowdsec(crowdsec, flavor: str) -> None: with crowdsec(flavor=flavor) as cs: for waiter in cs.log_waiters(): with waiter as matcher: matcher.fnmatch_lines(["*Starting processing data*"]) res = cs.cont.exec_run('sh -c "echo $CI_TESTING"') assert res.exit_code == 0 - assert "true" == res.output.decode().strip() + assert res.output.decode().strip() == "true" diff --git a/docker/test/tests/test_tls.py b/docker/test/tests/test_tls.py index 220738a9f07..15f5c46634a 100644 --- a/docker/test/tests/test_tls.py +++ b/docker/test/tests/test_tls.py @@ -1,10 +1,10 @@ -#!/usr/bin/env python - """ Test agent-lapi and cscli-lapi communication via TLS, on the same container. 
""" +import pathlib import uuid +from collections.abc import Callable import pytest from pytest_cs import Status @@ -12,7 +12,7 @@ pytestmark = pytest.mark.docker -def test_missing_key_file(crowdsec, flavor): +def test_missing_key_file(crowdsec, flavor: str) -> None: """Test that cscli and agent can communicate to LAPI with TLS""" env = { @@ -24,7 +24,7 @@ def test_missing_key_file(crowdsec, flavor): cs.wait_for_log("*local API server stopped with error: missing TLS key file*") -def test_missing_cert_file(crowdsec, flavor): +def test_missing_cert_file(crowdsec, flavor: str) -> None: """Test that cscli and agent can communicate to LAPI with TLS""" env = { @@ -36,7 +36,7 @@ def test_missing_cert_file(crowdsec, flavor): cs.wait_for_log("*local API server stopped with error: missing TLS cert file*") -def test_tls_missing_ca(crowdsec, flavor, certs_dir): +def test_tls_missing_ca(crowdsec, flavor: str, certs_dir: Callable[..., pathlib.Path]) -> None: """Missing CA cert, unknown authority""" env = { @@ -54,7 +54,7 @@ def test_tls_missing_ca(crowdsec, flavor, certs_dir): cs.wait_for_log("*certificate signed by unknown authority*") -def test_tls_legacy_var(crowdsec, flavor, certs_dir): +def test_tls_legacy_var(crowdsec, flavor: str, certs_dir: Callable[..., pathlib.Path]) -> None: """Test server-only certificate, legacy variables""" env = { @@ -79,7 +79,7 @@ def test_tls_legacy_var(crowdsec, flavor, certs_dir): assert "You can successfully interact with Local API (LAPI)" in stdout -def test_tls_mutual_monolith(crowdsec, flavor, certs_dir): +def test_tls_mutual_monolith(crowdsec, flavor: str, certs_dir: Callable[..., pathlib.Path]) -> None: """Server and client certificates, on the same container""" env = { @@ -106,7 +106,7 @@ def test_tls_mutual_monolith(crowdsec, flavor, certs_dir): assert "You can successfully interact with Local API (LAPI)" in stdout -def test_tls_lapi_var(crowdsec, flavor, certs_dir): +def test_tls_lapi_var(crowdsec, flavor: str, certs_dir: 
Callable[..., pathlib.Path]) -> None: """Test server-only certificate, lapi variables""" env = { @@ -136,7 +136,7 @@ def test_tls_lapi_var(crowdsec, flavor, certs_dir): # we must set insecure_skip_verify to true to use it -def test_tls_split_lapi_agent(crowdsec, flavor, certs_dir): +def test_tls_split_lapi_agent(crowdsec, flavor: str, certs_dir: Callable[..., pathlib.Path]) -> None: """Server-only certificate, split containers""" rand = uuid.uuid1() @@ -188,7 +188,7 @@ def test_tls_split_lapi_agent(crowdsec, flavor, certs_dir): assert "You can successfully interact with Local API (LAPI)" in stdout -def test_tls_mutual_split_lapi_agent(crowdsec, flavor, certs_dir): +def test_tls_mutual_split_lapi_agent(crowdsec, flavor: str, certs_dir: Callable[..., pathlib.Path]) -> None: """Server and client certificates, split containers""" rand = uuid.uuid1() @@ -238,7 +238,7 @@ def test_tls_mutual_split_lapi_agent(crowdsec, flavor, certs_dir): assert "You can successfully interact with Local API (LAPI)" in stdout -def test_tls_client_ou(crowdsec, flavor, certs_dir): +def test_tls_client_ou(crowdsec, flavor: str, certs_dir: Callable[..., pathlib.Path]) -> None: """Check behavior of client certificate vs AGENTS_ALLOWED_OU""" rand = uuid.uuid1() diff --git a/docker/test/tests/test_version.py b/docker/test/tests/test_version.py index baac61c36ab..8e01c639f9c 100644 --- a/docker/test/tests/test_version.py +++ b/docker/test/tests/test_version.py @@ -1,11 +1,9 @@ -#!/usr/bin/env python - import pytest pytestmark = pytest.mark.docker -def test_version_docker_platform(crowdsec, flavor): +def test_version_docker_platform(crowdsec, flavor: str) -> None: with crowdsec(flavor=flavor) as cs: for waiter in cs.log_waiters(): with waiter as matcher: diff --git a/docker/test/tests/test_wal.py b/docker/test/tests/test_wal.py index e1fe3d260be..1431866c61d 100644 --- a/docker/test/tests/test_wal.py +++ b/docker/test/tests/test_wal.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python - from http import 
HTTPStatus import pytest @@ -7,7 +5,7 @@ pytestmark = pytest.mark.docker -def test_use_wal_default(crowdsec, flavor): +def test_use_wal_default(crowdsec, flavor: str) -> None: """Test USE_WAL default""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") @@ -18,7 +16,7 @@ def test_use_wal_default(crowdsec, flavor): assert "false" in stdout -def test_use_wal_true(crowdsec, flavor): +def test_use_wal_true(crowdsec, flavor: str) -> None: """Test USE_WAL=true""" env = { "USE_WAL": "true", @@ -32,7 +30,7 @@ def test_use_wal_true(crowdsec, flavor): assert "true" in stdout -def test_use_wal_false(crowdsec, flavor): +def test_use_wal_false(crowdsec, flavor: str) -> None: """Test USE_WAL=false""" env = { "USE_WAL": "false", diff --git a/docker/test/uv.lock b/docker/test/uv.lock index d8cc42c89ab..a930db9cd2f 100644 --- a/docker/test/uv.lock +++ b/docker/test/uv.lock @@ -10,13 +10,25 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, ] +[[package]] +name = "basedpyright" +version = "1.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodejs-wheel-binaries" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/c2/5685d040d4f2598788d42bfd2db5f808e9aa2eaee77fcae3c2fbe4ea0e7c/basedpyright-1.26.0.tar.gz", hash = "sha256:5e01f6eb9290a09ef39672106cf1a02924fdc8970e521838bc502ccf0676f32f", size = 24932771 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/72/65308f45bb73efc93075426cac5f37eea937ae364aa675785521cb3512c7/basedpyright-1.26.0-py3-none-any.whl", hash = "sha256:5a6a17f2c389ec313dd2c3644f40e8221bc90252164802e626055341c0a37381", size = 11504579 }, +] + [[package]] name = "certifi" -version = "2024.12.14" +version = "2025.1.31" source = { registry = 
"https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010 } +sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 } wheels = [ - { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927 }, + { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 }, ] [[package]] @@ -109,6 +121,7 @@ dependencies = [ [package.dev-dependencies] dev = [ + { name = "basedpyright" }, { name = "ipdb" }, { name = "ruff" }, ] @@ -123,6 +136,7 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ + { name = "basedpyright", specifier = ">=1.26.0" }, { name = "ipdb", specifier = ">=0.13.13" }, { name = "ruff", specifier = ">=0.9.3" }, ] @@ -232,7 +246,7 @@ wheels = [ [[package]] name = "ipython" -version = "8.31.0" +version = "8.32.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -245,9 +259,9 @@ dependencies = [ { name = "stack-data" }, { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/01/35/6f90fdddff7a08b7b715fccbd2427b5212c9525cd043d26fdc45bee0708d/ipython-8.31.0.tar.gz", hash = "sha256:b6a2274606bec6166405ff05e54932ed6e5cfecaca1fc05f2cacde7bb074d70b", size = 5501011 } +sdist = { url 
= "https://files.pythonhosted.org/packages/36/80/4d2a072e0db7d250f134bc11676517299264ebe16d62a8619d49a78ced73/ipython-8.32.0.tar.gz", hash = "sha256:be2c91895b0b9ea7ba49d33b23e2040c352b33eb6a519cca7ce6e0c743444251", size = 5507441 } wheels = [ - { url = "https://files.pythonhosted.org/packages/04/60/d0feb6b6d9fe4ab89fe8fe5b47cbf6cd936bfd9f1e7ffa9d0015425aeed6/ipython-8.31.0-py3-none-any.whl", hash = "sha256:46ec58f8d3d076a61d128fe517a51eb730e3aaf0c184ea8c17d16e366660c6a6", size = 821583 }, + { url = "https://files.pythonhosted.org/packages/e7/e1/f4474a7ecdb7745a820f6f6039dc43c66add40f1bcc66485607d93571af6/ipython-8.32.0-py3-none-any.whl", hash = "sha256:cae85b0c61eff1fc48b0a8002de5958b6528fa9c8defb1894da63f42613708aa", size = 825524 }, ] [[package]] @@ -274,6 +288,22 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, ] +[[package]] +name = "nodejs-wheel-binaries" +version = "22.13.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5d/c5/1af2fc54fcc18f4a99426b46f18832a04f755ee340019e1be536187c1e1c/nodejs_wheel_binaries-22.13.1.tar.gz", hash = "sha256:a0c15213c9c3383541be4400a30959883868ce5da9cebb3d63ddc7fe61459308", size = 8053 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e9/b0dd118e0fd4eabe1ec9c3d9a68df4d811282e8837b811d804f23742e117/nodejs_wheel_binaries-22.13.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e4f64d0e26600d51cbdd98a6718a19c2d1b8c7538e9e353e95a634a06a8e1a58", size = 51015650 }, + { url = "https://files.pythonhosted.org/packages/cc/a6/9ba835f5d4f3f6b1f01191e7ac0874871f9743de5c42a5a9a54e67c2e2a6/nodejs_wheel_binaries-22.13.1-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:afcb40484bb02f23137f838014724604ae183fd767b30da95b0be1510a40c06d", size = 
51814957 }, + { url = "https://files.pythonhosted.org/packages/0d/2e/a430207e5f22bd3dcffb81acbddf57ee4108b9e2b0f99a5578dc2c1ff7fc/nodejs_wheel_binaries-22.13.1-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fc88c98eebabfc36b5270a4ab974a2682746931567ca76a5ca49c54482bbb51", size = 57148437 }, + { url = "https://files.pythonhosted.org/packages/97/f4/5731b6f0c8af434619b4f1b8fd895bc33fca60168cd68133e52841872114/nodejs_wheel_binaries-22.13.1-py2.py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b9f75ea8f5e3e5416256fcb00a98cbe14c8d3b6dcaf17da29c4ade5723026d8", size = 57634451 }, + { url = "https://files.pythonhosted.org/packages/49/28/83166f7e39812e9ef99cfa3e722c54e32dd9de6a1290f3216c2e5d1f4957/nodejs_wheel_binaries-22.13.1-py2.py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:94608702ef6c389d32e89ff3b7a925cb5dedaf55b5d98bd0c4fb3450a8b6d1c1", size = 58794510 }, + { url = "https://files.pythonhosted.org/packages/f7/64/4832ec26d0a7ca7a5574df265d85c6832f9a624024511fc34958227ad740/nodejs_wheel_binaries-22.13.1-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:53a40d0269689aa2eaf2e261cbe5ec256644bc56aae0201ef344b7d8f40ccc79", size = 59738596 }, + { url = "https://files.pythonhosted.org/packages/18/cd/def29615dac250cda3d141e1c03b7153b9a027360bde0272a6768c5fae33/nodejs_wheel_binaries-22.13.1-py2.py3-none-win_amd64.whl", hash = "sha256:549371a929a29fbce8d0ab8f1b5410549946d4f1b0376a5ce635b45f6d05298f", size = 40455444 }, + { url = "https://files.pythonhosted.org/packages/15/d7/6de2bc615203bf590ca437a5cac145b2f86d994ce329489125a0a90ba715/nodejs_wheel_binaries-22.13.1-py2.py3-none-win_arm64.whl", hash = "sha256:cf72d50d755f4e5c0709b0449de01768d96b3b1ec7aa531561415b88f179ad8b", size = 36200929 }, +] + [[package]] name = "packaging" version = "24.2" @@ -393,8 +423,8 @@ wheels = [ [[package]] name = "pytest-cs" -version = "0.7.20" -source = { git = 
"https://github.com/crowdsecurity/pytest-cs#73380b837a80337f361414bebbaf4b914713c4ae" } +version = "0.7.21" +source = { git = "https://github.com/crowdsecurity/pytest-cs#1eb949d7befa6fe172bf459616b267d4ffc01179" } dependencies = [ { name = "docker" }, { name = "psutil" }, @@ -509,27 +539,27 @@ wheels = [ [[package]] name = "ruff" -version = "0.9.3" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1e/7f/60fda2eec81f23f8aa7cbbfdf6ec2ca11eb11c273827933fb2541c2ce9d8/ruff-0.9.3.tar.gz", hash = "sha256:8293f89985a090ebc3ed1064df31f3b4b56320cdfcec8b60d3295bddb955c22a", size = 3586740 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f9/77/4fb790596d5d52c87fd55b7160c557c400e90f6116a56d82d76e95d9374a/ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624", size = 11656815 }, - { url = "https://files.pythonhosted.org/packages/a2/a8/3338ecb97573eafe74505f28431df3842c1933c5f8eae615427c1de32858/ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c", size = 11594821 }, - { url = "https://files.pythonhosted.org/packages/8e/89/320223c3421962762531a6b2dd58579b858ca9916fb2674874df5e97d628/ruff-0.9.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c59ab92f8e92d6725b7ded9d4a31be3ef42688a115c6d3da9457a5bda140e2b4", size = 11040475 }, - { url = "https://files.pythonhosted.org/packages/b2/bd/1d775eac5e51409535804a3a888a9623e87a8f4b53e2491580858a083692/ruff-0.9.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc153c25e715be41bb228bc651c1e9b1a88d5c6e5ed0194fa0dfea02b026439", size = 11856207 }, - { url = "https://files.pythonhosted.org/packages/7f/c6/3e14e09be29587393d188454064a4aa85174910d16644051a80444e4fd88/ruff-0.9.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:646909a1e25e0dc28fbc529eab8eb7bb583079628e8cbe738192853dbbe43af5", size = 11420460 }, - { url = "https://files.pythonhosted.org/packages/ef/42/b7ca38ffd568ae9b128a2fa76353e9a9a3c80ef19746408d4ce99217ecc1/ruff-0.9.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a5a46e09355695fbdbb30ed9889d6cf1c61b77b700a9fafc21b41f097bfbba4", size = 12605472 }, - { url = "https://files.pythonhosted.org/packages/a6/a1/3167023f23e3530fde899497ccfe239e4523854cb874458ac082992d206c/ruff-0.9.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c4bb09d2bbb394e3730d0918c00276e79b2de70ec2a5231cd4ebb51a57df9ba1", size = 13243123 }, - { url = "https://files.pythonhosted.org/packages/d0/b4/3c600758e320f5bf7de16858502e849f4216cb0151f819fa0d1154874802/ruff-0.9.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96a87ec31dc1044d8c2da2ebbed1c456d9b561e7d087734336518181b26b3aa5", size = 12744650 }, - { url = "https://files.pythonhosted.org/packages/be/38/266fbcbb3d0088862c9bafa8b1b99486691d2945a90b9a7316336a0d9a1b/ruff-0.9.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb7554aca6f842645022fe2d301c264e6925baa708b392867b7a62645304df4", size = 14458585 }, - { url = "https://files.pythonhosted.org/packages/63/a6/47fd0e96990ee9b7a4abda62de26d291bd3f7647218d05b7d6d38af47c30/ruff-0.9.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabc332b7075a914ecea912cd1f3d4370489c8018f2c945a30bcc934e3bc06a6", size = 12419624 }, - { url = "https://files.pythonhosted.org/packages/84/5d/de0b7652e09f7dda49e1a3825a164a65f4998175b6486603c7601279baad/ruff-0.9.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:33866c3cc2a575cbd546f2cd02bdd466fed65118e4365ee538a3deffd6fcb730", size = 11843238 }, - { url = "https://files.pythonhosted.org/packages/9e/be/3f341ceb1c62b565ec1fb6fd2139cc40b60ae6eff4b6fb8f94b1bb37c7a9/ruff-0.9.3-py3-none-musllinux_1_2_armv7l.whl", hash = 
"sha256:006e5de2621304c8810bcd2ee101587712fa93b4f955ed0985907a36c427e0c2", size = 11484012 }, - { url = "https://files.pythonhosted.org/packages/a3/c8/ff8acbd33addc7e797e702cf00bfde352ab469723720c5607b964491d5cf/ruff-0.9.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ba6eea4459dbd6b1be4e6bfc766079fb9b8dd2e5a35aff6baee4d9b1514ea519", size = 12038494 }, - { url = "https://files.pythonhosted.org/packages/73/b1/8d9a2c0efbbabe848b55f877bc10c5001a37ab10aca13c711431673414e5/ruff-0.9.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90230a6b8055ad47d3325e9ee8f8a9ae7e273078a66401ac66df68943ced029b", size = 12473639 }, - { url = "https://files.pythonhosted.org/packages/cb/44/a673647105b1ba6da9824a928634fe23186ab19f9d526d7bdf278cd27bc3/ruff-0.9.3-py3-none-win32.whl", hash = "sha256:eabe5eb2c19a42f4808c03b82bd313fc84d4e395133fb3fc1b1516170a31213c", size = 9834353 }, - { url = "https://files.pythonhosted.org/packages/c3/01/65cadb59bf8d4fbe33d1a750103e6883d9ef302f60c28b73b773092fbde5/ruff-0.9.3-py3-none-win_amd64.whl", hash = "sha256:040ceb7f20791dfa0e78b4230ee9dce23da3b64dd5848e40e3bf3ab76468dcf4", size = 10821444 }, - { url = "https://files.pythonhosted.org/packages/69/cb/b3fe58a136a27d981911cba2f18e4b29f15010623b79f0f2510fd0d31fd3/ruff-0.9.3-py3-none-win_arm64.whl", hash = "sha256:800d773f6d4d33b0a3c60e2c6ae8f4c202ea2de056365acfa519aa48acf28e0b", size = 10038168 }, +version = "0.9.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c0/17/529e78f49fc6f8076f50d985edd9a2cf011d1dbadb1cdeacc1d12afc1d26/ruff-0.9.4.tar.gz", hash = "sha256:6907ee3529244bb0ed066683e075f09285b38dd5b4039370df6ff06041ca19e7", size = 3599458 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/f8/3fafb7804d82e0699a122101b5bee5f0d6e17c3a806dcbc527bb7d3f5b7a/ruff-0.9.4-py3-none-linux_armv6l.whl", hash = "sha256:64e73d25b954f71ff100bb70f39f1ee09e880728efb4250c632ceed4e4cdf706", size = 11668400 }, + { url = 
"https://files.pythonhosted.org/packages/2e/a6/2efa772d335da48a70ab2c6bb41a096c8517ca43c086ea672d51079e3d1f/ruff-0.9.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6ce6743ed64d9afab4fafeaea70d3631b4d4b28b592db21a5c2d1f0ef52934bf", size = 11628395 }, + { url = "https://files.pythonhosted.org/packages/dc/d7/cd822437561082f1c9d7225cc0d0fbb4bad117ad7ac3c41cd5d7f0fa948c/ruff-0.9.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:54499fb08408e32b57360f6f9de7157a5fec24ad79cb3f42ef2c3f3f728dfe2b", size = 11090052 }, + { url = "https://files.pythonhosted.org/packages/9e/67/3660d58e893d470abb9a13f679223368ff1684a4ef40f254a0157f51b448/ruff-0.9.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37c892540108314a6f01f105040b5106aeb829fa5fb0561d2dcaf71485021137", size = 11882221 }, + { url = "https://files.pythonhosted.org/packages/79/d1/757559995c8ba5f14dfec4459ef2dd3fcea82ac43bc4e7c7bf47484180c0/ruff-0.9.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de9edf2ce4b9ddf43fd93e20ef635a900e25f622f87ed6e3047a664d0e8f810e", size = 11424862 }, + { url = "https://files.pythonhosted.org/packages/c0/96/7915a7c6877bb734caa6a2af424045baf6419f685632469643dbd8eb2958/ruff-0.9.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c90c32357c74f11deb7fbb065126d91771b207bf9bfaaee01277ca59b574ec", size = 12626735 }, + { url = "https://files.pythonhosted.org/packages/0e/cc/dadb9b35473d7cb17c7ffe4737b4377aeec519a446ee8514123ff4a26091/ruff-0.9.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:56acd6c694da3695a7461cc55775f3a409c3815ac467279dfa126061d84b314b", size = 13255976 }, + { url = "https://files.pythonhosted.org/packages/5f/c3/ad2dd59d3cabbc12df308cced780f9c14367f0321e7800ca0fe52849da4c/ruff-0.9.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0c93e7d47ed951b9394cf352d6695b31498e68fd5782d6cbc282425655f687a", size = 12752262 }, + { url = 
"https://files.pythonhosted.org/packages/c7/17/5f1971e54bd71604da6788efd84d66d789362b1105e17e5ccc53bba0289b/ruff-0.9.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4c8772670aecf037d1bf7a07c39106574d143b26cfe5ed1787d2f31e800214", size = 14401648 }, + { url = "https://files.pythonhosted.org/packages/30/24/6200b13ea611b83260501b6955b764bb320e23b2b75884c60ee7d3f0b68e/ruff-0.9.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfc5f1d7afeda8d5d37660eeca6d389b142d7f2b5a1ab659d9214ebd0e025231", size = 12414702 }, + { url = "https://files.pythonhosted.org/packages/34/cb/f5d50d0c4ecdcc7670e348bd0b11878154bc4617f3fdd1e8ad5297c0d0ba/ruff-0.9.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:faa935fc00ae854d8b638c16a5f1ce881bc3f67446957dd6f2af440a5fc8526b", size = 11859608 }, + { url = "https://files.pythonhosted.org/packages/d6/f4/9c8499ae8426da48363bbb78d081b817b0f64a9305f9b7f87eab2a8fb2c1/ruff-0.9.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a6c634fc6f5a0ceae1ab3e13c58183978185d131a29c425e4eaa9f40afe1e6d6", size = 11485702 }, + { url = "https://files.pythonhosted.org/packages/18/59/30490e483e804ccaa8147dd78c52e44ff96e1c30b5a95d69a63163cdb15b/ruff-0.9.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:433dedf6ddfdec7f1ac7575ec1eb9844fa60c4c8c2f8887a070672b8d353d34c", size = 12067782 }, + { url = "https://files.pythonhosted.org/packages/3d/8c/893fa9551760b2f8eb2a351b603e96f15af167ceaf27e27ad873570bc04c/ruff-0.9.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d612dbd0f3a919a8cc1d12037168bfa536862066808960e0cc901404b77968f0", size = 12483087 }, + { url = "https://files.pythonhosted.org/packages/23/15/f6751c07c21ca10e3f4a51ea495ca975ad936d780c347d9808bcedbd7182/ruff-0.9.4-py3-none-win32.whl", hash = "sha256:db1192ddda2200671f9ef61d9597fcef89d934f5d1705e571a93a67fb13a4402", size = 9852302 }, + { url = 
"https://files.pythonhosted.org/packages/12/41/2d2d2c6a72e62566f730e49254f602dfed23019c33b5b21ea8f8917315a1/ruff-0.9.4-py3-none-win_amd64.whl", hash = "sha256:05bebf4cdbe3ef75430d26c375773978950bbf4ee3c95ccb5448940dc092408e", size = 10850051 }, + { url = "https://files.pythonhosted.org/packages/c6/e6/3d6ec3bc3d254e7f005c543a661a41c3e788976d0e52a1ada195bd664344/ruff-0.9.4-py3-none-win_arm64.whl", hash = "sha256:585792f1e81509e38ac5123492f8875fbc36f3ede8185af0a26df348e5154f41", size = 10078251 }, ] [[package]] From 7c1d038645d316b095a004c5f353a68cf8cd7dd8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 18 Feb 2025 15:52:51 +0100 Subject: [PATCH 433/581] leaky bucket: reduce log verbosity (#3472) --- pkg/leakybucket/manager_load.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index fbcbafcc285..cdf8f080773 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -405,7 +405,6 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Distinct != "" { bucketFactory.logger.Tracef("Adding a non duplicate filter") bucketFactory.processors = append(bucketFactory.processors, &Uniq{}) - bucketFactory.logger.Infof("Compiling distinct '%s'", bucketFactory.Distinct) // we're compiling and discarding the expression to be able to detect it during loading _, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
if err != nil { From efbb42bf9eb1ceef46275fc827e6b000c5295a1b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 18 Feb 2025 17:08:58 +0100 Subject: [PATCH 434/581] deps: use ent 0.14.2 (#3259) --- go.mod | 15 ++++++----- go.sum | 40 ++++++++++++++-------------- pkg/database/ent/alert_query.go | 21 ++++++++------- pkg/database/ent/bouncer_query.go | 21 ++++++++------- pkg/database/ent/configitem_query.go | 21 ++++++++------- pkg/database/ent/decision_query.go | 21 ++++++++------- pkg/database/ent/ent.go | 2 +- pkg/database/ent/event_query.go | 21 ++++++++------- pkg/database/ent/generate.go | 2 +- pkg/database/ent/lock_query.go | 21 ++++++++------- pkg/database/ent/machine_query.go | 21 ++++++++------- pkg/database/ent/meta_query.go | 21 ++++++++------- pkg/database/ent/metric_query.go | 21 ++++++++------- pkg/database/ent/runtime/runtime.go | 4 +-- 14 files changed, 132 insertions(+), 120 deletions(-) diff --git a/go.mod b/go.mod index 25b673d7707..63b0a1893f0 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ go 1.23.6 // toolchain go1.21.3 require ( - entgo.io/ent v0.13.1 + entgo.io/ent v0.14.2 github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 @@ -104,10 +104,10 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/crypto v0.32.0 - golang.org/x/mod v0.20.0 + golang.org/x/mod v0.23.0 golang.org/x/net v0.34.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.29.0 + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 golang.org/x/text v0.21.0 golang.org/x/time v0.6.0 // indirect google.golang.org/grpc v1.67.1 @@ -122,12 +122,14 @@ require ( ) require ( - ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect + ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 // indirect github.com/Masterminds/goutils v1.1.1 // indirect 
github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect github.com/bytedance/sonic v1.12.6 // indirect github.com/bytedance/sonic/loader v0.2.1 // indirect github.com/cloudwego/base64x v0.1.4 // indirect @@ -215,7 +217,8 @@ require ( github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - github.com/zclconf/go-cty v1.8.0 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty-yaml v1.1.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect diff --git a/go.sum b/go.sum index 82e1b1acc22..12aa4934b1f 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,9 @@ -ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= -ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= +ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 h1:nX4HXncwIdvQ8/8sIUIf1nyCkK8qdBaHQ7EtzPpuiGE= +ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= bitbucket.org/creachadair/stringset v0.0.9 h1:L4vld9nzPt90UZNrXjNelTshD74ps4P5NGs3Iq6yN3o= bitbucket.org/creachadair/stringset v0.0.9/go.mod h1:t+4WcQ4+PXTa8aQdNKe40ZP6iwesoMFWAxPGd3UGjyY= -entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE= -entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A= +entgo.io/ent v0.14.2 h1:ywld/j2Rx4EmnIKs8eZ29cbFA1zpB+DA9TLL5l3rlq0= +entgo.io/ent v0.14.2/go.mod 
h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM= github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ= github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -41,6 +41,8 @@ github.com/alexliesenfeld/health v0.8.0/go.mod h1:TfNP0f+9WQVWMQRzvMUjlws4ceXKEL github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/appleboy/gin-jwt/v2 v2.10.1 h1:I68+9qGsgHDx8omd65MKhYXF7Qz5LtdFFTsB/kSU4z0= github.com/appleboy/gin-jwt/v2 v2.10.1/go.mod h1:xuzn4aNUwqwR3+j+jbL6MhryiRKinUL1SJ7WUfB33vU= github.com/appleboy/gofight/v2 v2.1.2 h1:VOy3jow4vIK8BRQJoC/I9muxyYlJ2yb9ht2hZoS3rf4= @@ -66,6 +68,8 @@ github.com/blackfireio/osinfo v1.0.5 h1:6hlaWzfcpb87gRmznVf7wSdhysGqLRz9V/xuSdCE github.com/blackfireio/osinfo v1.0.5/go.mod h1:Pd987poVNmd5Wsx6PRPw4+w7kLlf9iJxoRKPtPAjOrA= github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw= github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bytedance/sonic v1.12.6 
h1:/isNmCUF2x3Sh8RAp/4mh4ZGkcFAX/hLrzrK3AvpRzk= @@ -310,7 +314,6 @@ github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwm github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -318,7 +321,6 @@ github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -710,8 +712,6 @@ github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= 
-github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/wasilibs/go-re2 v1.7.0 h1:bYhl8gn+a9h01dxwotNycxkiFPTiSgwUrIz8KZJ90Lc= github.com/wasilibs/go-re2 v1.7.0/go.mod h1:sUsZMLflgl+LNivDE229omtmvjICmOseT9xOy199VDU= github.com/wasilibs/nottinygc v0.4.0 h1:h1TJMihMC4neN6Zq+WKpLxgd9xCFMw7O9ETLwY2exJQ= @@ -736,8 +736,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= -github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -807,8 +809,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -821,7 +823,6 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -843,8 +844,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 
h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -880,8 +881,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -928,8 +929,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.30.0 
h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -938,7 +939,6 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= diff --git a/pkg/database/ent/alert_query.go b/pkg/database/ent/alert_query.go index 7eddb6ce024..ae0fbc96531 100644 --- a/pkg/database/ent/alert_query.go +++ b/pkg/database/ent/alert_query.go @@ -8,6 +8,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -158,7 +159,7 @@ func (aq *AlertQuery) QueryMetas() *MetaQuery { // First returns the first Alert entity from the query. // Returns a *NotFoundError when no Alert was found. 
func (aq *AlertQuery) First(ctx context.Context) (*Alert, error) { - nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First")) + nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -181,7 +182,7 @@ func (aq *AlertQuery) FirstX(ctx context.Context) *Alert { // Returns a *NotFoundError when no Alert ID was found. func (aq *AlertQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil { + if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -204,7 +205,7 @@ func (aq *AlertQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Alert entity is found. // Returns a *NotFoundError when no Alert entities are found. func (aq *AlertQuery) Only(ctx context.Context) (*Alert, error) { - nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only")) + nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -232,7 +233,7 @@ func (aq *AlertQuery) OnlyX(ctx context.Context) *Alert { // Returns a *NotFoundError when no entities are found. func (aq *AlertQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil { + if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -257,7 +258,7 @@ func (aq *AlertQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Alerts. 
func (aq *AlertQuery) All(ctx context.Context) ([]*Alert, error) { - ctx = setContextOp(ctx, aq.ctx, "All") + ctx = setContextOp(ctx, aq.ctx, ent.OpQueryAll) if err := aq.prepareQuery(ctx); err != nil { return nil, err } @@ -279,7 +280,7 @@ func (aq *AlertQuery) IDs(ctx context.Context) (ids []int, err error) { if aq.ctx.Unique == nil && aq.path != nil { aq.Unique(true) } - ctx = setContextOp(ctx, aq.ctx, "IDs") + ctx = setContextOp(ctx, aq.ctx, ent.OpQueryIDs) if err = aq.Select(alert.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -297,7 +298,7 @@ func (aq *AlertQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (aq *AlertQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, aq.ctx, "Count") + ctx = setContextOp(ctx, aq.ctx, ent.OpQueryCount) if err := aq.prepareQuery(ctx); err != nil { return 0, err } @@ -315,7 +316,7 @@ func (aq *AlertQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (aq *AlertQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, aq.ctx, "Exist") + ctx = setContextOp(ctx, aq.ctx, ent.OpQueryExist) switch _, err := aq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -759,7 +760,7 @@ func (agb *AlertGroupBy) Aggregate(fns ...AggregateFunc) *AlertGroupBy { // Scan applies the selector query and scans the result into the given value. func (agb *AlertGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, agb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, agb.build.ctx, ent.OpQueryGroupBy) if err := agb.build.prepareQuery(ctx); err != nil { return err } @@ -807,7 +808,7 @@ func (as *AlertSelect) Aggregate(fns ...AggregateFunc) *AlertSelect { // Scan applies the selector query and scans the result into the given value. 
func (as *AlertSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, as.ctx, "Select") + ctx = setContextOp(ctx, as.ctx, ent.OpQuerySelect) if err := as.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/bouncer_query.go b/pkg/database/ent/bouncer_query.go index ea2b7495733..4277dbc0fc1 100644 --- a/pkg/database/ent/bouncer_query.go +++ b/pkg/database/ent/bouncer_query.go @@ -7,6 +7,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -60,7 +61,7 @@ func (bq *BouncerQuery) Order(o ...bouncer.OrderOption) *BouncerQuery { // First returns the first Bouncer entity from the query. // Returns a *NotFoundError when no Bouncer was found. func (bq *BouncerQuery) First(ctx context.Context) (*Bouncer, error) { - nodes, err := bq.Limit(1).All(setContextOp(ctx, bq.ctx, "First")) + nodes, err := bq.Limit(1).All(setContextOp(ctx, bq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -83,7 +84,7 @@ func (bq *BouncerQuery) FirstX(ctx context.Context) *Bouncer { // Returns a *NotFoundError when no Bouncer ID was found. func (bq *BouncerQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = bq.Limit(1).IDs(setContextOp(ctx, bq.ctx, "FirstID")); err != nil { + if ids, err = bq.Limit(1).IDs(setContextOp(ctx, bq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -106,7 +107,7 @@ func (bq *BouncerQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Bouncer entity is found. // Returns a *NotFoundError when no Bouncer entities are found. 
func (bq *BouncerQuery) Only(ctx context.Context) (*Bouncer, error) { - nodes, err := bq.Limit(2).All(setContextOp(ctx, bq.ctx, "Only")) + nodes, err := bq.Limit(2).All(setContextOp(ctx, bq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -134,7 +135,7 @@ func (bq *BouncerQuery) OnlyX(ctx context.Context) *Bouncer { // Returns a *NotFoundError when no entities are found. func (bq *BouncerQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = bq.Limit(2).IDs(setContextOp(ctx, bq.ctx, "OnlyID")); err != nil { + if ids, err = bq.Limit(2).IDs(setContextOp(ctx, bq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -159,7 +160,7 @@ func (bq *BouncerQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Bouncers. func (bq *BouncerQuery) All(ctx context.Context) ([]*Bouncer, error) { - ctx = setContextOp(ctx, bq.ctx, "All") + ctx = setContextOp(ctx, bq.ctx, ent.OpQueryAll) if err := bq.prepareQuery(ctx); err != nil { return nil, err } @@ -181,7 +182,7 @@ func (bq *BouncerQuery) IDs(ctx context.Context) (ids []int, err error) { if bq.ctx.Unique == nil && bq.path != nil { bq.Unique(true) } - ctx = setContextOp(ctx, bq.ctx, "IDs") + ctx = setContextOp(ctx, bq.ctx, ent.OpQueryIDs) if err = bq.Select(bouncer.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -199,7 +200,7 @@ func (bq *BouncerQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (bq *BouncerQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, bq.ctx, "Count") + ctx = setContextOp(ctx, bq.ctx, ent.OpQueryCount) if err := bq.prepareQuery(ctx); err != nil { return 0, err } @@ -217,7 +218,7 @@ func (bq *BouncerQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (bq *BouncerQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, bq.ctx, "Exist") + ctx = setContextOp(ctx, bq.ctx, ent.OpQueryExist) switch _, err := bq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -449,7 +450,7 @@ func (bgb *BouncerGroupBy) Aggregate(fns ...AggregateFunc) *BouncerGroupBy { // Scan applies the selector query and scans the result into the given value. func (bgb *BouncerGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, bgb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, bgb.build.ctx, ent.OpQueryGroupBy) if err := bgb.build.prepareQuery(ctx); err != nil { return err } @@ -497,7 +498,7 @@ func (bs *BouncerSelect) Aggregate(fns ...AggregateFunc) *BouncerSelect { // Scan applies the selector query and scans the result into the given value. func (bs *BouncerSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, bs.ctx, "Select") + ctx = setContextOp(ctx, bs.ctx, ent.OpQuerySelect) if err := bs.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/configitem_query.go b/pkg/database/ent/configitem_query.go index f68b8953ddb..b1d6403de1f 100644 --- a/pkg/database/ent/configitem_query.go +++ b/pkg/database/ent/configitem_query.go @@ -7,6 +7,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -60,7 +61,7 @@ func (ciq *ConfigItemQuery) Order(o ...configitem.OrderOption) *ConfigItemQuery // First returns the first ConfigItem entity from the query. // Returns a *NotFoundError when no ConfigItem was found. 
func (ciq *ConfigItemQuery) First(ctx context.Context) (*ConfigItem, error) { - nodes, err := ciq.Limit(1).All(setContextOp(ctx, ciq.ctx, "First")) + nodes, err := ciq.Limit(1).All(setContextOp(ctx, ciq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -83,7 +84,7 @@ func (ciq *ConfigItemQuery) FirstX(ctx context.Context) *ConfigItem { // Returns a *NotFoundError when no ConfigItem ID was found. func (ciq *ConfigItemQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = ciq.Limit(1).IDs(setContextOp(ctx, ciq.ctx, "FirstID")); err != nil { + if ids, err = ciq.Limit(1).IDs(setContextOp(ctx, ciq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -106,7 +107,7 @@ func (ciq *ConfigItemQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one ConfigItem entity is found. // Returns a *NotFoundError when no ConfigItem entities are found. func (ciq *ConfigItemQuery) Only(ctx context.Context) (*ConfigItem, error) { - nodes, err := ciq.Limit(2).All(setContextOp(ctx, ciq.ctx, "Only")) + nodes, err := ciq.Limit(2).All(setContextOp(ctx, ciq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -134,7 +135,7 @@ func (ciq *ConfigItemQuery) OnlyX(ctx context.Context) *ConfigItem { // Returns a *NotFoundError when no entities are found. func (ciq *ConfigItemQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = ciq.Limit(2).IDs(setContextOp(ctx, ciq.ctx, "OnlyID")); err != nil { + if ids, err = ciq.Limit(2).IDs(setContextOp(ctx, ciq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -159,7 +160,7 @@ func (ciq *ConfigItemQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of ConfigItems. 
func (ciq *ConfigItemQuery) All(ctx context.Context) ([]*ConfigItem, error) { - ctx = setContextOp(ctx, ciq.ctx, "All") + ctx = setContextOp(ctx, ciq.ctx, ent.OpQueryAll) if err := ciq.prepareQuery(ctx); err != nil { return nil, err } @@ -181,7 +182,7 @@ func (ciq *ConfigItemQuery) IDs(ctx context.Context) (ids []int, err error) { if ciq.ctx.Unique == nil && ciq.path != nil { ciq.Unique(true) } - ctx = setContextOp(ctx, ciq.ctx, "IDs") + ctx = setContextOp(ctx, ciq.ctx, ent.OpQueryIDs) if err = ciq.Select(configitem.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -199,7 +200,7 @@ func (ciq *ConfigItemQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (ciq *ConfigItemQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, ciq.ctx, "Count") + ctx = setContextOp(ctx, ciq.ctx, ent.OpQueryCount) if err := ciq.prepareQuery(ctx); err != nil { return 0, err } @@ -217,7 +218,7 @@ func (ciq *ConfigItemQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (ciq *ConfigItemQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, ciq.ctx, "Exist") + ctx = setContextOp(ctx, ciq.ctx, ent.OpQueryExist) switch _, err := ciq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -449,7 +450,7 @@ func (cigb *ConfigItemGroupBy) Aggregate(fns ...AggregateFunc) *ConfigItemGroupB // Scan applies the selector query and scans the result into the given value. func (cigb *ConfigItemGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, cigb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, cigb.build.ctx, ent.OpQueryGroupBy) if err := cigb.build.prepareQuery(ctx); err != nil { return err } @@ -497,7 +498,7 @@ func (cis *ConfigItemSelect) Aggregate(fns ...AggregateFunc) *ConfigItemSelect { // Scan applies the selector query and scans the result into the given value. 
func (cis *ConfigItemSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, cis.ctx, "Select") + ctx = setContextOp(ctx, cis.ctx, ent.OpQuerySelect) if err := cis.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/decision_query.go b/pkg/database/ent/decision_query.go index b050a4d9649..e5f0b16e8ad 100644 --- a/pkg/database/ent/decision_query.go +++ b/pkg/database/ent/decision_query.go @@ -7,6 +7,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -84,7 +85,7 @@ func (dq *DecisionQuery) QueryOwner() *AlertQuery { // First returns the first Decision entity from the query. // Returns a *NotFoundError when no Decision was found. func (dq *DecisionQuery) First(ctx context.Context) (*Decision, error) { - nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First")) + nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -107,7 +108,7 @@ func (dq *DecisionQuery) FirstX(ctx context.Context) *Decision { // Returns a *NotFoundError when no Decision ID was found. func (dq *DecisionQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil { + if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -130,7 +131,7 @@ func (dq *DecisionQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Decision entity is found. // Returns a *NotFoundError when no Decision entities are found. 
func (dq *DecisionQuery) Only(ctx context.Context) (*Decision, error) { - nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only")) + nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -158,7 +159,7 @@ func (dq *DecisionQuery) OnlyX(ctx context.Context) *Decision { // Returns a *NotFoundError when no entities are found. func (dq *DecisionQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil { + if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -183,7 +184,7 @@ func (dq *DecisionQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Decisions. func (dq *DecisionQuery) All(ctx context.Context) ([]*Decision, error) { - ctx = setContextOp(ctx, dq.ctx, "All") + ctx = setContextOp(ctx, dq.ctx, ent.OpQueryAll) if err := dq.prepareQuery(ctx); err != nil { return nil, err } @@ -205,7 +206,7 @@ func (dq *DecisionQuery) IDs(ctx context.Context) (ids []int, err error) { if dq.ctx.Unique == nil && dq.path != nil { dq.Unique(true) } - ctx = setContextOp(ctx, dq.ctx, "IDs") + ctx = setContextOp(ctx, dq.ctx, ent.OpQueryIDs) if err = dq.Select(decision.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -223,7 +224,7 @@ func (dq *DecisionQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (dq *DecisionQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, dq.ctx, "Count") + ctx = setContextOp(ctx, dq.ctx, ent.OpQueryCount) if err := dq.prepareQuery(ctx); err != nil { return 0, err } @@ -241,7 +242,7 @@ func (dq *DecisionQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (dq *DecisionQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, dq.ctx, "Exist") + ctx = setContextOp(ctx, dq.ctx, ent.OpQueryExist) switch _, err := dq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -528,7 +529,7 @@ func (dgb *DecisionGroupBy) Aggregate(fns ...AggregateFunc) *DecisionGroupBy { // Scan applies the selector query and scans the result into the given value. func (dgb *DecisionGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, dgb.build.ctx, ent.OpQueryGroupBy) if err := dgb.build.prepareQuery(ctx); err != nil { return err } @@ -576,7 +577,7 @@ func (ds *DecisionSelect) Aggregate(fns ...AggregateFunc) *DecisionSelect { // Scan applies the selector query and scans the result into the given value. func (ds *DecisionSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ds.ctx, "Select") + ctx = setContextOp(ctx, ds.ctx, ent.OpQuerySelect) if err := ds.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index 2a5ad188197..612b231518d 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -77,7 +77,7 @@ var ( columnCheck sql.ColumnCheck ) -// columnChecker checks if the column exists in the given table. +// checkColumn checks if the column exists in the given table. func checkColumn(table, column string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ diff --git a/pkg/database/ent/event_query.go b/pkg/database/ent/event_query.go index 1493d7bd32c..2a8a8d64119 100644 --- a/pkg/database/ent/event_query.go +++ b/pkg/database/ent/event_query.go @@ -7,6 +7,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -84,7 +85,7 @@ func (eq *EventQuery) QueryOwner() *AlertQuery { // First returns the first Event entity from the query. 
// Returns a *NotFoundError when no Event was found. func (eq *EventQuery) First(ctx context.Context) (*Event, error) { - nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, "First")) + nodes, err := eq.Limit(1).All(setContextOp(ctx, eq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -107,7 +108,7 @@ func (eq *EventQuery) FirstX(ctx context.Context) *Event { // Returns a *NotFoundError when no Event ID was found. func (eq *EventQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, "FirstID")); err != nil { + if ids, err = eq.Limit(1).IDs(setContextOp(ctx, eq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -130,7 +131,7 @@ func (eq *EventQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Event entity is found. // Returns a *NotFoundError when no Event entities are found. func (eq *EventQuery) Only(ctx context.Context) (*Event, error) { - nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, "Only")) + nodes, err := eq.Limit(2).All(setContextOp(ctx, eq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -158,7 +159,7 @@ func (eq *EventQuery) OnlyX(ctx context.Context) *Event { // Returns a *NotFoundError when no entities are found. func (eq *EventQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, "OnlyID")); err != nil { + if ids, err = eq.Limit(2).IDs(setContextOp(ctx, eq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -183,7 +184,7 @@ func (eq *EventQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Events. 
func (eq *EventQuery) All(ctx context.Context) ([]*Event, error) { - ctx = setContextOp(ctx, eq.ctx, "All") + ctx = setContextOp(ctx, eq.ctx, ent.OpQueryAll) if err := eq.prepareQuery(ctx); err != nil { return nil, err } @@ -205,7 +206,7 @@ func (eq *EventQuery) IDs(ctx context.Context) (ids []int, err error) { if eq.ctx.Unique == nil && eq.path != nil { eq.Unique(true) } - ctx = setContextOp(ctx, eq.ctx, "IDs") + ctx = setContextOp(ctx, eq.ctx, ent.OpQueryIDs) if err = eq.Select(event.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -223,7 +224,7 @@ func (eq *EventQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (eq *EventQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, eq.ctx, "Count") + ctx = setContextOp(ctx, eq.ctx, ent.OpQueryCount) if err := eq.prepareQuery(ctx); err != nil { return 0, err } @@ -241,7 +242,7 @@ func (eq *EventQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (eq *EventQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, eq.ctx, "Exist") + ctx = setContextOp(ctx, eq.ctx, ent.OpQueryExist) switch _, err := eq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -528,7 +529,7 @@ func (egb *EventGroupBy) Aggregate(fns ...AggregateFunc) *EventGroupBy { // Scan applies the selector query and scans the result into the given value. func (egb *EventGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, egb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, egb.build.ctx, ent.OpQueryGroupBy) if err := egb.build.prepareQuery(ctx); err != nil { return err } @@ -576,7 +577,7 @@ func (es *EventSelect) Aggregate(fns ...AggregateFunc) *EventSelect { // Scan applies the selector query and scans the result into the given value. 
func (es *EventSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, es.ctx, "Select") + ctx = setContextOp(ctx, es.ctx, ent.OpQuerySelect) if err := es.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/generate.go b/pkg/database/ent/generate.go index 8ada999d7ab..58533df47ab 100644 --- a/pkg/database/ent/generate.go +++ b/pkg/database/ent/generate.go @@ -1,4 +1,4 @@ package ent -//go:generate go run -mod=mod entgo.io/ent/cmd/ent@v0.13.1 generate ./schema +//go:generate go run -mod=mod entgo.io/ent/cmd/ent@v0.14.2 generate ./schema diff --git a/pkg/database/ent/lock_query.go b/pkg/database/ent/lock_query.go index 75e5da48a94..5bc2c749a7f 100644 --- a/pkg/database/ent/lock_query.go +++ b/pkg/database/ent/lock_query.go @@ -7,6 +7,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -60,7 +61,7 @@ func (lq *LockQuery) Order(o ...lock.OrderOption) *LockQuery { // First returns the first Lock entity from the query. // Returns a *NotFoundError when no Lock was found. func (lq *LockQuery) First(ctx context.Context) (*Lock, error) { - nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, "First")) + nodes, err := lq.Limit(1).All(setContextOp(ctx, lq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -83,7 +84,7 @@ func (lq *LockQuery) FirstX(ctx context.Context) *Lock { // Returns a *NotFoundError when no Lock ID was found. func (lq *LockQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, "FirstID")); err != nil { + if ids, err = lq.Limit(1).IDs(setContextOp(ctx, lq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -106,7 +107,7 @@ func (lq *LockQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Lock entity is found. // Returns a *NotFoundError when no Lock entities are found. 
func (lq *LockQuery) Only(ctx context.Context) (*Lock, error) { - nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, "Only")) + nodes, err := lq.Limit(2).All(setContextOp(ctx, lq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -134,7 +135,7 @@ func (lq *LockQuery) OnlyX(ctx context.Context) *Lock { // Returns a *NotFoundError when no entities are found. func (lq *LockQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, "OnlyID")); err != nil { + if ids, err = lq.Limit(2).IDs(setContextOp(ctx, lq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -159,7 +160,7 @@ func (lq *LockQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Locks. func (lq *LockQuery) All(ctx context.Context) ([]*Lock, error) { - ctx = setContextOp(ctx, lq.ctx, "All") + ctx = setContextOp(ctx, lq.ctx, ent.OpQueryAll) if err := lq.prepareQuery(ctx); err != nil { return nil, err } @@ -181,7 +182,7 @@ func (lq *LockQuery) IDs(ctx context.Context) (ids []int, err error) { if lq.ctx.Unique == nil && lq.path != nil { lq.Unique(true) } - ctx = setContextOp(ctx, lq.ctx, "IDs") + ctx = setContextOp(ctx, lq.ctx, ent.OpQueryIDs) if err = lq.Select(lock.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -199,7 +200,7 @@ func (lq *LockQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (lq *LockQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, lq.ctx, "Count") + ctx = setContextOp(ctx, lq.ctx, ent.OpQueryCount) if err := lq.prepareQuery(ctx); err != nil { return 0, err } @@ -217,7 +218,7 @@ func (lq *LockQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (lq *LockQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, lq.ctx, "Exist") + ctx = setContextOp(ctx, lq.ctx, ent.OpQueryExist) switch _, err := lq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -449,7 +450,7 @@ func (lgb *LockGroupBy) Aggregate(fns ...AggregateFunc) *LockGroupBy { // Scan applies the selector query and scans the result into the given value. func (lgb *LockGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, lgb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, lgb.build.ctx, ent.OpQueryGroupBy) if err := lgb.build.prepareQuery(ctx); err != nil { return err } @@ -497,7 +498,7 @@ func (ls *LockSelect) Aggregate(fns ...AggregateFunc) *LockSelect { // Scan applies the selector query and scans the result into the given value. func (ls *LockSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ls.ctx, "Select") + ctx = setContextOp(ctx, ls.ctx, ent.OpQuerySelect) if err := ls.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/machine_query.go b/pkg/database/ent/machine_query.go index 462c2cf35b1..4367dc39670 100644 --- a/pkg/database/ent/machine_query.go +++ b/pkg/database/ent/machine_query.go @@ -8,6 +8,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -85,7 +86,7 @@ func (mq *MachineQuery) QueryAlerts() *AlertQuery { // First returns the first Machine entity from the query. // Returns a *NotFoundError when no Machine was found. func (mq *MachineQuery) First(ctx context.Context) (*Machine, error) { - nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -108,7 +109,7 @@ func (mq *MachineQuery) FirstX(ctx context.Context) *Machine { // Returns a *NotFoundError when no Machine ID was found. 
func (mq *MachineQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -131,7 +132,7 @@ func (mq *MachineQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Machine entity is found. // Returns a *NotFoundError when no Machine entities are found. func (mq *MachineQuery) Only(ctx context.Context) (*Machine, error) { - nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -159,7 +160,7 @@ func (mq *MachineQuery) OnlyX(ctx context.Context) *Machine { // Returns a *NotFoundError when no entities are found. func (mq *MachineQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -184,7 +185,7 @@ func (mq *MachineQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Machines. 
func (mq *MachineQuery) All(ctx context.Context) ([]*Machine, error) { - ctx = setContextOp(ctx, mq.ctx, "All") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryAll) if err := mq.prepareQuery(ctx); err != nil { return nil, err } @@ -206,7 +207,7 @@ func (mq *MachineQuery) IDs(ctx context.Context) (ids []int, err error) { if mq.ctx.Unique == nil && mq.path != nil { mq.Unique(true) } - ctx = setContextOp(ctx, mq.ctx, "IDs") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryIDs) if err = mq.Select(machine.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -224,7 +225,7 @@ func (mq *MachineQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (mq *MachineQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, mq.ctx, "Count") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryCount) if err := mq.prepareQuery(ctx); err != nil { return 0, err } @@ -242,7 +243,7 @@ func (mq *MachineQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (mq *MachineQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, mq.ctx, "Exist") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryExist) switch _, err := mq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -529,7 +530,7 @@ func (mgb *MachineGroupBy) Aggregate(fns ...AggregateFunc) *MachineGroupBy { // Scan applies the selector query and scans the result into the given value. func (mgb *MachineGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, mgb.build.ctx, ent.OpQueryGroupBy) if err := mgb.build.prepareQuery(ctx); err != nil { return err } @@ -577,7 +578,7 @@ func (ms *MachineSelect) Aggregate(fns ...AggregateFunc) *MachineSelect { // Scan applies the selector query and scans the result into the given value. 
func (ms *MachineSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ms.ctx, "Select") + ctx = setContextOp(ctx, ms.ctx, ent.OpQuerySelect) if err := ms.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/meta_query.go b/pkg/database/ent/meta_query.go index 87d91d09e0e..9b8acefaaea 100644 --- a/pkg/database/ent/meta_query.go +++ b/pkg/database/ent/meta_query.go @@ -7,6 +7,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -84,7 +85,7 @@ func (mq *MetaQuery) QueryOwner() *AlertQuery { // First returns the first Meta entity from the query. // Returns a *NotFoundError when no Meta was found. func (mq *MetaQuery) First(ctx context.Context) (*Meta, error) { - nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -107,7 +108,7 @@ func (mq *MetaQuery) FirstX(ctx context.Context) *Meta { // Returns a *NotFoundError when no Meta ID was found. func (mq *MetaQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -130,7 +131,7 @@ func (mq *MetaQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Meta entity is found. // Returns a *NotFoundError when no Meta entities are found. func (mq *MetaQuery) Only(ctx context.Context) (*Meta, error) { - nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -158,7 +159,7 @@ func (mq *MetaQuery) OnlyX(ctx context.Context) *Meta { // Returns a *NotFoundError when no entities are found. 
func (mq *MetaQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -183,7 +184,7 @@ func (mq *MetaQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of MetaSlice. func (mq *MetaQuery) All(ctx context.Context) ([]*Meta, error) { - ctx = setContextOp(ctx, mq.ctx, "All") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryAll) if err := mq.prepareQuery(ctx); err != nil { return nil, err } @@ -205,7 +206,7 @@ func (mq *MetaQuery) IDs(ctx context.Context) (ids []int, err error) { if mq.ctx.Unique == nil && mq.path != nil { mq.Unique(true) } - ctx = setContextOp(ctx, mq.ctx, "IDs") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryIDs) if err = mq.Select(meta.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -223,7 +224,7 @@ func (mq *MetaQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (mq *MetaQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, mq.ctx, "Count") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryCount) if err := mq.prepareQuery(ctx); err != nil { return 0, err } @@ -241,7 +242,7 @@ func (mq *MetaQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. func (mq *MetaQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, mq.ctx, "Exist") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryExist) switch _, err := mq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -528,7 +529,7 @@ func (mgb *MetaGroupBy) Aggregate(fns ...AggregateFunc) *MetaGroupBy { // Scan applies the selector query and scans the result into the given value. 
func (mgb *MetaGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, mgb.build.ctx, ent.OpQueryGroupBy) if err := mgb.build.prepareQuery(ctx); err != nil { return err } @@ -576,7 +577,7 @@ func (ms *MetaSelect) Aggregate(fns ...AggregateFunc) *MetaSelect { // Scan applies the selector query and scans the result into the given value. func (ms *MetaSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ms.ctx, "Select") + ctx = setContextOp(ctx, ms.ctx, ent.OpQuerySelect) if err := ms.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/metric_query.go b/pkg/database/ent/metric_query.go index 6e1c6f08b4a..d8007eea31c 100644 --- a/pkg/database/ent/metric_query.go +++ b/pkg/database/ent/metric_query.go @@ -7,6 +7,7 @@ import ( "fmt" "math" + "entgo.io/ent" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -60,7 +61,7 @@ func (mq *MetricQuery) Order(o ...metric.OrderOption) *MetricQuery { // First returns the first Metric entity from the query. // Returns a *NotFoundError when no Metric was found. func (mq *MetricQuery) First(ctx context.Context) (*Metric, error) { - nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, "First")) + nodes, err := mq.Limit(1).All(setContextOp(ctx, mq.ctx, ent.OpQueryFirst)) if err != nil { return nil, err } @@ -83,7 +84,7 @@ func (mq *MetricQuery) FirstX(ctx context.Context) *Metric { // Returns a *NotFoundError when no Metric ID was found. func (mq *MetricQuery) FirstID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, "FirstID")); err != nil { + if ids, err = mq.Limit(1).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryFirstID)); err != nil { return } if len(ids) == 0 { @@ -106,7 +107,7 @@ func (mq *MetricQuery) FirstIDX(ctx context.Context) int { // Returns a *NotSingularError when more than one Metric entity is found. 
// Returns a *NotFoundError when no Metric entities are found. func (mq *MetricQuery) Only(ctx context.Context) (*Metric, error) { - nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, "Only")) + nodes, err := mq.Limit(2).All(setContextOp(ctx, mq.ctx, ent.OpQueryOnly)) if err != nil { return nil, err } @@ -134,7 +135,7 @@ func (mq *MetricQuery) OnlyX(ctx context.Context) *Metric { // Returns a *NotFoundError when no entities are found. func (mq *MetricQuery) OnlyID(ctx context.Context) (id int, err error) { var ids []int - if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, "OnlyID")); err != nil { + if ids, err = mq.Limit(2).IDs(setContextOp(ctx, mq.ctx, ent.OpQueryOnlyID)); err != nil { return } switch len(ids) { @@ -159,7 +160,7 @@ func (mq *MetricQuery) OnlyIDX(ctx context.Context) int { // All executes the query and returns a list of Metrics. func (mq *MetricQuery) All(ctx context.Context) ([]*Metric, error) { - ctx = setContextOp(ctx, mq.ctx, "All") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryAll) if err := mq.prepareQuery(ctx); err != nil { return nil, err } @@ -181,7 +182,7 @@ func (mq *MetricQuery) IDs(ctx context.Context) (ids []int, err error) { if mq.ctx.Unique == nil && mq.path != nil { mq.Unique(true) } - ctx = setContextOp(ctx, mq.ctx, "IDs") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryIDs) if err = mq.Select(metric.FieldID).Scan(ctx, &ids); err != nil { return nil, err } @@ -199,7 +200,7 @@ func (mq *MetricQuery) IDsX(ctx context.Context) []int { // Count returns the count of the given query. func (mq *MetricQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, mq.ctx, "Count") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryCount) if err := mq.prepareQuery(ctx); err != nil { return 0, err } @@ -217,7 +218,7 @@ func (mq *MetricQuery) CountX(ctx context.Context) int { // Exist returns true if the query has elements in the graph. 
func (mq *MetricQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, mq.ctx, "Exist") + ctx = setContextOp(ctx, mq.ctx, ent.OpQueryExist) switch _, err := mq.FirstID(ctx); { case IsNotFound(err): return false, nil @@ -449,7 +450,7 @@ func (mgb *MetricGroupBy) Aggregate(fns ...AggregateFunc) *MetricGroupBy { // Scan applies the selector query and scans the result into the given value. func (mgb *MetricGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, mgb.build.ctx, "GroupBy") + ctx = setContextOp(ctx, mgb.build.ctx, ent.OpQueryGroupBy) if err := mgb.build.prepareQuery(ctx); err != nil { return err } @@ -497,7 +498,7 @@ func (ms *MetricSelect) Aggregate(fns ...AggregateFunc) *MetricSelect { // Scan applies the selector query and scans the result into the given value. func (ms *MetricSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, ms.ctx, "Select") + ctx = setContextOp(ctx, ms.ctx, ent.OpQuerySelect) if err := ms.prepareQuery(ctx); err != nil { return err } diff --git a/pkg/database/ent/runtime/runtime.go b/pkg/database/ent/runtime/runtime.go index 9cb9d96258a..d96449c429e 100644 --- a/pkg/database/ent/runtime/runtime.go +++ b/pkg/database/ent/runtime/runtime.go @@ -5,6 +5,6 @@ package runtime // The schema-stitching logic is generated in github.com/crowdsecurity/crowdsec/pkg/database/ent/runtime.go const ( - Version = "v0.13.1" // Version of ent codegen. - Sum = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen. + Version = "v0.14.2" // Version of ent codegen. + Sum = "h1:ywld/j2Rx4EmnIKs8eZ29cbFA1zpB+DA9TLL5l3rlq0=" // Sum of ent codegen. 
) From c4ff4228befb022df10c16a22b7406f9e307c095 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 19 Feb 2025 14:05:17 +0100 Subject: [PATCH 435/581] use go 1.24, enable unencrypted http2 (#3470) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 7 ++++++- Dockerfile | 2 +- Dockerfile.debian | 2 +- go.mod | 6 +----- pkg/acquisition/modules/appsec/appsec.go | 9 +++++++-- pkg/acquisition/modules/http/http.go | 9 +++++++-- .../modules/kubernetesaudit/k8s_audit.go | 10 ++++++++-- pkg/apiserver/apiserver.go | 8 ++++++-- pkg/exprhelpers/debugger.go | 4 ++-- pkg/exprhelpers/helpers.go | 18 ++++++++++++++++-- test/bats/08_metrics_bouncer.bats | 2 +- 13 files changed, 58 insertions(+), 23 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 68cb9715b18..a99572fee88 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -61,6 +61,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.63 + version: v1.64 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 8629f58cf61..6a48a932b1c 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -198,6 +198,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.63 + version: v1.64 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.golangci.yml b/.golangci.yml index 3afa4571b10..ede7de421f3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -240,7 +240,7 @@ linters: # # DEPRECATED by golangi-lint # - - exportloopref + - tenv # # Redundant @@ -493,6 +493,11 @@ issues: text: "argument-limit: .*" # need some cleanup first: to create db in memory and share the client, not the config + - linters: + - usetesting + path: 
"(.+)_test.go" + text: "context.Background.*" + - linters: - usetesting path: "pkg/apiserver/(.+)_test.go" diff --git a/Dockerfile b/Dockerfile index 383578c48b4..45b9c2bedd1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM docker.io/golang:1.23-alpine3.21 AS build +FROM docker.io/golang:1.24-alpine3.21 AS build ARG BUILD_VERSION diff --git a/Dockerfile.debian b/Dockerfile.debian index a9b58c633ed..0e99ade7890 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,4 +1,4 @@ -FROM docker.io/golang:1.23-bookworm AS build +FROM docker.io/golang:1.24-bookworm AS build ARG BUILD_VERSION diff --git a/go.mod b/go.mod index 63b0a1893f0..8c12c029203 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.23.6 - -// Don't use the toolchain directive to avoid uncontrolled downloads during -// a build, especially in sandboxed environments (freebsd, gentoo...). -// toolchain go1.21.3 +go 1.24.0 require ( entgo.io/ent v0.14.2 diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 78225d5f8c3..3e2a4f5765a 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -173,10 +173,15 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLe w.mux = http.NewServeMux() w.server = &http.Server{ - Addr: w.config.ListenAddr, - Handler: w.mux, + Addr: w.config.ListenAddr, + Handler: w.mux, + Protocols: &http.Protocols{}, } + w.server.Protocols.SetHTTP1(true) + w.server.Protocols.SetUnencryptedHTTP2(true) + w.server.Protocols.SetHTTP2(true) + w.InChan = make(chan appsec.ParsedRequest) appsecCfg := appsec.AppsecConfig{Logger: w.logger.WithField("component", "appsec_config")} diff --git a/pkg/acquisition/modules/http/http.go b/pkg/acquisition/modules/http/http.go index 97e220570ff..76d7d06d240 100644 --- a/pkg/acquisition/modules/http/http.go +++ b/pkg/acquisition/modules/http/http.go @@ -372,10 +372,15 @@ func (h 
*HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { }) h.Server = &http.Server{ - Addr: h.Config.ListenAddr, - Handler: mux, + Addr: h.Config.ListenAddr, + Handler: mux, + Protocols: &http.Protocols{}, } + h.Server.Protocols.SetHTTP1(true) + h.Server.Protocols.SetUnencryptedHTTP2(true) + h.Server.Protocols.SetHTTP2(true) + if h.Config.Timeout != nil { h.Server.ReadTimeout = *h.Config.Timeout } diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index b0650d3906e..5d1d04c95f5 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -113,10 +113,15 @@ func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, met ka.mux = http.NewServeMux() ka.server = &http.Server{ - Addr: ka.addr, - Handler: ka.mux, + Addr: ka.addr, + Handler: ka.mux, + Protocols: &http.Protocols{}, } + ka.server.Protocols.SetHTTP1(true) + ka.server.Protocols.SetUnencryptedHTTP2(true) + ka.server.Protocols.SetHTTP2(true) + ka.mux.HandleFunc(ka.config.WebhookPath, ka.webhookHandler) return nil @@ -154,6 +159,7 @@ func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out c }) <-t.Dying() ka.logger.Infof("Stopping k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) + if err := ka.server.Shutdown(ctx); err != nil { ka.logger.Errorf("Error shutting down k8s-audit server: %s", err.Error()) } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 88f1bd21dc4..a9ab45cebde 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -56,8 +56,7 @@ func isBrokenConnection(maybeError any) bool { if errors.As(err, &netOpError) { var syscallError *os.SyscallError if errors.As(netOpError.Err, &syscallError) { - if strings.Contains(strings.ToLower(syscallError.Error()), "broken pipe") || - strings.Contains(strings.ToLower(syscallError.Error()), 
"connection reset by peer") { + if strings.Contains(strings.ToLower(syscallError.Error()), "broken pipe") || strings.Contains(strings.ToLower(syscallError.Error()), "connection reset by peer") { return true } } @@ -384,8 +383,13 @@ func (s *APIServer) Run(apiReady chan bool) error { Addr: s.URL, Handler: s.router, TLSConfig: tlsCfg, + Protocols: &http.Protocols{}, } + s.httpServer.Protocols.SetHTTP1(true) + s.httpServer.Protocols.SetUnencryptedHTTP2(true) + s.httpServer.Protocols.SetHTTP2(true) + ctx := context.TODO() if s.apic != nil { diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index d2c76e05a22..65aa29b6a1d 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -149,9 +149,9 @@ func autoQuote(v any) string { // let's avoid printing long strings. it can happen ie. when we are debugging expr with `File()` or similar helpers if len(x) > 40 { return fmt.Sprintf("%q", x[:40]+"...") - } else { - return fmt.Sprintf("%q", x) } + + return fmt.Sprintf("%q", x) default: return fmt.Sprintf("%v", x) } diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index d0f6f2cfe22..3525bb6c762 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -216,7 +216,8 @@ func FileInit(fileFolder string, filename string, fileType string) error { if strings.HasPrefix(scanner.Text(), "#") { // allow comments continue } - if scanner.Text() == "" { //skip empty lines + + if scanner.Text() == "" { // skip empty lines continue } @@ -262,7 +263,7 @@ func Distinct(params ...any) (any, error) { } func FlattenDistinct(params ...any) (any, error) { - return Distinct(flatten(nil, reflect.ValueOf(params))) //nolint:asasalint + return Distinct(flatten(nil, reflect.ValueOf(params))) } func Flatten(params ...any) (any, error) { @@ -312,9 +313,11 @@ func existsInFileMaps(filename string, ftype string) (bool, error) { func Get(params ...any) (any, error) { arr := params[0].([]string) index := params[1].(int) + if index >= 
len(arr) { return "", nil } + return arr[index], nil } @@ -407,22 +410,26 @@ func PathEscape(params ...any) (any, error) { // func PathUnescape(s string) string { func PathUnescape(params ...any) (any, error) { s := params[0].(string) + ret, err := url.PathUnescape(s) if err != nil { log.Debugf("unable to PathUnescape '%s': %+v", s, err) return s, nil } + return ret, nil } // func QueryUnescape(s string) string { func QueryUnescape(params ...any) (any, error) { s := params[0].(string) + ret, err := url.QueryUnescape(s) if err != nil { log.Debugf("unable to QueryUnescape '%s': %+v", s, err) return s, nil } + return ret, nil } @@ -432,8 +439,10 @@ func File(params ...any) (any, error) { if _, ok := dataFile[filename]; ok { return dataFile[filename], nil } + log.Errorf("file '%s' (type:string) not found in expr library", filename) log.Errorf("expr library : %s", spew.Sdump(dataFile)) + return []string{}, nil } @@ -441,13 +450,16 @@ func File(params ...any) (any, error) { func RegexpInFile(params ...any) (any, error) { data := params[0].(string) filename := params[1].(string) + var hash uint64 + hasCache := false matched := false if _, ok := dataFileRegexCache[filename]; ok { hasCache = true hash = xxhash.Sum64String(data) + if val, err := dataFileRegexCache[filename].Get(hash); err == nil { return val.(bool), nil } @@ -479,9 +491,11 @@ func RegexpInFile(params ...any) (any, error) { log.Errorf("expr library : %s", spew.Sdump(dataFileRegex)) } } + if hasCache { dataFileRegexCache[filename].Set(hash, matched) } + return matched, nil } diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 5fb2c543bda..1c9e9395782 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -74,7 +74,7 @@ teardown() { payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = "2021-09-01T00:00:00Z"' <<<"$payload") rune -22 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" assert_stderr --partial "error: 
400" - assert_json '{message: "json: cannot unmarshal string into Go struct field AllMetrics.remediation_components of type int64"}' + assert_json '{message: "json: cannot unmarshal string into Go struct field AllMetrics.remediation_components.utc_startup_timestamp of type int64"}' payload=$(yq -o j '.remediation_components[0].utc_startup_timestamp = 1707399316' <<<"$payload") rune -0 curl-with-key '/v1/usage-metrics' -X POST --data "$payload" From 8a10e2c61dfc9c3e7b2f8d9f6dad378160aef163 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 19 Feb 2025 14:50:38 +0100 Subject: [PATCH 436/581] refact: avoid use of defer calls in loops (#3466) * refact apic.Send() * refact Papi.SendDeletedDecisions() * refact MetricsProvider.Run() * refact PluginBroker.pushNotificationsToPlugin() * refact leakybucket.LoadBuckets() --- cmd/crowdsec-cli/clipapi/papi.go | 2 +- cmd/crowdsec/lpmetrics.go | 49 +++++------ pkg/apiserver/apic.go | 50 ++++------- pkg/apiserver/apiserver.go | 6 +- pkg/apiserver/papi.go | 55 +++++------- pkg/csplugin/broker.go | 102 +++++++++++++++++----- pkg/leakybucket/manager_load.go | 141 ++++++++++++++++--------------- 7 files changed, 223 insertions(+), 182 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index 7ac2455d28f..c954e3ab996 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -134,7 +134,7 @@ func (cli *cliPapi) sync(ctx context.Context, out io.Writer, db *database.Client return fmt.Errorf("unable to initialize PAPI client: %w", err) } - t.Go(papi.SyncDecisions) + t.Go(func() error { return papi.SyncDecisions(ctx) }) err = papi.PullOnce(ctx, time.Time{}, true) if err != nil { diff --git a/cmd/crowdsec/lpmetrics.go b/cmd/crowdsec/lpmetrics.go index 24842851294..48eeaae91b9 100644 --- a/cmd/crowdsec/lpmetrics.go +++ b/cmd/crowdsec/lpmetrics.go @@ -130,6 +130,27 @@ func (m *MetricsProvider) metricsPayload() *models.AllMetrics { } } 
+func (m *MetricsProvider) sendMetrics(ctx context.Context, met *models.AllMetrics) { + defer trace.CatchPanic("crowdsec/MetricsProvider.sendMetrics") + + ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + _, resp, err := m.apic.UsageMetrics.Add(ctxTime, met) + switch { + case errors.Is(err, context.DeadlineExceeded): + m.logger.Warnf("timeout sending lp metrics") + case err != nil && resp != nil && resp.Response.StatusCode == http.StatusNotFound: + m.logger.Warnf("metrics endpoint not found, older LAPI?") + case err != nil: + m.logger.Warnf("failed to send lp metrics: %s", err) + case resp.Response.StatusCode != http.StatusCreated: + m.logger.Warnf("failed to send lp metrics: %s", resp.Response.Status) + default: + m.logger.Tracef("lp usage metrics sent") + } +} + func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { defer trace.CatchPanic("crowdsec/MetricsProvider.Run") @@ -144,34 +165,8 @@ func (m *MetricsProvider) Run(ctx context.Context, myTomb *tomb.Tomb) error { for { select { case <-ticker.C: - ctxTime, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - _, resp, err := m.apic.UsageMetrics.Add(ctxTime, met) - switch { - case errors.Is(err, context.DeadlineExceeded): - m.logger.Warnf("timeout sending lp metrics") - ticker.Reset(m.interval) - continue - case err != nil && resp != nil && resp.Response.StatusCode == http.StatusNotFound: - m.logger.Warnf("metrics endpoint not found, older LAPI?") - ticker.Reset(m.interval) - continue - case err != nil: - m.logger.Warnf("failed to send lp metrics: %s", err) - ticker.Reset(m.interval) - continue - } - - if resp.Response.StatusCode != http.StatusCreated { - m.logger.Warnf("failed to send lp metrics: %s", resp.Response.Status) - ticker.Reset(m.interval) - continue - } - + m.sendMetrics(ctx, met) ticker.Reset(m.interval) - - m.logger.Tracef("lp usage metrics sent") case <-myTomb.Dying(): ticker.Stop() return nil diff --git 
a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 32847f7489a..2c606dcbaee 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -35,7 +35,7 @@ import ( const ( // delta values must be smaller than the interval pullIntervalDefault = time.Hour * 2 - pullIntervalDelta = 5 * time.Minute + pullIntervalDelta = time.Minute * 5 pushIntervalDefault = time.Second * 10 pushIntervalDelta = time.Second * 7 metricsIntervalDefault = time.Minute * 30 @@ -363,6 +363,15 @@ func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig return true } +func (a *apic) sendBatch(ctx context.Context, signals []*models.AddSignalsRequestItem) error { + ctxBatch, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + _, _, err := a.apiClient.Signal.Add(ctxBatch, (*models.AddSignalsRequest)(&signals)) + + return err +} + func (a *apic) Send(ctx context.Context, cacheOrig *models.AddSignalsRequest) { /*we do have a problem with this : The apic.Push background routine reads from alertToPush chan. @@ -375,44 +384,21 @@ func (a *apic) Send(ctx context.Context, cacheOrig *models.AddSignalsRequest) { I don't know enough about gin to tell how much of an issue it can be. 
*/ - var ( - cache []*models.AddSignalsRequestItem = *cacheOrig - send models.AddSignalsRequest - ) - - bulkSize := 50 - pageStart := 0 - pageEnd := bulkSize - - for { - if pageEnd >= len(cache) { - send = cache[pageStart:] - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + var cache []*models.AddSignalsRequestItem = *cacheOrig - defer cancel() + batchSize := 50 - _, _, err := a.apiClient.Signal.Add(ctx, &send) - if err != nil { - log.Errorf("sending signal to central API: %s", err) - return - } + for start := 0; start < len(cache); start += batchSize { + end := start + batchSize - break + if end > len(cache) { + end = len(cache) } - send = cache[pageStart:pageEnd] - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) - - defer cancel() - - _, _, err := a.apiClient.Signal.Add(ctx, &send) - if err != nil { - // we log it here as well, because the return value of func might be discarded + if err := a.sendBatch(ctx, cache[start:end]); err != nil { log.Errorf("sending signal to central API: %s", err) + return } - - pageStart += bulkSize - pageEnd += bulkSize } } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index a9ab45cebde..a14e656fa19 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -332,8 +332,8 @@ func (s *APIServer) papiPull(ctx context.Context) error { return nil } -func (s *APIServer) papiSync() error { - if err := s.papi.SyncDecisions(); err != nil { +func (s *APIServer) papiSync(ctx context.Context) error { + if err := s.papi.SyncDecisions(ctx); err != nil { log.Errorf("capi decisions sync: %s", err) return err } @@ -351,7 +351,7 @@ func (s *APIServer) initAPIC(ctx context.Context) { if s.papi.URL != "" { log.Info("Starting PAPI decision receiver") s.papi.pullTomb.Go(func() error { return s.papiPull(ctx) }) - s.papi.syncTomb.Go(s.papiSync) + s.papi.syncTomb.Go(func() error { return s.papiSync(ctx) }) } else { log.Warnf("papi_url is not set in online_api_credentials.yaml, can't synchronize with the 
console. Run cscli console enable console_management to add it.") } diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 7f494c98bf4..cddaabb87cc 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -287,7 +287,7 @@ func (p *Papi) Pull(ctx context.Context) error { return nil } -func (p *Papi) SyncDecisions() error { +func (p *Papi) SyncDecisions(ctx context.Context) error { defer trace.CatchPanic("lapi/syncDecisionsToCAPI") var cache models.DecisionsDeleteRequest @@ -304,7 +304,7 @@ func (p *Papi) SyncDecisions() error { return nil } - go p.SendDeletedDecisions(&cache) + go p.SendDeletedDecisions(ctx, &cache) return nil case <-ticker.C: @@ -315,7 +315,7 @@ func (p *Papi) SyncDecisions() error { p.mu.Unlock() p.Logger.Infof("sync decisions: %d deleted decisions to push", len(cacheCopy)) - go p.SendDeletedDecisions(&cacheCopy) + go p.SendDeletedDecisions(ctx, &cacheCopy) } case deletedDecisions := <-p.Channels.DeleteDecisionChannel: if (p.consoleConfig.ShareManualDecisions != nil && *p.consoleConfig.ShareManualDecisions) || (p.consoleConfig.ConsoleManagement != nil && *p.consoleConfig.ConsoleManagement) { @@ -335,45 +335,34 @@ func (p *Papi) SyncDecisions() error { } } -func (p *Papi) SendDeletedDecisions(cacheOrig *models.DecisionsDeleteRequest) { - var ( - cache []models.DecisionsDeleteRequestItem = *cacheOrig - send models.DecisionsDeleteRequest - ) +func (p *Papi) sendDeletedDecisionsBatch(ctx context.Context, decisions []models.DecisionsDeleteRequestItem) error { + ctxBatch, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() - bulkSize := 50 - pageStart := 0 - pageEnd := bulkSize - - for { - if pageEnd >= len(cache) { - send = cache[pageStart:] - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + _, _, err := p.apiClient.DecisionDelete.Add(ctxBatch, (*models.DecisionsDeleteRequest)(&decisions)) + if err != nil { + return err + } - defer cancel() + return nil +} - _, _, err := 
p.apiClient.DecisionDelete.Add(ctx, &send) - if err != nil { - p.Logger.Errorf("sending deleted decisions to central API: %s", err) - return - } +func (p *Papi) SendDeletedDecisions(ctx context.Context, cacheOrig *models.DecisionsDeleteRequest) { + var cache []models.DecisionsDeleteRequestItem = *cacheOrig - break - } + batchSize := 50 - send = cache[pageStart:pageEnd] - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + for start := 0; start < len(cache); start += batchSize { + end := start + batchSize - defer cancel() + if end > len(cache) { + end = len(cache) + } - _, _, err := p.apiClient.DecisionDelete.Add(ctx, &send) - if err != nil { - // we log it here as well, because the return value of func might be discarded + if err := p.sendDeletedDecisionsBatch(ctx, cache[start:end]); err != nil { p.Logger.Errorf("sending deleted decisions to central API: %s", err) + return } - - pageStart += bulkSize - pageEnd += bulkSize } } diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index f53c831e186..3d040459638 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -63,8 +63,7 @@ type PluginConfig struct { Format string `yaml:"format,omitempty"` // specific to notification plugins - Config map[string]interface{} `yaml:",inline"` //to keep the plugin-specific config - + Config map[string]interface{} `yaml:",inline"` // to keep the plugin-specific config } type ProfileAlert struct { @@ -82,14 +81,18 @@ func (pb *PluginBroker) Init(ctx context.Context, pluginCfg *csconfig.PluginCfg, pb.profileConfigs = profileConfigs pb.pluginProcConfig = pluginCfg pb.pluginsTypesToDispatch = make(map[string]struct{}) + if err := pb.loadConfig(configPaths.NotificationDir); err != nil { return fmt.Errorf("while loading plugin config: %w", err) } + if err := pb.loadPlugins(ctx, configPaths.PluginDir); err != nil { return fmt.Errorf("while loading plugin: %w", err) } + pb.watcher = PluginWatcher{} pb.watcher.Init(pb.pluginConfigByName, 
pb.alertsByPluginName) + return nil } @@ -100,8 +103,11 @@ func (pb *PluginBroker) Kill() { } func (pb *PluginBroker) Run(pluginTomb *tomb.Tomb) { - //we get signaled via the channel when notifications need to be delivered to plugin (via the watcher) + // we get signaled via the channel when notifications need to be delivered to plugin (via the watcher) + ctx := context.TODO() + pb.watcher.Start(&tomb.Tomb{}) + for { select { case profileAlert := <-pb.PluginChannel: @@ -114,14 +120,16 @@ func (pb *PluginBroker) Run(pluginTomb *tomb.Tomb) { tmpAlerts := pb.alertsByPluginName[pluginName] pb.alertsByPluginName[pluginName] = make([]*models.Alert, 0) pluginMutex.Unlock() + go func() { - //Chunk alerts to respect group_threshold + // Chunk alerts to respect group_threshold threshold := pb.pluginConfigByName[pluginName].GroupThreshold if threshold == 0 { threshold = 1 } + for _, chunk := range slicetools.Chunks(tmpAlerts, threshold) { - if err := pb.pushNotificationsToPlugin(pluginName, chunk); err != nil { + if err := pb.pushNotificationsToPlugin(ctx, pluginName, chunk); err != nil { log.WithField("plugin:", pluginName).Error(err) } } @@ -130,11 +138,13 @@ func (pb *PluginBroker) Run(pluginTomb *tomb.Tomb) { case <-pluginTomb.Dying(): log.Infof("pluginTomb dying") pb.watcher.tomb.Kill(errors.New("Terminating")) + for { select { case <-pb.watcher.tomb.Dead(): log.Info("killing all plugins") pb.Kill() + return case pluginName := <-pb.watcher.PluginEvents: // this can be run in goroutine, but then locks will be needed @@ -144,7 +154,7 @@ func (pb *PluginBroker) Run(pluginTomb *tomb.Tomb) { pb.alertsByPluginName[pluginName] = make([]*models.Alert, 0) pluginMutex.Unlock() - if err := pb.pushNotificationsToPlugin(pluginName, tmpAlerts); err != nil { + if err := pb.pushNotificationsToPlugin(ctx, pluginName, tmpAlerts); err != nil { log.WithField("plugin:", pluginName).Error(err) } } @@ -159,6 +169,7 @@ func (pb *PluginBroker) addProfileAlert(profileAlert ProfileAlert) { 
log.Errorf("plugin %s is not configured properly.", pluginName) continue } + pluginMutex.Lock() pb.alertsByPluginName[pluginName] = append(pb.alertsByPluginName[pluginName], profileAlert.Alert) pluginMutex.Unlock() @@ -174,6 +185,7 @@ func (pb *PluginBroker) profilesContainPlugin(pluginName string) bool { } } } + return false } @@ -182,6 +194,7 @@ func (pb *PluginBroker) loadConfig(path string) error { if err != nil { return err } + for _, configFilePath := range files { if !strings.HasSuffix(configFilePath, ".yaml") && !strings.HasSuffix(configFilePath, ".yml") { continue @@ -191,19 +204,22 @@ func (pb *PluginBroker) loadConfig(path string) error { if err != nil { return err } + for _, pluginConfig := range pluginConfigs { SetRequiredFields(&pluginConfig) + if _, ok := pb.pluginConfigByName[pluginConfig.Name]; ok { log.Warningf("notification '%s' is defined multiple times", pluginConfig.Name) } + pb.pluginConfigByName[pluginConfig.Name] = pluginConfig if !pb.profilesContainPlugin(pluginConfig.Name) { continue } } } - err = pb.verifyPluginConfigsWithProfile() - return err + + return pb.verifyPluginConfigsWithProfile() } // checks whether every notification in profile has its own config file @@ -213,9 +229,11 @@ func (pb *PluginBroker) verifyPluginConfigsWithProfile() error { if _, ok := pb.pluginConfigByName[pluginName]; !ok { return fmt.Errorf("config file for plugin %s not found", pluginName) } + pb.pluginsTypesToDispatch[pb.pluginConfigByName[pluginName].Type] = struct{}{} } } + return nil } @@ -228,6 +246,7 @@ func (pb *PluginBroker) verifyPluginBinaryWithProfile() error { } } } + return nil } @@ -236,14 +255,17 @@ func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { if err != nil { return err } + for _, binaryPath := range binaryPaths { if err := pluginIsValid(binaryPath); err != nil { return err } + pType, pSubtype, err := getPluginTypeAndSubtypeFromPath(binaryPath) // eg pType="notification" , pSubtype="slack" if err != nil { return 
err } + if pType != "notification" { continue } @@ -256,6 +278,7 @@ func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { if err != nil { return err } + for _, pc := range pb.pluginConfigByName { if pc.Type != pSubtype { continue @@ -265,15 +288,20 @@ func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { if err != nil { return err } + data = []byte(csstring.StrictExpand(string(data), os.LookupEnv)) + _, err = pluginClient.Configure(ctx, &protobufs.Config{Config: data}) if err != nil { return fmt.Errorf("while configuring %s: %w", pc.Name, err) } + log.Infof("registered plugin %s", pc.Name) + pb.notificationPluginByName[pc.Name] = pluginClient } } + return pb.verifyPluginBinaryWithProfile() } @@ -282,13 +310,17 @@ func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) ( if err != nil { return nil, err } + log.Debugf("Executing plugin %s", binaryPath) + cmd, err := pb.CreateCmd(binaryPath) if err != nil { return nil, err } + pb.pluginMap[name] = &NotifierPlugin{} l := log.New() + err = types.ConfigureLogger(l) if err != nil { return nil, err @@ -304,20 +336,44 @@ func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) ( AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, Logger: logger, }) + client, err := c.Client() if err != nil { return nil, err } + raw, err := client.Dispense(name) if err != nil { return nil, err } + pb.pluginKillMethods = append(pb.pluginKillMethods, c.Kill) + return raw.(protobufs.NotifierServer), nil } -func (pb *PluginBroker) pushNotificationsToPlugin(pluginName string, alerts []*models.Alert) error { +func (pb *PluginBroker) tryNotify(ctx context.Context, pluginName, message string) error { + timeout := pb.pluginConfigByName[pluginName].TimeOut + ctxTimeout, cancel := context.WithTimeout(ctx, timeout) + + defer cancel() + + plugin := pb.notificationPluginByName[pluginName] + + _, err := plugin.Notify( + ctxTimeout, + &protobufs.Notification{ 
+ Text: message, + Name: pluginName, + }, + ) + + return err +} + +func (pb *PluginBroker) pushNotificationsToPlugin(ctx context.Context, pluginName string, alerts []*models.Alert) error { log.WithField("plugin", pluginName).Debugf("pushing %d alerts to plugin", len(alerts)) + if len(alerts) == 0 { return nil } @@ -326,21 +382,14 @@ func (pb *PluginBroker) pushNotificationsToPlugin(pluginName string, alerts []*m if err != nil { return err } - plugin := pb.notificationPluginByName[pluginName] + backoffDuration := time.Second + for i := 1; i <= pb.pluginConfigByName[pluginName].MaxRetry; i++ { - ctx, cancel := context.WithTimeout(context.Background(), pb.pluginConfigByName[pluginName].TimeOut) - defer cancel() - _, err = plugin.Notify( - ctx, - &protobufs.Notification{ - Text: message, - Name: pluginName, - }, - ) - if err == nil { + if err = pb.tryNotify(ctx, pluginName, message); err == nil { return nil } + log.WithField("plugin", pluginName).Errorf("%s error, retry num %d", err, i) time.Sleep(backoffDuration) backoffDuration *= 2 @@ -351,27 +400,34 @@ func (pb *PluginBroker) pushNotificationsToPlugin(pluginName string, alerts []*m func ParsePluginConfigFile(path string) ([]PluginConfig, error) { parsedConfigs := make([]PluginConfig, 0) + yamlFile, err := os.Open(path) if err != nil { return nil, fmt.Errorf("while opening %s: %w", path, err) } + dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) + for { pc := PluginConfig{} + err = dec.Decode(&pc) if err != nil { if errors.Is(err, io.EOF) { break } + return nil, fmt.Errorf("while decoding %s got error %s", path, err) } // if the yaml document is empty, skip if reflect.DeepEqual(pc, PluginConfig{}) { continue } + parsedConfigs = append(parsedConfigs, pc) } + return parsedConfigs, nil } @@ -390,6 +446,7 @@ func getUUID() (string, error) { if err != nil { return "", err } + return uuidv4.String(), nil } @@ -398,11 +455,13 @@ func getHandshake() (plugin.HandshakeConfig, error) { if err != nil { return 
plugin.HandshakeConfig{}, err } + handshake := plugin.HandshakeConfig{ ProtocolVersion: PluginProtocolVersion, MagicCookieKey: CrowdsecPluginKey, MagicCookieValue: uuid, } + return handshake, nil } @@ -411,10 +470,13 @@ func FormatAlerts(format string, alerts []*models.Alert) (string, error) { if err != nil { return "", err } + b := new(strings.Builder) + err = template.Execute(b, alerts) if err != nil { return "", err } + return b.String(), nil } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index cdf8f080773..9216c7f6724 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -234,100 +234,109 @@ func compileScopeFilter(bucketFactory *BucketFactory) error { return nil } -func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, scenarios []*cwhub.Item, tomb *tomb.Tomb, buckets *Buckets, orderEvent bool) ([]BucketFactory, chan types.Event, error) { - var ( - ret = []BucketFactory{} - response chan types.Event - ) +func loadBucketFactoriesFromFile(item *cwhub.Item, hub *cwhub.Hub, buckets *Buckets, tomb *tomb.Tomb, response chan types.Event, orderEvent bool, simulationConfig *csconfig.SimulationConfig) ([]BucketFactory, error) { + itemPath := item.State.LocalPath - response = make(chan types.Event, 1) + // process the yaml + bucketConfigurationFile, err := os.Open(itemPath) + if err != nil { + log.Errorf("Can't access leaky configuration file %s", itemPath) + return nil, err + } - for _, item := range scenarios { - log.Debugf("Loading '%s'", item.State.LocalPath) + defer bucketConfigurationFile.Close() + dec := yaml.NewDecoder(bucketConfigurationFile) + dec.SetStrict(true) - itemPath := item.State.LocalPath + factories := []BucketFactory{} - // process the yaml - bucketConfigurationFile, err := os.Open(itemPath) + for { + bucketFactory := BucketFactory{} + + err = dec.Decode(&bucketFactory) if err != nil { - log.Errorf("Can't access leaky configuration file %s", itemPath) - return nil, nil, 
err - } + if !errors.Is(err, io.EOF) { + log.Errorf("Bad yaml in %s: %v", itemPath, err) + return nil, fmt.Errorf("bad yaml in %s: %w", itemPath, err) + } - defer bucketConfigurationFile.Close() - dec := yaml.NewDecoder(bucketConfigurationFile) - dec.SetStrict(true) + log.Tracef("End of yaml file") - for { - bucketFactory := BucketFactory{} + break + } - err = dec.Decode(&bucketFactory) - if err != nil { - if !errors.Is(err, io.EOF) { - log.Errorf("Bad yaml in %s: %v", itemPath, err) - return nil, nil, fmt.Errorf("bad yaml in %s: %w", itemPath, err) - } + bucketFactory.DataDir = hub.GetDataDir() + // check empty + if bucketFactory.Name == "" { + log.Errorf("Won't load nameless bucket") + return nil, errors.New("nameless bucket") + } + // check compat + if bucketFactory.FormatVersion == "" { + log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, itemPath) + bucketFactory.FormatVersion = "1.0" + } - log.Tracef("End of yaml file") + ok, err := constraint.Satisfies(bucketFactory.FormatVersion, constraint.Scenario) + if err != nil { + return nil, fmt.Errorf("failed to check version: %w", err) + } - break - } + if !ok { + log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, constraint.Scenario) + continue + } - bucketFactory.DataDir = hub.GetDataDir() - // check empty - if bucketFactory.Name == "" { - log.Errorf("Won't load nameless bucket") - return nil, nil, errors.New("nameless bucket") - } - // check compat - if bucketFactory.FormatVersion == "" { - log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, itemPath) - bucketFactory.FormatVersion = "1.0" - } + bucketFactory.Filename = filepath.Clean(itemPath) + bucketFactory.BucketName = seed.Generate() + bucketFactory.ret = response - ok, err := constraint.Satisfies(bucketFactory.FormatVersion, constraint.Scenario) - if err != nil { - return nil, nil, fmt.Errorf("failed to check version: %w", err) - } + if 
simulationConfig != nil { + bucketFactory.Simulated = simulationConfig.IsSimulated(bucketFactory.Name) + } - if !ok { - log.Errorf("can't load %s : %s doesn't satisfy scenario format %s, skip", bucketFactory.Name, bucketFactory.FormatVersion, constraint.Scenario) - continue - } + bucketFactory.ScenarioVersion = item.State.LocalVersion + bucketFactory.hash = item.State.LocalHash - bucketFactory.Filename = filepath.Clean(itemPath) - bucketFactory.BucketName = seed.Generate() - bucketFactory.ret = response + bucketFactory.wgDumpState = buckets.wgDumpState + bucketFactory.wgPour = buckets.wgPour - if cscfg.SimulationConfig != nil { - bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(bucketFactory.Name) - } + err = LoadBucket(&bucketFactory, tomb) + if err != nil { + log.Errorf("Failed to load bucket %s: %v", bucketFactory.Name, err) + return nil, fmt.Errorf("loading of %s failed: %w", bucketFactory.Name, err) + } + + bucketFactory.orderEvent = orderEvent - bucketFactory.ScenarioVersion = item.State.LocalVersion - bucketFactory.hash = item.State.LocalHash + factories = append(factories, bucketFactory) + } - bucketFactory.wgDumpState = buckets.wgDumpState - bucketFactory.wgPour = buckets.wgPour + return factories, nil +} - err = LoadBucket(&bucketFactory, tomb) - if err != nil { - log.Errorf("Failed to load bucket %s: %v", bucketFactory.Name, err) - return nil, nil, fmt.Errorf("loading of %s failed: %w", bucketFactory.Name, err) - } +func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, scenarios []*cwhub.Item, tomb *tomb.Tomb, buckets *Buckets, orderEvent bool) ([]BucketFactory, chan types.Event, error) { + allFactories := []BucketFactory{} + response := make(chan types.Event, 1) - bucketFactory.orderEvent = orderEvent + for _, item := range scenarios { + log.Debugf("Loading '%s'", item.State.LocalPath) - ret = append(ret, bucketFactory) + factories, err := loadBucketFactoriesFromFile(item, hub, buckets, tomb, response, orderEvent, 
cscfg.SimulationConfig) + if err != nil { + return nil, nil, err } + + allFactories = append(allFactories, factories...) } if err := alertcontext.NewAlertContext(cscfg.ContextToSend, cscfg.ConsoleContextValueLength); err != nil { return nil, nil, fmt.Errorf("unable to load alert context: %w", err) } - log.Infof("Loaded %d scenarios", len(ret)) + log.Infof("Loaded %d scenarios", len(allFactories)) - return ret, response, nil + return allFactories, response, nil } /* Init recursively process yaml files from a directory and loads them as BucketFactory */ From 16d0677938e1aa58e2b2ad2c8d49bb814243546d Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 19 Feb 2025 15:04:47 +0100 Subject: [PATCH 437/581] Add support for centralized allowlists (#3355) --- cmd/crowdsec-cli/args/args.go | 27 + cmd/crowdsec-cli/cliallowlists/allowlists.go | 641 ++++++ cmd/crowdsec-cli/clidecision/decisions.go | 30 +- cmd/crowdsec-cli/main.go | 5 + cmd/crowdsec/crowdsec.go | 15 +- cmd/crowdsec/serve.go | 4 +- pkg/acquisition/modules/appsec/appsec.go | 49 +- .../modules/appsec/appsec_hooks_test.go | 34 + .../modules/appsec/appsec_runner.go | 36 +- pkg/acquisition/modules/appsec/appsec_test.go | 74 +- .../loki/internal/lokiclient/loki_client.go | 1 + pkg/apiclient/allowlists_service.go | 110 + pkg/apiclient/auth_jwt.go | 6 +- pkg/apiclient/client.go | 69 + pkg/apiserver/alerts_test.go | 68 +- pkg/apiserver/allowlists_test.go | 127 ++ pkg/apiserver/api_key_test.go | 2 +- pkg/apiserver/apic.go | 171 +- pkg/apiserver/apic_test.go | 36 + pkg/apiserver/apiserver_test.go | 4 +- pkg/apiserver/controllers/controller.go | 4 + pkg/apiserver/controllers/v1/alerts.go | 36 +- pkg/apiserver/controllers/v1/allowlist.go | 130 ++ pkg/apiserver/papi_cmd.go | 99 +- pkg/apiserver/tests/alert_allowlisted.json | 275 +++ .../tests/alert_allowlisted_expired.json | 275 +++ pkg/appsec/allowlists/allowlists.go | 156 ++ pkg/appsec/allowlists/allowlists_test.go | 93 + pkg/database/allowlists.go | 372 ++++ 
pkg/database/allowlists_test.go | 106 + pkg/database/ent/allowlist.go | 189 ++ pkg/database/ent/allowlist/allowlist.go | 133 ++ pkg/database/ent/allowlist/where.go | 429 ++++ pkg/database/ent/allowlist_create.go | 321 +++ pkg/database/ent/allowlist_delete.go | 88 + pkg/database/ent/allowlist_query.go | 637 ++++++ pkg/database/ent/allowlist_update.go | 501 +++++ pkg/database/ent/allowlistitem.go | 231 ++ .../ent/allowlistitem/allowlistitem.go | 165 ++ pkg/database/ent/allowlistitem/where.go | 664 ++++++ pkg/database/ent/allowlistitem_create.go | 398 ++++ pkg/database/ent/allowlistitem_delete.go | 88 + pkg/database/ent/allowlistitem_query.go | 637 ++++++ pkg/database/ent/allowlistitem_update.go | 463 ++++ pkg/database/ent/client.go | 374 +++- pkg/database/ent/ent.go | 22 +- pkg/database/ent/hook/hook.go | 24 + pkg/database/ent/migrate/schema.go | 90 + pkg/database/ent/mutation.go | 1966 ++++++++++++++++- pkg/database/ent/predicate/predicate.go | 6 + pkg/database/ent/runtime.go | 26 + pkg/database/ent/schema/allowlist.go | 44 + pkg/database/ent/schema/allowlist_item.go | 51 + pkg/database/ent/tx.go | 6 + pkg/database/flush.go | 22 + pkg/models/allowlist_item.go | 100 + pkg/models/check_allowlist_response.go | 53 + pkg/models/get_allowlist_response.go | 174 ++ pkg/models/get_allowlists_response.go | 78 + pkg/models/localapi_swagger.yaml | 176 ++ pkg/modelscapi/allowlist_link.go | 166 ++ pkg/modelscapi/centralapi_swagger.yaml | 35 + .../get_decisions_stream_response_links.go | 62 + test/bats/cscli-allowlists.bats | 216 ++ 64 files changed, 11539 insertions(+), 151 deletions(-) create mode 100644 cmd/crowdsec-cli/args/args.go create mode 100644 cmd/crowdsec-cli/cliallowlists/allowlists.go create mode 100644 pkg/apiclient/allowlists_service.go create mode 100644 pkg/apiserver/allowlists_test.go create mode 100644 pkg/apiserver/controllers/v1/allowlist.go create mode 100644 pkg/apiserver/tests/alert_allowlisted.json create mode 100644 
pkg/apiserver/tests/alert_allowlisted_expired.json create mode 100644 pkg/appsec/allowlists/allowlists.go create mode 100644 pkg/appsec/allowlists/allowlists_test.go create mode 100644 pkg/database/allowlists.go create mode 100644 pkg/database/allowlists_test.go create mode 100644 pkg/database/ent/allowlist.go create mode 100644 pkg/database/ent/allowlist/allowlist.go create mode 100644 pkg/database/ent/allowlist/where.go create mode 100644 pkg/database/ent/allowlist_create.go create mode 100644 pkg/database/ent/allowlist_delete.go create mode 100644 pkg/database/ent/allowlist_query.go create mode 100644 pkg/database/ent/allowlist_update.go create mode 100644 pkg/database/ent/allowlistitem.go create mode 100644 pkg/database/ent/allowlistitem/allowlistitem.go create mode 100644 pkg/database/ent/allowlistitem/where.go create mode 100644 pkg/database/ent/allowlistitem_create.go create mode 100644 pkg/database/ent/allowlistitem_delete.go create mode 100644 pkg/database/ent/allowlistitem_query.go create mode 100644 pkg/database/ent/allowlistitem_update.go create mode 100644 pkg/database/ent/schema/allowlist.go create mode 100644 pkg/database/ent/schema/allowlist_item.go create mode 100644 pkg/models/allowlist_item.go create mode 100644 pkg/models/check_allowlist_response.go create mode 100644 pkg/models/get_allowlist_response.go create mode 100644 pkg/models/get_allowlists_response.go create mode 100644 pkg/modelscapi/allowlist_link.go create mode 100644 test/bats/cscli-allowlists.bats diff --git a/cmd/crowdsec-cli/args/args.go b/cmd/crowdsec-cli/args/args.go new file mode 100644 index 00000000000..7e827f2c78b --- /dev/null +++ b/cmd/crowdsec-cli/args/args.go @@ -0,0 +1,27 @@ +package args + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func MinimumNArgs(n int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) < n { + cmd.Help() //nolint:errcheck + return fmt.Errorf("requires at least %d arg(s), only received %d", n, 
len(args)) + } + return nil + } +} + +func ExactArgs(n int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) != n { + cmd.Help() //nolint:errcheck + return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) + } + return nil + } +} diff --git a/cmd/crowdsec-cli/cliallowlists/allowlists.go b/cmd/crowdsec-cli/cliallowlists/allowlists.go new file mode 100644 index 00000000000..18cfe06c27f --- /dev/null +++ b/cmd/crowdsec-cli/cliallowlists/allowlists.go @@ -0,0 +1,641 @@ +package cliallowlists + +import ( + "context" + "encoding/csv" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "slices" + "strconv" + "strings" + "time" + + "github.com/fatih/color" + "github.com/go-openapi/strfmt" + "github.com/jedib0t/go-pretty/v6/table" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/go-cs-lib/cstime" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type configGetter func() *csconfig.Config + +type cliAllowLists struct { + cfg configGetter +} + +func New(cfg configGetter) *cliAllowLists { + return &cliAllowLists{ + cfg: cfg, + } +} + +// validAllowlists returns a list of valid allowlists name for command completion +// Used for completion in cscli by commands that allow editing (add), so it excludes allowlists managed by console +func (cli *cliAllowLists) validAllowlists(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var err error + + cfg := cli.cfg() + ctx := cmd.Context() + + if err = require.LAPI(cfg); err != nil { + cobra.CompError("unable to load LAPI " + err.Error()) + return nil, 
cobra.ShellCompDirectiveNoFileComp + } + + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + cobra.CompError("unable to load dbclient " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + allowlists, err := db.ListAllowLists(ctx, false) + if err != nil { + cobra.CompError("unable to list allowlists " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + ret := []string{} + + for _, allowlist := range allowlists { + if strings.Contains(allowlist.Name, toComplete) && !slices.Contains(args, allowlist.Name) && !allowlist.FromConsole { + ret = append(ret, allowlist.Name) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp +} + +// Used for completion in cscli +// This version returns a list of all allowlists, including those managed by console (for completion in read-only commands, such as inspect) +func (cli *cliAllowLists) validAllowlistsWithConsole(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var err error + + cfg := cli.cfg() + ctx := cmd.Context() + + if err = require.LAPI(cfg); err != nil { + cobra.CompError("unable to load LAPI " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + cobra.CompError("unable to load dbclient " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + allowlists, err := db.ListAllowLists(ctx, false) + if err != nil { + cobra.CompError("unable to list allowlists " + err.Error()) + return nil, cobra.ShellCompDirectiveNoFileComp + } + + ret := []string{} + + for _, allowlist := range allowlists { + if strings.Contains(allowlist.Name, toComplete) && !slices.Contains(args, allowlist.Name) { + ret = append(ret, allowlist.Name) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp +} + +func (cli *cliAllowLists) listCSV(out io.Writer, allowlists *models.GetAllowlistsResponse) error { + csvwriter := csv.NewWriter(out) + + err := 
csvwriter.Write([]string{"name", "description", "created_at", "updated_at", "console_managed", "size"}) + if err != nil { + return fmt.Errorf("failed to write raw header: %w", err) + } + + for _, allowlist := range *allowlists { + createdAt := time.Time(allowlist.CreatedAt).Format(time.RFC3339) + updatedAt := time.Time(allowlist.UpdatedAt).Format(time.RFC3339) + consoleManaged := strconv.FormatBool(allowlist.ConsoleManaged) + itemsCount := strconv.Itoa(len(allowlist.Items)) + + err := csvwriter.Write([]string{allowlist.Name, allowlist.Description, createdAt, updatedAt, consoleManaged, itemsCount}) + if err != nil { + return fmt.Errorf("failed to write raw: %w", err) + } + } + + csvwriter.Flush() + + return nil +} + +func (cli *cliAllowLists) listCSVContent(out io.Writer, allowlist *models.GetAllowlistResponse) error { + csvwriter := csv.NewWriter(out) + + err := csvwriter.Write([]string{"name", "description", "value", "comment", "expiration", "created_at", "console_managed"}) + if err != nil { + return fmt.Errorf("failed to write raw header: %w", err) + } + + for _, item := range allowlist.Items { + createdAt := time.Time(item.CreatedAt).Format(time.RFC3339) + expiration := "never" + + if !time.Time(item.Expiration).IsZero() { + expiration = time.Time(item.Expiration).Format(time.RFC3339) + } + + err := csvwriter.Write([]string{allowlist.Name, allowlist.Description, item.Value, item.Description, expiration, createdAt, strconv.FormatBool(allowlist.ConsoleManaged)}) + if err != nil { + return fmt.Errorf("failed to write raw: %w", err) + } + } + + csvwriter.Flush() + + return nil +} + +func (cli *cliAllowLists) listHuman(out io.Writer, allowlists *models.GetAllowlistsResponse) error { + t := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + t.AppendHeader(table.Row{"Name", "Description", "Created at", "Updated at", "Managed by Console", "Size"}) + + for _, allowlist := range *allowlists { + managed := "no" + if allowlist.ConsoleManaged { + managed = "yes" + } + + 
t.AppendRow(table.Row{allowlist.Name, allowlist.Description, allowlist.CreatedAt, allowlist.UpdatedAt, managed, len(allowlist.Items)}) + } + + fmt.Fprintln(out, t.Render()) + + return nil +} + +func (cli *cliAllowLists) listContentHuman(out io.Writer, allowlist *models.GetAllowlistResponse) error { + infoTable := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + infoTable.SetTitle("Allowlist: " + allowlist.Name) + infoTable.SetColumnConfigs([]table.ColumnConfig{ + {Number: 1, AutoMerge: true}, + }) + + contentTable := cstable.NewLight(out, cli.cfg().Cscli.Color).Writer + contentTable.AppendHeader(table.Row{"Value", "Comment", "Expiration", "Created at"}) + + managed := "no" + if allowlist.ConsoleManaged { + managed = "yes" + } + + infoTable.AppendRows([]table.Row{ + {"Name", allowlist.Name}, + {"Description", allowlist.Description}, + {"Created at", allowlist.CreatedAt}, + {"Updated at", allowlist.UpdatedAt}, + {"Managed by Console", managed}, + }) + + for _, content := range allowlist.Items { + expiration := "never" + if !time.Time(content.Expiration).IsZero() { + expiration = content.Expiration.String() + } + + contentTable.AppendRow(table.Row{content.Value, content.Description, expiration, allowlist.CreatedAt}) + } + + fmt.Fprintln(out, infoTable.Render()) + fmt.Fprintln(out) + fmt.Fprintln(out, contentTable.Render()) + + return nil +} + +func (cli *cliAllowLists) NewCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "allowlists [action]", + Short: "Manage centralized allowlists", + Aliases: []string{"allowlist"}, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + } + + cmd.AddCommand(cli.newCreateCmd()) + cmd.AddCommand(cli.newListCmd()) + cmd.AddCommand(cli.newDeleteCmd()) + cmd.AddCommand(cli.newAddCmd()) + cmd.AddCommand(cli.newRemoveCmd()) + cmd.AddCommand(cli.newInspectCmd()) + + return cmd +} + +func (cli *cliAllowLists) newCreateCmd() *cobra.Command { + var description string + + cmd := &cobra.Command{ + Use: "create 
[allowlist_name]", + Example: "cscli allowlists create my_allowlist -d 'my allowlist description'", + Short: "Create a new allowlist", + Args: args.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + var err error + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + return err + } + + db, err := require.DBClient(cmd.Context(), cfg.DbConfig) + if err != nil { + return err + } + + name := args[0] + + return cli.create(cmd.Context(), db, name, description) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&description, "description", "d", "", "description of the allowlist") + + _ = cmd.MarkFlagRequired("description") + + return cmd +} + +func (cli *cliAllowLists) create(ctx context.Context, db *database.Client, name string, description string) error { + _, err := db.CreateAllowList(ctx, name, description, "", false) + if err != nil { + return err + } + + fmt.Printf("allowlist '%s' created successfully\n", name) + + return nil +} + +func (cli *cliAllowLists) newListCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Example: `cscli allowlists list`, + Short: "List all allowlists", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, _ []string) error { + cfg := cli.cfg() + if err := cfg.LoadAPIClient(); err != nil { + return fmt.Errorf("loading api client: %w", err) + } + + apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) + if err != nil { + return fmt.Errorf("parsing api url: %w", err) + } + + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: cfg.API.Client.Credentials.Login, + Password: strfmt.Password(cfg.API.Client.Credentials.Password), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + return fmt.Errorf("creating api client: %w", err) + } + + return cli.list(cmd.Context(), client, color.Output) + }, + } + + return cmd +} + +func (cli *cliAllowLists) list(ctx context.Context, client *apiclient.ApiClient, out io.Writer) error { + // not db? 
+ allowlists, _, err := client.Allowlists.List(ctx, apiclient.AllowlistListOpts{WithContent: true}) + if err != nil { + return err + } + + switch cli.cfg().Cscli.Output { + case "human": + return cli.listHuman(out, allowlists) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(allowlists); err != nil { + return errors.New("failed to serialize") + } + + return nil + case "raw": + return cli.listCSV(out, allowlists) + } + + return nil +} + +func (cli *cliAllowLists) newDeleteCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "delete [allowlist_name]", + Short: "Delete an allowlist", + Example: `cscli allowlists delete my_allowlist`, + Args: args.ExactArgs(1), + ValidArgsFunction: cli.validAllowlists, + RunE: func(cmd *cobra.Command, args []string) error { + var err error + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + return err + } + + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + return err + } + + return cli.delete(ctx, db, args[0]) + }, + } + + return cmd +} + +func (cli *cliAllowLists) delete(ctx context.Context, db *database.Client, name string) error { + list, err := db.GetAllowList(ctx, name, false) + if err != nil { + return err + } + + if list.FromConsole { + return fmt.Errorf("allowlist %s is managed by console, cannot delete with cscli. Please visit https://app.crowdsec.net/allowlists/%s to unsubscribe", name, list.AllowlistID) + } + + err = db.DeleteAllowList(ctx, name, false) + if err != nil { + return err + } + + fmt.Printf("allowlist '%s' deleted successfully\n", name) + + return nil +} + +func (cli *cliAllowLists) newAddCmd() *cobra.Command { + var ( + expirationStr string + comment string + ) + + cmd := &cobra.Command{ + Use: "add [allowlist_name] [value...] 
[-e expiration] [-d comment]", + Short: "Add content to an allowlist", + Example: `cscli allowlists add my_allowlist 1.2.3.4 2.3.4.5 -e 1h -d "my comment"`, + Args: args.MinimumNArgs(2), + ValidArgsFunction: cli.validAllowlists, + RunE: func(cmd *cobra.Command, args []string) error { + var err error + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + return err + } + + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + return err + } + + var expiration time.Duration + + if expirationStr != "" { + expiration, err = cstime.ParseDuration(expirationStr) + if err != nil { + return err + } + } + + name := args[0] + values := args[1:] + + return cli.add(ctx, db, name, values, expiration, comment) + }, + } + + flags := cmd.Flags() + + flags.StringVarP(&expirationStr, "expiration", "e", "", "expiration duration") + flags.StringVarP(&comment, "comment", "d", "", "comment for the value") + + return cmd +} + +func (cli *cliAllowLists) add(ctx context.Context, db *database.Client, name string, values []string, expiration time.Duration, comment string) error { + allowlist, err := db.GetAllowList(ctx, name, true) + if err != nil { + return err + } + + if allowlist.FromConsole { + return fmt.Errorf("allowlist %s is managed by console, cannot update with cscli. 
Please visit https://app.crowdsec.net/allowlists/%s to update", name, allowlist.AllowlistID) + } + + toAdd := make([]*models.AllowlistItem, 0) + + for _, v := range values { + found := false + + for _, item := range allowlist.Edges.AllowlistItems { + if item.Value == v { + found = true + + log.Warnf("value %s already in allowlist", v) + + break + } + } + + if !found { + expTS := time.Time{} + if expiration != 0 { + expTS = time.Now().UTC().Add(expiration) + } + + toAdd = append(toAdd, &models.AllowlistItem{Value: v, Description: comment, Expiration: strfmt.DateTime(expTS)}) + } + } + + if len(toAdd) == 0 { + fmt.Println("no new values for allowlist") + return nil + } + + added, err := db.AddToAllowlist(ctx, allowlist, toAdd) + if err != nil { + return fmt.Errorf("unable to add values to allowlist: %w", err) + } + + if added > 0 { + fmt.Printf("added %d values to allowlist %s\n", added, name) + } + + return nil +} + +func (cli *cliAllowLists) newInspectCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "inspect [allowlist_name]", + Example: `cscli allowlists inspect my_allowlist`, + Short: "Inspect an allowlist", + Args: args.ExactArgs(1), + ValidArgsFunction: cli.validAllowlistsWithConsole, + RunE: func(cmd *cobra.Command, args []string) error { + cfg := cli.cfg() + if err := cfg.LoadAPIClient(); err != nil { + return fmt.Errorf("loading api client: %w", err) + } + + apiURL, err := url.Parse(cfg.API.Client.Credentials.URL) + if err != nil { + return fmt.Errorf("parsing api url: %w", err) + } + + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: cfg.API.Client.Credentials.Login, + Password: strfmt.Password(cfg.API.Client.Credentials.Password), + URL: apiURL, + VersionPrefix: "v1", + }) + if err != nil { + return fmt.Errorf("creating api client: %w", err) + } + + name := args[0] + + return cli.inspect(cmd.Context(), client, name, color.Output) + }, + } + + return cmd +} + +func (cli *cliAllowLists) inspect(ctx context.Context, client 
*apiclient.ApiClient, name string, out io.Writer) error { + allowlist, _, err := client.Allowlists.Get(ctx, name, apiclient.AllowlistGetOpts{WithContent: true}) + if err != nil { + return fmt.Errorf("unable to get allowlist: %w", err) + } + + switch cli.cfg().Cscli.Output { + case "human": + return cli.listContentHuman(out, allowlist) + case "json": + enc := json.NewEncoder(out) + enc.SetIndent("", " ") + + if err := enc.Encode(allowlist); err != nil { + return errors.New("failed to serialize") + } + + return nil + case "raw": + return cli.listCSVContent(out, allowlist) + } + + return nil +} + +func (cli *cliAllowLists) newRemoveCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "remove [allowlist_name] [value]", + Short: "Remove content from an allowlist", + Example: `cscli allowlists remove my_allowlist 1.2.3.4 2.3.4.5`, + Args: args.MinimumNArgs(2), + ValidArgsFunction: cli.validAllowlists, + SilenceUsage: false, + RunE: func(cmd *cobra.Command, args []string) error { + var err error + cfg := cli.cfg() + + if err = require.LAPI(cfg); err != nil { + return err + } + + ctx := cmd.Context() + + db, err := require.DBClient(ctx, cfg.DbConfig) + if err != nil { + return err + } + + name := args[0] + values := args[1:] + + return cli.remove(ctx, db, name, values) + }, + } + + return cmd +} + +func (cli *cliAllowLists) remove(ctx context.Context, db *database.Client, name string, values []string) error { + allowlist, err := db.GetAllowList(ctx, name, true) + if err != nil { + return err + } + + if allowlist.FromConsole { + return fmt.Errorf("allowlist %s is managed by console, cannot delete with cscli. 
Please visit https://app.crowdsec.net/allowlists/%s to delete", name, allowlist.AllowlistID) + } + + toRemove := make([]string, 0) + + for _, v := range values { + found := false + + for _, item := range allowlist.Edges.AllowlistItems { + if item.Value == v { + found = true + break + } + } + + if found { + toRemove = append(toRemove, v) + } + } + + if len(toRemove) == 0 { + fmt.Println("no value to remove from allowlist") + return nil + } + + deleted, err := db.RemoveFromAllowlist(ctx, allowlist, toRemove...) + if err != nil { + return fmt.Errorf("unable to remove values from allowlist: %w", err) + } + + if deleted > 0 { + fmt.Printf("removed %d values from allowlist %s", deleted, name) + } + + return nil +} diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index da45b5f0bfb..822ad4bf3a8 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -317,7 +317,8 @@ cscli decisions list --origin lists --scenario list_name return cmd } -func (cli *cliDecisions) add(ctx context.Context, addIP, addRange, addDuration, addValue, addScope, addReason, addType string) error { +//nolint:revive // we'll reduce the number of args later +func (cli *cliDecisions) add(ctx context.Context, addIP, addRange, addDuration, addValue, addScope, addReason, addType string, bypassAllowlist bool) error { alerts := models.AddAlertsRequest{} origin := types.CscliOrigin capacity := int32(0) @@ -350,6 +351,15 @@ func (cli *cliDecisions) add(ctx context.Context, addIP, addRange, addDuration, addReason = fmt.Sprintf("manual '%s' from '%s'", addType, cli.cfg().API.Client.Credentials.Login) } + if !bypassAllowlist && (addScope == types.Ip || addScope == types.Range) { + resp, _, err := cli.client.Allowlists.CheckIfAllowlistedWithReason(ctx, addValue) + if err != nil { + log.Errorf("Cannot check if %s is in allowlist: %s", addValue, err) + } else if resp.Allowlisted { + return fmt.Errorf("%s is allowlisted 
by item %s, use --bypass-allowlist to add the decision anyway", addValue, resp.Reason) + } + } + decision := models.Decision{ Duration: &addDuration, Scope: &addScope, @@ -398,13 +408,14 @@ func (cli *cliDecisions) add(ctx context.Context, addIP, addRange, addDuration, func (cli *cliDecisions) newAddCmd() *cobra.Command { var ( - addIP string - addRange string - addDuration string - addValue string - addScope string - addReason string - addType string + addIP string + addRange string + addDuration string + addValue string + addScope string + addReason string + addType string + bypassAllowlist bool ) cmd := &cobra.Command{ @@ -419,7 +430,7 @@ cscli decisions add --scope username --value foobar Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.add(cmd.Context(), addIP, addRange, addDuration, addValue, addScope, addReason, addType) + return cli.add(cmd.Context(), addIP, addRange, addDuration, addValue, addScope, addReason, addType, bypassAllowlist) }, } @@ -432,6 +443,7 @@ cscli decisions add --scope username --value foobar flags.StringVar(&addScope, "scope", types.Ip, "Decision scope (ie. ip,range,username)") flags.StringVarP(&addReason, "reason", "R", "", "Decision reason (ie. scenario-name)") flags.StringVarP(&addType, "type", "t", "ban", "Decision type (ie. 
ban,captcha,throttle)") + flags.BoolVarP(&bypassAllowlist, "bypass-allowlist", "B", false, "Add decision even if value is in allowlist") return cmd } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index a17bafb96d8..f968b6eacf9 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -12,9 +12,11 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliallowlists" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconfig" @@ -166,6 +168,8 @@ func (cli *cliRoot) initialize() error { } } + csConfig.DbConfig.LogLevel = ptr.Of(cli.wantedLogLevel()) + return nil } @@ -282,6 +286,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(cliitem.NewContext(cli.cfg).NewCommand()) cmd.AddCommand(cliitem.NewAppsecConfig(cli.cfg).NewCommand()) cmd.AddCommand(cliitem.NewAppsecRule(cli.cfg).NewCommand()) + cmd.AddCommand(cliallowlists.New(cli.cfg).NewCommand()) cli.addSetup(cmd) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index db93992605d..29be3ff779f 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -14,6 +14,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/acquisition" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -23,7 +24,7 @@ import ( ) // initCrowdsec prepares the log processor service -func initCrowdsec(cConfig *csconfig.Config, hub 
*cwhub.Hub) (*parser.Parsers, []acquisition.DataSource, error) { +func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub, testMode bool) (*parser.Parsers, []acquisition.DataSource, error) { var err error if err = alertcontext.LoadConsoleContext(cConfig, hub); err != nil { @@ -51,6 +52,16 @@ func initCrowdsec(cConfig *csconfig.Config, hub *cwhub.Hub) (*parser.Parsers, [] return nil, nil, err } + if !testMode { + err = apiclient.InitLAPIClient( + context.TODO(), cConfig.API.Client.Credentials.URL, cConfig.API.Client.Credentials.PapiURL, + cConfig.API.Client.Credentials.Login, cConfig.API.Client.Credentials.Password, + hub.GetInstalledListForAPI()) + if err != nil { + return nil, nil, fmt.Errorf("while initializing LAPIClient: %w", err) + } + } + datasources, err := LoadAcquisition(cConfig) if err != nil { return nil, nil, fmt.Errorf("while loading acquisition config: %w", err) @@ -116,7 +127,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H }) bucketWg.Wait() - apiClient, err := AuthenticatedLAPIClient(context.TODO(), *cConfig.API.Client.Credentials, hub) + apiClient, err := apiclient.GetLAPIClient() if err != nil { return err } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 0f7a84ce5c7..a9e496fe36e 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -94,7 +94,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { return nil, err } - csParsers, datasources, err := initCrowdsec(cConfig, hub) + csParsers, datasources, err := initCrowdsec(cConfig, hub, false) if err != nil { return nil, fmt.Errorf("unable to init crowdsec: %w", err) } @@ -396,7 +396,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { return err } - csParsers, datasources, err := initCrowdsec(cConfig, hub) + csParsers, datasources, err := initCrowdsec(cConfig, hub, flags.TestMode) if err != nil { return fmt.Errorf("crowdsec init: %w", err) } diff --git a/pkg/acquisition/modules/appsec/appsec.go 
b/pkg/acquisition/modules/appsec/appsec.go index 3e2a4f5765a..e41ace98b19 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -20,7 +20,9 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/allowlists" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -49,18 +51,20 @@ type AppsecSourceConfig struct { // runtime structure of AppsecSourceConfig type AppsecSource struct { - metricsLevel int - config AppsecSourceConfig - logger *log.Entry - mux *http.ServeMux - server *http.Server - outChan chan types.Event - InChan chan appsec.ParsedRequest - AppsecRuntime *appsec.AppsecRuntimeConfig - AppsecConfigs map[string]appsec.AppsecConfig - lapiURL string - AuthCache AuthCache - AppsecRunners []AppsecRunner // one for each go-routine + metricsLevel int + config AppsecSourceConfig + logger *log.Entry + mux *http.ServeMux + server *http.Server + outChan chan types.Event + InChan chan appsec.ParsedRequest + AppsecRuntime *appsec.AppsecRuntimeConfig + AppsecConfigs map[string]appsec.AppsecConfig + lapiURL string + AuthCache AuthCache + AppsecRunners []AppsecRunner // one for each go-routine + apiClient *apiclient.ApiClient + appsecAllowlistClient *allowlists.AppsecAllowlist } // Struct to handle cache of authentication @@ -225,17 +229,24 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLe w.AppsecRunners = make([]AppsecRunner, w.config.Routines) + w.apiClient, err = apiclient.GetLAPIClient() + if err != nil { + return fmt.Errorf("unable to get authenticated LAPI client: %w", err) + } + w.appsecAllowlistClient = allowlists.NewAppsecAllowlist(w.apiClient, w.logger) + for nbRoutine := range w.config.Routines { appsecRunnerUUID := 
uuid.New().String() - // we copy AppsecRutime for each runner + // we copy AppsecRuntime for each runner wrt := *w.AppsecRuntime wrt.Logger = w.logger.Dup().WithField("runner_uuid", appsecRunnerUUID) runner := AppsecRunner{ - inChan: w.InChan, - UUID: appsecRunnerUUID, - logger: w.logger.WithField("runner_uuid", appsecRunnerUUID), - AppsecRuntime: &wrt, - Labels: w.config.Labels, + inChan: w.InChan, + UUID: appsecRunnerUUID, + logger: w.logger.WithField("runner_uuid", appsecRunnerUUID), + AppsecRuntime: &wrt, + Labels: w.config.Labels, + appsecAllowlistsClient: w.appsecAllowlistClient, } err := runner.Init(appsecCfg.GetDataDir()) @@ -273,6 +284,8 @@ func (w *AppsecSource) OneShotAcquisition(_ context.Context, _ chan types.Event, func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { w.outChan = out + w.appsecAllowlistClient.StartRefresh(t) + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live") diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index d87384a0189..46b2ed4d68d 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -883,6 +883,40 @@ func TestOnMatchRemediationHooks(t *testing.T) { require.Equal(t, http.StatusForbidden, statusCode) }, }, + { + name: "on_match: allowlisted IP", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + input_request: appsec.ParsedRequest{ + ClientIP: "5.4.3.2", + RemoteAddr: "5.4.3.2", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('captcha')", 
"SetReturnCode(418)"}, OnSuccess: "continue"}, + {Filter: "IsInBand == true", Apply: []string{"SetRemediation('ban')"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + spew.Dump(responses) + spew.Dump(appsecResponse) + + log.Errorf("http status : %d", statusCode) + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusOK, statusCode) + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index 8bdb6405d98..a21a16598d7 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -17,19 +17,21 @@ import ( // load body processors via init() _ "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec/bodyprocessors" "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/allowlists" "github.com/crowdsecurity/crowdsec/pkg/types" ) // that's the runtime structure of the Application security engine as seen from the acquis type AppsecRunner struct { - outChan chan types.Event - inChan chan appsec.ParsedRequest - UUID string - AppsecRuntime *appsec.AppsecRuntimeConfig //this holds the actual appsec runtime config, rules, remediations, hooks etc. - AppsecInbandEngine coraza.WAF - AppsecOutbandEngine coraza.WAF - Labels map[string]string - logger *log.Entry + outChan chan types.Event + inChan chan appsec.ParsedRequest + UUID string + AppsecRuntime *appsec.AppsecRuntimeConfig //this holds the actual appsec runtime config, rules, remediations, hooks etc. 
+ AppsecInbandEngine coraza.WAF + AppsecOutbandEngine coraza.WAF + Labels map[string]string + logger *log.Entry + appsecAllowlistsClient *allowlists.AppsecAllowlist } func (r *AppsecRunner) MergeDedupRules(collections []appsec.AppsecCollection, logger *log.Entry) string { @@ -41,9 +43,9 @@ func (r *AppsecRunner) MergeDedupRules(collections []appsec.AppsecCollection, lo // Dedup *our* rules for _, rule := range collection.Rules { if _, ok := dedupRules[rule]; ok { - discarded++ - logger.Debugf("Discarding duplicate rule : %s", rule) - continue + discarded++ + logger.Debugf("Discarding duplicate rule : %s", rule) + continue } rulesArr = append(rulesArr, rule) dedupRules[rule] = struct{}{} @@ -234,6 +236,12 @@ func (r *AppsecRunner) ProcessOutOfBandRules(request *appsec.ParsedRequest) erro } func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { + + if allowed, reason := r.appsecAllowlistsClient.IsAllowlisted(request.ClientIP); allowed { + r.logger.Infof("%s is allowlisted by %s, skipping", request.ClientIP, reason) + return + } + //create the associated event for crowdsec itself evt, err := EventFromRequest(request, r.Labels) if err != nil { @@ -288,6 +296,12 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { } func (r *AppsecRunner) handleOutBandInterrupt(request *appsec.ParsedRequest) { + + if allowed, reason := r.appsecAllowlistsClient.IsAllowlisted(request.ClientIP); allowed { + r.logger.Infof("%s is allowlisted by %s, skipping", request.ClientIP, reason) + return + } + evt, err := EventFromRequest(request, r.Labels) if err != nil { //let's not interrupt the pipeline for this diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 5f2b93836f6..a640e7ed526 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -1,14 +1,21 @@ package appsecacquisition import ( + "net/http" + "net/http/httptest" + 
"net/url" "testing" "time" "github.com/davecgh/go-spew/spew" "github.com/google/uuid" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/allowlists" "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -34,6 +41,23 @@ type appsecRuleTest struct { output_asserts func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) } +func setupLapi() (*http.ServeMux, string, func()) { + return setupWithPrefix("v1") +} + +func setupWithPrefix(urlPrefix string) (*http.ServeMux, string, func()) { + // mux is the HTTP request multiplexer used with the test server. + mux := http.NewServeMux() + baseURLPath := "/" + urlPrefix + + apiHandler := http.NewServeMux() + apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux)) + + server := httptest.NewServer(apiHandler) + + return mux, server.URL, server.Close +} + func loadAppSecEngine(test appsecRuleTest, t *testing.T) { if testing.Verbose() { log.SetLevel(log.TraceLevel) @@ -90,13 +114,51 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { //we copy AppsecRutime for each runner wrt := *AppsecRuntime wrt.Logger = logger + + mux, urlx, teardown := setupLapi() + defer teardown() + + apiURL, err := url.Parse(urlx + "/") + require.NoError(t, err) + + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: "test_login", + Password: "test_password", + URL: apiURL, + VersionPrefix: "v1", + }) + require.NoError(t, err) + + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) + }) + + mux.HandleFunc("/allowlists", 
func(w http.ResponseWriter, r *http.Request) { + if r.URL.Query().Get("with_content") != "true" { + t.Errorf("with_content not set") + } + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte(`[{"allowlist_id":"xxxx","console_managed":false,"created_at":"2025-02-11T14:47:35.839Z","description":"test_desc2", + "items":[{"created_at":"2025-02-12T09:32:53.939Z","description":"sdfsdaf","expiration":"0001-01-01T00:00:00.000Z","value":"5.4.3.2"}, + {"created_at":"2025-02-12T09:32:53.939Z","description":"sdfsdaf","expiration":"0001-01-01T00:00:00.000Z","value":"5.4.4.0/24"}]}]`)) + assert.NoError(t, err) + }) + + allowlistClient := allowlists.NewAppsecAllowlist(client, logger) + // In real life, allowlists updater is started by the acquisition + // Do it manually here as we are simulating the appsec itself + err = allowlistClient.FetchAllowlists() + require.NoError(t, err) runner := AppsecRunner{ - inChan: InChan, - UUID: appsecRunnerUUID, - logger: logger, - AppsecRuntime: &wrt, - Labels: map[string]string{"foo": "bar"}, - outChan: OutChan, + inChan: InChan, + UUID: appsecRunnerUUID, + logger: logger, + AppsecRuntime: &wrt, + Labels: map[string]string{"foo": "bar"}, + outChan: OutChan, + appsecAllowlistsClient: allowlistClient, } err = runner.Init("/tmp/") if err != nil { diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index fce199c5708..5996518e191 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -205,6 +205,7 @@ func (lc *LokiClient) getURLFor(endpoint string, params map[string]string) strin func (lc *LokiClient) Ready(ctx context.Context) error { tick := time.NewTicker(500 * time.Millisecond) url := lc.getURLFor("ready", nil) + lc.Logger.Debugf("Using url: %s for ready check", url) for { select { case <-ctx.Done(): diff --git a/pkg/apiclient/allowlists_service.go 
b/pkg/apiclient/allowlists_service.go new file mode 100644 index 00000000000..0dc69a587fd --- /dev/null +++ b/pkg/apiclient/allowlists_service.go @@ -0,0 +1,110 @@ +package apiclient + +import ( + "context" + "fmt" + "net/http" + + qs "github.com/google/go-querystring/query" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +type AllowlistsService service + +type AllowlistListOpts struct { + WithContent bool `url:"with_content,omitempty"` +} + +func (s *AllowlistsService) List(ctx context.Context, opts AllowlistListOpts) (*models.GetAllowlistsResponse, *Response, error) { + u := s.client.URLPrefix + "/allowlists" + + params, err := qs.Values(opts) + if err != nil { + return nil, nil, fmt.Errorf("building query: %w", err) + } + + u += "?" + params.Encode() + + req, err := s.client.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, nil, err + } + + allowlists := &models.GetAllowlistsResponse{} + + resp, err := s.client.Do(ctx, req, allowlists) + if err != nil { + return nil, resp, err + } + + return allowlists, resp, nil +} + +type AllowlistGetOpts struct { + WithContent bool `url:"with_content,omitempty"` +} + +func (s *AllowlistsService) Get(ctx context.Context, name string, opts AllowlistGetOpts) (*models.GetAllowlistResponse, *Response, error) { + u := s.client.URLPrefix + "/allowlists/" + name + + params, err := qs.Values(opts) + if err != nil { + return nil, nil, fmt.Errorf("building query: %w", err) + } + + u += "?" 
+ params.Encode() + + log.Debugf("GET %s", u) + + req, err := s.client.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, nil, err + } + + allowlist := &models.GetAllowlistResponse{} + + resp, err := s.client.Do(ctx, req, allowlist) + if err != nil { + return nil, resp, err + } + + return allowlist, resp, nil +} + +func (s *AllowlistsService) CheckIfAllowlisted(ctx context.Context, value string) (bool, *Response, error) { + u := s.client.URLPrefix + "/allowlists/check/" + value + + req, err := s.client.NewRequest(http.MethodHead, u, nil) + if err != nil { + return false, nil, err + } + + var discardBody interface{} + + resp, err := s.client.Do(ctx, req, discardBody) + if err != nil { + return false, resp, err + } + + return resp.Response.StatusCode == http.StatusOK, resp, nil +} + +func (s *AllowlistsService) CheckIfAllowlistedWithReason(ctx context.Context, value string) (*models.CheckAllowlistResponse, *Response, error) { + u := s.client.URLPrefix + "/allowlists/check/" + value + + req, err := s.client.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, nil, err + } + + body := &models.CheckAllowlistResponse{} + + resp, err := s.client.Do(ctx, req, body) + if err != nil { + return nil, resp, err + } + + return body, resp, nil +} diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index c43e9fc291c..7ab3450c39f 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -61,6 +61,7 @@ func (t *JWTTransport) refreshJwtToken() error { var buf io.ReadWriter = &bytes.Buffer{} enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) + err = enc.Encode(auth) if err != nil { return fmt.Errorf("could not encode jwt auth body: %w", err) @@ -186,11 +187,6 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { } resp, err = t.transport().RoundTrip(clonedReq) - if log.GetLevel() >= log.TraceLevel { - dump, _ := httputil.DumpResponse(resp, true) - log.Tracef("resp-jwt: %s (err:%v)", string(dump), 
err) - } - if err != nil { // we had an error (network error for example), reset the token? t.ResetToken() diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index ec473beca77..4828a1844c3 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -4,12 +4,15 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "net" "net/http" "net/url" "strings" + "time" + "github.com/go-openapi/strfmt" "github.com/golang-jwt/jwt/v4" "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" @@ -20,6 +23,7 @@ var ( InsecureSkipVerify = false Cert *tls.Certificate CaCertPool *x509.CertPool + lapiClient *ApiClient ) type ApiClient struct { @@ -36,6 +40,7 @@ type ApiClient struct { Decisions *DecisionsService DecisionDelete *DecisionDeleteService Alerts *AlertsService + Allowlists *AllowlistsService Auth *AuthService Metrics *MetricsService Signal *SignalService @@ -66,6 +71,68 @@ type service struct { client *ApiClient } +func InitLAPIClient(ctx context.Context, apiUrl string, papiUrl string, login string, password string, scenarios []string) error { + if lapiClient != nil { + return errors.New("client already initialized") + } + + apiURL, err := url.Parse(apiUrl) + if err != nil { + return fmt.Errorf("parsing api url ('%s'): %w", apiURL, err) + } + + papiURL, err := url.Parse(papiUrl) + if err != nil { + return fmt.Errorf("parsing polling api url ('%s'): %w", papiURL, err) + } + + pwd := strfmt.Password(password) + + client, err := NewClient(&Config{ + MachineID: login, + Password: pwd, + Scenarios: scenarios, + URL: apiURL, + PapiURL: papiURL, + VersionPrefix: "v1", + UpdateScenario: func(_ context.Context) ([]string, error) { + return scenarios, nil + }, + }) + if err != nil { + return fmt.Errorf("new client api: %w", err) + } + + authResp, _, err := client.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ + MachineID: &login, + Password: &pwd, + Scenarios: scenarios, + }) + if err != nil { + return fmt.Errorf("authenticate watcher 
(%s): %w", login, err) + } + + var expiration time.Time + if err := expiration.UnmarshalText([]byte(authResp.Expire)); err != nil { + return fmt.Errorf("unable to parse jwt expiration: %w", err) + } + + client.GetClient().Transport.(*JWTTransport).Token = authResp.Token + client.GetClient().Transport.(*JWTTransport).Expiration = expiration + + lapiClient = client + + return nil +} + +func GetLAPIClient() (*ApiClient, error) { + if lapiClient == nil { + return nil, errors.New("client not initialized") + } + + return lapiClient, nil +} + func NewClient(config *Config) (*ApiClient, error) { userAgent := config.UserAgent if userAgent == "" { @@ -115,6 +182,7 @@ func NewClient(config *Config) (*ApiClient, error) { c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) + c.Allowlists = (*AllowlistsService)(&c.common) c.Auth = (*AuthService)(&c.common) c.Metrics = (*MetricsService)(&c.common) c.Signal = (*SignalService)(&c.common) @@ -157,6 +225,7 @@ func NewDefaultClient(url *url.URL, prefix string, userAgent string, client *htt c.common.client = c c.Decisions = (*DecisionsService)(&c.common) c.Alerts = (*AlertsService)(&c.common) + c.Allowlists = (*AllowlistsService)(&c.common) c.Auth = (*AuthService)(&c.common) c.Metrics = (*MetricsService)(&c.common) c.Signal = (*SignalService)(&c.common) diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index c4edb42d475..8cb012f2a81 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -9,34 +9,44 @@ import ( "strings" "sync" "testing" + "time" "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" + "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/models" ) +const ( + passwordAuthType = "password" + apiKeyAuthType = 
"apikey" +) + type LAPI struct { router *gin.Engine loginResp models.WatcherAuthResponse bouncerKey string DBConfig *csconfig.DatabaseCfg + DBClient *database.Client } func SetupLAPITest(t *testing.T, ctx context.Context) LAPI { t.Helper() router, loginResp, config := InitMachineTest(t, ctx) - APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) + APIKey, dbClient := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) return LAPI{ router: router, loginResp: loginResp, bouncerKey: APIKey, DBConfig: config.API.Server.DbConfig, + DBClient: dbClient, } } @@ -51,9 +61,9 @@ func (l *LAPI) RecordResponse(t *testing.T, ctx context.Context, verb string, ur require.NoError(t, err) switch authType { - case "apikey": + case apiKeyAuthType: req.Header.Add("X-Api-Key", l.bouncerKey) - case "password": + case passwordAuthType: AddAuthHeaders(req, l.loginResp) default: t.Fatal("auth type not supported") @@ -138,6 +148,58 @@ func TestCreateAlert(t *testing.T) { assert.Equal(t, `["1"]`, w.Body.String()) } +func TestCreateAllowlistedAlert(t *testing.T) { + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + + allowlist, err := lapi.DBClient.CreateAllowList(ctx, "test", "test", "", false) + require.NoError(t, err) + added, err := lapi.DBClient.AddToAllowlist(ctx, allowlist, []*models.AllowlistItem{ + { + Value: "10.0.0.0/24", + }, + { + Value: "192.168.0.0/24", + Expiration: strfmt.DateTime(time.Now().Add(-time.Hour)), // Expired item + }, + { + Value: "127.0.0.1", + }, + }) + require.NoError(t, err) + assert.Equal(t, 3, added) + + // Create Alert with allowlisted IP + alertContent := GetAlertReaderFromFile(t, "./tests/alert_allowlisted.json") + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") + assert.Equal(t, http.StatusCreated, w.Code) + + // We should have no alert as the IP is allowlisted + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") + assert.Equal(t, http.StatusOK, w.Code) + 
assert.Equal(t, "null", w.Body.String()) + + // Create Alert with expired allowlisted IP + alertContent = GetAlertReaderFromFile(t, "./tests/alert_allowlisted_expired.json") + w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") + assert.Equal(t, http.StatusCreated, w.Code) + + // We should have an alert as the IP is allowlisted but the item is expired + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, w.Body.String(), "192.168.0.42") + + // Create Alert with allowlisted IP but with decisions (manual ban) + alertContent = GetAlertReaderFromFile(t, "./tests/alert_sample.json") + w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") + assert.Equal(t, http.StatusCreated, w.Code) + + // We should have an alert as the IP is allowlisted but the alert has decisions + w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") + assert.Equal(t, http.StatusOK, w.Code) + assert.Contains(t, w.Body.String(), "127.0.0.1") +} + func TestCreateAlertChannels(t *testing.T) { ctx := context.Background() apiServer, config := NewAPIServer(t, ctx) diff --git a/pkg/apiserver/allowlists_test.go b/pkg/apiserver/allowlists_test.go new file mode 100644 index 00000000000..158f4852164 --- /dev/null +++ b/pkg/apiserver/allowlists_test.go @@ -0,0 +1,127 @@ +package apiserver + +import ( + "context" + "encoding/json" + "net/http" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +func TestAllowlistList(t *testing.T) { + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + + _, err := lapi.DBClient.CreateAllowList(ctx, "test", "test", "", false) + + require.NoError(t, err) + + w := lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/allowlists", emptyBody, passwordAuthType) + + 
require.Equal(t, http.StatusOK, w.Code) + + allowlists := models.GetAllowlistsResponse{} + + err = json.Unmarshal(w.Body.Bytes(), &allowlists) + require.NoError(t, err) + + require.Len(t, allowlists, 1) + require.Equal(t, "test", allowlists[0].Name) +} + +func TestGetAllowlist(t *testing.T) { + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + + l, err := lapi.DBClient.CreateAllowList(ctx, "test", "test", "", false) + + require.NoError(t, err) + + added, err := lapi.DBClient.AddToAllowlist(ctx, l, []*models.AllowlistItem{ + { + Value: "1.2.3.4", + }, + { + Value: "2.3.4.5", + Expiration: strfmt.DateTime(time.Now().Add(-time.Hour)), // expired + }, + }) + + require.NoError(t, err) + assert.Equal(t, 2, added) + + w := lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/allowlists/test?with_content=true", emptyBody, passwordAuthType) + + require.Equal(t, http.StatusOK, w.Code) + + allowlist := models.GetAllowlistResponse{} + + err = json.Unmarshal(w.Body.Bytes(), &allowlist) + require.NoError(t, err) + + require.Equal(t, "test", allowlist.Name) + require.Len(t, allowlist.Items, 1) + require.Equal(t, "1.2.3.4", allowlist.Items[0].Value) +} + +func TestCheckInAllowlist(t *testing.T) { + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + + l, err := lapi.DBClient.CreateAllowList(ctx, "test", "test", "", false) + + require.NoError(t, err) + + added, err := lapi.DBClient.AddToAllowlist(ctx, l, []*models.AllowlistItem{ + { + Value: "1.2.3.4", + }, + { + Value: "2.3.4.5", + Expiration: strfmt.DateTime(time.Now().Add(-time.Hour)), // expired + }, + }) + + require.NoError(t, err) + assert.Equal(t, 2, added) + + // GET request, should return 200 and status in body + w := lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/allowlists/check/1.2.3.4", emptyBody, passwordAuthType) + + require.Equal(t, http.StatusOK, w.Code) + + resp := models.CheckAllowlistResponse{} + + err = json.Unmarshal(w.Body.Bytes(), &resp) + require.NoError(t, err) + + require.True(t, 
resp.Allowlisted) + + // GET request, should return 200 and status in body + w = lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/allowlists/check/2.3.4.5", emptyBody, passwordAuthType) + + require.Equal(t, http.StatusOK, w.Code) + + resp = models.CheckAllowlistResponse{} + + err = json.Unmarshal(w.Body.Bytes(), &resp) + + require.NoError(t, err) + require.False(t, resp.Allowlisted) + + // HEAD request, should return 200 + w = lapi.RecordResponse(t, ctx, http.MethodHead, "/v1/allowlists/check/1.2.3.4", emptyBody, passwordAuthType) + + require.Equal(t, http.StatusOK, w.Code) + + // HEAD request, should return 204 + w = lapi.RecordResponse(t, ctx, http.MethodHead, "/v1/allowlists/check/2.3.4.5", emptyBody, passwordAuthType) + + require.Equal(t, http.StatusNoContent, w.Code) +} diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 45c02c806e7..89e37cd3852 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -14,7 +14,7 @@ func TestAPIKey(t *testing.T) { ctx := context.Background() router, config := NewAPITest(t, ctx) - APIKey := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) + APIKey, _ := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) // Login with empty token w := httptest.NewRecorder() diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 2c606dcbaee..5460046bf7f 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -1,7 +1,9 @@ package apiserver import ( + "bufio" "context" + "encoding/json" "errors" "fmt" "math/rand" @@ -14,6 +16,7 @@ import ( "sync" "time" + "github.com/davecgh/go-spew/spew" "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -181,6 +184,10 @@ func alertToSignal(alert *models.Alert, scenarioTrust string, shareContext bool) func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, apicWhitelist *csconfig.CapiWhitelist) (*apic, error) { var err 
error + if apicWhitelist == nil { + apicWhitelist = &csconfig.CapiWhitelist{} + } + ret := &apic{ AlertsAddChan: make(chan []*models.Alert), dbClient: dbClient, @@ -638,6 +645,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { if data.Links != nil { log.Debugf("Received %d blocklists links", len(data.Links.Blocklists)) + log.Debugf("Received %d allowlists links", len(data.Links.Allowlists)) } addCounters, deleteCounters := makeAddAndDeleteCounters() @@ -650,11 +658,20 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { log.Printf("capi/community-blocklist : %d explicit deletions", nbDeleted) + // Update allowlists before processing decisions + if data.Links != nil { + if len(data.Links.Allowlists) > 0 { + if err := a.UpdateAllowlists(ctx, data.Links.Allowlists, forcePull); err != nil { + return fmt.Errorf("while updating allowlists: %w", err) + } + } + } + if len(data.New) > 0 { // create one alert for community blocklist using the first decision decisions := a.apiClient.Decisions.GetDecisionsFromGroups(data.New) // apply APIC specific whitelists - decisions = a.ApplyApicWhitelists(decisions) + decisions = a.ApplyApicWhitelists(ctx, decisions) alert := createAlertForDecision(decisions[0]) alertsFromCapi := []*models.Alert{alert} @@ -672,9 +689,13 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { } } - // update blocklists - if err := a.UpdateBlocklists(ctx, data.Links, addCounters, forcePull); err != nil { - return fmt.Errorf("while updating blocklists: %w", err) + // update allowlists/blocklists + if data.Links != nil { + if len(data.Links.Blocklists) > 0 { + if err := a.UpdateBlocklists(ctx, data.Links.Blocklists, addCounters, forcePull); err != nil { + return fmt.Errorf("while updating blocklists: %w", err) + } + } } return nil @@ -683,18 +704,118 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { // we receive a link to a blocklist, we pull the content of the blocklist and we 
create one alert func (a *apic) PullBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink, forcePull bool) error { addCounters, _ := makeAddAndDeleteCounters() - if err := a.UpdateBlocklists(ctx, &modelscapi.GetDecisionsStreamResponseLinks{ - Blocklists: []*modelscapi.BlocklistLink{blocklist}, - }, addCounters, forcePull); err != nil { + if err := a.UpdateBlocklists(ctx, []*modelscapi.BlocklistLink{blocklist}, addCounters, forcePull); err != nil { return fmt.Errorf("while pulling blocklist: %w", err) } return nil } +func (a *apic) PullAllowlist(ctx context.Context, allowlist *modelscapi.AllowlistLink, forcePull bool) error { + if err := a.UpdateAllowlists(ctx, []*modelscapi.AllowlistLink{allowlist}, forcePull); err != nil { + return fmt.Errorf("while pulling allowlist: %w", err) + } + + return nil +} + +func (a *apic) UpdateAllowlists(ctx context.Context, allowlistsLinks []*modelscapi.AllowlistLink, forcePull bool) error { + if len(allowlistsLinks) == 0 { + return nil + } + + defaultClient, err := apiclient.NewDefaultClient(a.apiClient.BaseURL, "", "", nil) + if err != nil { + return fmt.Errorf("while creating default client: %w", err) + } + + for _, link := range allowlistsLinks { + if log.GetLevel() >= log.TraceLevel { + log.Tracef("allowlist body: %+v", spew.Sdump(link)) + } + + if link.Name == nil { + log.Warningf("allowlist has no name") + continue + } + + if link.URL == nil { + log.Warningf("allowlist %s has no URL", *link.Name) + continue + } + + if link.ID == nil { + log.Warningf("allowlist %s has no ID", *link.Name) + continue + } + + description := "" + if link.Description != nil { + description = *link.Description + } + + resp, err := defaultClient.GetClient().Get(*link.URL) + if err != nil { + log.Errorf("while pulling allowlist: %s", err) + continue + } + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + items := make([]*models.AllowlistItem, 0) + + for scanner.Scan() { + item := scanner.Text() + j := 
&models.AllowlistItem{} + if err := json.Unmarshal([]byte(item), j); err != nil { + log.Errorf("while unmarshalling allowlist item: %s", err) + continue + } + + items = append(items, j) + } + + list, err := a.dbClient.GetAllowListByID(ctx, *link.ID, false) + if err != nil { + if !ent.IsNotFound(err) { + log.Errorf("while getting allowlist %s: %s", *link.Name, err) + continue + } + } + + if list == nil { + list, err = a.dbClient.CreateAllowList(ctx, *link.Name, description, *link.ID, true) + if err != nil { + log.Errorf("while creating allowlist %s: %s", *link.Name, err) + continue + } + } + + added, err := a.dbClient.ReplaceAllowlist(ctx, list, items, true) + if err != nil { + log.Errorf("while replacing allowlist %s: %s", *link.Name, err) + continue + } + + log.Infof("added %d values to allowlist %s", added, list.Name) + + if list.Name != *link.Name || list.Description != description { + err = a.dbClient.UpdateAllowlistMeta(ctx, *link.ID, *link.Name, description) + if err != nil { + log.Errorf("while updating allowlist meta %s: %s", *link.Name, err) + continue + } + } + + log.Infof("Allowlist %s updated", *link.Name) + } + + return nil +} + // if decisions is whitelisted: return representation of the whitelist ip or cidr // if not whitelisted: empty string -func (a *apic) whitelistedBy(decision *models.Decision) string { +func (a *apic) whitelistedBy(decision *models.Decision, additionalIPs []net.IP, additionalRanges []*net.IPNet) string { if decision.Value == nil { return "" } @@ -712,18 +833,35 @@ func (a *apic) whitelistedBy(decision *models.Decision) string { } } + for _, ip := range additionalIPs { + if ip.Equal(ipval) { + return ip.String() + } + } + + for _, cidr := range additionalRanges { + if cidr.Contains(ipval) { + return cidr.String() + } + } + return "" } -func (a *apic) ApplyApicWhitelists(decisions []*models.Decision) []*models.Decision { - if a.whitelists == nil || len(a.whitelists.Cidrs) == 0 && len(a.whitelists.Ips) == 0 { +func (a *apic) 
ApplyApicWhitelists(ctx context.Context, decisions []*models.Decision) []*models.Decision { + allowlisted_ips, allowlisted_cidrs, err := a.dbClient.GetAllowlistsContentForAPIC(ctx) + if err != nil { + log.Errorf("while getting allowlists content: %s", err) + } + + if (a.whitelists == nil || len(a.whitelists.Cidrs) == 0 && len(a.whitelists.Ips) == 0) && len(allowlisted_ips) == 0 && len(allowlisted_cidrs) == 0 { return decisions } // deal with CAPI whitelists for fire. We want to avoid having a second list, so we shrink in place outIdx := 0 for _, decision := range decisions { - whitelister := a.whitelistedBy(decision) + whitelister := a.whitelistedBy(decision, allowlisted_ips, allowlisted_cidrs) if whitelister != "" { log.Infof("%s from %s is whitelisted by %s", *decision.Value, *decision.Scenario, whitelister) continue @@ -852,7 +990,7 @@ func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, return nil } // apply APIC specific whitelists - decisions = a.ApplyApicWhitelists(decisions) + decisions = a.ApplyApicWhitelists(ctx, decisions) alert := createAlertForDecision(decisions[0]) alertsFromCapi := []*models.Alert{alert} alertsFromCapi = fillAlertsWithDecisions(alertsFromCapi, decisions, addCounters) @@ -865,14 +1003,11 @@ func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, return nil } -func (a *apic) UpdateBlocklists(ctx context.Context, links *modelscapi.GetDecisionsStreamResponseLinks, addCounters map[string]map[string]int, forcePull bool) error { - if links == nil { +func (a *apic) UpdateBlocklists(ctx context.Context, blocklists []*modelscapi.BlocklistLink, addCounters map[string]map[string]int, forcePull bool) error { + if len(blocklists) == 0 { return nil } - if links.Blocklists == nil { - return nil - } // we must use a different http client than apiClient's because the transport of apiClient is jwtTransport or here we have signed apis that are incompatibles // we can use the same baseUrl as the urls 
are absolute and the parse will take care of it defaultClient, err := apiclient.NewDefaultClient(a.apiClient.BaseURL, "", "", nil) @@ -880,7 +1015,7 @@ func (a *apic) UpdateBlocklists(ctx context.Context, links *modelscapi.GetDecisi return fmt.Errorf("while creating default client: %w", err) } - for _, blocklist := range links.Blocklists { + for _, blocklist := range blocklists { if err := a.updateBlocklist(ctx, defaultClient, blocklist, addCounters, forcePull); err != nil { return err } diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index a8fbb40c4fa..a72bc4dd3a9 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/go-openapi/strfmt" "github.com/jarcoal/httpmock" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -615,6 +616,16 @@ func TestAPICWhitelists(t *testing.T) { }, }, }, + &modelscapi.GetDecisionsStreamResponseNewItem{ + Scenario: ptr.Of("crowdsecurity/test1"), + Scope: ptr.Of("Ip"), + Decisions: []*modelscapi.GetDecisionsStreamResponseNewItemDecisionsItems0{ + { + Value: ptr.Of("10.2.3.4"), // wl by allowlist that we pull at the same time + Duration: ptr.Of("24h"), + }, + }, + }, }, Links: &modelscapi.GetDecisionsStreamResponseLinks{ Blocklists: []*modelscapi.BlocklistLink{ @@ -633,6 +644,15 @@ func TestAPICWhitelists(t *testing.T) { Duration: ptr.Of("24h"), }, }, + Allowlists: []*modelscapi.AllowlistLink{ + { + URL: ptr.Of("http://api.crowdsec.net/allowlist1"), + Name: ptr.Of("allowlist1"), + ID: ptr.Of("1"), + Description: ptr.Of("test"), + CreatedAt: ptr.Of(strfmt.DateTime(time.Now())), + }, + }, }, }, ), @@ -646,6 +666,10 @@ func TestAPICWhitelists(t *testing.T) { 200, "1.2.3.7", )) + httpmock.RegisterResponder("GET", "http://api.crowdsec.net/allowlist1", httpmock.NewStringResponder( + 200, `{"value":"10.2.3.4"}`, + )) + url, err := url.ParseRequestURI("http://api.crowdsec.net/") require.NoError(t, err) @@ -661,6 +685,14 @@ func 
TestAPICWhitelists(t *testing.T) { err = api.PullTop(ctx, false) require.NoError(t, err) + allowlists, err := api.dbClient.ListAllowLists(ctx, true) + require.NoError(t, err) + + require.Len(t, allowlists, 1) + require.Equal(t, "allowlist1", allowlists[0].Name) + require.Equal(t, "test", allowlists[0].Description) + require.True(t, allowlists[0].FromConsole) + assertTotalDecisionCount(t, ctx, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. @@ -703,6 +735,10 @@ func TestAPICWhitelists(t *testing.T) { t.Errorf("9.2.3.4 is whitelisted") } + if _, ok := decisionIP["10.2.3.4"]; ok { + t.Errorf("10.2.3.4 is whitelisted") + } + assert.Equal(t, 1, decisionScenarioFreq["blocklist1"], 1) assert.Equal(t, 1, decisionScenarioFreq["blocklist2"], 1) assert.Equal(t, 2, decisionScenarioFreq["crowdsecurity/test1"], 2) diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index d8f24add75e..6b0052a8929 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -297,7 +297,7 @@ func CreateTestMachine(t *testing.T, ctx context.Context, router *gin.Engine, to return body } -func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.DatabaseCfg) string { +func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.DatabaseCfg) (string, *database.Client) { dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) @@ -307,7 +307,7 @@ func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.Datab _, err = dbClient.CreateBouncer(ctx, "test", "127.0.0.1", middlewares.HashSHA512(apiKey), types.ApiKeyAuthType, false) require.NoError(t, err) - return apiKey + return apiKey, dbClient } func TestWithWrongDBConfig(t *testing.T) { diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 
719bb231006..03f1659ee4f 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -123,6 +123,10 @@ func (c *Controller) NewV1() error { jwtAuth.DELETE("/decisions", c.HandlerV1.DeleteDecisions) jwtAuth.DELETE("/decisions/:decision_id", c.HandlerV1.DeleteDecisionById) jwtAuth.GET("/heartbeat", c.HandlerV1.HeartBeat) + jwtAuth.GET("/allowlists", c.HandlerV1.GetAllowlists) + jwtAuth.GET("/allowlists/:allowlist_name", c.HandlerV1.GetAllowlist) + jwtAuth.GET("/allowlists/check/:ip_or_range", c.HandlerV1.CheckInAllowlist) + jwtAuth.HEAD("/allowlists/check/:ip_or_range", c.HandlerV1.CheckInAllowlist) } apiKeyAuth := groupV1.Group("") diff --git a/pkg/apiserver/controllers/v1/alerts.go b/pkg/apiserver/controllers/v1/alerts.go index d1f93228512..8b0401efd49 100644 --- a/pkg/apiserver/controllers/v1/alerts.go +++ b/pkg/apiserver/controllers/v1/alerts.go @@ -1,6 +1,7 @@ package v1 import ( + "context" "encoding/json" "fmt" "net" @@ -123,6 +124,27 @@ func (c *Controller) sendAlertToPluginChannel(alert *models.Alert, profileID uin } } +func (c *Controller) isAllowListed(ctx context.Context, alert *models.Alert) (bool, string) { + // If we have decisions, it comes from cscli that already checked the allowlist + if len(alert.Decisions) > 0 { + return false, "" + } + + if alert.Source.Scope != nil && (*alert.Source.Scope == types.Ip || *alert.Source.Scope == types.Range) && // Allowlist only works for IP/range + alert.Source.Value != nil { // Is this possible ? + isAllowlisted, reason, err := c.DBClient.IsAllowlisted(ctx, *alert.Source.Value) + if err == nil && isAllowlisted { + return true, reason + } else if err != nil { + // FIXME: Do we still want to process the alert normally if we can't check the allowlist ? 
+ log.Errorf("error while checking allowlist: %s", err) + return false, "" + } + } + + return false, "" +} + // CreateAlert writes the alerts received in the body to the database func (c *Controller) CreateAlert(gctx *gin.Context) { var input models.AddAlertsRequest @@ -141,6 +163,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { } stopFlush := false + alertsToSave := make([]*models.Alert, 0) for _, alert := range input { // normalize scope for alert.Source and decisions @@ -154,6 +177,11 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { } } + if allowlisted, reason := c.isAllowListed(ctx, alert); allowlisted { + log.Infof("alert source %s is allowlisted by %s, skipping", *alert.Source.Value, reason) + continue + } + alert.MachineID = machineID // generate uuid here for alert alert.UUID = uuid.NewString() @@ -189,6 +217,8 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { stopFlush = true } + alertsToSave = append(alertsToSave, alert) + continue } @@ -234,13 +264,15 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { break } } + + alertsToSave = append(alertsToSave, alert) } if stopFlush { c.DBClient.CanFlush = false } - alerts, err := c.DBClient.CreateAlert(ctx, machineID, input) + alerts, err := c.DBClient.CreateAlert(ctx, machineID, alertsToSave) c.DBClient.CanFlush = true if err != nil { @@ -250,7 +282,7 @@ func (c *Controller) CreateAlert(gctx *gin.Context) { if c.AlertsAddChan != nil { select { - case c.AlertsAddChan <- input: + case c.AlertsAddChan <- alertsToSave: log.Debug("alert sent to CAPI channel") default: log.Warning("Cannot send alert to Central API channel") diff --git a/pkg/apiserver/controllers/v1/allowlist.go b/pkg/apiserver/controllers/v1/allowlist.go new file mode 100644 index 00000000000..e77344eb2bb --- /dev/null +++ b/pkg/apiserver/controllers/v1/allowlist.go @@ -0,0 +1,130 @@ +package v1 + +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/go-openapi/strfmt" + + 
"github.com/crowdsecurity/crowdsec/pkg/models" +) + +func (c *Controller) CheckInAllowlist(gctx *gin.Context) { + value := gctx.Param("ip_or_range") + + if value == "" { + gctx.JSON(http.StatusBadRequest, gin.H{"message": "value is required"}) + return + } + + allowlisted, reason, err := c.DBClient.IsAllowlisted(gctx.Request.Context(), value) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + if gctx.Request.Method == http.MethodHead { + if allowlisted { + gctx.Status(http.StatusOK) + } else { + gctx.Status(http.StatusNoContent) + } + + return + } + + resp := models.CheckAllowlistResponse{ + Allowlisted: allowlisted, + Reason: reason, + } + + gctx.JSON(http.StatusOK, resp) +} + +func (c *Controller) GetAllowlists(gctx *gin.Context) { + params := gctx.Request.URL.Query() + + withContent := params.Get("with_content") == "true" + + allowlists, err := c.DBClient.ListAllowLists(gctx.Request.Context(), withContent) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + resp := models.GetAllowlistsResponse{} + + for _, allowlist := range allowlists { + items := make([]*models.AllowlistItem, 0) + + if withContent { + for _, item := range allowlist.Edges.AllowlistItems { + if !item.ExpiresAt.IsZero() && item.ExpiresAt.Before(time.Now()) { + continue + } + + items = append(items, &models.AllowlistItem{ + CreatedAt: strfmt.DateTime(item.CreatedAt), + Description: item.Comment, + Expiration: strfmt.DateTime(item.ExpiresAt), + Value: item.Value, + }) + } + } + + resp = append(resp, &models.GetAllowlistResponse{ + AllowlistID: allowlist.AllowlistID, + Name: allowlist.Name, + Description: allowlist.Description, + CreatedAt: strfmt.DateTime(allowlist.CreatedAt), + UpdatedAt: strfmt.DateTime(allowlist.UpdatedAt), + ConsoleManaged: allowlist.FromConsole, + Items: items, + }) + } + + gctx.JSON(http.StatusOK, resp) +} + +func (c *Controller) GetAllowlist(gctx *gin.Context) { + allowlist := gctx.Param("allowlist_name") + + params := gctx.Request.URL.Query() + 
withContent := params.Get("with_content") == "true" + + allowlistModel, err := c.DBClient.GetAllowList(gctx.Request.Context(), allowlist, withContent) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + items := make([]*models.AllowlistItem, 0) + + if withContent { + for _, item := range allowlistModel.Edges.AllowlistItems { + if !item.ExpiresAt.IsZero() && item.ExpiresAt.Before(time.Now()) { + continue + } + + items = append(items, &models.AllowlistItem{ + CreatedAt: strfmt.DateTime(item.CreatedAt), + Description: item.Comment, + Expiration: strfmt.DateTime(item.ExpiresAt), + Value: item.Value, + }) + } + } + + resp := models.GetAllowlistResponse{ + AllowlistID: allowlistModel.AllowlistID, + Name: allowlistModel.Name, + Description: allowlistModel.Description, + CreatedAt: strfmt.DateTime(allowlistModel.CreatedAt), + UpdatedAt: strfmt.DateTime(allowlistModel.UpdatedAt), + ConsoleManaged: allowlistModel.FromConsole, + Items: items, + } + + gctx.JSON(http.StatusOK, resp) +} diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index 78f5dc9b0fe..2e48ef4eaec 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -6,11 +6,13 @@ import ( "fmt" "time" + "github.com/go-openapi/strfmt" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/crowdsec/pkg/apiclient" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/modelscapi" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -21,25 +23,18 @@ type deleteDecisions struct { Decisions []string `json:"decisions"` } -type blocklistLink struct { - // blocklist name - Name string `json:"name"` - // blocklist url - Url string `json:"url"` - // blocklist remediation - Remediation string `json:"remediation"` - // blocklist scope - Scope string `json:"scope,omitempty"` - // blocklist duration - Duration string `json:"duration,omitempty"` +type forcePull 
struct { + Blocklist *modelscapi.BlocklistLink `json:"blocklist,omitempty"` + Allowlist *modelscapi.AllowlistLink `json:"allowlist,omitempty"` } -type forcePull struct { - Blocklist *blocklistLink `json:"blocklist,omitempty"` +type blocklistUnsubscribe struct { + Name string `json:"name"` } -type listUnsubscribe struct { +type allowlistUnsubscribe struct { Name string `json:"name"` + Id string `json:"id"` } func DecisionCmd(message *Message, p *Papi, sync bool) error { @@ -186,7 +181,8 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { return err } - unsubscribeMsg := listUnsubscribe{} + unsubscribeMsg := blocklistUnsubscribe{} + if err := json.Unmarshal(data, &unsubscribeMsg); err != nil { return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) } @@ -224,27 +220,82 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { ctx := context.TODO() - if forcePullMsg.Blocklist == nil { - p.Logger.Infof("Received force_pull command from PAPI, pulling community and 3rd-party blocklists") + if forcePullMsg.Blocklist == nil && forcePullMsg.Allowlist == nil { + p.Logger.Infof("Received force_pull command from PAPI, pulling community, 3rd-party blocklists and allowlists") err = p.apic.PullTop(ctx, true) if err != nil { return fmt.Errorf("failed to force pull operation: %w", err) } - } else { - p.Logger.Infof("Received force_pull command from PAPI, pulling blocklist %s", forcePullMsg.Blocklist.Name) + } else if forcePullMsg.Blocklist != nil { + err = forcePullMsg.Blocklist.Validate(strfmt.Default) + if err != nil { + return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) + } + + p.Logger.Infof("Received blocklist force_pull command from PAPI, pulling blocklist %s", *forcePullMsg.Blocklist.Name) err = p.apic.PullBlocklist(ctx, &modelscapi.BlocklistLink{ - Name: &forcePullMsg.Blocklist.Name, - URL: &forcePullMsg.Blocklist.Url, - Remediation: 
&forcePullMsg.Blocklist.Remediation, - Scope: &forcePullMsg.Blocklist.Scope, - Duration: &forcePullMsg.Blocklist.Duration, + Name: forcePullMsg.Blocklist.Name, + URL: forcePullMsg.Blocklist.URL, + Remediation: forcePullMsg.Blocklist.Remediation, + Scope: forcePullMsg.Blocklist.Scope, + Duration: forcePullMsg.Blocklist.Duration, }, true) if err != nil { return fmt.Errorf("failed to force pull operation: %w", err) } + } else if forcePullMsg.Allowlist != nil { + err = forcePullMsg.Allowlist.Validate(strfmt.Default) + if err != nil { + return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) + } + + p.Logger.Infof("Received allowlist force_pull command from PAPI, pulling allowlist %s", *forcePullMsg.Allowlist.Name) + + err = p.apic.PullAllowlist(ctx, &modelscapi.AllowlistLink{ + Name: forcePullMsg.Allowlist.Name, + URL: forcePullMsg.Allowlist.URL, + ID: forcePullMsg.Allowlist.ID, + CreatedAt: forcePullMsg.Allowlist.CreatedAt, + UpdatedAt: forcePullMsg.Allowlist.UpdatedAt, + Description: forcePullMsg.Allowlist.Description, + }, true) + if err != nil { + return fmt.Errorf("failed to force pull operation: %w", err) + } + } + case "allowlist_unsubscribe": + data, err := json.Marshal(message.Data) + if err != nil { + return err + } + + unsubscribeMsg := allowlistUnsubscribe{} + + if err := json.Unmarshal(data, &unsubscribeMsg); err != nil { + return fmt.Errorf("message for '%s' contains bad data format: %w", message.Header.OperationType, err) + } + + if unsubscribeMsg.Name == "" { + return fmt.Errorf("message for '%s' contains bad data format: missing allowlist name", message.Header.OperationType) + } + + if unsubscribeMsg.Id == "" { + return fmt.Errorf("message for '%s' contains bad data format: missing allowlist id", message.Header.OperationType) + } + + p.Logger.Infof("Received allowlist_unsubscribe command from PAPI, unsubscribing from allowlist %s", unsubscribeMsg.Name) + + if err := p.DBClient.DeleteAllowListByID(ctx, 
unsubscribeMsg.Name, unsubscribeMsg.Id, true); err != nil { + if !ent.IsNotFound(err) { + return err + } + + p.Logger.Warningf("Allowlist %s not found", unsubscribeMsg.Name) } + + return nil default: return fmt.Errorf("unknown command '%s' for operation type '%s'", message.Header.OperationCmd, message.Header.OperationType) } diff --git a/pkg/apiserver/tests/alert_allowlisted.json b/pkg/apiserver/tests/alert_allowlisted.json new file mode 100644 index 00000000000..ad383f69955 --- /dev/null +++ b/pkg/apiserver/tests/alert_allowlisted.json @@ -0,0 +1,275 @@ +[ + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + 
}, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "10.0.0.42" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 10.0.0.42 performed 'crowdsecurity/ssh-bf' (6 events over 30.18165ms) at 2020-10-26 09:50:32.055535505 +0100 CET m=+6.235529150", + 
"remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "10.0.0.42", + "latitude": 50.646, + "longitude": 3.0758, + "range": "10.0.0.0/8", + "scope": "Ip", + "value": "10.0.0.42" + }, + "start_at": "2020-10-26T09:50:32.025353849+01:00", + "stop_at": "2020-10-26T09:50:32.055534398+01:00" + } + ] \ No newline at end of file diff --git a/pkg/apiserver/tests/alert_allowlisted_expired.json b/pkg/apiserver/tests/alert_allowlisted_expired.json new file mode 100644 index 00000000000..1b8548e4747 --- /dev/null +++ b/pkg/apiserver/tests/alert_allowlisted_expired.json @@ -0,0 +1,275 @@ +[ + { + "capacity": 5, + "decisions": null, + "events": [ + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": 
"ssh_failed-auth" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "target_user", + "value": "root" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "source_ip", + "value": "91.121.79.195" + }, + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "IsInEU", + "value": "true" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + } + ], + "timestamp": "2020-10-02T17:09:08Z" + }, + { + "meta": [ + { + "key": "IsoCode", + "value": "FR" + }, + { + "key": "ASNNumber", + "value": "16276" + }, + { + "key": "ASNOrg", + "value": "OVH SAS" + }, + { + "key": "SourceRange", + "value": "91.121.72.0/21" + }, + { + "key": "target_user", + "value": "root" + }, + { + "key": "service", + "value": "ssh" + }, + { + "key": "log_type", + "value": "ssh_failed-auth" + }, + { + "key": "source_ip", + "value": "10.0.0.42" + }, + { + "key": "IsInEU", + "value": "true" + } + ], + "timestamp": 
"2020-10-02T17:09:08Z" + } + ], + "events_count": 6, + "labels": null, + "leakspeed": "10s", + "message": "Ip 192.168.0.42 performed 'crowdsecurity/ssh-bf' (6 events over 30.18165ms) at 2020-10-26 09:50:32.055535505 +0100 CET m=+6.235529150", + "remediation": true, + "scenario": "crowdsecurity/ssh-bf", + "scenario_hash": "4441dcff07020f6690d998b7101e642359ba405c2abb83565bbbdcee36de280f", + "scenario_version": "0.1", + "simulated": false, + "source": { + "as_name": "OVH SAS", + "cn": "FR", + "ip": "192.168.0.42", + "latitude": 50.646, + "longitude": 3.0758, + "range": "192.168.0.0/16", + "scope": "Ip", + "value": "192.168.0.42" + }, + "start_at": "2020-10-26T09:50:32.025353849+01:00", + "stop_at": "2020-10-26T09:50:32.055534398+01:00" + } + ] \ No newline at end of file diff --git a/pkg/appsec/allowlists/allowlists.go b/pkg/appsec/allowlists/allowlists.go new file mode 100644 index 00000000000..640164b6787 --- /dev/null +++ b/pkg/appsec/allowlists/allowlists.go @@ -0,0 +1,156 @@ +package allowlists + +import ( + "context" + "net" + "strings" + "sync" + "time" + + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" +) + +const allowlistRefreshInterval = 60 * time.Second + +type rangeAllowlist struct { + Range net.IPNet + Description string + AllowlistName string +} + +type ipAllowlist struct { + IP net.IP + Description string + AllowlistName string +} + +type AppsecAllowlist struct { + LAPIClient *apiclient.ApiClient + ips []ipAllowlist + ranges []rangeAllowlist + lock sync.RWMutex + logger *log.Entry + tomb *tomb.Tomb +} + +func NewAppsecAllowlist(client *apiclient.ApiClient, logger *log.Entry) *AppsecAllowlist { + a := &AppsecAllowlist{ + LAPIClient: client, + logger: logger.WithField("component", "appsec-allowlist"), + ips: []ipAllowlist{}, + ranges: []rangeAllowlist{}, + } + + if err := a.FetchAllowlists(); err != nil { + a.logger.Errorf("failed to fetch allowlists: %s", err) + } + + return a +} + +func (a 
*AppsecAllowlist) FetchAllowlists() error { + a.logger.Debug("fetching allowlists") + + allowlists, _, err := a.LAPIClient.Allowlists.List(context.TODO(), apiclient.AllowlistListOpts{WithContent: true}) + if err != nil { + return err + } + + a.lock.Lock() + defer a.lock.Unlock() + a.ranges = []rangeAllowlist{} + a.ips = []ipAllowlist{} + + for _, allowlist := range *allowlists { + for _, item := range allowlist.Items { + if strings.Contains(item.Value, "/") { + _, ipNet, err := net.ParseCIDR(item.Value) + if err != nil { + continue + } + + a.ranges = append(a.ranges, rangeAllowlist{ + Range: *ipNet, + Description: item.Description, + AllowlistName: allowlist.Name, + }) + } else { + ip := net.ParseIP(item.Value) + if ip == nil { + return nil + } + + a.ips = append(a.ips, ipAllowlist{ + IP: ip, + Description: item.Description, + AllowlistName: allowlist.Name, + }) + } + } + } + + a.logger.Debugf("fetched %d IPs and %d ranges", len(a.ips), len(a.ranges)) + a.logger.Tracef("allowlisted ips: %+v", a.ips) + a.logger.Tracef("allowlisted ranges: %+v", a.ranges) + + return nil +} + +func (a *AppsecAllowlist) updateAllowlists() error { + ticker := time.NewTicker(allowlistRefreshInterval) + + for { + select { + case <-ticker.C: + if err := a.FetchAllowlists(); err != nil { + a.logger.Errorf("failed to fetch allowlists: %s", err) + } + case <-a.tomb.Dying(): + ticker.Stop() + return nil + } + } +} + +func (a *AppsecAllowlist) StartRefresh(t *tomb.Tomb) { + a.tomb = t + a.tomb.Go(a.updateAllowlists) +} + +func (a *AppsecAllowlist) IsAllowlisted(sourceIP string) (bool, string) { + a.lock.RLock() + defer a.lock.RUnlock() + + ip := net.ParseIP(sourceIP) + if ip == nil { + a.logger.Warnf("failed to parse IP %s", sourceIP) + return false, "" + } + + for _, allowedIP := range a.ips { + if allowedIP.IP.Equal(ip) { + a.logger.Debugf("IP %s is allowlisted by %s from %s", sourceIP, allowedIP.Description, allowedIP.AllowlistName) + reason := allowedIP.IP.String() + " from " + 
allowedIP.AllowlistName + if allowedIP.Description != "" { + reason += " (" + allowedIP.Description + ")" + } + return true, reason + } + } + + for _, allowedRange := range a.ranges { + if allowedRange.Range.Contains(ip) { + a.logger.Debugf("IP %s is within allowlisted range by %s from %s", sourceIP, allowedRange.Description, allowedRange.AllowlistName) + reason := allowedRange.Range.String() + " from " + allowedRange.AllowlistName + if allowedRange.Description != "" { + reason += " (" + allowedRange.Description + ")" + } + return true, reason + } + } + + return false, "" +} diff --git a/pkg/appsec/allowlists/allowlists_test.go b/pkg/appsec/allowlists/allowlists_test.go new file mode 100644 index 00000000000..19fb289b90f --- /dev/null +++ b/pkg/appsec/allowlists/allowlists_test.go @@ -0,0 +1,93 @@ +package allowlists + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient" +) + +func setup() (*http.ServeMux, string, func()) { + return setupWithPrefix("v1") +} + +func setupWithPrefix(urlPrefix string) (*http.ServeMux, string, func()) { + // mux is the HTTP request multiplexer used with the test server. 
+ mux := http.NewServeMux() + baseURLPath := "/" + urlPrefix + + apiHandler := http.NewServeMux() + apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux)) + + server := httptest.NewServer(apiHandler) + + return mux, server.URL, server.Close +} + +func TestAppsecAllowlist(t *testing.T) { + mux, urlx, teardown := setup() + defer teardown() + + apiURL, err := url.Parse(urlx + "/") + require.NoError(t, err) + + client, err := apiclient.NewClient(&apiclient.Config{ + MachineID: "test_login", + Password: "test_password", + URL: apiURL, + VersionPrefix: "v1", + }) + require.NoError(t, err) + + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, err = w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) + }) + + mux.HandleFunc("/allowlists", func(w http.ResponseWriter, r *http.Request) { + if r.URL.Query().Get("with_content") != "true" { + t.Errorf("with_content not set") + } + + w.WriteHeader(http.StatusOK) + + _, err = w.Write([]byte(`[{"name": "list1", "allowlist_id":"xxxx","console_managed":false,"created_at":"2025-02-11T14:47:35.839Z","description":"test_desc2", + "items":[{"created_at":"2025-02-12T09:32:53.939Z","description":"desc_ip","expiration":"0001-01-01T00:00:00.000Z","value":"5.4.3.2"}, + {"created_at":"2025-02-12T09:32:53.939Z","description":"desc_range","expiration":"0001-01-01T00:00:00.000Z","value":"5.4.4.0/24"}]}]`)) + assert.NoError(t, err) + }) + + allowlistClient := NewAppsecAllowlist(client, log.NewEntry(log.StandardLogger())) + + err = allowlistClient.FetchAllowlists() + require.NoError(t, err) + + res, reason := allowlistClient.IsAllowlisted("1.2.3.4") + assert.False(t, res) + assert.Empty(t, reason) + + res, reason = allowlistClient.IsAllowlisted("5.4.3.2") + assert.True(t, res) + assert.Equal(t, "5.4.3.2 from list1 (desc_ip)", reason) + + res, reason = allowlistClient.IsAllowlisted("5.4.4.42") + assert.True(t, 
res) + assert.Equal(t, "5.4.4.0/24 from list1 (desc_range)", reason) + + assert.Len(t, allowlistClient.ips, 1) + assert.Len(t, allowlistClient.ranges, 1) + + err = allowlistClient.FetchAllowlists() + require.NoError(t, err) + + // No duplicates should be added + assert.Len(t, allowlistClient.ips, 1) + assert.Len(t, allowlistClient.ranges, 1) +} diff --git a/pkg/database/allowlists.go b/pkg/database/allowlists.go new file mode 100644 index 00000000000..c9b1c76ad67 --- /dev/null +++ b/pkg/database/allowlists.go @@ -0,0 +1,372 @@ +package database + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" + "github.com/crowdsecurity/crowdsec/pkg/models" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func (c *Client) CreateAllowList(ctx context.Context, name string, description string, allowlistID string, fromConsole bool) (*ent.AllowList, error) { + allowlist, err := c.Ent.AllowList.Create(). + SetName(name). + SetFromConsole(fromConsole). + SetDescription(description). + SetAllowlistID(allowlistID). + Save(ctx) + if err != nil { + if sqlgraph.IsUniqueConstraintError(err) { + return nil, fmt.Errorf("allowlist '%s' already exists", name) + } + + return nil, fmt.Errorf("unable to create allowlist: %w", err) + } + + return allowlist, nil +} + +func (c *Client) DeleteAllowList(ctx context.Context, name string, fromConsole bool) error { + nbDeleted, err := c.Ent.AllowListItem.Delete().Where(allowlistitem.HasAllowlistWith(allowlist.NameEQ(name), allowlist.FromConsoleEQ(fromConsole))).Exec(ctx) + if err != nil { + return fmt.Errorf("unable to delete allowlist items: %w", err) + } + + c.Log.Debugf("deleted %d items from allowlist %s", nbDeleted, name) + + nbDeleted, err = c.Ent.AllowList. + Delete(). 
+ Where(allowlist.NameEQ(name), allowlist.FromConsoleEQ(fromConsole)). + Exec(ctx) + if err != nil { + return fmt.Errorf("unable to delete allowlist: %w", err) + } + + if nbDeleted == 0 { + return fmt.Errorf("allowlist %s not found", name) + } + + return nil +} + +func (c *Client) DeleteAllowListByID(ctx context.Context, name string, allowlistID string, fromConsole bool) error { + nbDeleted, err := c.Ent.AllowListItem.Delete().Where(allowlistitem.HasAllowlistWith(allowlist.AllowlistIDEQ(allowlistID), allowlist.FromConsoleEQ(fromConsole))).Exec(ctx) + if err != nil { + return fmt.Errorf("unable to delete allowlist items: %w", err) + } + + c.Log.Debugf("deleted %d items from allowlist %s", nbDeleted, name) + + nbDeleted, err = c.Ent.AllowList. + Delete(). + Where(allowlist.AllowlistIDEQ(allowlistID), allowlist.FromConsoleEQ(fromConsole)). + Exec(ctx) + if err != nil { + return fmt.Errorf("unable to delete allowlist: %w", err) + } + + if nbDeleted == 0 { + return fmt.Errorf("allowlist %s not found", name) + } + + return nil +} + +func (c *Client) ListAllowLists(ctx context.Context, withContent bool) ([]*ent.AllowList, error) { + q := c.Ent.AllowList.Query() + if withContent { + q = q.WithAllowlistItems() + } + + result, err := q.All(ctx) + if err != nil { + return nil, fmt.Errorf("unable to list allowlists: %w", err) + } + + return result, nil +} + +func (c *Client) GetAllowList(ctx context.Context, name string, withContent bool) (*ent.AllowList, error) { + q := c.Ent.AllowList.Query().Where(allowlist.NameEQ(name)) + if withContent { + q = q.WithAllowlistItems() + } + + result, err := q.First(ctx) + if err != nil { + if ent.IsNotFound(err) { + return nil, fmt.Errorf("allowlist '%s' not found", name) + } + + return nil, err + } + + return result, nil +} + +func (c *Client) GetAllowListByID(ctx context.Context, allowlistID string, withContent bool) (*ent.AllowList, error) { + q := c.Ent.AllowList.Query().Where(allowlist.AllowlistIDEQ(allowlistID)) + if withContent { + q 
= q.WithAllowlistItems() + } + + result, err := q.First(ctx) + if err != nil { + return nil, err + } + + return result, nil +} + +func (c *Client) AddToAllowlist(ctx context.Context, list *ent.AllowList, items []*models.AllowlistItem) (int, error) { + added := 0 + + c.Log.Debugf("adding %d values to allowlist %s", len(items), list.Name) + c.Log.Tracef("values: %+v", items) + + txClient, err := c.Ent.Tx(ctx) + if err != nil { + return 0, fmt.Errorf("error creating transaction: %w", err) + } + + for _, item := range items { + c.Log.Debugf("adding value %s to allowlist %s", item.Value, list.Name) + + sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(item.Value) + if err != nil { + c.Log.Error(err) + continue + } + + query := txClient.AllowListItem.Create(). + SetValue(item.Value). + SetIPSize(int64(sz)). + SetStartIP(start_ip). + SetStartSuffix(start_sfx). + SetEndIP(end_ip). + SetEndSuffix(end_sfx). + SetComment(item.Description) + + if !time.Time(item.Expiration).IsZero() { + query = query.SetExpiresAt(time.Time(item.Expiration)) + } + + content, err := query.Save(ctx) + if err != nil { + return 0, rollbackOnError(txClient, err, "unable to add value to allowlist") + } + + c.Log.Debugf("Updating allowlist %s with value %s (exp: %s)", list.Name, item.Value, item.Expiration) + + // We don't have a clean way to handle name conflict from the console, so use id + err = txClient.AllowList.Update().AddAllowlistItems(content).Where(allowlist.IDEQ(list.ID)).Exec(ctx) + if err != nil { + c.Log.Errorf("unable to add value to allowlist: %s", err) + continue + } + + added++ + } + + err = txClient.Commit() + if err != nil { + return 0, rollbackOnError(txClient, err, "error committing transaction") + } + + return added, nil +} + +func (c *Client) RemoveFromAllowlist(ctx context.Context, list *ent.AllowList, values ...string) (int, error) { + c.Log.Debugf("removing %d values from allowlist %s", len(values), list.Name) + c.Log.Tracef("values: %v", values) + + 
nbDeleted, err := c.Ent.AllowListItem.Delete().Where( + allowlistitem.HasAllowlistWith(allowlist.IDEQ(list.ID)), + allowlistitem.ValueIn(values...), + ).Exec(ctx) + if err != nil { + return 0, fmt.Errorf("unable to remove values from allowlist: %w", err) + } + + return nbDeleted, nil +} + +func (c *Client) UpdateAllowlistMeta(ctx context.Context, allowlistID string, name string, description string) error { + c.Log.Debugf("updating allowlist %s meta", name) + + err := c.Ent.AllowList.Update().Where(allowlist.AllowlistIDEQ(allowlistID)).SetName(name).SetDescription(description).Exec(ctx) + if err != nil { + return fmt.Errorf("unable to update allowlist: %w", err) + } + + return nil +} + +func (c *Client) ReplaceAllowlist(ctx context.Context, list *ent.AllowList, items []*models.AllowlistItem, fromConsole bool) (int, error) { + c.Log.Debugf("replacing values in allowlist %s", list.Name) + c.Log.Tracef("items: %+v", items) + + _, err := c.Ent.AllowListItem.Delete().Where(allowlistitem.HasAllowlistWith(allowlist.IDEQ(list.ID))).Exec(ctx) + if err != nil { + return 0, fmt.Errorf("unable to delete allowlist contents: %w", err) + } + + added, err := c.AddToAllowlist(ctx, list, items) + if err != nil { + return 0, fmt.Errorf("unable to add values to allowlist: %w", err) + } + + if !list.FromConsole && fromConsole { + c.Log.Infof("marking allowlist %s as managed from console and replacing its content", list.Name) + + err = c.Ent.AllowList.Update().SetFromConsole(fromConsole).Where(allowlist.IDEQ(list.ID)).Exec(ctx) + if err != nil { + return 0, fmt.Errorf("unable to update allowlist: %w", err) + } + } + + return added, nil +} + +func (c *Client) IsAllowlisted(ctx context.Context, value string) (bool, string, error) { + /* + Few cases: + - value is an IP/range directly is in allowlist + - value is an IP/range in a range in allowlist + - value is a range and an IP/range belonging to it is in allowlist + */ + sz, start_ip, start_sfx, end_ip, end_sfx, err := 
types.Addr2Ints(value) + if err != nil { + return false, "", err + } + + c.Log.Debugf("checking if %s is allowlisted", value) + + now := time.Now().UTC() + query := c.Ent.AllowListItem.Query().Where( + allowlistitem.Or( + allowlistitem.ExpiresAtGTE(now), + allowlistitem.ExpiresAtIsNil(), + ), + allowlistitem.IPSizeEQ(int64(sz)), + ) + + if sz == 4 { + query = query.Where( + allowlistitem.Or( + // Value contained inside a range or exact match + allowlistitem.And( + allowlistitem.StartIPLTE(start_ip), + allowlistitem.EndIPGTE(end_ip), + ), + // Value contains another allowlisted value + allowlistitem.And( + allowlistitem.StartIPGTE(start_ip), + allowlistitem.EndIPLTE(end_ip), + ), + )) + } + + if sz == 16 { + query = query.Where( + // Value contained inside a range or exact match + allowlistitem.Or( + allowlistitem.And( + allowlistitem.Or( + allowlistitem.StartIPLT(start_ip), + allowlistitem.And( + allowlistitem.StartIPEQ(start_ip), + allowlistitem.StartSuffixLTE(start_sfx), + )), + allowlistitem.Or( + allowlistitem.EndIPGT(end_ip), + allowlistitem.And( + allowlistitem.EndIPEQ(end_ip), + allowlistitem.EndSuffixGTE(end_sfx), + ), + ), + ), + // Value contains another allowlisted value + allowlistitem.And( + allowlistitem.Or( + allowlistitem.StartIPGT(start_ip), + allowlistitem.And( + allowlistitem.StartIPEQ(start_ip), + allowlistitem.StartSuffixGTE(start_sfx), + )), + allowlistitem.Or( + allowlistitem.EndIPLT(end_ip), + allowlistitem.And( + allowlistitem.EndIPEQ(end_ip), + allowlistitem.EndSuffixLTE(end_sfx), + ), + ), + ), + ), + ) + } + + allowed, err := query.WithAllowlist().First(ctx) + if err != nil { + if ent.IsNotFound(err) { + return false, "", nil + } + + return false, "", fmt.Errorf("unable to check if value is allowlisted: %w", err) + } + + allowlistName := allowed.Edges.Allowlist[0].Name + reason := allowed.Value + " from " + allowlistName + + if allowed.Comment != "" { + reason += " (" + allowed.Comment + ")" + } + + return true, reason, nil +} + +func (c 
*Client) GetAllowlistsContentForAPIC(ctx context.Context) ([]net.IP, []*net.IPNet, error) { + allowlists, err := c.ListAllowLists(ctx, true) + if err != nil { + return nil, nil, fmt.Errorf("unable to get allowlists: %w", err) + } + + var ( + ips []net.IP + nets []*net.IPNet + ) + + for _, allowlist := range allowlists { + for _, item := range allowlist.Edges.AllowlistItems { + if item.ExpiresAt.IsZero() || item.ExpiresAt.After(time.Now().UTC()) { + if strings.Contains(item.Value, "/") { + _, ipNet, err := net.ParseCIDR(item.Value) + if err != nil { + c.Log.Errorf("unable to parse CIDR %s: %s", item.Value, err) + continue + } + + nets = append(nets, ipNet) + } else { + ip := net.ParseIP(item.Value) + if ip == nil { + c.Log.Errorf("unable to parse IP %s", item.Value) + continue + } + + ips = append(ips, ip) + } + } + } + } + + return ips, nets, nil +} diff --git a/pkg/database/allowlists_test.go b/pkg/database/allowlists_test.go new file mode 100644 index 00000000000..9a4eb8e1fb8 --- /dev/null +++ b/pkg/database/allowlists_test.go @@ -0,0 +1,106 @@ +package database + +import ( + "context" + "testing" + "time" + + "github.com/go-openapi/strfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/models" +) + +func getDBClient(t *testing.T, ctx context.Context) *Client { + t.Helper() + + dbClient, err := NewClient(ctx, &csconfig.DatabaseCfg{ + Type: "sqlite", + DbName: "crowdsec", + DbPath: ":memory:", + }) + require.NoError(t, err) + + return dbClient +} + +func TestCheckAllowlist(t *testing.T) { + ctx := context.Background() + dbClient := getDBClient(t, ctx) + + allowlist, err := dbClient.CreateAllowList(ctx, "test", "test", "", false) + + require.NoError(t, err) + + added, err := dbClient.AddToAllowlist(ctx, allowlist, []*models.AllowlistItem{ + { + CreatedAt: strfmt.DateTime(time.Now()), + Value: "1.2.3.4", + }, + { + CreatedAt: 
strfmt.DateTime(time.Now()), + Value: "8.0.0.0/8", + Description: "range allowlist", + }, + { + CreatedAt: strfmt.DateTime(time.Now()), + Value: "2001:db8::/32", + }, + { + CreatedAt: strfmt.DateTime(time.Now()), + Value: "2.3.4.5", + Expiration: strfmt.DateTime(time.Now().Add(-time.Hour)), // expired item + }, + { + CreatedAt: strfmt.DateTime(time.Now()), + Value: "8a95:c186:9f96:4c75:0dad:49c6:ff62:94b8", + }, + }) + + require.NoError(t, err) + assert.Equal(t, 5, added) + + // Exatch match + allowlisted, reason, err := dbClient.IsAllowlisted(ctx, "1.2.3.4") + require.NoError(t, err) + require.True(t, allowlisted) + require.Equal(t, "1.2.3.4 from test", reason) + + // CIDR match + allowlisted, reason, err = dbClient.IsAllowlisted(ctx, "8.8.8.8") + require.NoError(t, err) + require.True(t, allowlisted) + require.Equal(t, "8.0.0.0/8 from test (range allowlist)", reason) + + // IPv6 match + allowlisted, reason, err = dbClient.IsAllowlisted(ctx, "2001:db8::1") + require.NoError(t, err) + require.True(t, allowlisted) + require.Equal(t, "2001:db8::/32 from test", reason) + + // Expired item + allowlisted, reason, err = dbClient.IsAllowlisted(ctx, "2.3.4.5") + require.NoError(t, err) + require.False(t, allowlisted) + require.Empty(t, reason) + + // Decision on a range that contains an allowlisted value + allowlisted, reason, err = dbClient.IsAllowlisted(ctx, "1.2.3.0/24") + require.NoError(t, err) + require.True(t, allowlisted) + require.Equal(t, "1.2.3.4 from test", reason) + + // No match + allowlisted, reason, err = dbClient.IsAllowlisted(ctx, "42.42.42.42") + require.NoError(t, err) + require.False(t, allowlisted) + require.Empty(t, reason) + + // IPv6 range that contains an allowlisted value + allowlisted, reason, err = dbClient.IsAllowlisted(ctx, "8a95:c186:9f96:4c75::/64") + require.NoError(t, err) + require.True(t, allowlisted) + require.Equal(t, "8a95:c186:9f96:4c75:0dad:49c6:ff62:94b8 from test", reason) +} diff --git a/pkg/database/ent/allowlist.go 
b/pkg/database/ent/allowlist.go new file mode 100644 index 00000000000..99b36687a7b --- /dev/null +++ b/pkg/database/ent/allowlist.go @@ -0,0 +1,189 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" +) + +// AllowList is the model entity for the AllowList schema. +type AllowList struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // FromConsole holds the value of the "from_console" field. + FromConsole bool `json:"from_console,omitempty"` + // Description holds the value of the "description" field. + Description string `json:"description,omitempty"` + // AllowlistID holds the value of the "allowlist_id" field. + AllowlistID string `json:"allowlist_id,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AllowListQuery when eager-loading is set. + Edges AllowListEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AllowListEdges holds the relations/edges for other nodes in the graph. +type AllowListEdges struct { + // AllowlistItems holds the value of the allowlist_items edge. + AllowlistItems []*AllowListItem `json:"allowlist_items,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// AllowlistItemsOrErr returns the AllowlistItems value or an error if the edge +// was not loaded in eager-loading. 
+func (e AllowListEdges) AllowlistItemsOrErr() ([]*AllowListItem, error) { + if e.loadedTypes[0] { + return e.AllowlistItems, nil + } + return nil, &NotLoadedError{edge: "allowlist_items"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*AllowList) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case allowlist.FieldFromConsole: + values[i] = new(sql.NullBool) + case allowlist.FieldID: + values[i] = new(sql.NullInt64) + case allowlist.FieldName, allowlist.FieldDescription, allowlist.FieldAllowlistID: + values[i] = new(sql.NullString) + case allowlist.FieldCreatedAt, allowlist.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AllowList fields. +func (al *AllowList) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case allowlist.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + al.ID = int(value.Int64) + case allowlist.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + al.CreatedAt = value.Time + } + case allowlist.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + al.UpdatedAt = value.Time + } + case allowlist.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + al.Name = value.String + } + 
case allowlist.FieldFromConsole: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field from_console", values[i]) + } else if value.Valid { + al.FromConsole = value.Bool + } + case allowlist.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + al.Description = value.String + } + case allowlist.FieldAllowlistID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field allowlist_id", values[i]) + } else if value.Valid { + al.AllowlistID = value.String + } + default: + al.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the AllowList. +// This includes values selected through modifiers, order, etc. +func (al *AllowList) Value(name string) (ent.Value, error) { + return al.selectValues.Get(name) +} + +// QueryAllowlistItems queries the "allowlist_items" edge of the AllowList entity. +func (al *AllowList) QueryAllowlistItems() *AllowListItemQuery { + return NewAllowListClient(al.config).QueryAllowlistItems(al) +} + +// Update returns a builder for updating this AllowList. +// Note that you need to call AllowList.Unwrap() before calling this method if this AllowList +// was returned from a transaction, and the transaction was committed or rolled back. +func (al *AllowList) Update() *AllowListUpdateOne { + return NewAllowListClient(al.config).UpdateOne(al) +} + +// Unwrap unwraps the AllowList entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
+func (al *AllowList) Unwrap() *AllowList { + _tx, ok := al.config.driver.(*txDriver) + if !ok { + panic("ent: AllowList is not a transactional entity") + } + al.config.driver = _tx.drv + return al +} + +// String implements the fmt.Stringer. +func (al *AllowList) String() string { + var builder strings.Builder + builder.WriteString("AllowList(") + builder.WriteString(fmt.Sprintf("id=%v, ", al.ID)) + builder.WriteString("created_at=") + builder.WriteString(al.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(al.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(al.Name) + builder.WriteString(", ") + builder.WriteString("from_console=") + builder.WriteString(fmt.Sprintf("%v", al.FromConsole)) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(al.Description) + builder.WriteString(", ") + builder.WriteString("allowlist_id=") + builder.WriteString(al.AllowlistID) + builder.WriteByte(')') + return builder.String() +} + +// AllowLists is a parsable slice of AllowList. +type AllowLists []*AllowList diff --git a/pkg/database/ent/allowlist/allowlist.go b/pkg/database/ent/allowlist/allowlist.go new file mode 100644 index 00000000000..36cac5c1b21 --- /dev/null +++ b/pkg/database/ent/allowlist/allowlist.go @@ -0,0 +1,133 @@ +// Code generated by ent, DO NOT EDIT. + +package allowlist + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the allowlist type in the database. + Label = "allow_list" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. 
+ FieldUpdatedAt = "updated_at" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldFromConsole holds the string denoting the from_console field in the database. + FieldFromConsole = "from_console" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldAllowlistID holds the string denoting the allowlist_id field in the database. + FieldAllowlistID = "allowlist_id" + // EdgeAllowlistItems holds the string denoting the allowlist_items edge name in mutations. + EdgeAllowlistItems = "allowlist_items" + // Table holds the table name of the allowlist in the database. + Table = "allow_lists" + // AllowlistItemsTable is the table that holds the allowlist_items relation/edge. The primary key declared below. + AllowlistItemsTable = "allow_list_allowlist_items" + // AllowlistItemsInverseTable is the table name for the AllowListItem entity. + // It exists in this package in order to avoid circular dependency with the "allowlistitem" package. + AllowlistItemsInverseTable = "allow_list_items" +) + +// Columns holds all SQL columns for allowlist fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldName, + FieldFromConsole, + FieldDescription, + FieldAllowlistID, +} + +var ( + // AllowlistItemsPrimaryKey and AllowlistItemsColumn2 are the table columns denoting the + // primary key for the allowlist_items relation (M2M). + AllowlistItemsPrimaryKey = []string{"allow_list_id", "allow_list_item_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. 
+ DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the AllowList queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByFromConsole orders the results by the from_console field. +func ByFromConsole(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFromConsole, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByAllowlistID orders the results by the allowlist_id field. +func ByAllowlistID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAllowlistID, opts...).ToFunc() +} + +// ByAllowlistItemsCount orders the results by allowlist_items count. +func ByAllowlistItemsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAllowlistItemsStep(), opts...) 
+ } +} + +// ByAllowlistItems orders the results by allowlist_items terms. +func ByAllowlistItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAllowlistItemsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAllowlistItemsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AllowlistItemsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, AllowlistItemsTable, AllowlistItemsPrimaryKey...), + ) +} diff --git a/pkg/database/ent/allowlist/where.go b/pkg/database/ent/allowlist/where.go new file mode 100644 index 00000000000..d8b43be2cf9 --- /dev/null +++ b/pkg/database/ent/allowlist/where.go @@ -0,0 +1,429 @@ +// Code generated by ent, DO NOT EDIT. + +package allowlist + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.AllowList { + return predicate.AllowList(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.AllowList { + return predicate.AllowList(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.AllowList { + return predicate.AllowList(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.AllowList { + return predicate.AllowList(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
+func IDGTE(id int) predicate.AllowList { + return predicate.AllowList(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.AllowList { + return predicate.AllowList(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.AllowList { + return predicate.AllowList(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldName, v)) +} + +// FromConsole applies equality check predicate on the "from_console" field. It's identical to FromConsoleEQ. +func FromConsole(v bool) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldFromConsole, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldDescription, v)) +} + +// AllowlistID applies equality check predicate on the "allowlist_id" field. It's identical to AllowlistIDEQ. +func AllowlistID(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldAllowlistID, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. 
+func CreatedAtNEQ(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. 
+func UpdatedAtNotIn(vs ...time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.AllowList { + return predicate.AllowList(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.AllowList { + return predicate.AllowList(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.AllowList { + return predicate.AllowList(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. 
+func NameLT(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldContainsFold(FieldName, v)) +} + +// FromConsoleEQ applies the EQ predicate on the "from_console" field. +func FromConsoleEQ(v bool) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldFromConsole, v)) +} + +// FromConsoleNEQ applies the NEQ predicate on the "from_console" field. +func FromConsoleNEQ(v bool) predicate.AllowList { + return predicate.AllowList(sql.FieldNEQ(FieldFromConsole, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. 
+func DescriptionNEQ(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.AllowList { + return predicate.AllowList(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.AllowList { + return predicate.AllowList(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. 
+func DescriptionHasSuffix(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. +func DescriptionIsNil() predicate.AllowList { + return predicate.AllowList(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. +func DescriptionNotNil() predicate.AllowList { + return predicate.AllowList(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldContainsFold(FieldDescription, v)) +} + +// AllowlistIDEQ applies the EQ predicate on the "allowlist_id" field. +func AllowlistIDEQ(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEQ(FieldAllowlistID, v)) +} + +// AllowlistIDNEQ applies the NEQ predicate on the "allowlist_id" field. +func AllowlistIDNEQ(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldNEQ(FieldAllowlistID, v)) +} + +// AllowlistIDIn applies the In predicate on the "allowlist_id" field. +func AllowlistIDIn(vs ...string) predicate.AllowList { + return predicate.AllowList(sql.FieldIn(FieldAllowlistID, vs...)) +} + +// AllowlistIDNotIn applies the NotIn predicate on the "allowlist_id" field. +func AllowlistIDNotIn(vs ...string) predicate.AllowList { + return predicate.AllowList(sql.FieldNotIn(FieldAllowlistID, vs...)) +} + +// AllowlistIDGT applies the GT predicate on the "allowlist_id" field. 
+func AllowlistIDGT(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldGT(FieldAllowlistID, v)) +} + +// AllowlistIDGTE applies the GTE predicate on the "allowlist_id" field. +func AllowlistIDGTE(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldGTE(FieldAllowlistID, v)) +} + +// AllowlistIDLT applies the LT predicate on the "allowlist_id" field. +func AllowlistIDLT(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldLT(FieldAllowlistID, v)) +} + +// AllowlistIDLTE applies the LTE predicate on the "allowlist_id" field. +func AllowlistIDLTE(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldLTE(FieldAllowlistID, v)) +} + +// AllowlistIDContains applies the Contains predicate on the "allowlist_id" field. +func AllowlistIDContains(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldContains(FieldAllowlistID, v)) +} + +// AllowlistIDHasPrefix applies the HasPrefix predicate on the "allowlist_id" field. +func AllowlistIDHasPrefix(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldHasPrefix(FieldAllowlistID, v)) +} + +// AllowlistIDHasSuffix applies the HasSuffix predicate on the "allowlist_id" field. +func AllowlistIDHasSuffix(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldHasSuffix(FieldAllowlistID, v)) +} + +// AllowlistIDIsNil applies the IsNil predicate on the "allowlist_id" field. +func AllowlistIDIsNil() predicate.AllowList { + return predicate.AllowList(sql.FieldIsNull(FieldAllowlistID)) +} + +// AllowlistIDNotNil applies the NotNil predicate on the "allowlist_id" field. +func AllowlistIDNotNil() predicate.AllowList { + return predicate.AllowList(sql.FieldNotNull(FieldAllowlistID)) +} + +// AllowlistIDEqualFold applies the EqualFold predicate on the "allowlist_id" field. 
+func AllowlistIDEqualFold(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldEqualFold(FieldAllowlistID, v)) +} + +// AllowlistIDContainsFold applies the ContainsFold predicate on the "allowlist_id" field. +func AllowlistIDContainsFold(v string) predicate.AllowList { + return predicate.AllowList(sql.FieldContainsFold(FieldAllowlistID, v)) +} + +// HasAllowlistItems applies the HasEdge predicate on the "allowlist_items" edge. +func HasAllowlistItems() predicate.AllowList { + return predicate.AllowList(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, AllowlistItemsTable, AllowlistItemsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAllowlistItemsWith applies the HasEdge predicate on the "allowlist_items" edge with a given conditions (other predicates). +func HasAllowlistItemsWith(preds ...predicate.AllowListItem) predicate.AllowList { + return predicate.AllowList(func(s *sql.Selector) { + step := newAllowlistItemsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.AllowList) predicate.AllowList { + return predicate.AllowList(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.AllowList) predicate.AllowList { + return predicate.AllowList(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.AllowList) predicate.AllowList { + return predicate.AllowList(sql.NotPredicates(p)) +} diff --git a/pkg/database/ent/allowlist_create.go b/pkg/database/ent/allowlist_create.go new file mode 100644 index 00000000000..ec9d29b6ae5 --- /dev/null +++ b/pkg/database/ent/allowlist_create.go @@ -0,0 +1,321 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" +) + +// AllowListCreate is the builder for creating a AllowList entity. +type AllowListCreate struct { + config + mutation *AllowListMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (alc *AllowListCreate) SetCreatedAt(t time.Time) *AllowListCreate { + alc.mutation.SetCreatedAt(t) + return alc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (alc *AllowListCreate) SetNillableCreatedAt(t *time.Time) *AllowListCreate { + if t != nil { + alc.SetCreatedAt(*t) + } + return alc +} + +// SetUpdatedAt sets the "updated_at" field. +func (alc *AllowListCreate) SetUpdatedAt(t time.Time) *AllowListCreate { + alc.mutation.SetUpdatedAt(t) + return alc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (alc *AllowListCreate) SetNillableUpdatedAt(t *time.Time) *AllowListCreate { + if t != nil { + alc.SetUpdatedAt(*t) + } + return alc +} + +// SetName sets the "name" field. +func (alc *AllowListCreate) SetName(s string) *AllowListCreate { + alc.mutation.SetName(s) + return alc +} + +// SetFromConsole sets the "from_console" field. +func (alc *AllowListCreate) SetFromConsole(b bool) *AllowListCreate { + alc.mutation.SetFromConsole(b) + return alc +} + +// SetDescription sets the "description" field. +func (alc *AllowListCreate) SetDescription(s string) *AllowListCreate { + alc.mutation.SetDescription(s) + return alc +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (alc *AllowListCreate) SetNillableDescription(s *string) *AllowListCreate { + if s != nil { + alc.SetDescription(*s) + } + return alc +} + +// SetAllowlistID sets the "allowlist_id" field. 
+func (alc *AllowListCreate) SetAllowlistID(s string) *AllowListCreate { + alc.mutation.SetAllowlistID(s) + return alc +} + +// SetNillableAllowlistID sets the "allowlist_id" field if the given value is not nil. +func (alc *AllowListCreate) SetNillableAllowlistID(s *string) *AllowListCreate { + if s != nil { + alc.SetAllowlistID(*s) + } + return alc +} + +// AddAllowlistItemIDs adds the "allowlist_items" edge to the AllowListItem entity by IDs. +func (alc *AllowListCreate) AddAllowlistItemIDs(ids ...int) *AllowListCreate { + alc.mutation.AddAllowlistItemIDs(ids...) + return alc +} + +// AddAllowlistItems adds the "allowlist_items" edges to the AllowListItem entity. +func (alc *AllowListCreate) AddAllowlistItems(a ...*AllowListItem) *AllowListCreate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return alc.AddAllowlistItemIDs(ids...) +} + +// Mutation returns the AllowListMutation object of the builder. +func (alc *AllowListCreate) Mutation() *AllowListMutation { + return alc.mutation +} + +// Save creates the AllowList in the database. +func (alc *AllowListCreate) Save(ctx context.Context) (*AllowList, error) { + alc.defaults() + return withHooks(ctx, alc.sqlSave, alc.mutation, alc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (alc *AllowListCreate) SaveX(ctx context.Context) *AllowList { + v, err := alc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (alc *AllowListCreate) Exec(ctx context.Context) error { + _, err := alc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (alc *AllowListCreate) ExecX(ctx context.Context) { + if err := alc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (alc *AllowListCreate) defaults() { + if _, ok := alc.mutation.CreatedAt(); !ok { + v := allowlist.DefaultCreatedAt() + alc.mutation.SetCreatedAt(v) + } + if _, ok := alc.mutation.UpdatedAt(); !ok { + v := allowlist.DefaultUpdatedAt() + alc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (alc *AllowListCreate) check() error { + if _, ok := alc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AllowList.created_at"`)} + } + if _, ok := alc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "AllowList.updated_at"`)} + } + if _, ok := alc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "AllowList.name"`)} + } + if _, ok := alc.mutation.FromConsole(); !ok { + return &ValidationError{Name: "from_console", err: errors.New(`ent: missing required field "AllowList.from_console"`)} + } + return nil +} + +func (alc *AllowListCreate) sqlSave(ctx context.Context) (*AllowList, error) { + if err := alc.check(); err != nil { + return nil, err + } + _node, _spec := alc.createSpec() + if err := sqlgraph.CreateNode(ctx, alc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + alc.mutation.id = &_node.ID + alc.mutation.done = true + return _node, nil +} + +func (alc *AllowListCreate) createSpec() (*AllowList, *sqlgraph.CreateSpec) { + var ( + _node = &AllowList{config: alc.config} + _spec = sqlgraph.NewCreateSpec(allowlist.Table, sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt)) + ) + if value, ok := alc.mutation.CreatedAt(); ok { + _spec.SetField(allowlist.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := alc.mutation.UpdatedAt(); ok 
{ + _spec.SetField(allowlist.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := alc.mutation.Name(); ok { + _spec.SetField(allowlist.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := alc.mutation.FromConsole(); ok { + _spec.SetField(allowlist.FieldFromConsole, field.TypeBool, value) + _node.FromConsole = value + } + if value, ok := alc.mutation.Description(); ok { + _spec.SetField(allowlist.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := alc.mutation.AllowlistID(); ok { + _spec.SetField(allowlist.FieldAllowlistID, field.TypeString, value) + _node.AllowlistID = value + } + if nodes := alc.mutation.AllowlistItemsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: allowlist.AllowlistItemsTable, + Columns: allowlist.AllowlistItemsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// AllowListCreateBulk is the builder for creating many AllowList entities in bulk. +type AllowListCreateBulk struct { + config + err error + builders []*AllowListCreate +} + +// Save creates the AllowList entities in the database. 
+func (alcb *AllowListCreateBulk) Save(ctx context.Context) ([]*AllowList, error) { + if alcb.err != nil { + return nil, alcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(alcb.builders)) + nodes := make([]*AllowList, len(alcb.builders)) + mutators := make([]Mutator, len(alcb.builders)) + for i := range alcb.builders { + func(i int, root context.Context) { + builder := alcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AllowListMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, alcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, alcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, alcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (alcb *AllowListCreateBulk) SaveX(ctx context.Context) []*AllowList { + v, err := alcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (alcb *AllowListCreateBulk) Exec(ctx context.Context) error { + _, err := alcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (alcb *AllowListCreateBulk) ExecX(ctx context.Context) { + if err := alcb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/allowlist_delete.go b/pkg/database/ent/allowlist_delete.go new file mode 100644 index 00000000000..dcfaa214f6f --- /dev/null +++ b/pkg/database/ent/allowlist_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// AllowListDelete is the builder for deleting a AllowList entity. +type AllowListDelete struct { + config + hooks []Hook + mutation *AllowListMutation +} + +// Where appends a list predicates to the AllowListDelete builder. +func (ald *AllowListDelete) Where(ps ...predicate.AllowList) *AllowListDelete { + ald.mutation.Where(ps...) + return ald +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ald *AllowListDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ald.sqlExec, ald.mutation, ald.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ald *AllowListDelete) ExecX(ctx context.Context) int { + n, err := ald.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ald *AllowListDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(allowlist.Table, sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt)) + if ps := ald.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ald.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ald.mutation.done = true + return affected, err +} + +// AllowListDeleteOne is the builder for deleting a single AllowList entity. +type AllowListDeleteOne struct { + ald *AllowListDelete +} + +// Where appends a list predicates to the AllowListDelete builder. +func (aldo *AllowListDeleteOne) Where(ps ...predicate.AllowList) *AllowListDeleteOne { + aldo.ald.mutation.Where(ps...) + return aldo +} + +// Exec executes the deletion query. +func (aldo *AllowListDeleteOne) Exec(ctx context.Context) error { + n, err := aldo.ald.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{allowlist.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (aldo *AllowListDeleteOne) ExecX(ctx context.Context) { + if err := aldo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/allowlist_query.go b/pkg/database/ent/allowlist_query.go new file mode 100644 index 00000000000..8ea40ee0c8a --- /dev/null +++ b/pkg/database/ent/allowlist_query.go @@ -0,0 +1,637 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// AllowListQuery is the builder for querying AllowList entities. +type AllowListQuery struct { + config + ctx *QueryContext + order []allowlist.OrderOption + inters []Interceptor + predicates []predicate.AllowList + withAllowlistItems *AllowListItemQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AllowListQuery builder. +func (alq *AllowListQuery) Where(ps ...predicate.AllowList) *AllowListQuery { + alq.predicates = append(alq.predicates, ps...) + return alq +} + +// Limit the number of records to be returned by this query. +func (alq *AllowListQuery) Limit(limit int) *AllowListQuery { + alq.ctx.Limit = &limit + return alq +} + +// Offset to start from. +func (alq *AllowListQuery) Offset(offset int) *AllowListQuery { + alq.ctx.Offset = &offset + return alq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (alq *AllowListQuery) Unique(unique bool) *AllowListQuery { + alq.ctx.Unique = &unique + return alq +} + +// Order specifies how the records should be ordered. +func (alq *AllowListQuery) Order(o ...allowlist.OrderOption) *AllowListQuery { + alq.order = append(alq.order, o...) + return alq +} + +// QueryAllowlistItems chains the current query on the "allowlist_items" edge. 
+func (alq *AllowListQuery) QueryAllowlistItems() *AllowListItemQuery { + query := (&AllowListItemClient{config: alq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := alq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := alq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(allowlist.Table, allowlist.FieldID, selector), + sqlgraph.To(allowlistitem.Table, allowlistitem.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, allowlist.AllowlistItemsTable, allowlist.AllowlistItemsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(alq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first AllowList entity from the query. +// Returns a *NotFoundError when no AllowList was found. +func (alq *AllowListQuery) First(ctx context.Context) (*AllowList, error) { + nodes, err := alq.Limit(1).All(setContextOp(ctx, alq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{allowlist.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (alq *AllowListQuery) FirstX(ctx context.Context) *AllowList { + node, err := alq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first AllowList ID from the query. +// Returns a *NotFoundError when no AllowList ID was found. +func (alq *AllowListQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = alq.Limit(1).IDs(setContextOp(ctx, alq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{allowlist.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (alq *AllowListQuery) FirstIDX(ctx context.Context) int { + id, err := alq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single AllowList entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AllowList entity is found. +// Returns a *NotFoundError when no AllowList entities are found. +func (alq *AllowListQuery) Only(ctx context.Context) (*AllowList, error) { + nodes, err := alq.Limit(2).All(setContextOp(ctx, alq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{allowlist.Label} + default: + return nil, &NotSingularError{allowlist.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (alq *AllowListQuery) OnlyX(ctx context.Context) *AllowList { + node, err := alq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only AllowList ID in the query. +// Returns a *NotSingularError when more than one AllowList ID is found. +// Returns a *NotFoundError when no entities are found. +func (alq *AllowListQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = alq.Limit(2).IDs(setContextOp(ctx, alq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{allowlist.Label} + default: + err = &NotSingularError{allowlist.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (alq *AllowListQuery) OnlyIDX(ctx context.Context) int { + id, err := alq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of AllowLists. 
+func (alq *AllowListQuery) All(ctx context.Context) ([]*AllowList, error) { + ctx = setContextOp(ctx, alq.ctx, ent.OpQueryAll) + if err := alq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AllowList, *AllowListQuery]() + return withInterceptors[[]*AllowList](ctx, alq, qr, alq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (alq *AllowListQuery) AllX(ctx context.Context) []*AllowList { + nodes, err := alq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of AllowList IDs. +func (alq *AllowListQuery) IDs(ctx context.Context) (ids []int, err error) { + if alq.ctx.Unique == nil && alq.path != nil { + alq.Unique(true) + } + ctx = setContextOp(ctx, alq.ctx, ent.OpQueryIDs) + if err = alq.Select(allowlist.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (alq *AllowListQuery) IDsX(ctx context.Context) []int { + ids, err := alq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (alq *AllowListQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, alq.ctx, ent.OpQueryCount) + if err := alq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, alq, querierCount[*AllowListQuery](), alq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (alq *AllowListQuery) CountX(ctx context.Context) int { + count, err := alq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (alq *AllowListQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, alq.ctx, ent.OpQueryExist) + switch _, err := alq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (alq *AllowListQuery) ExistX(ctx context.Context) bool { + exist, err := alq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AllowListQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (alq *AllowListQuery) Clone() *AllowListQuery { + if alq == nil { + return nil + } + return &AllowListQuery{ + config: alq.config, + ctx: alq.ctx.Clone(), + order: append([]allowlist.OrderOption{}, alq.order...), + inters: append([]Interceptor{}, alq.inters...), + predicates: append([]predicate.AllowList{}, alq.predicates...), + withAllowlistItems: alq.withAllowlistItems.Clone(), + // clone intermediate query. + sql: alq.sql.Clone(), + path: alq.path, + } +} + +// WithAllowlistItems tells the query-builder to eager-load the nodes that are connected to +// the "allowlist_items" edge. The optional arguments are used to configure the query builder of the edge. +func (alq *AllowListQuery) WithAllowlistItems(opts ...func(*AllowListItemQuery)) *AllowListQuery { + query := (&AllowListItemClient{config: alq.config}).Query() + for _, opt := range opts { + opt(query) + } + alq.withAllowlistItems = query + return alq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.AllowList.Query(). 
+// GroupBy(allowlist.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (alq *AllowListQuery) GroupBy(field string, fields ...string) *AllowListGroupBy { + alq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AllowListGroupBy{build: alq} + grbuild.flds = &alq.ctx.Fields + grbuild.label = allowlist.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.AllowList.Query(). +// Select(allowlist.FieldCreatedAt). +// Scan(ctx, &v) +func (alq *AllowListQuery) Select(fields ...string) *AllowListSelect { + alq.ctx.Fields = append(alq.ctx.Fields, fields...) + sbuild := &AllowListSelect{AllowListQuery: alq} + sbuild.label = allowlist.Label + sbuild.flds, sbuild.scan = &alq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AllowListSelect configured with the given aggregations. +func (alq *AllowListQuery) Aggregate(fns ...AggregateFunc) *AllowListSelect { + return alq.Select().Aggregate(fns...) 
+} + +func (alq *AllowListQuery) prepareQuery(ctx context.Context) error { + for _, inter := range alq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, alq); err != nil { + return err + } + } + } + for _, f := range alq.ctx.Fields { + if !allowlist.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if alq.path != nil { + prev, err := alq.path(ctx) + if err != nil { + return err + } + alq.sql = prev + } + return nil +} + +func (alq *AllowListQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AllowList, error) { + var ( + nodes = []*AllowList{} + _spec = alq.querySpec() + loadedTypes = [1]bool{ + alq.withAllowlistItems != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AllowList).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &AllowList{config: alq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, alq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := alq.withAllowlistItems; query != nil { + if err := alq.loadAllowlistItems(ctx, query, nodes, + func(n *AllowList) { n.Edges.AllowlistItems = []*AllowListItem{} }, + func(n *AllowList, e *AllowListItem) { n.Edges.AllowlistItems = append(n.Edges.AllowlistItems, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (alq *AllowListQuery) loadAllowlistItems(ctx context.Context, query *AllowListItemQuery, nodes []*AllowList, init func(*AllowList), assign func(*AllowList, *AllowListItem)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int]*AllowList) + nids 
:= make(map[int]map[*AllowList]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(allowlist.AllowlistItemsTable) + s.Join(joinT).On(s.C(allowlistitem.FieldID), joinT.C(allowlist.AllowlistItemsPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(allowlist.AllowlistItemsPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(allowlist.AllowlistItemsPrimaryKey[0])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := int(values[0].(*sql.NullInt64).Int64) + inValue := int(values[1].(*sql.NullInt64).Int64) + if nids[inValue] == nil { + nids[inValue] = map[*AllowList]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*AllowListItem](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "allowlist_items" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} + +func (alq *AllowListQuery) sqlCount(ctx context.Context) (int, error) { + _spec := alq.querySpec() + _spec.Node.Columns = alq.ctx.Fields + if len(alq.ctx.Fields) > 0 { + _spec.Unique = alq.ctx.Unique != nil && *alq.ctx.Unique + } + return 
sqlgraph.CountNodes(ctx, alq.driver, _spec) +} + +func (alq *AllowListQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(allowlist.Table, allowlist.Columns, sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt)) + _spec.From = alq.sql + if unique := alq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if alq.path != nil { + _spec.Unique = true + } + if fields := alq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, allowlist.FieldID) + for i := range fields { + if fields[i] != allowlist.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := alq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := alq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := alq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := alq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (alq *AllowListQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(alq.driver.Dialect()) + t1 := builder.Table(allowlist.Table) + columns := alq.ctx.Fields + if len(columns) == 0 { + columns = allowlist.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if alq.sql != nil { + selector = alq.sql + selector.Select(selector.Columns(columns...)...) + } + if alq.ctx.Unique != nil && *alq.ctx.Unique { + selector.Distinct() + } + for _, p := range alq.predicates { + p(selector) + } + for _, p := range alq.order { + p(selector) + } + if offset := alq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := alq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AllowListGroupBy is the group-by builder for AllowList entities. +type AllowListGroupBy struct { + selector + build *AllowListQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (algb *AllowListGroupBy) Aggregate(fns ...AggregateFunc) *AllowListGroupBy { + algb.fns = append(algb.fns, fns...) + return algb +} + +// Scan applies the selector query and scans the result into the given value. +func (algb *AllowListGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, algb.build.ctx, ent.OpQueryGroupBy) + if err := algb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AllowListQuery, *AllowListGroupBy](ctx, algb.build, algb, algb.build.inters, v) +} + +func (algb *AllowListGroupBy) sqlScan(ctx context.Context, root *AllowListQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(algb.fns)) + for _, fn := range algb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*algb.flds)+len(algb.fns)) + for _, f := range *algb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*algb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := algb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AllowListSelect is the builder for selecting fields of AllowList entities. +type AllowListSelect struct { + *AllowListQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. 
+func (als *AllowListSelect) Aggregate(fns ...AggregateFunc) *AllowListSelect { + als.fns = append(als.fns, fns...) + return als +} + +// Scan applies the selector query and scans the result into the given value. +func (als *AllowListSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, als.ctx, ent.OpQuerySelect) + if err := als.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AllowListQuery, *AllowListSelect](ctx, als.AllowListQuery, als, als.inters, v) +} + +func (als *AllowListSelect) sqlScan(ctx context.Context, root *AllowListQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(als.fns)) + for _, fn := range als.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*als.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := als.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/allowlist_update.go b/pkg/database/ent/allowlist_update.go new file mode 100644 index 00000000000..0c871946f3d --- /dev/null +++ b/pkg/database/ent/allowlist_update.go @@ -0,0 +1,501 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// AllowListUpdate is the builder for updating AllowList entities. 
+type AllowListUpdate struct { + config + hooks []Hook + mutation *AllowListMutation +} + +// Where appends a list predicates to the AllowListUpdate builder. +func (alu *AllowListUpdate) Where(ps ...predicate.AllowList) *AllowListUpdate { + alu.mutation.Where(ps...) + return alu +} + +// SetUpdatedAt sets the "updated_at" field. +func (alu *AllowListUpdate) SetUpdatedAt(t time.Time) *AllowListUpdate { + alu.mutation.SetUpdatedAt(t) + return alu +} + +// SetName sets the "name" field. +func (alu *AllowListUpdate) SetName(s string) *AllowListUpdate { + alu.mutation.SetName(s) + return alu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (alu *AllowListUpdate) SetNillableName(s *string) *AllowListUpdate { + if s != nil { + alu.SetName(*s) + } + return alu +} + +// SetFromConsole sets the "from_console" field. +func (alu *AllowListUpdate) SetFromConsole(b bool) *AllowListUpdate { + alu.mutation.SetFromConsole(b) + return alu +} + +// SetNillableFromConsole sets the "from_console" field if the given value is not nil. +func (alu *AllowListUpdate) SetNillableFromConsole(b *bool) *AllowListUpdate { + if b != nil { + alu.SetFromConsole(*b) + } + return alu +} + +// SetDescription sets the "description" field. +func (alu *AllowListUpdate) SetDescription(s string) *AllowListUpdate { + alu.mutation.SetDescription(s) + return alu +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (alu *AllowListUpdate) SetNillableDescription(s *string) *AllowListUpdate { + if s != nil { + alu.SetDescription(*s) + } + return alu +} + +// ClearDescription clears the value of the "description" field. +func (alu *AllowListUpdate) ClearDescription() *AllowListUpdate { + alu.mutation.ClearDescription() + return alu +} + +// AddAllowlistItemIDs adds the "allowlist_items" edge to the AllowListItem entity by IDs. 
+func (alu *AllowListUpdate) AddAllowlistItemIDs(ids ...int) *AllowListUpdate { + alu.mutation.AddAllowlistItemIDs(ids...) + return alu +} + +// AddAllowlistItems adds the "allowlist_items" edges to the AllowListItem entity. +func (alu *AllowListUpdate) AddAllowlistItems(a ...*AllowListItem) *AllowListUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return alu.AddAllowlistItemIDs(ids...) +} + +// Mutation returns the AllowListMutation object of the builder. +func (alu *AllowListUpdate) Mutation() *AllowListMutation { + return alu.mutation +} + +// ClearAllowlistItems clears all "allowlist_items" edges to the AllowListItem entity. +func (alu *AllowListUpdate) ClearAllowlistItems() *AllowListUpdate { + alu.mutation.ClearAllowlistItems() + return alu +} + +// RemoveAllowlistItemIDs removes the "allowlist_items" edge to AllowListItem entities by IDs. +func (alu *AllowListUpdate) RemoveAllowlistItemIDs(ids ...int) *AllowListUpdate { + alu.mutation.RemoveAllowlistItemIDs(ids...) + return alu +} + +// RemoveAllowlistItems removes "allowlist_items" edges to AllowListItem entities. +func (alu *AllowListUpdate) RemoveAllowlistItems(a ...*AllowListItem) *AllowListUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return alu.RemoveAllowlistItemIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (alu *AllowListUpdate) Save(ctx context.Context) (int, error) { + alu.defaults() + return withHooks(ctx, alu.sqlSave, alu.mutation, alu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (alu *AllowListUpdate) SaveX(ctx context.Context) int { + affected, err := alu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (alu *AllowListUpdate) Exec(ctx context.Context) error { + _, err := alu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (alu *AllowListUpdate) ExecX(ctx context.Context) { + if err := alu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (alu *AllowListUpdate) defaults() { + if _, ok := alu.mutation.UpdatedAt(); !ok { + v := allowlist.UpdateDefaultUpdatedAt() + alu.mutation.SetUpdatedAt(v) + } +} + +func (alu *AllowListUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(allowlist.Table, allowlist.Columns, sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt)) + if ps := alu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := alu.mutation.UpdatedAt(); ok { + _spec.SetField(allowlist.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := alu.mutation.Name(); ok { + _spec.SetField(allowlist.FieldName, field.TypeString, value) + } + if value, ok := alu.mutation.FromConsole(); ok { + _spec.SetField(allowlist.FieldFromConsole, field.TypeBool, value) + } + if value, ok := alu.mutation.Description(); ok { + _spec.SetField(allowlist.FieldDescription, field.TypeString, value) + } + if alu.mutation.DescriptionCleared() { + _spec.ClearField(allowlist.FieldDescription, field.TypeString) + } + if alu.mutation.AllowlistIDCleared() { + _spec.ClearField(allowlist.FieldAllowlistID, field.TypeString) + } + if alu.mutation.AllowlistItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: allowlist.AllowlistItemsTable, + Columns: allowlist.AllowlistItemsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := alu.mutation.RemovedAllowlistItemsIDs(); len(nodes) > 0 && !alu.mutation.AllowlistItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: 
allowlist.AllowlistItemsTable, + Columns: allowlist.AllowlistItemsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := alu.mutation.AllowlistItemsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: allowlist.AllowlistItemsTable, + Columns: allowlist.AllowlistItemsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, alu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{allowlist.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + alu.mutation.done = true + return n, nil +} + +// AllowListUpdateOne is the builder for updating a single AllowList entity. +type AllowListUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AllowListMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (aluo *AllowListUpdateOne) SetUpdatedAt(t time.Time) *AllowListUpdateOne { + aluo.mutation.SetUpdatedAt(t) + return aluo +} + +// SetName sets the "name" field. +func (aluo *AllowListUpdateOne) SetName(s string) *AllowListUpdateOne { + aluo.mutation.SetName(s) + return aluo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (aluo *AllowListUpdateOne) SetNillableName(s *string) *AllowListUpdateOne { + if s != nil { + aluo.SetName(*s) + } + return aluo +} + +// SetFromConsole sets the "from_console" field. 
+func (aluo *AllowListUpdateOne) SetFromConsole(b bool) *AllowListUpdateOne { + aluo.mutation.SetFromConsole(b) + return aluo +} + +// SetNillableFromConsole sets the "from_console" field if the given value is not nil. +func (aluo *AllowListUpdateOne) SetNillableFromConsole(b *bool) *AllowListUpdateOne { + if b != nil { + aluo.SetFromConsole(*b) + } + return aluo +} + +// SetDescription sets the "description" field. +func (aluo *AllowListUpdateOne) SetDescription(s string) *AllowListUpdateOne { + aluo.mutation.SetDescription(s) + return aluo +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (aluo *AllowListUpdateOne) SetNillableDescription(s *string) *AllowListUpdateOne { + if s != nil { + aluo.SetDescription(*s) + } + return aluo +} + +// ClearDescription clears the value of the "description" field. +func (aluo *AllowListUpdateOne) ClearDescription() *AllowListUpdateOne { + aluo.mutation.ClearDescription() + return aluo +} + +// AddAllowlistItemIDs adds the "allowlist_items" edge to the AllowListItem entity by IDs. +func (aluo *AllowListUpdateOne) AddAllowlistItemIDs(ids ...int) *AllowListUpdateOne { + aluo.mutation.AddAllowlistItemIDs(ids...) + return aluo +} + +// AddAllowlistItems adds the "allowlist_items" edges to the AllowListItem entity. +func (aluo *AllowListUpdateOne) AddAllowlistItems(a ...*AllowListItem) *AllowListUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return aluo.AddAllowlistItemIDs(ids...) +} + +// Mutation returns the AllowListMutation object of the builder. +func (aluo *AllowListUpdateOne) Mutation() *AllowListMutation { + return aluo.mutation +} + +// ClearAllowlistItems clears all "allowlist_items" edges to the AllowListItem entity. 
+func (aluo *AllowListUpdateOne) ClearAllowlistItems() *AllowListUpdateOne { + aluo.mutation.ClearAllowlistItems() + return aluo +} + +// RemoveAllowlistItemIDs removes the "allowlist_items" edge to AllowListItem entities by IDs. +func (aluo *AllowListUpdateOne) RemoveAllowlistItemIDs(ids ...int) *AllowListUpdateOne { + aluo.mutation.RemoveAllowlistItemIDs(ids...) + return aluo +} + +// RemoveAllowlistItems removes "allowlist_items" edges to AllowListItem entities. +func (aluo *AllowListUpdateOne) RemoveAllowlistItems(a ...*AllowListItem) *AllowListUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return aluo.RemoveAllowlistItemIDs(ids...) +} + +// Where appends a list predicates to the AllowListUpdate builder. +func (aluo *AllowListUpdateOne) Where(ps ...predicate.AllowList) *AllowListUpdateOne { + aluo.mutation.Where(ps...) + return aluo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (aluo *AllowListUpdateOne) Select(field string, fields ...string) *AllowListUpdateOne { + aluo.fields = append([]string{field}, fields...) + return aluo +} + +// Save executes the query and returns the updated AllowList entity. +func (aluo *AllowListUpdateOne) Save(ctx context.Context) (*AllowList, error) { + aluo.defaults() + return withHooks(ctx, aluo.sqlSave, aluo.mutation, aluo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (aluo *AllowListUpdateOne) SaveX(ctx context.Context) *AllowList { + node, err := aluo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (aluo *AllowListUpdateOne) Exec(ctx context.Context) error { + _, err := aluo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (aluo *AllowListUpdateOne) ExecX(ctx context.Context) { + if err := aluo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (aluo *AllowListUpdateOne) defaults() { + if _, ok := aluo.mutation.UpdatedAt(); !ok { + v := allowlist.UpdateDefaultUpdatedAt() + aluo.mutation.SetUpdatedAt(v) + } +} + +func (aluo *AllowListUpdateOne) sqlSave(ctx context.Context) (_node *AllowList, err error) { + _spec := sqlgraph.NewUpdateSpec(allowlist.Table, allowlist.Columns, sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt)) + id, ok := aluo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AllowList.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := aluo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, allowlist.FieldID) + for _, f := range fields { + if !allowlist.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != allowlist.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := aluo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := aluo.mutation.UpdatedAt(); ok { + _spec.SetField(allowlist.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := aluo.mutation.Name(); ok { + _spec.SetField(allowlist.FieldName, field.TypeString, value) + } + if value, ok := aluo.mutation.FromConsole(); ok { + _spec.SetField(allowlist.FieldFromConsole, field.TypeBool, value) + } + if value, ok := aluo.mutation.Description(); ok { + _spec.SetField(allowlist.FieldDescription, field.TypeString, value) + } + if aluo.mutation.DescriptionCleared() { + _spec.ClearField(allowlist.FieldDescription, field.TypeString) + } + if aluo.mutation.AllowlistIDCleared() { + 
_spec.ClearField(allowlist.FieldAllowlistID, field.TypeString) + } + if aluo.mutation.AllowlistItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: allowlist.AllowlistItemsTable, + Columns: allowlist.AllowlistItemsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := aluo.mutation.RemovedAllowlistItemsIDs(); len(nodes) > 0 && !aluo.mutation.AllowlistItemsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: allowlist.AllowlistItemsTable, + Columns: allowlist.AllowlistItemsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := aluo.mutation.AllowlistItemsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: allowlist.AllowlistItemsTable, + Columns: allowlist.AllowlistItemsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &AllowList{config: aluo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, aluo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{allowlist.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + aluo.mutation.done = true + return _node, nil +} diff --git a/pkg/database/ent/allowlistitem.go 
b/pkg/database/ent/allowlistitem.go new file mode 100644 index 00000000000..2c0d997b1d7 --- /dev/null +++ b/pkg/database/ent/allowlistitem.go @@ -0,0 +1,231 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" +) + +// AllowListItem is the model entity for the AllowListItem schema. +type AllowListItem struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // ExpiresAt holds the value of the "expires_at" field. + ExpiresAt time.Time `json:"expires_at,omitempty"` + // Comment holds the value of the "comment" field. + Comment string `json:"comment,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // StartIP holds the value of the "start_ip" field. + StartIP int64 `json:"start_ip,omitempty"` + // EndIP holds the value of the "end_ip" field. + EndIP int64 `json:"end_ip,omitempty"` + // StartSuffix holds the value of the "start_suffix" field. + StartSuffix int64 `json:"start_suffix,omitempty"` + // EndSuffix holds the value of the "end_suffix" field. + EndSuffix int64 `json:"end_suffix,omitempty"` + // IPSize holds the value of the "ip_size" field. + IPSize int64 `json:"ip_size,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AllowListItemQuery when eager-loading is set. + Edges AllowListItemEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AllowListItemEdges holds the relations/edges for other nodes in the graph. +type AllowListItemEdges struct { + // Allowlist holds the value of the allowlist edge. 
+ Allowlist []*AllowList `json:"allowlist,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// AllowlistOrErr returns the Allowlist value or an error if the edge +// was not loaded in eager-loading. +func (e AllowListItemEdges) AllowlistOrErr() ([]*AllowList, error) { + if e.loadedTypes[0] { + return e.Allowlist, nil + } + return nil, &NotLoadedError{edge: "allowlist"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*AllowListItem) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case allowlistitem.FieldID, allowlistitem.FieldStartIP, allowlistitem.FieldEndIP, allowlistitem.FieldStartSuffix, allowlistitem.FieldEndSuffix, allowlistitem.FieldIPSize: + values[i] = new(sql.NullInt64) + case allowlistitem.FieldComment, allowlistitem.FieldValue: + values[i] = new(sql.NullString) + case allowlistitem.FieldCreatedAt, allowlistitem.FieldUpdatedAt, allowlistitem.FieldExpiresAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AllowListItem fields. 
+func (ali *AllowListItem) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case allowlistitem.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + ali.ID = int(value.Int64) + case allowlistitem.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + ali.CreatedAt = value.Time + } + case allowlistitem.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + ali.UpdatedAt = value.Time + } + case allowlistitem.FieldExpiresAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expires_at", values[i]) + } else if value.Valid { + ali.ExpiresAt = value.Time + } + case allowlistitem.FieldComment: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field comment", values[i]) + } else if value.Valid { + ali.Comment = value.String + } + case allowlistitem.FieldValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + ali.Value = value.String + } + case allowlistitem.FieldStartIP: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field start_ip", values[i]) + } else if value.Valid { + ali.StartIP = value.Int64 + } + case allowlistitem.FieldEndIP: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field end_ip", values[i]) + } else if value.Valid { + ali.EndIP = value.Int64 + } + case allowlistitem.FieldStartSuffix: + if value, 
ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field start_suffix", values[i]) + } else if value.Valid { + ali.StartSuffix = value.Int64 + } + case allowlistitem.FieldEndSuffix: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field end_suffix", values[i]) + } else if value.Valid { + ali.EndSuffix = value.Int64 + } + case allowlistitem.FieldIPSize: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field ip_size", values[i]) + } else if value.Valid { + ali.IPSize = value.Int64 + } + default: + ali.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// GetValue returns the ent.Value that was dynamically selected and assigned to the AllowListItem. +// This includes values selected through modifiers, order, etc. +func (ali *AllowListItem) GetValue(name string) (ent.Value, error) { + return ali.selectValues.Get(name) +} + +// QueryAllowlist queries the "allowlist" edge of the AllowListItem entity. +func (ali *AllowListItem) QueryAllowlist() *AllowListQuery { + return NewAllowListItemClient(ali.config).QueryAllowlist(ali) +} + +// Update returns a builder for updating this AllowListItem. +// Note that you need to call AllowListItem.Unwrap() before calling this method if this AllowListItem +// was returned from a transaction, and the transaction was committed or rolled back. +func (ali *AllowListItem) Update() *AllowListItemUpdateOne { + return NewAllowListItemClient(ali.config).UpdateOne(ali) +} + +// Unwrap unwraps the AllowListItem entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
+func (ali *AllowListItem) Unwrap() *AllowListItem { + _tx, ok := ali.config.driver.(*txDriver) + if !ok { + panic("ent: AllowListItem is not a transactional entity") + } + ali.config.driver = _tx.drv + return ali +} + +// String implements the fmt.Stringer. +func (ali *AllowListItem) String() string { + var builder strings.Builder + builder.WriteString("AllowListItem(") + builder.WriteString(fmt.Sprintf("id=%v, ", ali.ID)) + builder.WriteString("created_at=") + builder.WriteString(ali.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(ali.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("expires_at=") + builder.WriteString(ali.ExpiresAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("comment=") + builder.WriteString(ali.Comment) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(ali.Value) + builder.WriteString(", ") + builder.WriteString("start_ip=") + builder.WriteString(fmt.Sprintf("%v", ali.StartIP)) + builder.WriteString(", ") + builder.WriteString("end_ip=") + builder.WriteString(fmt.Sprintf("%v", ali.EndIP)) + builder.WriteString(", ") + builder.WriteString("start_suffix=") + builder.WriteString(fmt.Sprintf("%v", ali.StartSuffix)) + builder.WriteString(", ") + builder.WriteString("end_suffix=") + builder.WriteString(fmt.Sprintf("%v", ali.EndSuffix)) + builder.WriteString(", ") + builder.WriteString("ip_size=") + builder.WriteString(fmt.Sprintf("%v", ali.IPSize)) + builder.WriteByte(')') + return builder.String() +} + +// AllowListItems is a parsable slice of AllowListItem. +type AllowListItems []*AllowListItem diff --git a/pkg/database/ent/allowlistitem/allowlistitem.go b/pkg/database/ent/allowlistitem/allowlistitem.go new file mode 100644 index 00000000000..5474763eac3 --- /dev/null +++ b/pkg/database/ent/allowlistitem/allowlistitem.go @@ -0,0 +1,165 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package allowlistitem + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the allowlistitem type in the database. + Label = "allow_list_item" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldExpiresAt holds the string denoting the expires_at field in the database. + FieldExpiresAt = "expires_at" + // FieldComment holds the string denoting the comment field in the database. + FieldComment = "comment" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // FieldStartIP holds the string denoting the start_ip field in the database. + FieldStartIP = "start_ip" + // FieldEndIP holds the string denoting the end_ip field in the database. + FieldEndIP = "end_ip" + // FieldStartSuffix holds the string denoting the start_suffix field in the database. + FieldStartSuffix = "start_suffix" + // FieldEndSuffix holds the string denoting the end_suffix field in the database. + FieldEndSuffix = "end_suffix" + // FieldIPSize holds the string denoting the ip_size field in the database. + FieldIPSize = "ip_size" + // EdgeAllowlist holds the string denoting the allowlist edge name in mutations. + EdgeAllowlist = "allowlist" + // Table holds the table name of the allowlistitem in the database. + Table = "allow_list_items" + // AllowlistTable is the table that holds the allowlist relation/edge. The primary key declared below. + AllowlistTable = "allow_list_allowlist_items" + // AllowlistInverseTable is the table name for the AllowList entity. + // It exists in this package in order to avoid circular dependency with the "allowlist" package. 
+ AllowlistInverseTable = "allow_lists" +) + +// Columns holds all SQL columns for allowlistitem fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldExpiresAt, + FieldComment, + FieldValue, + FieldStartIP, + FieldEndIP, + FieldStartSuffix, + FieldEndSuffix, + FieldIPSize, +} + +var ( + // AllowlistPrimaryKey and AllowlistColumn2 are the table columns denoting the + // primary key for the allowlist relation (M2M). + AllowlistPrimaryKey = []string{"allow_list_id", "allow_list_item_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the AllowListItem queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. 
+func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} + +// ByComment orders the results by the comment field. +func ByComment(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldComment, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByStartIP orders the results by the start_ip field. +func ByStartIP(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartIP, opts...).ToFunc() +} + +// ByEndIP orders the results by the end_ip field. +func ByEndIP(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndIP, opts...).ToFunc() +} + +// ByStartSuffix orders the results by the start_suffix field. +func ByStartSuffix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartSuffix, opts...).ToFunc() +} + +// ByEndSuffix orders the results by the end_suffix field. +func ByEndSuffix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndSuffix, opts...).ToFunc() +} + +// ByIPSize orders the results by the ip_size field. +func ByIPSize(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIPSize, opts...).ToFunc() +} + +// ByAllowlistCount orders the results by allowlist count. +func ByAllowlistCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAllowlistStep(), opts...) + } +} + +// ByAllowlist orders the results by allowlist terms. +func ByAllowlist(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAllowlistStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newAllowlistStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AllowlistInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AllowlistTable, AllowlistPrimaryKey...), + ) +} diff --git a/pkg/database/ent/allowlistitem/where.go b/pkg/database/ent/allowlistitem/where.go new file mode 100644 index 00000000000..32a10d77c22 --- /dev/null +++ b/pkg/database/ent/allowlistitem/where.go @@ -0,0 +1,664 @@ +// Code generated by ent, DO NOT EDIT. + +package allowlistitem + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldExpiresAt, v)) +} + +// Comment applies equality check predicate on the "comment" field. It's identical to CommentEQ. +func Comment(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldComment, v)) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldValue, v)) +} + +// StartIP applies equality check predicate on the "start_ip" field. It's identical to StartIPEQ. +func StartIP(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldStartIP, v)) +} + +// EndIP applies equality check predicate on the "end_ip" field. It's identical to EndIPEQ. +func EndIP(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldEndIP, v)) +} + +// StartSuffix applies equality check predicate on the "start_suffix" field. It's identical to StartSuffixEQ. 
+func StartSuffix(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldStartSuffix, v)) +} + +// EndSuffix applies equality check predicate on the "end_suffix" field. It's identical to EndSuffixEQ. +func EndSuffix(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldEndSuffix, v)) +} + +// IPSize applies equality check predicate on the "ip_size" field. It's identical to IPSizeEQ. +func IPSize(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldIPSize, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. 
+func CreatedAtLT(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. 
+func ExpiresAtEQ(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldExpiresAt, v)) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. +func ExpiresAtNEQ(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldExpiresAt, v)) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. +func ExpiresAtIn(vs ...time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. +func ExpiresAtGT(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldExpiresAt, v)) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. +func ExpiresAtGTE(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldExpiresAt, v)) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldExpiresAt, v)) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. +func ExpiresAtLTE(v time.Time) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldExpiresAt, v)) +} + +// ExpiresAtIsNil applies the IsNil predicate on the "expires_at" field. +func ExpiresAtIsNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIsNull(FieldExpiresAt)) +} + +// ExpiresAtNotNil applies the NotNil predicate on the "expires_at" field. +func ExpiresAtNotNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotNull(FieldExpiresAt)) +} + +// CommentEQ applies the EQ predicate on the "comment" field. 
+func CommentEQ(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldComment, v)) +} + +// CommentNEQ applies the NEQ predicate on the "comment" field. +func CommentNEQ(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldComment, v)) +} + +// CommentIn applies the In predicate on the "comment" field. +func CommentIn(vs ...string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldComment, vs...)) +} + +// CommentNotIn applies the NotIn predicate on the "comment" field. +func CommentNotIn(vs ...string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldComment, vs...)) +} + +// CommentGT applies the GT predicate on the "comment" field. +func CommentGT(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldComment, v)) +} + +// CommentGTE applies the GTE predicate on the "comment" field. +func CommentGTE(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldComment, v)) +} + +// CommentLT applies the LT predicate on the "comment" field. +func CommentLT(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldComment, v)) +} + +// CommentLTE applies the LTE predicate on the "comment" field. +func CommentLTE(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldComment, v)) +} + +// CommentContains applies the Contains predicate on the "comment" field. +func CommentContains(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldContains(FieldComment, v)) +} + +// CommentHasPrefix applies the HasPrefix predicate on the "comment" field. +func CommentHasPrefix(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldHasPrefix(FieldComment, v)) +} + +// CommentHasSuffix applies the HasSuffix predicate on the "comment" field. 
+func CommentHasSuffix(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldHasSuffix(FieldComment, v)) +} + +// CommentIsNil applies the IsNil predicate on the "comment" field. +func CommentIsNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIsNull(FieldComment)) +} + +// CommentNotNil applies the NotNil predicate on the "comment" field. +func CommentNotNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotNull(FieldComment)) +} + +// CommentEqualFold applies the EqualFold predicate on the "comment" field. +func CommentEqualFold(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEqualFold(FieldComment, v)) +} + +// CommentContainsFold applies the ContainsFold predicate on the "comment" field. +func CommentContainsFold(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldContainsFold(FieldComment, v)) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldValue, v)) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldValue, v)) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldValue, vs...)) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldValue, vs...)) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldValue, v)) +} + +// ValueGTE applies the GTE predicate on the "value" field. 
+func ValueGTE(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldValue, v)) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldValue, v)) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldValue, v)) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldContains(FieldValue, v)) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldHasPrefix(FieldValue, v)) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldHasSuffix(FieldValue, v)) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEqualFold(FieldValue, v)) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldContainsFold(FieldValue, v)) +} + +// StartIPEQ applies the EQ predicate on the "start_ip" field. +func StartIPEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldStartIP, v)) +} + +// StartIPNEQ applies the NEQ predicate on the "start_ip" field. +func StartIPNEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldStartIP, v)) +} + +// StartIPIn applies the In predicate on the "start_ip" field. 
+func StartIPIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldStartIP, vs...)) +} + +// StartIPNotIn applies the NotIn predicate on the "start_ip" field. +func StartIPNotIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldStartIP, vs...)) +} + +// StartIPGT applies the GT predicate on the "start_ip" field. +func StartIPGT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldStartIP, v)) +} + +// StartIPGTE applies the GTE predicate on the "start_ip" field. +func StartIPGTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldStartIP, v)) +} + +// StartIPLT applies the LT predicate on the "start_ip" field. +func StartIPLT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldStartIP, v)) +} + +// StartIPLTE applies the LTE predicate on the "start_ip" field. +func StartIPLTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldStartIP, v)) +} + +// StartIPIsNil applies the IsNil predicate on the "start_ip" field. +func StartIPIsNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIsNull(FieldStartIP)) +} + +// StartIPNotNil applies the NotNil predicate on the "start_ip" field. +func StartIPNotNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotNull(FieldStartIP)) +} + +// EndIPEQ applies the EQ predicate on the "end_ip" field. +func EndIPEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldEndIP, v)) +} + +// EndIPNEQ applies the NEQ predicate on the "end_ip" field. +func EndIPNEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldEndIP, v)) +} + +// EndIPIn applies the In predicate on the "end_ip" field. 
+func EndIPIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldEndIP, vs...)) +} + +// EndIPNotIn applies the NotIn predicate on the "end_ip" field. +func EndIPNotIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldEndIP, vs...)) +} + +// EndIPGT applies the GT predicate on the "end_ip" field. +func EndIPGT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldEndIP, v)) +} + +// EndIPGTE applies the GTE predicate on the "end_ip" field. +func EndIPGTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldEndIP, v)) +} + +// EndIPLT applies the LT predicate on the "end_ip" field. +func EndIPLT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldEndIP, v)) +} + +// EndIPLTE applies the LTE predicate on the "end_ip" field. +func EndIPLTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldEndIP, v)) +} + +// EndIPIsNil applies the IsNil predicate on the "end_ip" field. +func EndIPIsNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIsNull(FieldEndIP)) +} + +// EndIPNotNil applies the NotNil predicate on the "end_ip" field. +func EndIPNotNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotNull(FieldEndIP)) +} + +// StartSuffixEQ applies the EQ predicate on the "start_suffix" field. +func StartSuffixEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldStartSuffix, v)) +} + +// StartSuffixNEQ applies the NEQ predicate on the "start_suffix" field. +func StartSuffixNEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldStartSuffix, v)) +} + +// StartSuffixIn applies the In predicate on the "start_suffix" field. 
+func StartSuffixIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldStartSuffix, vs...)) +} + +// StartSuffixNotIn applies the NotIn predicate on the "start_suffix" field. +func StartSuffixNotIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldStartSuffix, vs...)) +} + +// StartSuffixGT applies the GT predicate on the "start_suffix" field. +func StartSuffixGT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldStartSuffix, v)) +} + +// StartSuffixGTE applies the GTE predicate on the "start_suffix" field. +func StartSuffixGTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldStartSuffix, v)) +} + +// StartSuffixLT applies the LT predicate on the "start_suffix" field. +func StartSuffixLT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldStartSuffix, v)) +} + +// StartSuffixLTE applies the LTE predicate on the "start_suffix" field. +func StartSuffixLTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldStartSuffix, v)) +} + +// StartSuffixIsNil applies the IsNil predicate on the "start_suffix" field. +func StartSuffixIsNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIsNull(FieldStartSuffix)) +} + +// StartSuffixNotNil applies the NotNil predicate on the "start_suffix" field. +func StartSuffixNotNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotNull(FieldStartSuffix)) +} + +// EndSuffixEQ applies the EQ predicate on the "end_suffix" field. +func EndSuffixEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldEndSuffix, v)) +} + +// EndSuffixNEQ applies the NEQ predicate on the "end_suffix" field. 
+func EndSuffixNEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldEndSuffix, v)) +} + +// EndSuffixIn applies the In predicate on the "end_suffix" field. +func EndSuffixIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldEndSuffix, vs...)) +} + +// EndSuffixNotIn applies the NotIn predicate on the "end_suffix" field. +func EndSuffixNotIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldEndSuffix, vs...)) +} + +// EndSuffixGT applies the GT predicate on the "end_suffix" field. +func EndSuffixGT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldEndSuffix, v)) +} + +// EndSuffixGTE applies the GTE predicate on the "end_suffix" field. +func EndSuffixGTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldEndSuffix, v)) +} + +// EndSuffixLT applies the LT predicate on the "end_suffix" field. +func EndSuffixLT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldEndSuffix, v)) +} + +// EndSuffixLTE applies the LTE predicate on the "end_suffix" field. +func EndSuffixLTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldEndSuffix, v)) +} + +// EndSuffixIsNil applies the IsNil predicate on the "end_suffix" field. +func EndSuffixIsNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIsNull(FieldEndSuffix)) +} + +// EndSuffixNotNil applies the NotNil predicate on the "end_suffix" field. +func EndSuffixNotNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotNull(FieldEndSuffix)) +} + +// IPSizeEQ applies the EQ predicate on the "ip_size" field. +func IPSizeEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldEQ(FieldIPSize, v)) +} + +// IPSizeNEQ applies the NEQ predicate on the "ip_size" field. 
+func IPSizeNEQ(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNEQ(FieldIPSize, v)) +} + +// IPSizeIn applies the In predicate on the "ip_size" field. +func IPSizeIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIn(FieldIPSize, vs...)) +} + +// IPSizeNotIn applies the NotIn predicate on the "ip_size" field. +func IPSizeNotIn(vs ...int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotIn(FieldIPSize, vs...)) +} + +// IPSizeGT applies the GT predicate on the "ip_size" field. +func IPSizeGT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGT(FieldIPSize, v)) +} + +// IPSizeGTE applies the GTE predicate on the "ip_size" field. +func IPSizeGTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldGTE(FieldIPSize, v)) +} + +// IPSizeLT applies the LT predicate on the "ip_size" field. +func IPSizeLT(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLT(FieldIPSize, v)) +} + +// IPSizeLTE applies the LTE predicate on the "ip_size" field. +func IPSizeLTE(v int64) predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldLTE(FieldIPSize, v)) +} + +// IPSizeIsNil applies the IsNil predicate on the "ip_size" field. +func IPSizeIsNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldIsNull(FieldIPSize)) +} + +// IPSizeNotNil applies the NotNil predicate on the "ip_size" field. +func IPSizeNotNil() predicate.AllowListItem { + return predicate.AllowListItem(sql.FieldNotNull(FieldIPSize)) +} + +// HasAllowlist applies the HasEdge predicate on the "allowlist" edge. 
+func HasAllowlist() predicate.AllowListItem { + return predicate.AllowListItem(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AllowlistTable, AllowlistPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAllowlistWith applies the HasEdge predicate on the "allowlist" edge with a given conditions (other predicates). +func HasAllowlistWith(preds ...predicate.AllowList) predicate.AllowListItem { + return predicate.AllowListItem(func(s *sql.Selector) { + step := newAllowlistStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.AllowListItem) predicate.AllowListItem { + return predicate.AllowListItem(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.AllowListItem) predicate.AllowListItem { + return predicate.AllowListItem(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.AllowListItem) predicate.AllowListItem { + return predicate.AllowListItem(sql.NotPredicates(p)) +} diff --git a/pkg/database/ent/allowlistitem_create.go b/pkg/database/ent/allowlistitem_create.go new file mode 100644 index 00000000000..502cec11db7 --- /dev/null +++ b/pkg/database/ent/allowlistitem_create.go @@ -0,0 +1,398 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" +) + +// AllowListItemCreate is the builder for creating a AllowListItem entity. 
+type AllowListItemCreate struct { + config + mutation *AllowListItemMutation + hooks []Hook +} + +// SetCreatedAt sets the "created_at" field. +func (alic *AllowListItemCreate) SetCreatedAt(t time.Time) *AllowListItemCreate { + alic.mutation.SetCreatedAt(t) + return alic +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (alic *AllowListItemCreate) SetNillableCreatedAt(t *time.Time) *AllowListItemCreate { + if t != nil { + alic.SetCreatedAt(*t) + } + return alic +} + +// SetUpdatedAt sets the "updated_at" field. +func (alic *AllowListItemCreate) SetUpdatedAt(t time.Time) *AllowListItemCreate { + alic.mutation.SetUpdatedAt(t) + return alic +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (alic *AllowListItemCreate) SetNillableUpdatedAt(t *time.Time) *AllowListItemCreate { + if t != nil { + alic.SetUpdatedAt(*t) + } + return alic +} + +// SetExpiresAt sets the "expires_at" field. +func (alic *AllowListItemCreate) SetExpiresAt(t time.Time) *AllowListItemCreate { + alic.mutation.SetExpiresAt(t) + return alic +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (alic *AllowListItemCreate) SetNillableExpiresAt(t *time.Time) *AllowListItemCreate { + if t != nil { + alic.SetExpiresAt(*t) + } + return alic +} + +// SetComment sets the "comment" field. +func (alic *AllowListItemCreate) SetComment(s string) *AllowListItemCreate { + alic.mutation.SetComment(s) + return alic +} + +// SetNillableComment sets the "comment" field if the given value is not nil. +func (alic *AllowListItemCreate) SetNillableComment(s *string) *AllowListItemCreate { + if s != nil { + alic.SetComment(*s) + } + return alic +} + +// SetValue sets the "value" field. +func (alic *AllowListItemCreate) SetValue(s string) *AllowListItemCreate { + alic.mutation.SetValue(s) + return alic +} + +// SetStartIP sets the "start_ip" field. 
+func (alic *AllowListItemCreate) SetStartIP(i int64) *AllowListItemCreate { + alic.mutation.SetStartIP(i) + return alic +} + +// SetNillableStartIP sets the "start_ip" field if the given value is not nil. +func (alic *AllowListItemCreate) SetNillableStartIP(i *int64) *AllowListItemCreate { + if i != nil { + alic.SetStartIP(*i) + } + return alic +} + +// SetEndIP sets the "end_ip" field. +func (alic *AllowListItemCreate) SetEndIP(i int64) *AllowListItemCreate { + alic.mutation.SetEndIP(i) + return alic +} + +// SetNillableEndIP sets the "end_ip" field if the given value is not nil. +func (alic *AllowListItemCreate) SetNillableEndIP(i *int64) *AllowListItemCreate { + if i != nil { + alic.SetEndIP(*i) + } + return alic +} + +// SetStartSuffix sets the "start_suffix" field. +func (alic *AllowListItemCreate) SetStartSuffix(i int64) *AllowListItemCreate { + alic.mutation.SetStartSuffix(i) + return alic +} + +// SetNillableStartSuffix sets the "start_suffix" field if the given value is not nil. +func (alic *AllowListItemCreate) SetNillableStartSuffix(i *int64) *AllowListItemCreate { + if i != nil { + alic.SetStartSuffix(*i) + } + return alic +} + +// SetEndSuffix sets the "end_suffix" field. +func (alic *AllowListItemCreate) SetEndSuffix(i int64) *AllowListItemCreate { + alic.mutation.SetEndSuffix(i) + return alic +} + +// SetNillableEndSuffix sets the "end_suffix" field if the given value is not nil. +func (alic *AllowListItemCreate) SetNillableEndSuffix(i *int64) *AllowListItemCreate { + if i != nil { + alic.SetEndSuffix(*i) + } + return alic +} + +// SetIPSize sets the "ip_size" field. +func (alic *AllowListItemCreate) SetIPSize(i int64) *AllowListItemCreate { + alic.mutation.SetIPSize(i) + return alic +} + +// SetNillableIPSize sets the "ip_size" field if the given value is not nil. 
+func (alic *AllowListItemCreate) SetNillableIPSize(i *int64) *AllowListItemCreate { + if i != nil { + alic.SetIPSize(*i) + } + return alic +} + +// AddAllowlistIDs adds the "allowlist" edge to the AllowList entity by IDs. +func (alic *AllowListItemCreate) AddAllowlistIDs(ids ...int) *AllowListItemCreate { + alic.mutation.AddAllowlistIDs(ids...) + return alic +} + +// AddAllowlist adds the "allowlist" edges to the AllowList entity. +func (alic *AllowListItemCreate) AddAllowlist(a ...*AllowList) *AllowListItemCreate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return alic.AddAllowlistIDs(ids...) +} + +// Mutation returns the AllowListItemMutation object of the builder. +func (alic *AllowListItemCreate) Mutation() *AllowListItemMutation { + return alic.mutation +} + +// Save creates the AllowListItem in the database. +func (alic *AllowListItemCreate) Save(ctx context.Context) (*AllowListItem, error) { + alic.defaults() + return withHooks(ctx, alic.sqlSave, alic.mutation, alic.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (alic *AllowListItemCreate) SaveX(ctx context.Context) *AllowListItem { + v, err := alic.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (alic *AllowListItemCreate) Exec(ctx context.Context) error { + _, err := alic.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (alic *AllowListItemCreate) ExecX(ctx context.Context) { + if err := alic.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (alic *AllowListItemCreate) defaults() { + if _, ok := alic.mutation.CreatedAt(); !ok { + v := allowlistitem.DefaultCreatedAt() + alic.mutation.SetCreatedAt(v) + } + if _, ok := alic.mutation.UpdatedAt(); !ok { + v := allowlistitem.DefaultUpdatedAt() + alic.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (alic *AllowListItemCreate) check() error { + if _, ok := alic.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AllowListItem.created_at"`)} + } + if _, ok := alic.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "AllowListItem.updated_at"`)} + } + if _, ok := alic.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "AllowListItem.value"`)} + } + return nil +} + +func (alic *AllowListItemCreate) sqlSave(ctx context.Context) (*AllowListItem, error) { + if err := alic.check(); err != nil { + return nil, err + } + _node, _spec := alic.createSpec() + if err := sqlgraph.CreateNode(ctx, alic.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + alic.mutation.id = &_node.ID + alic.mutation.done = true + return _node, nil +} + +func (alic *AllowListItemCreate) createSpec() (*AllowListItem, *sqlgraph.CreateSpec) { + var ( + _node = &AllowListItem{config: alic.config} + _spec = sqlgraph.NewCreateSpec(allowlistitem.Table, sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt)) + ) + if value, ok := alic.mutation.CreatedAt(); ok { + _spec.SetField(allowlistitem.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := alic.mutation.UpdatedAt(); ok { + _spec.SetField(allowlistitem.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt 
= value + } + if value, ok := alic.mutation.ExpiresAt(); ok { + _spec.SetField(allowlistitem.FieldExpiresAt, field.TypeTime, value) + _node.ExpiresAt = value + } + if value, ok := alic.mutation.Comment(); ok { + _spec.SetField(allowlistitem.FieldComment, field.TypeString, value) + _node.Comment = value + } + if value, ok := alic.mutation.Value(); ok { + _spec.SetField(allowlistitem.FieldValue, field.TypeString, value) + _node.Value = value + } + if value, ok := alic.mutation.StartIP(); ok { + _spec.SetField(allowlistitem.FieldStartIP, field.TypeInt64, value) + _node.StartIP = value + } + if value, ok := alic.mutation.EndIP(); ok { + _spec.SetField(allowlistitem.FieldEndIP, field.TypeInt64, value) + _node.EndIP = value + } + if value, ok := alic.mutation.StartSuffix(); ok { + _spec.SetField(allowlistitem.FieldStartSuffix, field.TypeInt64, value) + _node.StartSuffix = value + } + if value, ok := alic.mutation.EndSuffix(); ok { + _spec.SetField(allowlistitem.FieldEndSuffix, field.TypeInt64, value) + _node.EndSuffix = value + } + if value, ok := alic.mutation.IPSize(); ok { + _spec.SetField(allowlistitem.FieldIPSize, field.TypeInt64, value) + _node.IPSize = value + } + if nodes := alic.mutation.AllowlistIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: allowlistitem.AllowlistTable, + Columns: allowlistitem.AllowlistPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// AllowListItemCreateBulk is the builder for creating many AllowListItem entities in bulk. +type AllowListItemCreateBulk struct { + config + err error + builders []*AllowListItemCreate +} + +// Save creates the AllowListItem entities in the database. 
+func (alicb *AllowListItemCreateBulk) Save(ctx context.Context) ([]*AllowListItem, error) { + if alicb.err != nil { + return nil, alicb.err + } + specs := make([]*sqlgraph.CreateSpec, len(alicb.builders)) + nodes := make([]*AllowListItem, len(alicb.builders)) + mutators := make([]Mutator, len(alicb.builders)) + for i := range alicb.builders { + func(i int, root context.Context) { + builder := alicb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AllowListItemMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, alicb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, alicb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, alicb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (alicb *AllowListItemCreateBulk) SaveX(ctx context.Context) []*AllowListItem { + v, err := alicb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (alicb *AllowListItemCreateBulk) Exec(ctx context.Context) error { + _, err := alicb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (alicb *AllowListItemCreateBulk) ExecX(ctx context.Context) { + if err := alicb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/allowlistitem_delete.go b/pkg/database/ent/allowlistitem_delete.go new file mode 100644 index 00000000000..87b340012df --- /dev/null +++ b/pkg/database/ent/allowlistitem_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// AllowListItemDelete is the builder for deleting a AllowListItem entity. +type AllowListItemDelete struct { + config + hooks []Hook + mutation *AllowListItemMutation +} + +// Where appends a list predicates to the AllowListItemDelete builder. +func (alid *AllowListItemDelete) Where(ps ...predicate.AllowListItem) *AllowListItemDelete { + alid.mutation.Where(ps...) + return alid +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (alid *AllowListItemDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, alid.sqlExec, alid.mutation, alid.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (alid *AllowListItemDelete) ExecX(ctx context.Context) int { + n, err := alid.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (alid *AllowListItemDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(allowlistitem.Table, sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt)) + if ps := alid.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, alid.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + alid.mutation.done = true + return affected, err +} + +// AllowListItemDeleteOne is the builder for deleting a single AllowListItem entity. +type AllowListItemDeleteOne struct { + alid *AllowListItemDelete +} + +// Where appends a list predicates to the AllowListItemDelete builder. +func (alido *AllowListItemDeleteOne) Where(ps ...predicate.AllowListItem) *AllowListItemDeleteOne { + alido.alid.mutation.Where(ps...) + return alido +} + +// Exec executes the deletion query. +func (alido *AllowListItemDeleteOne) Exec(ctx context.Context) error { + n, err := alido.alid.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{allowlistitem.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (alido *AllowListItemDeleteOne) ExecX(ctx context.Context) { + if err := alido.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/pkg/database/ent/allowlistitem_query.go b/pkg/database/ent/allowlistitem_query.go new file mode 100644 index 00000000000..4dccd30fae0 --- /dev/null +++ b/pkg/database/ent/allowlistitem_query.go @@ -0,0 +1,637 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// AllowListItemQuery is the builder for querying AllowListItem entities. +type AllowListItemQuery struct { + config + ctx *QueryContext + order []allowlistitem.OrderOption + inters []Interceptor + predicates []predicate.AllowListItem + withAllowlist *AllowListQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AllowListItemQuery builder. +func (aliq *AllowListItemQuery) Where(ps ...predicate.AllowListItem) *AllowListItemQuery { + aliq.predicates = append(aliq.predicates, ps...) + return aliq +} + +// Limit the number of records to be returned by this query. +func (aliq *AllowListItemQuery) Limit(limit int) *AllowListItemQuery { + aliq.ctx.Limit = &limit + return aliq +} + +// Offset to start from. +func (aliq *AllowListItemQuery) Offset(offset int) *AllowListItemQuery { + aliq.ctx.Offset = &offset + return aliq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (aliq *AllowListItemQuery) Unique(unique bool) *AllowListItemQuery { + aliq.ctx.Unique = &unique + return aliq +} + +// Order specifies how the records should be ordered. +func (aliq *AllowListItemQuery) Order(o ...allowlistitem.OrderOption) *AllowListItemQuery { + aliq.order = append(aliq.order, o...) + return aliq +} + +// QueryAllowlist chains the current query on the "allowlist" edge. 
+func (aliq *AllowListItemQuery) QueryAllowlist() *AllowListQuery { + query := (&AllowListClient{config: aliq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aliq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aliq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(allowlistitem.Table, allowlistitem.FieldID, selector), + sqlgraph.To(allowlist.Table, allowlist.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, allowlistitem.AllowlistTable, allowlistitem.AllowlistPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(aliq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first AllowListItem entity from the query. +// Returns a *NotFoundError when no AllowListItem was found. +func (aliq *AllowListItemQuery) First(ctx context.Context) (*AllowListItem, error) { + nodes, err := aliq.Limit(1).All(setContextOp(ctx, aliq.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{allowlistitem.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (aliq *AllowListItemQuery) FirstX(ctx context.Context) *AllowListItem { + node, err := aliq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first AllowListItem ID from the query. +// Returns a *NotFoundError when no AllowListItem ID was found. +func (aliq *AllowListItemQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = aliq.Limit(1).IDs(setContextOp(ctx, aliq.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{allowlistitem.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (aliq *AllowListItemQuery) FirstIDX(ctx context.Context) int { + id, err := aliq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single AllowListItem entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AllowListItem entity is found. +// Returns a *NotFoundError when no AllowListItem entities are found. +func (aliq *AllowListItemQuery) Only(ctx context.Context) (*AllowListItem, error) { + nodes, err := aliq.Limit(2).All(setContextOp(ctx, aliq.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{allowlistitem.Label} + default: + return nil, &NotSingularError{allowlistitem.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (aliq *AllowListItemQuery) OnlyX(ctx context.Context) *AllowListItem { + node, err := aliq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only AllowListItem ID in the query. +// Returns a *NotSingularError when more than one AllowListItem ID is found. +// Returns a *NotFoundError when no entities are found. +func (aliq *AllowListItemQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = aliq.Limit(2).IDs(setContextOp(ctx, aliq.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{allowlistitem.Label} + default: + err = &NotSingularError{allowlistitem.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (aliq *AllowListItemQuery) OnlyIDX(ctx context.Context) int { + id, err := aliq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of AllowListItems. 
+func (aliq *AllowListItemQuery) All(ctx context.Context) ([]*AllowListItem, error) { + ctx = setContextOp(ctx, aliq.ctx, ent.OpQueryAll) + if err := aliq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AllowListItem, *AllowListItemQuery]() + return withInterceptors[[]*AllowListItem](ctx, aliq, qr, aliq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (aliq *AllowListItemQuery) AllX(ctx context.Context) []*AllowListItem { + nodes, err := aliq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of AllowListItem IDs. +func (aliq *AllowListItemQuery) IDs(ctx context.Context) (ids []int, err error) { + if aliq.ctx.Unique == nil && aliq.path != nil { + aliq.Unique(true) + } + ctx = setContextOp(ctx, aliq.ctx, ent.OpQueryIDs) + if err = aliq.Select(allowlistitem.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (aliq *AllowListItemQuery) IDsX(ctx context.Context) []int { + ids, err := aliq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (aliq *AllowListItemQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, aliq.ctx, ent.OpQueryCount) + if err := aliq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, aliq, querierCount[*AllowListItemQuery](), aliq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (aliq *AllowListItemQuery) CountX(ctx context.Context) int { + count, err := aliq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (aliq *AllowListItemQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, aliq.ctx, ent.OpQueryExist) + switch _, err := aliq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (aliq *AllowListItemQuery) ExistX(ctx context.Context) bool { + exist, err := aliq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AllowListItemQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (aliq *AllowListItemQuery) Clone() *AllowListItemQuery { + if aliq == nil { + return nil + } + return &AllowListItemQuery{ + config: aliq.config, + ctx: aliq.ctx.Clone(), + order: append([]allowlistitem.OrderOption{}, aliq.order...), + inters: append([]Interceptor{}, aliq.inters...), + predicates: append([]predicate.AllowListItem{}, aliq.predicates...), + withAllowlist: aliq.withAllowlist.Clone(), + // clone intermediate query. + sql: aliq.sql.Clone(), + path: aliq.path, + } +} + +// WithAllowlist tells the query-builder to eager-load the nodes that are connected to +// the "allowlist" edge. The optional arguments are used to configure the query builder of the edge. +func (aliq *AllowListItemQuery) WithAllowlist(opts ...func(*AllowListQuery)) *AllowListItemQuery { + query := (&AllowListClient{config: aliq.config}).Query() + for _, opt := range opts { + opt(query) + } + aliq.withAllowlist = query + return aliq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
+// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.AllowListItem.Query(). +// GroupBy(allowlistitem.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (aliq *AllowListItemQuery) GroupBy(field string, fields ...string) *AllowListItemGroupBy { + aliq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AllowListItemGroupBy{build: aliq} + grbuild.flds = &aliq.ctx.Fields + grbuild.label = allowlistitem.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.AllowListItem.Query(). +// Select(allowlistitem.FieldCreatedAt). +// Scan(ctx, &v) +func (aliq *AllowListItemQuery) Select(fields ...string) *AllowListItemSelect { + aliq.ctx.Fields = append(aliq.ctx.Fields, fields...) + sbuild := &AllowListItemSelect{AllowListItemQuery: aliq} + sbuild.label = allowlistitem.Label + sbuild.flds, sbuild.scan = &aliq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AllowListItemSelect configured with the given aggregations. +func (aliq *AllowListItemQuery) Aggregate(fns ...AggregateFunc) *AllowListItemSelect { + return aliq.Select().Aggregate(fns...) 
+} + +func (aliq *AllowListItemQuery) prepareQuery(ctx context.Context) error { + for _, inter := range aliq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, aliq); err != nil { + return err + } + } + } + for _, f := range aliq.ctx.Fields { + if !allowlistitem.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if aliq.path != nil { + prev, err := aliq.path(ctx) + if err != nil { + return err + } + aliq.sql = prev + } + return nil +} + +func (aliq *AllowListItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AllowListItem, error) { + var ( + nodes = []*AllowListItem{} + _spec = aliq.querySpec() + loadedTypes = [1]bool{ + aliq.withAllowlist != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AllowListItem).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &AllowListItem{config: aliq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, aliq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := aliq.withAllowlist; query != nil { + if err := aliq.loadAllowlist(ctx, query, nodes, + func(n *AllowListItem) { n.Edges.Allowlist = []*AllowList{} }, + func(n *AllowListItem, e *AllowList) { n.Edges.Allowlist = append(n.Edges.Allowlist, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (aliq *AllowListItemQuery) loadAllowlist(ctx context.Context, query *AllowListQuery, nodes []*AllowListItem, init func(*AllowListItem), assign func(*AllowListItem, *AllowList)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := 
make(map[int]*AllowListItem) + nids := make(map[int]map[*AllowListItem]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(allowlistitem.AllowlistTable) + s.Join(joinT).On(s.C(allowlist.FieldID), joinT.C(allowlistitem.AllowlistPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(allowlistitem.AllowlistPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(allowlistitem.AllowlistPrimaryKey[1])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := int(values[0].(*sql.NullInt64).Int64) + inValue := int(values[1].(*sql.NullInt64).Int64) + if nids[inValue] == nil { + nids[inValue] = map[*AllowListItem]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*AllowList](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "allowlist" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} + +func (aliq *AllowListItemQuery) sqlCount(ctx context.Context) (int, error) { + _spec := aliq.querySpec() + _spec.Node.Columns = aliq.ctx.Fields + if len(aliq.ctx.Fields) > 0 { + _spec.Unique = aliq.ctx.Unique != nil && 
*aliq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, aliq.driver, _spec) +} + +func (aliq *AllowListItemQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(allowlistitem.Table, allowlistitem.Columns, sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt)) + _spec.From = aliq.sql + if unique := aliq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if aliq.path != nil { + _spec.Unique = true + } + if fields := aliq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, allowlistitem.FieldID) + for i := range fields { + if fields[i] != allowlistitem.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := aliq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := aliq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := aliq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := aliq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (aliq *AllowListItemQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(aliq.driver.Dialect()) + t1 := builder.Table(allowlistitem.Table) + columns := aliq.ctx.Fields + if len(columns) == 0 { + columns = allowlistitem.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if aliq.sql != nil { + selector = aliq.sql + selector.Select(selector.Columns(columns...)...) + } + if aliq.ctx.Unique != nil && *aliq.ctx.Unique { + selector.Distinct() + } + for _, p := range aliq.predicates { + p(selector) + } + for _, p := range aliq.order { + p(selector) + } + if offset := aliq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := aliq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AllowListItemGroupBy is the group-by builder for AllowListItem entities. +type AllowListItemGroupBy struct { + selector + build *AllowListItemQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (aligb *AllowListItemGroupBy) Aggregate(fns ...AggregateFunc) *AllowListItemGroupBy { + aligb.fns = append(aligb.fns, fns...) + return aligb +} + +// Scan applies the selector query and scans the result into the given value. +func (aligb *AllowListItemGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, aligb.build.ctx, ent.OpQueryGroupBy) + if err := aligb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AllowListItemQuery, *AllowListItemGroupBy](ctx, aligb.build, aligb, aligb.build.inters, v) +} + +func (aligb *AllowListItemGroupBy) sqlScan(ctx context.Context, root *AllowListItemQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(aligb.fns)) + for _, fn := range aligb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*aligb.flds)+len(aligb.fns)) + for _, f := range *aligb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*aligb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := aligb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AllowListItemSelect is the builder for selecting fields of AllowListItem entities. 
+type AllowListItemSelect struct { + *AllowListItemQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (alis *AllowListItemSelect) Aggregate(fns ...AggregateFunc) *AllowListItemSelect { + alis.fns = append(alis.fns, fns...) + return alis +} + +// Scan applies the selector query and scans the result into the given value. +func (alis *AllowListItemSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, alis.ctx, ent.OpQuerySelect) + if err := alis.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AllowListItemQuery, *AllowListItemSelect](ctx, alis.AllowListItemQuery, alis, alis.inters, v) +} + +func (alis *AllowListItemSelect) sqlScan(ctx context.Context, root *AllowListItemQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(alis.fns)) + for _, fn := range alis.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*alis.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := alis.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/pkg/database/ent/allowlistitem_update.go b/pkg/database/ent/allowlistitem_update.go new file mode 100644 index 00000000000..e6878955afe --- /dev/null +++ b/pkg/database/ent/allowlistitem_update.go @@ -0,0 +1,463 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" +) + +// AllowListItemUpdate is the builder for updating AllowListItem entities. +type AllowListItemUpdate struct { + config + hooks []Hook + mutation *AllowListItemMutation +} + +// Where appends a list predicates to the AllowListItemUpdate builder. +func (aliu *AllowListItemUpdate) Where(ps ...predicate.AllowListItem) *AllowListItemUpdate { + aliu.mutation.Where(ps...) + return aliu +} + +// SetUpdatedAt sets the "updated_at" field. +func (aliu *AllowListItemUpdate) SetUpdatedAt(t time.Time) *AllowListItemUpdate { + aliu.mutation.SetUpdatedAt(t) + return aliu +} + +// SetExpiresAt sets the "expires_at" field. +func (aliu *AllowListItemUpdate) SetExpiresAt(t time.Time) *AllowListItemUpdate { + aliu.mutation.SetExpiresAt(t) + return aliu +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (aliu *AllowListItemUpdate) SetNillableExpiresAt(t *time.Time) *AllowListItemUpdate { + if t != nil { + aliu.SetExpiresAt(*t) + } + return aliu +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (aliu *AllowListItemUpdate) ClearExpiresAt() *AllowListItemUpdate { + aliu.mutation.ClearExpiresAt() + return aliu +} + +// AddAllowlistIDs adds the "allowlist" edge to the AllowList entity by IDs. +func (aliu *AllowListItemUpdate) AddAllowlistIDs(ids ...int) *AllowListItemUpdate { + aliu.mutation.AddAllowlistIDs(ids...) + return aliu +} + +// AddAllowlist adds the "allowlist" edges to the AllowList entity. 
+func (aliu *AllowListItemUpdate) AddAllowlist(a ...*AllowList) *AllowListItemUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return aliu.AddAllowlistIDs(ids...) +} + +// Mutation returns the AllowListItemMutation object of the builder. +func (aliu *AllowListItemUpdate) Mutation() *AllowListItemMutation { + return aliu.mutation +} + +// ClearAllowlist clears all "allowlist" edges to the AllowList entity. +func (aliu *AllowListItemUpdate) ClearAllowlist() *AllowListItemUpdate { + aliu.mutation.ClearAllowlist() + return aliu +} + +// RemoveAllowlistIDs removes the "allowlist" edge to AllowList entities by IDs. +func (aliu *AllowListItemUpdate) RemoveAllowlistIDs(ids ...int) *AllowListItemUpdate { + aliu.mutation.RemoveAllowlistIDs(ids...) + return aliu +} + +// RemoveAllowlist removes "allowlist" edges to AllowList entities. +func (aliu *AllowListItemUpdate) RemoveAllowlist(a ...*AllowList) *AllowListItemUpdate { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return aliu.RemoveAllowlistIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (aliu *AllowListItemUpdate) Save(ctx context.Context) (int, error) { + aliu.defaults() + return withHooks(ctx, aliu.sqlSave, aliu.mutation, aliu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (aliu *AllowListItemUpdate) SaveX(ctx context.Context) int { + affected, err := aliu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (aliu *AllowListItemUpdate) Exec(ctx context.Context) error { + _, err := aliu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (aliu *AllowListItemUpdate) ExecX(ctx context.Context) { + if err := aliu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (aliu *AllowListItemUpdate) defaults() { + if _, ok := aliu.mutation.UpdatedAt(); !ok { + v := allowlistitem.UpdateDefaultUpdatedAt() + aliu.mutation.SetUpdatedAt(v) + } +} + +func (aliu *AllowListItemUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(allowlistitem.Table, allowlistitem.Columns, sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt)) + if ps := aliu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := aliu.mutation.UpdatedAt(); ok { + _spec.SetField(allowlistitem.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := aliu.mutation.ExpiresAt(); ok { + _spec.SetField(allowlistitem.FieldExpiresAt, field.TypeTime, value) + } + if aliu.mutation.ExpiresAtCleared() { + _spec.ClearField(allowlistitem.FieldExpiresAt, field.TypeTime) + } + if aliu.mutation.CommentCleared() { + _spec.ClearField(allowlistitem.FieldComment, field.TypeString) + } + if aliu.mutation.StartIPCleared() { + _spec.ClearField(allowlistitem.FieldStartIP, field.TypeInt64) + } + if aliu.mutation.EndIPCleared() { + _spec.ClearField(allowlistitem.FieldEndIP, field.TypeInt64) + } + if aliu.mutation.StartSuffixCleared() { + _spec.ClearField(allowlistitem.FieldStartSuffix, field.TypeInt64) + } + if aliu.mutation.EndSuffixCleared() { + _spec.ClearField(allowlistitem.FieldEndSuffix, field.TypeInt64) + } + if aliu.mutation.IPSizeCleared() { + _spec.ClearField(allowlistitem.FieldIPSize, field.TypeInt64) + } + if aliu.mutation.AllowlistCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: allowlistitem.AllowlistTable, + Columns: allowlistitem.AllowlistPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := aliu.mutation.RemovedAllowlistIDs(); len(nodes) > 0 
&& !aliu.mutation.AllowlistCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: allowlistitem.AllowlistTable, + Columns: allowlistitem.AllowlistPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := aliu.mutation.AllowlistIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: allowlistitem.AllowlistTable, + Columns: allowlistitem.AllowlistPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, aliu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{allowlistitem.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + aliu.mutation.done = true + return n, nil +} + +// AllowListItemUpdateOne is the builder for updating a single AllowListItem entity. +type AllowListItemUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AllowListItemMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (aliuo *AllowListItemUpdateOne) SetUpdatedAt(t time.Time) *AllowListItemUpdateOne { + aliuo.mutation.SetUpdatedAt(t) + return aliuo +} + +// SetExpiresAt sets the "expires_at" field. +func (aliuo *AllowListItemUpdateOne) SetExpiresAt(t time.Time) *AllowListItemUpdateOne { + aliuo.mutation.SetExpiresAt(t) + return aliuo +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. 
+func (aliuo *AllowListItemUpdateOne) SetNillableExpiresAt(t *time.Time) *AllowListItemUpdateOne { + if t != nil { + aliuo.SetExpiresAt(*t) + } + return aliuo +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (aliuo *AllowListItemUpdateOne) ClearExpiresAt() *AllowListItemUpdateOne { + aliuo.mutation.ClearExpiresAt() + return aliuo +} + +// AddAllowlistIDs adds the "allowlist" edge to the AllowList entity by IDs. +func (aliuo *AllowListItemUpdateOne) AddAllowlistIDs(ids ...int) *AllowListItemUpdateOne { + aliuo.mutation.AddAllowlistIDs(ids...) + return aliuo +} + +// AddAllowlist adds the "allowlist" edges to the AllowList entity. +func (aliuo *AllowListItemUpdateOne) AddAllowlist(a ...*AllowList) *AllowListItemUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return aliuo.AddAllowlistIDs(ids...) +} + +// Mutation returns the AllowListItemMutation object of the builder. +func (aliuo *AllowListItemUpdateOne) Mutation() *AllowListItemMutation { + return aliuo.mutation +} + +// ClearAllowlist clears all "allowlist" edges to the AllowList entity. +func (aliuo *AllowListItemUpdateOne) ClearAllowlist() *AllowListItemUpdateOne { + aliuo.mutation.ClearAllowlist() + return aliuo +} + +// RemoveAllowlistIDs removes the "allowlist" edge to AllowList entities by IDs. +func (aliuo *AllowListItemUpdateOne) RemoveAllowlistIDs(ids ...int) *AllowListItemUpdateOne { + aliuo.mutation.RemoveAllowlistIDs(ids...) + return aliuo +} + +// RemoveAllowlist removes "allowlist" edges to AllowList entities. +func (aliuo *AllowListItemUpdateOne) RemoveAllowlist(a ...*AllowList) *AllowListItemUpdateOne { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return aliuo.RemoveAllowlistIDs(ids...) +} + +// Where appends a list predicates to the AllowListItemUpdate builder. +func (aliuo *AllowListItemUpdateOne) Where(ps ...predicate.AllowListItem) *AllowListItemUpdateOne { + aliuo.mutation.Where(ps...) 
+ return aliuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (aliuo *AllowListItemUpdateOne) Select(field string, fields ...string) *AllowListItemUpdateOne { + aliuo.fields = append([]string{field}, fields...) + return aliuo +} + +// Save executes the query and returns the updated AllowListItem entity. +func (aliuo *AllowListItemUpdateOne) Save(ctx context.Context) (*AllowListItem, error) { + aliuo.defaults() + return withHooks(ctx, aliuo.sqlSave, aliuo.mutation, aliuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (aliuo *AllowListItemUpdateOne) SaveX(ctx context.Context) *AllowListItem { + node, err := aliuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (aliuo *AllowListItemUpdateOne) Exec(ctx context.Context) error { + _, err := aliuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (aliuo *AllowListItemUpdateOne) ExecX(ctx context.Context) { + if err := aliuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (aliuo *AllowListItemUpdateOne) defaults() { + if _, ok := aliuo.mutation.UpdatedAt(); !ok { + v := allowlistitem.UpdateDefaultUpdatedAt() + aliuo.mutation.SetUpdatedAt(v) + } +} + +func (aliuo *AllowListItemUpdateOne) sqlSave(ctx context.Context) (_node *AllowListItem, err error) { + _spec := sqlgraph.NewUpdateSpec(allowlistitem.Table, allowlistitem.Columns, sqlgraph.NewFieldSpec(allowlistitem.FieldID, field.TypeInt)) + id, ok := aliuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "AllowListItem.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := aliuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, allowlistitem.FieldID) + for _, f := range fields { + if !allowlistitem.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != allowlistitem.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := aliuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := aliuo.mutation.UpdatedAt(); ok { + _spec.SetField(allowlistitem.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := aliuo.mutation.ExpiresAt(); ok { + _spec.SetField(allowlistitem.FieldExpiresAt, field.TypeTime, value) + } + if aliuo.mutation.ExpiresAtCleared() { + _spec.ClearField(allowlistitem.FieldExpiresAt, field.TypeTime) + } + if aliuo.mutation.CommentCleared() { + _spec.ClearField(allowlistitem.FieldComment, field.TypeString) + } + if aliuo.mutation.StartIPCleared() { + _spec.ClearField(allowlistitem.FieldStartIP, field.TypeInt64) + } + if aliuo.mutation.EndIPCleared() { + _spec.ClearField(allowlistitem.FieldEndIP, field.TypeInt64) + } + if aliuo.mutation.StartSuffixCleared() { + _spec.ClearField(allowlistitem.FieldStartSuffix, field.TypeInt64) + } + if 
aliuo.mutation.EndSuffixCleared() { + _spec.ClearField(allowlistitem.FieldEndSuffix, field.TypeInt64) + } + if aliuo.mutation.IPSizeCleared() { + _spec.ClearField(allowlistitem.FieldIPSize, field.TypeInt64) + } + if aliuo.mutation.AllowlistCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: allowlistitem.AllowlistTable, + Columns: allowlistitem.AllowlistPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := aliuo.mutation.RemovedAllowlistIDs(); len(nodes) > 0 && !aliuo.mutation.AllowlistCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: allowlistitem.AllowlistTable, + Columns: allowlistitem.AllowlistPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := aliuo.mutation.AllowlistIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: allowlistitem.AllowlistTable, + Columns: allowlistitem.AllowlistPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(allowlist.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &AllowListItem{config: aliuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, aliuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{allowlistitem.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + 
aliuo.mutation.done = true + return _node, nil +} diff --git a/pkg/database/ent/client.go b/pkg/database/ent/client.go index 59686102ebe..bc7c0330459 100644 --- a/pkg/database/ent/client.go +++ b/pkg/database/ent/client.go @@ -16,6 +16,8 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" @@ -33,6 +35,10 @@ type Client struct { Schema *migrate.Schema // Alert is the client for interacting with the Alert builders. Alert *AlertClient + // AllowList is the client for interacting with the AllowList builders. + AllowList *AllowListClient + // AllowListItem is the client for interacting with the AllowListItem builders. + AllowListItem *AllowListItemClient // Bouncer is the client for interacting with the Bouncer builders. Bouncer *BouncerClient // ConfigItem is the client for interacting with the ConfigItem builders. 
@@ -61,6 +67,8 @@ func NewClient(opts ...Option) *Client { func (c *Client) init() { c.Schema = migrate.NewSchema(c.driver) c.Alert = NewAlertClient(c.config) + c.AllowList = NewAllowListClient(c.config) + c.AllowListItem = NewAllowListItemClient(c.config) c.Bouncer = NewBouncerClient(c.config) c.ConfigItem = NewConfigItemClient(c.config) c.Decision = NewDecisionClient(c.config) @@ -159,17 +167,19 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { cfg := c.config cfg.driver = tx return &Tx{ - ctx: ctx, - config: cfg, - Alert: NewAlertClient(cfg), - Bouncer: NewBouncerClient(cfg), - ConfigItem: NewConfigItemClient(cfg), - Decision: NewDecisionClient(cfg), - Event: NewEventClient(cfg), - Lock: NewLockClient(cfg), - Machine: NewMachineClient(cfg), - Meta: NewMetaClient(cfg), - Metric: NewMetricClient(cfg), + ctx: ctx, + config: cfg, + Alert: NewAlertClient(cfg), + AllowList: NewAllowListClient(cfg), + AllowListItem: NewAllowListItemClient(cfg), + Bouncer: NewBouncerClient(cfg), + ConfigItem: NewConfigItemClient(cfg), + Decision: NewDecisionClient(cfg), + Event: NewEventClient(cfg), + Lock: NewLockClient(cfg), + Machine: NewMachineClient(cfg), + Meta: NewMetaClient(cfg), + Metric: NewMetricClient(cfg), }, nil } @@ -187,17 +197,19 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) cfg := c.config cfg.driver = &txDriver{tx: tx, drv: c.driver} return &Tx{ - ctx: ctx, - config: cfg, - Alert: NewAlertClient(cfg), - Bouncer: NewBouncerClient(cfg), - ConfigItem: NewConfigItemClient(cfg), - Decision: NewDecisionClient(cfg), - Event: NewEventClient(cfg), - Lock: NewLockClient(cfg), - Machine: NewMachineClient(cfg), - Meta: NewMetaClient(cfg), - Metric: NewMetricClient(cfg), + ctx: ctx, + config: cfg, + Alert: NewAlertClient(cfg), + AllowList: NewAllowListClient(cfg), + AllowListItem: NewAllowListItemClient(cfg), + Bouncer: NewBouncerClient(cfg), + ConfigItem: NewConfigItemClient(cfg), + Decision: NewDecisionClient(cfg), + Event: 
NewEventClient(cfg), + Lock: NewLockClient(cfg), + Machine: NewMachineClient(cfg), + Meta: NewMetaClient(cfg), + Metric: NewMetricClient(cfg), }, nil } @@ -227,8 +239,8 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ - c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, - c.Meta, c.Metric, + c.Alert, c.AllowList, c.AllowListItem, c.Bouncer, c.ConfigItem, c.Decision, + c.Event, c.Lock, c.Machine, c.Meta, c.Metric, } { n.Use(hooks...) } @@ -238,8 +250,8 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ - c.Alert, c.Bouncer, c.ConfigItem, c.Decision, c.Event, c.Lock, c.Machine, - c.Meta, c.Metric, + c.Alert, c.AllowList, c.AllowListItem, c.Bouncer, c.ConfigItem, c.Decision, + c.Event, c.Lock, c.Machine, c.Meta, c.Metric, } { n.Intercept(interceptors...) } @@ -250,6 +262,10 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { switch m := m.(type) { case *AlertMutation: return c.Alert.mutate(ctx, m) + case *AllowListMutation: + return c.AllowList.mutate(ctx, m) + case *AllowListItemMutation: + return c.AllowListItem.mutate(ctx, m) case *BouncerMutation: return c.Bouncer.mutate(ctx, m) case *ConfigItemMutation: @@ -468,6 +484,304 @@ func (c *AlertClient) mutate(ctx context.Context, m *AlertMutation) (Value, erro } } +// AllowListClient is a client for the AllowList schema. +type AllowListClient struct { + config +} + +// NewAllowListClient returns a client for the AllowList from the given config. +func NewAllowListClient(c config) *AllowListClient { + return &AllowListClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. 
+// A call to `Use(f, g, h)` equals to `allowlist.Hooks(f(g(h())))`. +func (c *AllowListClient) Use(hooks ...Hook) { + c.hooks.AllowList = append(c.hooks.AllowList, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `allowlist.Intercept(f(g(h())))`. +func (c *AllowListClient) Intercept(interceptors ...Interceptor) { + c.inters.AllowList = append(c.inters.AllowList, interceptors...) +} + +// Create returns a builder for creating a AllowList entity. +func (c *AllowListClient) Create() *AllowListCreate { + mutation := newAllowListMutation(c.config, OpCreate) + return &AllowListCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AllowList entities. +func (c *AllowListClient) CreateBulk(builders ...*AllowListCreate) *AllowListCreateBulk { + return &AllowListCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AllowListClient) MapCreateBulk(slice any, setFunc func(*AllowListCreate, int)) *AllowListCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AllowListCreateBulk{err: fmt.Errorf("calling to AllowListClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AllowListCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AllowListCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AllowList. +func (c *AllowListClient) Update() *AllowListUpdate { + mutation := newAllowListMutation(c.config, OpUpdate) + return &AllowListUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *AllowListClient) UpdateOne(al *AllowList) *AllowListUpdateOne { + mutation := newAllowListMutation(c.config, OpUpdateOne, withAllowList(al)) + return &AllowListUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AllowListClient) UpdateOneID(id int) *AllowListUpdateOne { + mutation := newAllowListMutation(c.config, OpUpdateOne, withAllowListID(id)) + return &AllowListUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AllowList. +func (c *AllowListClient) Delete() *AllowListDelete { + mutation := newAllowListMutation(c.config, OpDelete) + return &AllowListDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AllowListClient) DeleteOne(al *AllowList) *AllowListDeleteOne { + return c.DeleteOneID(al.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AllowListClient) DeleteOneID(id int) *AllowListDeleteOne { + builder := c.Delete().Where(allowlist.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AllowListDeleteOne{builder} +} + +// Query returns a query builder for AllowList. +func (c *AllowListClient) Query() *AllowListQuery { + return &AllowListQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAllowList}, + inters: c.Interceptors(), + } +} + +// Get returns a AllowList entity by its id. +func (c *AllowListClient) Get(ctx context.Context, id int) (*AllowList, error) { + return c.Query().Where(allowlist.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AllowListClient) GetX(ctx context.Context, id int) *AllowList { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAllowlistItems queries the allowlist_items edge of a AllowList. 
+func (c *AllowListClient) QueryAllowlistItems(al *AllowList) *AllowListItemQuery { + query := (&AllowListItemClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := al.ID + step := sqlgraph.NewStep( + sqlgraph.From(allowlist.Table, allowlist.FieldID, id), + sqlgraph.To(allowlistitem.Table, allowlistitem.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, allowlist.AllowlistItemsTable, allowlist.AllowlistItemsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(al.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AllowListClient) Hooks() []Hook { + return c.hooks.AllowList +} + +// Interceptors returns the client interceptors. +func (c *AllowListClient) Interceptors() []Interceptor { + return c.inters.AllowList +} + +func (c *AllowListClient) mutate(ctx context.Context, m *AllowListMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AllowListCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AllowListUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AllowListUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AllowListDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AllowList mutation op: %q", m.Op()) + } +} + +// AllowListItemClient is a client for the AllowListItem schema. +type AllowListItemClient struct { + config +} + +// NewAllowListItemClient returns a client for the AllowListItem from the given config. +func NewAllowListItemClient(c config) *AllowListItemClient { + return &AllowListItemClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `allowlistitem.Hooks(f(g(h())))`. 
+func (c *AllowListItemClient) Use(hooks ...Hook) { + c.hooks.AllowListItem = append(c.hooks.AllowListItem, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `allowlistitem.Intercept(f(g(h())))`. +func (c *AllowListItemClient) Intercept(interceptors ...Interceptor) { + c.inters.AllowListItem = append(c.inters.AllowListItem, interceptors...) +} + +// Create returns a builder for creating a AllowListItem entity. +func (c *AllowListItemClient) Create() *AllowListItemCreate { + mutation := newAllowListItemMutation(c.config, OpCreate) + return &AllowListItemCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AllowListItem entities. +func (c *AllowListItemClient) CreateBulk(builders ...*AllowListItemCreate) *AllowListItemCreateBulk { + return &AllowListItemCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AllowListItemClient) MapCreateBulk(slice any, setFunc func(*AllowListItemCreate, int)) *AllowListItemCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AllowListItemCreateBulk{err: fmt.Errorf("calling to AllowListItemClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AllowListItemCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AllowListItemCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AllowListItem. 
+func (c *AllowListItemClient) Update() *AllowListItemUpdate { + mutation := newAllowListItemMutation(c.config, OpUpdate) + return &AllowListItemUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AllowListItemClient) UpdateOne(ali *AllowListItem) *AllowListItemUpdateOne { + mutation := newAllowListItemMutation(c.config, OpUpdateOne, withAllowListItem(ali)) + return &AllowListItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AllowListItemClient) UpdateOneID(id int) *AllowListItemUpdateOne { + mutation := newAllowListItemMutation(c.config, OpUpdateOne, withAllowListItemID(id)) + return &AllowListItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AllowListItem. +func (c *AllowListItemClient) Delete() *AllowListItemDelete { + mutation := newAllowListItemMutation(c.config, OpDelete) + return &AllowListItemDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AllowListItemClient) DeleteOne(ali *AllowListItem) *AllowListItemDeleteOne { + return c.DeleteOneID(ali.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AllowListItemClient) DeleteOneID(id int) *AllowListItemDeleteOne { + builder := c.Delete().Where(allowlistitem.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AllowListItemDeleteOne{builder} +} + +// Query returns a query builder for AllowListItem. +func (c *AllowListItemClient) Query() *AllowListItemQuery { + return &AllowListItemQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAllowListItem}, + inters: c.Interceptors(), + } +} + +// Get returns a AllowListItem entity by its id. 
+func (c *AllowListItemClient) Get(ctx context.Context, id int) (*AllowListItem, error) { + return c.Query().Where(allowlistitem.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AllowListItemClient) GetX(ctx context.Context, id int) *AllowListItem { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAllowlist queries the allowlist edge of a AllowListItem. +func (c *AllowListItemClient) QueryAllowlist(ali *AllowListItem) *AllowListQuery { + query := (&AllowListClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := ali.ID + step := sqlgraph.NewStep( + sqlgraph.From(allowlistitem.Table, allowlistitem.FieldID, id), + sqlgraph.To(allowlist.Table, allowlist.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, allowlistitem.AllowlistTable, allowlistitem.AllowlistPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(ali.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AllowListItemClient) Hooks() []Hook { + return c.hooks.AllowListItem +} + +// Interceptors returns the client interceptors. 
+func (c *AllowListItemClient) Interceptors() []Interceptor { + return c.inters.AllowListItem +} + +func (c *AllowListItemClient) mutate(ctx context.Context, m *AllowListItemMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AllowListItemCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AllowListItemUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AllowListItemUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AllowListItemDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AllowListItem mutation op: %q", m.Op()) + } +} + // BouncerClient is a client for the Bouncer schema. type BouncerClient struct { config @@ -1599,11 +1913,11 @@ func (c *MetricClient) mutate(ctx context.Context, m *MetricMutation) (Value, er // hooks and interceptors per client, for fast access. 
type ( hooks struct { - Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta, - Metric []ent.Hook + Alert, AllowList, AllowListItem, Bouncer, ConfigItem, Decision, Event, Lock, + Machine, Meta, Metric []ent.Hook } inters struct { - Alert, Bouncer, ConfigItem, Decision, Event, Lock, Machine, Meta, - Metric []ent.Interceptor + Alert, AllowList, AllowListItem, Bouncer, ConfigItem, Decision, Event, Lock, + Machine, Meta, Metric []ent.Interceptor } ) diff --git a/pkg/database/ent/ent.go b/pkg/database/ent/ent.go index 612b231518d..b28333afaf1 100644 --- a/pkg/database/ent/ent.go +++ b/pkg/database/ent/ent.go @@ -13,6 +13,8 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" @@ -81,15 +83,17 @@ var ( func checkColumn(table, column string) error { initCheck.Do(func() { columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ - alert.Table: alert.ValidColumn, - bouncer.Table: bouncer.ValidColumn, - configitem.Table: configitem.ValidColumn, - decision.Table: decision.ValidColumn, - event.Table: event.ValidColumn, - lock.Table: lock.ValidColumn, - machine.Table: machine.ValidColumn, - meta.Table: meta.ValidColumn, - metric.Table: metric.ValidColumn, + alert.Table: alert.ValidColumn, + allowlist.Table: allowlist.ValidColumn, + allowlistitem.Table: allowlistitem.ValidColumn, + bouncer.Table: bouncer.ValidColumn, + configitem.Table: configitem.ValidColumn, + decision.Table: decision.ValidColumn, + event.Table: event.ValidColumn, + lock.Table: lock.ValidColumn, + machine.Table: machine.ValidColumn, + meta.Table: meta.ValidColumn, + metric.Table: metric.ValidColumn, 
}) }) return columnCheck(table, column) diff --git a/pkg/database/ent/hook/hook.go b/pkg/database/ent/hook/hook.go index 62cc07820d0..b5ddfc81290 100644 --- a/pkg/database/ent/hook/hook.go +++ b/pkg/database/ent/hook/hook.go @@ -21,6 +21,30 @@ func (f AlertFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AlertMutation", m) } +// The AllowListFunc type is an adapter to allow the use of ordinary +// function as AllowList mutator. +type AllowListFunc func(context.Context, *ent.AllowListMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AllowListFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AllowListMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AllowListMutation", m) +} + +// The AllowListItemFunc type is an adapter to allow the use of ordinary +// function as AllowListItem mutator. +type AllowListItemFunc func(context.Context, *ent.AllowListItemMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AllowListItemFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AllowListItemMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AllowListItemMutation", m) +} + // The BouncerFunc type is an adapter to allow the use of ordinary // function as Bouncer mutator. type BouncerFunc func(context.Context, *ent.BouncerMutation) (ent.Value, error) diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index dae248c7f38..932c27dd7a6 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -58,6 +58,66 @@ var ( }, }, } + // AllowListsColumns holds the columns for the "allow_lists" table. 
+ AllowListsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "name", Type: field.TypeString}, + {Name: "from_console", Type: field.TypeBool}, + {Name: "description", Type: field.TypeString, Nullable: true}, + {Name: "allowlist_id", Type: field.TypeString, Nullable: true}, + } + // AllowListsTable holds the schema information for the "allow_lists" table. + AllowListsTable = &schema.Table{ + Name: "allow_lists", + Columns: AllowListsColumns, + PrimaryKey: []*schema.Column{AllowListsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "allowlist_id", + Unique: true, + Columns: []*schema.Column{AllowListsColumns[0]}, + }, + { + Name: "allowlist_name", + Unique: true, + Columns: []*schema.Column{AllowListsColumns[3]}, + }, + }, + } + // AllowListItemsColumns holds the columns for the "allow_list_items" table. + AllowListItemsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "expires_at", Type: field.TypeTime, Nullable: true}, + {Name: "comment", Type: field.TypeString, Nullable: true}, + {Name: "value", Type: field.TypeString}, + {Name: "start_ip", Type: field.TypeInt64, Nullable: true}, + {Name: "end_ip", Type: field.TypeInt64, Nullable: true}, + {Name: "start_suffix", Type: field.TypeInt64, Nullable: true}, + {Name: "end_suffix", Type: field.TypeInt64, Nullable: true}, + {Name: "ip_size", Type: field.TypeInt64, Nullable: true}, + } + // AllowListItemsTable holds the schema information for the "allow_list_items" table. 
+ AllowListItemsTable = &schema.Table{ + Name: "allow_list_items", + Columns: AllowListItemsColumns, + PrimaryKey: []*schema.Column{AllowListItemsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "allowlistitem_id", + Unique: false, + Columns: []*schema.Column{AllowListItemsColumns[0]}, + }, + { + Name: "allowlistitem_start_ip_end_ip", + Unique: false, + Columns: []*schema.Column{AllowListItemsColumns[6], AllowListItemsColumns[7]}, + }, + }, + } // BouncersColumns holds the columns for the "bouncers" table. BouncersColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, @@ -265,9 +325,36 @@ var ( Columns: MetricsColumns, PrimaryKey: []*schema.Column{MetricsColumns[0]}, } + // AllowListAllowlistItemsColumns holds the columns for the "allow_list_allowlist_items" table. + AllowListAllowlistItemsColumns = []*schema.Column{ + {Name: "allow_list_id", Type: field.TypeInt}, + {Name: "allow_list_item_id", Type: field.TypeInt}, + } + // AllowListAllowlistItemsTable holds the schema information for the "allow_list_allowlist_items" table. + AllowListAllowlistItemsTable = &schema.Table{ + Name: "allow_list_allowlist_items", + Columns: AllowListAllowlistItemsColumns, + PrimaryKey: []*schema.Column{AllowListAllowlistItemsColumns[0], AllowListAllowlistItemsColumns[1]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "allow_list_allowlist_items_allow_list_id", + Columns: []*schema.Column{AllowListAllowlistItemsColumns[0]}, + RefColumns: []*schema.Column{AllowListsColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "allow_list_allowlist_items_allow_list_item_id", + Columns: []*schema.Column{AllowListAllowlistItemsColumns[1]}, + RefColumns: []*schema.Column{AllowListItemsColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } // Tables holds all the tables in the schema. 
Tables = []*schema.Table{ AlertsTable, + AllowListsTable, + AllowListItemsTable, BouncersTable, ConfigItemsTable, DecisionsTable, @@ -276,6 +363,7 @@ var ( MachinesTable, MetaTable, MetricsTable, + AllowListAllowlistItemsTable, } ) @@ -284,4 +372,6 @@ func init() { DecisionsTable.ForeignKeys[0].RefTable = AlertsTable EventsTable.ForeignKeys[0].RefTable = AlertsTable MetaTable.ForeignKeys[0].RefTable = AlertsTable + AllowListAllowlistItemsTable.ForeignKeys[0].RefTable = AllowListsTable + AllowListAllowlistItemsTable.ForeignKeys[1].RefTable = AllowListItemsTable } diff --git a/pkg/database/ent/mutation.go b/pkg/database/ent/mutation.go index fa1ccb3da58..f45bd47a5fb 100644 --- a/pkg/database/ent/mutation.go +++ b/pkg/database/ent/mutation.go @@ -12,6 +12,8 @@ import ( "entgo.io/ent" "entgo.io/ent/dialect/sql" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" @@ -33,15 +35,17 @@ const ( OpUpdateOne = ent.OpUpdateOne // Node types. - TypeAlert = "Alert" - TypeBouncer = "Bouncer" - TypeConfigItem = "ConfigItem" - TypeDecision = "Decision" - TypeEvent = "Event" - TypeLock = "Lock" - TypeMachine = "Machine" - TypeMeta = "Meta" - TypeMetric = "Metric" + TypeAlert = "Alert" + TypeAllowList = "AllowList" + TypeAllowListItem = "AllowListItem" + TypeBouncer = "Bouncer" + TypeConfigItem = "ConfigItem" + TypeDecision = "Decision" + TypeEvent = "Event" + TypeLock = "Lock" + TypeMachine = "Machine" + TypeMeta = "Meta" + TypeMetric = "Metric" ) // AlertMutation represents an operation that mutates the Alert nodes in the graph. 
@@ -2452,6 +2456,1950 @@ func (m *AlertMutation) ResetEdge(name string) error { return fmt.Errorf("unknown Alert edge %s", name) } +// AllowListMutation represents an operation that mutates the AllowList nodes in the graph. +type AllowListMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + name *string + from_console *bool + description *string + allowlist_id *string + clearedFields map[string]struct{} + allowlist_items map[int]struct{} + removedallowlist_items map[int]struct{} + clearedallowlist_items bool + done bool + oldValue func(context.Context) (*AllowList, error) + predicates []predicate.AllowList +} + +var _ ent.Mutation = (*AllowListMutation)(nil) + +// allowlistOption allows management of the mutation configuration using functional options. +type allowlistOption func(*AllowListMutation) + +// newAllowListMutation creates new mutation for the AllowList entity. +func newAllowListMutation(c config, op Op, opts ...allowlistOption) *AllowListMutation { + m := &AllowListMutation{ + config: c, + op: op, + typ: TypeAllowList, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAllowListID sets the ID field of the mutation. +func withAllowListID(id int) allowlistOption { + return func(m *AllowListMutation) { + var ( + err error + once sync.Once + value *AllowList + ) + m.oldValue = func(ctx context.Context) (*AllowList, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().AllowList.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAllowList sets the old AllowList of the mutation. 
+func withAllowList(node *AllowList) allowlistOption { + return func(m *AllowListMutation) { + m.oldValue = func(context.Context) (*AllowList, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AllowListMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AllowListMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AllowListMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *AllowListMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().AllowList.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. 
+func (m *AllowListMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AllowListMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the AllowList entity. +// If the AllowList object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AllowListMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AllowListMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AllowListMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the AllowList entity. +// If the AllowList object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AllowListMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AllowListMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetName sets the "name" field. +func (m *AllowListMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *AllowListMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the AllowList entity. +// If the AllowList object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *AllowListMutation) ResetName() { + m.name = nil +} + +// SetFromConsole sets the "from_console" field. +func (m *AllowListMutation) SetFromConsole(b bool) { + m.from_console = &b +} + +// FromConsole returns the value of the "from_console" field in the mutation. 
+func (m *AllowListMutation) FromConsole() (r bool, exists bool) { + v := m.from_console + if v == nil { + return + } + return *v, true +} + +// OldFromConsole returns the old "from_console" field's value of the AllowList entity. +// If the AllowList object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListMutation) OldFromConsole(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFromConsole is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFromConsole requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFromConsole: %w", err) + } + return oldValue.FromConsole, nil +} + +// ResetFromConsole resets all changes to the "from_console" field. +func (m *AllowListMutation) ResetFromConsole() { + m.from_console = nil +} + +// SetDescription sets the "description" field. +func (m *AllowListMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *AllowListMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the AllowList entity. +// If the AllowList object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AllowListMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *AllowListMutation) ClearDescription() { + m.description = nil + m.clearedFields[allowlist.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. +func (m *AllowListMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[allowlist.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *AllowListMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, allowlist.FieldDescription) +} + +// SetAllowlistID sets the "allowlist_id" field. +func (m *AllowListMutation) SetAllowlistID(s string) { + m.allowlist_id = &s +} + +// AllowlistID returns the value of the "allowlist_id" field in the mutation. +func (m *AllowListMutation) AllowlistID() (r string, exists bool) { + v := m.allowlist_id + if v == nil { + return + } + return *v, true +} + +// OldAllowlistID returns the old "allowlist_id" field's value of the AllowList entity. +// If the AllowList object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AllowListMutation) OldAllowlistID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAllowlistID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAllowlistID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAllowlistID: %w", err) + } + return oldValue.AllowlistID, nil +} + +// ClearAllowlistID clears the value of the "allowlist_id" field. +func (m *AllowListMutation) ClearAllowlistID() { + m.allowlist_id = nil + m.clearedFields[allowlist.FieldAllowlistID] = struct{}{} +} + +// AllowlistIDCleared returns if the "allowlist_id" field was cleared in this mutation. +func (m *AllowListMutation) AllowlistIDCleared() bool { + _, ok := m.clearedFields[allowlist.FieldAllowlistID] + return ok +} + +// ResetAllowlistID resets all changes to the "allowlist_id" field. +func (m *AllowListMutation) ResetAllowlistID() { + m.allowlist_id = nil + delete(m.clearedFields, allowlist.FieldAllowlistID) +} + +// AddAllowlistItemIDs adds the "allowlist_items" edge to the AllowListItem entity by ids. +func (m *AllowListMutation) AddAllowlistItemIDs(ids ...int) { + if m.allowlist_items == nil { + m.allowlist_items = make(map[int]struct{}) + } + for i := range ids { + m.allowlist_items[ids[i]] = struct{}{} + } +} + +// ClearAllowlistItems clears the "allowlist_items" edge to the AllowListItem entity. +func (m *AllowListMutation) ClearAllowlistItems() { + m.clearedallowlist_items = true +} + +// AllowlistItemsCleared reports if the "allowlist_items" edge to the AllowListItem entity was cleared. +func (m *AllowListMutation) AllowlistItemsCleared() bool { + return m.clearedallowlist_items +} + +// RemoveAllowlistItemIDs removes the "allowlist_items" edge to the AllowListItem entity by IDs. 
+func (m *AllowListMutation) RemoveAllowlistItemIDs(ids ...int) { + if m.removedallowlist_items == nil { + m.removedallowlist_items = make(map[int]struct{}) + } + for i := range ids { + delete(m.allowlist_items, ids[i]) + m.removedallowlist_items[ids[i]] = struct{}{} + } +} + +// RemovedAllowlistItems returns the removed IDs of the "allowlist_items" edge to the AllowListItem entity. +func (m *AllowListMutation) RemovedAllowlistItemsIDs() (ids []int) { + for id := range m.removedallowlist_items { + ids = append(ids, id) + } + return +} + +// AllowlistItemsIDs returns the "allowlist_items" edge IDs in the mutation. +func (m *AllowListMutation) AllowlistItemsIDs() (ids []int) { + for id := range m.allowlist_items { + ids = append(ids, id) + } + return +} + +// ResetAllowlistItems resets all changes to the "allowlist_items" edge. +func (m *AllowListMutation) ResetAllowlistItems() { + m.allowlist_items = nil + m.clearedallowlist_items = false + m.removedallowlist_items = nil +} + +// Where appends a list predicates to the AllowListMutation builder. +func (m *AllowListMutation) Where(ps ...predicate.AllowList) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AllowListMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AllowListMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AllowList, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AllowListMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AllowListMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AllowList). +func (m *AllowListMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. 
Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AllowListMutation) Fields() []string { + fields := make([]string, 0, 6) + if m.created_at != nil { + fields = append(fields, allowlist.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, allowlist.FieldUpdatedAt) + } + if m.name != nil { + fields = append(fields, allowlist.FieldName) + } + if m.from_console != nil { + fields = append(fields, allowlist.FieldFromConsole) + } + if m.description != nil { + fields = append(fields, allowlist.FieldDescription) + } + if m.allowlist_id != nil { + fields = append(fields, allowlist.FieldAllowlistID) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AllowListMutation) Field(name string) (ent.Value, bool) { + switch name { + case allowlist.FieldCreatedAt: + return m.CreatedAt() + case allowlist.FieldUpdatedAt: + return m.UpdatedAt() + case allowlist.FieldName: + return m.Name() + case allowlist.FieldFromConsole: + return m.FromConsole() + case allowlist.FieldDescription: + return m.Description() + case allowlist.FieldAllowlistID: + return m.AllowlistID() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *AllowListMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case allowlist.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case allowlist.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case allowlist.FieldName: + return m.OldName(ctx) + case allowlist.FieldFromConsole: + return m.OldFromConsole(ctx) + case allowlist.FieldDescription: + return m.OldDescription(ctx) + case allowlist.FieldAllowlistID: + return m.OldAllowlistID(ctx) + } + return nil, fmt.Errorf("unknown AllowList field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AllowListMutation) SetField(name string, value ent.Value) error { + switch name { + case allowlist.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case allowlist.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case allowlist.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case allowlist.FieldFromConsole: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFromConsole(v) + return nil + case allowlist.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case allowlist.FieldAllowlistID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAllowlistID(v) + return nil + } + return fmt.Errorf("unknown AllowList field %s", name) +} + +// AddedFields returns all numeric fields that were 
incremented/decremented during +// this mutation. +func (m *AllowListMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AllowListMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AllowListMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown AllowList numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AllowListMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(allowlist.FieldDescription) { + fields = append(fields, allowlist.FieldDescription) + } + if m.FieldCleared(allowlist.FieldAllowlistID) { + fields = append(fields, allowlist.FieldAllowlistID) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AllowListMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AllowListMutation) ClearField(name string) error { + switch name { + case allowlist.FieldDescription: + m.ClearDescription() + return nil + case allowlist.FieldAllowlistID: + m.ClearAllowlistID() + return nil + } + return fmt.Errorf("unknown AllowList nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *AllowListMutation) ResetField(name string) error { + switch name { + case allowlist.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case allowlist.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case allowlist.FieldName: + m.ResetName() + return nil + case allowlist.FieldFromConsole: + m.ResetFromConsole() + return nil + case allowlist.FieldDescription: + m.ResetDescription() + return nil + case allowlist.FieldAllowlistID: + m.ResetAllowlistID() + return nil + } + return fmt.Errorf("unknown AllowList field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AllowListMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.allowlist_items != nil { + edges = append(edges, allowlist.EdgeAllowlistItems) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AllowListMutation) AddedIDs(name string) []ent.Value { + switch name { + case allowlist.EdgeAllowlistItems: + ids := make([]ent.Value, 0, len(m.allowlist_items)) + for id := range m.allowlist_items { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AllowListMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedallowlist_items != nil { + edges = append(edges, allowlist.EdgeAllowlistItems) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AllowListMutation) RemovedIDs(name string) []ent.Value { + switch name { + case allowlist.EdgeAllowlistItems: + ids := make([]ent.Value, 0, len(m.removedallowlist_items)) + for id := range m.removedallowlist_items { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *AllowListMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedallowlist_items { + edges = append(edges, allowlist.EdgeAllowlistItems) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AllowListMutation) EdgeCleared(name string) bool { + switch name { + case allowlist.EdgeAllowlistItems: + return m.clearedallowlist_items + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AllowListMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown AllowList unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AllowListMutation) ResetEdge(name string) error { + switch name { + case allowlist.EdgeAllowlistItems: + m.ResetAllowlistItems() + return nil + } + return fmt.Errorf("unknown AllowList edge %s", name) +} + +// AllowListItemMutation represents an operation that mutates the AllowListItem nodes in the graph. 
+type AllowListItemMutation struct { + config + op Op + typ string + id *int + created_at *time.Time + updated_at *time.Time + expires_at *time.Time + comment *string + value *string + start_ip *int64 + addstart_ip *int64 + end_ip *int64 + addend_ip *int64 + start_suffix *int64 + addstart_suffix *int64 + end_suffix *int64 + addend_suffix *int64 + ip_size *int64 + addip_size *int64 + clearedFields map[string]struct{} + allowlist map[int]struct{} + removedallowlist map[int]struct{} + clearedallowlist bool + done bool + oldValue func(context.Context) (*AllowListItem, error) + predicates []predicate.AllowListItem +} + +var _ ent.Mutation = (*AllowListItemMutation)(nil) + +// allowlistitemOption allows management of the mutation configuration using functional options. +type allowlistitemOption func(*AllowListItemMutation) + +// newAllowListItemMutation creates new mutation for the AllowListItem entity. +func newAllowListItemMutation(c config, op Op, opts ...allowlistitemOption) *AllowListItemMutation { + m := &AllowListItemMutation{ + config: c, + op: op, + typ: TypeAllowListItem, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAllowListItemID sets the ID field of the mutation. +func withAllowListItemID(id int) allowlistitemOption { + return func(m *AllowListItemMutation) { + var ( + err error + once sync.Once + value *AllowListItem + ) + m.oldValue = func(ctx context.Context) (*AllowListItem, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().AllowListItem.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAllowListItem sets the old AllowListItem of the mutation. 
+func withAllowListItem(node *AllowListItem) allowlistitemOption { + return func(m *AllowListItemMutation) { + m.oldValue = func(context.Context) (*AllowListItem, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AllowListItemMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AllowListItemMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AllowListItemMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *AllowListItemMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().AllowListItem.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. 
+func (m *AllowListItemMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AllowListItemMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AllowListItemMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AllowListItemMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AllowListItemMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AllowListItemMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AllowListItemMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetExpiresAt sets the "expires_at" field. +func (m *AllowListItemMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *AllowListItemMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ClearExpiresAt clears the value of the "expires_at" field. 
+func (m *AllowListItemMutation) ClearExpiresAt() { + m.expires_at = nil + m.clearedFields[allowlistitem.FieldExpiresAt] = struct{}{} +} + +// ExpiresAtCleared returns if the "expires_at" field was cleared in this mutation. +func (m *AllowListItemMutation) ExpiresAtCleared() bool { + _, ok := m.clearedFields[allowlistitem.FieldExpiresAt] + return ok +} + +// ResetExpiresAt resets all changes to the "expires_at" field. +func (m *AllowListItemMutation) ResetExpiresAt() { + m.expires_at = nil + delete(m.clearedFields, allowlistitem.FieldExpiresAt) +} + +// SetComment sets the "comment" field. +func (m *AllowListItemMutation) SetComment(s string) { + m.comment = &s +} + +// Comment returns the value of the "comment" field in the mutation. +func (m *AllowListItemMutation) Comment() (r string, exists bool) { + v := m.comment + if v == nil { + return + } + return *v, true +} + +// OldComment returns the old "comment" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldComment(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldComment is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldComment requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldComment: %w", err) + } + return oldValue.Comment, nil +} + +// ClearComment clears the value of the "comment" field. +func (m *AllowListItemMutation) ClearComment() { + m.comment = nil + m.clearedFields[allowlistitem.FieldComment] = struct{}{} +} + +// CommentCleared returns if the "comment" field was cleared in this mutation. 
+func (m *AllowListItemMutation) CommentCleared() bool { + _, ok := m.clearedFields[allowlistitem.FieldComment] + return ok +} + +// ResetComment resets all changes to the "comment" field. +func (m *AllowListItemMutation) ResetComment() { + m.comment = nil + delete(m.clearedFields, allowlistitem.FieldComment) +} + +// SetValue sets the "value" field. +func (m *AllowListItemMutation) SetValue(s string) { + m.value = &s +} + +// Value returns the value of the "value" field in the mutation. +func (m *AllowListItemMutation) Value() (r string, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old "value" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// ResetValue resets all changes to the "value" field. +func (m *AllowListItemMutation) ResetValue() { + m.value = nil +} + +// SetStartIP sets the "start_ip" field. +func (m *AllowListItemMutation) SetStartIP(i int64) { + m.start_ip = &i + m.addstart_ip = nil +} + +// StartIP returns the value of the "start_ip" field in the mutation. +func (m *AllowListItemMutation) StartIP() (r int64, exists bool) { + v := m.start_ip + if v == nil { + return + } + return *v, true +} + +// OldStartIP returns the old "start_ip" field's value of the AllowListItem entity. 
+// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldStartIP(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartIP is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartIP requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartIP: %w", err) + } + return oldValue.StartIP, nil +} + +// AddStartIP adds i to the "start_ip" field. +func (m *AllowListItemMutation) AddStartIP(i int64) { + if m.addstart_ip != nil { + *m.addstart_ip += i + } else { + m.addstart_ip = &i + } +} + +// AddedStartIP returns the value that was added to the "start_ip" field in this mutation. +func (m *AllowListItemMutation) AddedStartIP() (r int64, exists bool) { + v := m.addstart_ip + if v == nil { + return + } + return *v, true +} + +// ClearStartIP clears the value of the "start_ip" field. +func (m *AllowListItemMutation) ClearStartIP() { + m.start_ip = nil + m.addstart_ip = nil + m.clearedFields[allowlistitem.FieldStartIP] = struct{}{} +} + +// StartIPCleared returns if the "start_ip" field was cleared in this mutation. +func (m *AllowListItemMutation) StartIPCleared() bool { + _, ok := m.clearedFields[allowlistitem.FieldStartIP] + return ok +} + +// ResetStartIP resets all changes to the "start_ip" field. +func (m *AllowListItemMutation) ResetStartIP() { + m.start_ip = nil + m.addstart_ip = nil + delete(m.clearedFields, allowlistitem.FieldStartIP) +} + +// SetEndIP sets the "end_ip" field. +func (m *AllowListItemMutation) SetEndIP(i int64) { + m.end_ip = &i + m.addend_ip = nil +} + +// EndIP returns the value of the "end_ip" field in the mutation. 
+func (m *AllowListItemMutation) EndIP() (r int64, exists bool) { + v := m.end_ip + if v == nil { + return + } + return *v, true +} + +// OldEndIP returns the old "end_ip" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldEndIP(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndIP is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndIP requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndIP: %w", err) + } + return oldValue.EndIP, nil +} + +// AddEndIP adds i to the "end_ip" field. +func (m *AllowListItemMutation) AddEndIP(i int64) { + if m.addend_ip != nil { + *m.addend_ip += i + } else { + m.addend_ip = &i + } +} + +// AddedEndIP returns the value that was added to the "end_ip" field in this mutation. +func (m *AllowListItemMutation) AddedEndIP() (r int64, exists bool) { + v := m.addend_ip + if v == nil { + return + } + return *v, true +} + +// ClearEndIP clears the value of the "end_ip" field. +func (m *AllowListItemMutation) ClearEndIP() { + m.end_ip = nil + m.addend_ip = nil + m.clearedFields[allowlistitem.FieldEndIP] = struct{}{} +} + +// EndIPCleared returns if the "end_ip" field was cleared in this mutation. +func (m *AllowListItemMutation) EndIPCleared() bool { + _, ok := m.clearedFields[allowlistitem.FieldEndIP] + return ok +} + +// ResetEndIP resets all changes to the "end_ip" field. +func (m *AllowListItemMutation) ResetEndIP() { + m.end_ip = nil + m.addend_ip = nil + delete(m.clearedFields, allowlistitem.FieldEndIP) +} + +// SetStartSuffix sets the "start_suffix" field. 
+func (m *AllowListItemMutation) SetStartSuffix(i int64) { + m.start_suffix = &i + m.addstart_suffix = nil +} + +// StartSuffix returns the value of the "start_suffix" field in the mutation. +func (m *AllowListItemMutation) StartSuffix() (r int64, exists bool) { + v := m.start_suffix + if v == nil { + return + } + return *v, true +} + +// OldStartSuffix returns the old "start_suffix" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldStartSuffix(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartSuffix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartSuffix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartSuffix: %w", err) + } + return oldValue.StartSuffix, nil +} + +// AddStartSuffix adds i to the "start_suffix" field. +func (m *AllowListItemMutation) AddStartSuffix(i int64) { + if m.addstart_suffix != nil { + *m.addstart_suffix += i + } else { + m.addstart_suffix = &i + } +} + +// AddedStartSuffix returns the value that was added to the "start_suffix" field in this mutation. +func (m *AllowListItemMutation) AddedStartSuffix() (r int64, exists bool) { + v := m.addstart_suffix + if v == nil { + return + } + return *v, true +} + +// ClearStartSuffix clears the value of the "start_suffix" field. +func (m *AllowListItemMutation) ClearStartSuffix() { + m.start_suffix = nil + m.addstart_suffix = nil + m.clearedFields[allowlistitem.FieldStartSuffix] = struct{}{} +} + +// StartSuffixCleared returns if the "start_suffix" field was cleared in this mutation. 
+func (m *AllowListItemMutation) StartSuffixCleared() bool { + _, ok := m.clearedFields[allowlistitem.FieldStartSuffix] + return ok +} + +// ResetStartSuffix resets all changes to the "start_suffix" field. +func (m *AllowListItemMutation) ResetStartSuffix() { + m.start_suffix = nil + m.addstart_suffix = nil + delete(m.clearedFields, allowlistitem.FieldStartSuffix) +} + +// SetEndSuffix sets the "end_suffix" field. +func (m *AllowListItemMutation) SetEndSuffix(i int64) { + m.end_suffix = &i + m.addend_suffix = nil +} + +// EndSuffix returns the value of the "end_suffix" field in the mutation. +func (m *AllowListItemMutation) EndSuffix() (r int64, exists bool) { + v := m.end_suffix + if v == nil { + return + } + return *v, true +} + +// OldEndSuffix returns the old "end_suffix" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldEndSuffix(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndSuffix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndSuffix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndSuffix: %w", err) + } + return oldValue.EndSuffix, nil +} + +// AddEndSuffix adds i to the "end_suffix" field. +func (m *AllowListItemMutation) AddEndSuffix(i int64) { + if m.addend_suffix != nil { + *m.addend_suffix += i + } else { + m.addend_suffix = &i + } +} + +// AddedEndSuffix returns the value that was added to the "end_suffix" field in this mutation. 
+func (m *AllowListItemMutation) AddedEndSuffix() (r int64, exists bool) { + v := m.addend_suffix + if v == nil { + return + } + return *v, true +} + +// ClearEndSuffix clears the value of the "end_suffix" field. +func (m *AllowListItemMutation) ClearEndSuffix() { + m.end_suffix = nil + m.addend_suffix = nil + m.clearedFields[allowlistitem.FieldEndSuffix] = struct{}{} +} + +// EndSuffixCleared returns if the "end_suffix" field was cleared in this mutation. +func (m *AllowListItemMutation) EndSuffixCleared() bool { + _, ok := m.clearedFields[allowlistitem.FieldEndSuffix] + return ok +} + +// ResetEndSuffix resets all changes to the "end_suffix" field. +func (m *AllowListItemMutation) ResetEndSuffix() { + m.end_suffix = nil + m.addend_suffix = nil + delete(m.clearedFields, allowlistitem.FieldEndSuffix) +} + +// SetIPSize sets the "ip_size" field. +func (m *AllowListItemMutation) SetIPSize(i int64) { + m.ip_size = &i + m.addip_size = nil +} + +// IPSize returns the value of the "ip_size" field in the mutation. +func (m *AllowListItemMutation) IPSize() (r int64, exists bool) { + v := m.ip_size + if v == nil { + return + } + return *v, true +} + +// OldIPSize returns the old "ip_size" field's value of the AllowListItem entity. +// If the AllowListItem object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AllowListItemMutation) OldIPSize(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPSize is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPSize requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPSize: %w", err) + } + return oldValue.IPSize, nil +} + +// AddIPSize adds i to the "ip_size" field. 
+func (m *AllowListItemMutation) AddIPSize(i int64) { + if m.addip_size != nil { + *m.addip_size += i + } else { + m.addip_size = &i + } +} + +// AddedIPSize returns the value that was added to the "ip_size" field in this mutation. +func (m *AllowListItemMutation) AddedIPSize() (r int64, exists bool) { + v := m.addip_size + if v == nil { + return + } + return *v, true +} + +// ClearIPSize clears the value of the "ip_size" field. +func (m *AllowListItemMutation) ClearIPSize() { + m.ip_size = nil + m.addip_size = nil + m.clearedFields[allowlistitem.FieldIPSize] = struct{}{} +} + +// IPSizeCleared returns if the "ip_size" field was cleared in this mutation. +func (m *AllowListItemMutation) IPSizeCleared() bool { + _, ok := m.clearedFields[allowlistitem.FieldIPSize] + return ok +} + +// ResetIPSize resets all changes to the "ip_size" field. +func (m *AllowListItemMutation) ResetIPSize() { + m.ip_size = nil + m.addip_size = nil + delete(m.clearedFields, allowlistitem.FieldIPSize) +} + +// AddAllowlistIDs adds the "allowlist" edge to the AllowList entity by ids. +func (m *AllowListItemMutation) AddAllowlistIDs(ids ...int) { + if m.allowlist == nil { + m.allowlist = make(map[int]struct{}) + } + for i := range ids { + m.allowlist[ids[i]] = struct{}{} + } +} + +// ClearAllowlist clears the "allowlist" edge to the AllowList entity. +func (m *AllowListItemMutation) ClearAllowlist() { + m.clearedallowlist = true +} + +// AllowlistCleared reports if the "allowlist" edge to the AllowList entity was cleared. +func (m *AllowListItemMutation) AllowlistCleared() bool { + return m.clearedallowlist +} + +// RemoveAllowlistIDs removes the "allowlist" edge to the AllowList entity by IDs. 
+func (m *AllowListItemMutation) RemoveAllowlistIDs(ids ...int) { + if m.removedallowlist == nil { + m.removedallowlist = make(map[int]struct{}) + } + for i := range ids { + delete(m.allowlist, ids[i]) + m.removedallowlist[ids[i]] = struct{}{} + } +} + +// RemovedAllowlist returns the removed IDs of the "allowlist" edge to the AllowList entity. +func (m *AllowListItemMutation) RemovedAllowlistIDs() (ids []int) { + for id := range m.removedallowlist { + ids = append(ids, id) + } + return +} + +// AllowlistIDs returns the "allowlist" edge IDs in the mutation. +func (m *AllowListItemMutation) AllowlistIDs() (ids []int) { + for id := range m.allowlist { + ids = append(ids, id) + } + return +} + +// ResetAllowlist resets all changes to the "allowlist" edge. +func (m *AllowListItemMutation) ResetAllowlist() { + m.allowlist = nil + m.clearedallowlist = false + m.removedallowlist = nil +} + +// Where appends a list predicates to the AllowListItemMutation builder. +func (m *AllowListItemMutation) Where(ps ...predicate.AllowListItem) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AllowListItemMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AllowListItemMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AllowListItem, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AllowListItemMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AllowListItemMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AllowListItem). +func (m *AllowListItemMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *AllowListItemMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.created_at != nil { + fields = append(fields, allowlistitem.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, allowlistitem.FieldUpdatedAt) + } + if m.expires_at != nil { + fields = append(fields, allowlistitem.FieldExpiresAt) + } + if m.comment != nil { + fields = append(fields, allowlistitem.FieldComment) + } + if m.value != nil { + fields = append(fields, allowlistitem.FieldValue) + } + if m.start_ip != nil { + fields = append(fields, allowlistitem.FieldStartIP) + } + if m.end_ip != nil { + fields = append(fields, allowlistitem.FieldEndIP) + } + if m.start_suffix != nil { + fields = append(fields, allowlistitem.FieldStartSuffix) + } + if m.end_suffix != nil { + fields = append(fields, allowlistitem.FieldEndSuffix) + } + if m.ip_size != nil { + fields = append(fields, allowlistitem.FieldIPSize) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AllowListItemMutation) Field(name string) (ent.Value, bool) { + switch name { + case allowlistitem.FieldCreatedAt: + return m.CreatedAt() + case allowlistitem.FieldUpdatedAt: + return m.UpdatedAt() + case allowlistitem.FieldExpiresAt: + return m.ExpiresAt() + case allowlistitem.FieldComment: + return m.Comment() + case allowlistitem.FieldValue: + return m.Value() + case allowlistitem.FieldStartIP: + return m.StartIP() + case allowlistitem.FieldEndIP: + return m.EndIP() + case allowlistitem.FieldStartSuffix: + return m.StartSuffix() + case allowlistitem.FieldEndSuffix: + return m.EndSuffix() + case allowlistitem.FieldIPSize: + return m.IPSize() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *AllowListItemMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case allowlistitem.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case allowlistitem.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case allowlistitem.FieldExpiresAt: + return m.OldExpiresAt(ctx) + case allowlistitem.FieldComment: + return m.OldComment(ctx) + case allowlistitem.FieldValue: + return m.OldValue(ctx) + case allowlistitem.FieldStartIP: + return m.OldStartIP(ctx) + case allowlistitem.FieldEndIP: + return m.OldEndIP(ctx) + case allowlistitem.FieldStartSuffix: + return m.OldStartSuffix(ctx) + case allowlistitem.FieldEndSuffix: + return m.OldEndSuffix(ctx) + case allowlistitem.FieldIPSize: + return m.OldIPSize(ctx) + } + return nil, fmt.Errorf("unknown AllowListItem field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AllowListItemMutation) SetField(name string, value ent.Value) error { + switch name { + case allowlistitem.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case allowlistitem.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case allowlistitem.FieldExpiresAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiresAt(v) + return nil + case allowlistitem.FieldComment: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetComment(v) + return nil + case allowlistitem.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + case 
allowlistitem.FieldStartIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartIP(v) + return nil + case allowlistitem.FieldEndIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndIP(v) + return nil + case allowlistitem.FieldStartSuffix: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartSuffix(v) + return nil + case allowlistitem.FieldEndSuffix: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndSuffix(v) + return nil + case allowlistitem.FieldIPSize: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPSize(v) + return nil + } + return fmt.Errorf("unknown AllowListItem field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AllowListItemMutation) AddedFields() []string { + var fields []string + if m.addstart_ip != nil { + fields = append(fields, allowlistitem.FieldStartIP) + } + if m.addend_ip != nil { + fields = append(fields, allowlistitem.FieldEndIP) + } + if m.addstart_suffix != nil { + fields = append(fields, allowlistitem.FieldStartSuffix) + } + if m.addend_suffix != nil { + fields = append(fields, allowlistitem.FieldEndSuffix) + } + if m.addip_size != nil { + fields = append(fields, allowlistitem.FieldIPSize) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. 
+func (m *AllowListItemMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case allowlistitem.FieldStartIP: + return m.AddedStartIP() + case allowlistitem.FieldEndIP: + return m.AddedEndIP() + case allowlistitem.FieldStartSuffix: + return m.AddedStartSuffix() + case allowlistitem.FieldEndSuffix: + return m.AddedEndSuffix() + case allowlistitem.FieldIPSize: + return m.AddedIPSize() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AllowListItemMutation) AddField(name string, value ent.Value) error { + switch name { + case allowlistitem.FieldStartIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddStartIP(v) + return nil + case allowlistitem.FieldEndIP: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddEndIP(v) + return nil + case allowlistitem.FieldStartSuffix: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddStartSuffix(v) + return nil + case allowlistitem.FieldEndSuffix: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddEndSuffix(v) + return nil + case allowlistitem.FieldIPSize: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddIPSize(v) + return nil + } + return fmt.Errorf("unknown AllowListItem numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *AllowListItemMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(allowlistitem.FieldExpiresAt) { + fields = append(fields, allowlistitem.FieldExpiresAt) + } + if m.FieldCleared(allowlistitem.FieldComment) { + fields = append(fields, allowlistitem.FieldComment) + } + if m.FieldCleared(allowlistitem.FieldStartIP) { + fields = append(fields, allowlistitem.FieldStartIP) + } + if m.FieldCleared(allowlistitem.FieldEndIP) { + fields = append(fields, allowlistitem.FieldEndIP) + } + if m.FieldCleared(allowlistitem.FieldStartSuffix) { + fields = append(fields, allowlistitem.FieldStartSuffix) + } + if m.FieldCleared(allowlistitem.FieldEndSuffix) { + fields = append(fields, allowlistitem.FieldEndSuffix) + } + if m.FieldCleared(allowlistitem.FieldIPSize) { + fields = append(fields, allowlistitem.FieldIPSize) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AllowListItemMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AllowListItemMutation) ClearField(name string) error { + switch name { + case allowlistitem.FieldExpiresAt: + m.ClearExpiresAt() + return nil + case allowlistitem.FieldComment: + m.ClearComment() + return nil + case allowlistitem.FieldStartIP: + m.ClearStartIP() + return nil + case allowlistitem.FieldEndIP: + m.ClearEndIP() + return nil + case allowlistitem.FieldStartSuffix: + m.ClearStartSuffix() + return nil + case allowlistitem.FieldEndSuffix: + m.ClearEndSuffix() + return nil + case allowlistitem.FieldIPSize: + m.ClearIPSize() + return nil + } + return fmt.Errorf("unknown AllowListItem nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. 
+// It returns an error if the field is not defined in the schema. +func (m *AllowListItemMutation) ResetField(name string) error { + switch name { + case allowlistitem.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case allowlistitem.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case allowlistitem.FieldExpiresAt: + m.ResetExpiresAt() + return nil + case allowlistitem.FieldComment: + m.ResetComment() + return nil + case allowlistitem.FieldValue: + m.ResetValue() + return nil + case allowlistitem.FieldStartIP: + m.ResetStartIP() + return nil + case allowlistitem.FieldEndIP: + m.ResetEndIP() + return nil + case allowlistitem.FieldStartSuffix: + m.ResetStartSuffix() + return nil + case allowlistitem.FieldEndSuffix: + m.ResetEndSuffix() + return nil + case allowlistitem.FieldIPSize: + m.ResetIPSize() + return nil + } + return fmt.Errorf("unknown AllowListItem field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AllowListItemMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.allowlist != nil { + edges = append(edges, allowlistitem.EdgeAllowlist) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AllowListItemMutation) AddedIDs(name string) []ent.Value { + switch name { + case allowlistitem.EdgeAllowlist: + ids := make([]ent.Value, 0, len(m.allowlist)) + for id := range m.allowlist { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AllowListItemMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedallowlist != nil { + edges = append(edges, allowlistitem.EdgeAllowlist) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *AllowListItemMutation) RemovedIDs(name string) []ent.Value { + switch name { + case allowlistitem.EdgeAllowlist: + ids := make([]ent.Value, 0, len(m.removedallowlist)) + for id := range m.removedallowlist { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AllowListItemMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedallowlist { + edges = append(edges, allowlistitem.EdgeAllowlist) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AllowListItemMutation) EdgeCleared(name string) bool { + switch name { + case allowlistitem.EdgeAllowlist: + return m.clearedallowlist + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AllowListItemMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown AllowListItem unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AllowListItemMutation) ResetEdge(name string) error { + switch name { + case allowlistitem.EdgeAllowlist: + m.ResetAllowlist() + return nil + } + return fmt.Errorf("unknown AllowListItem edge %s", name) +} + // BouncerMutation represents an operation that mutates the Bouncer nodes in the graph. type BouncerMutation struct { config diff --git a/pkg/database/ent/predicate/predicate.go b/pkg/database/ent/predicate/predicate.go index 8ad03e2fc48..97e574aa167 100644 --- a/pkg/database/ent/predicate/predicate.go +++ b/pkg/database/ent/predicate/predicate.go @@ -9,6 +9,12 @@ import ( // Alert is the predicate function for alert builders. 
type Alert func(*sql.Selector) +// AllowList is the predicate function for allowlist builders. +type AllowList func(*sql.Selector) + +// AllowListItem is the predicate function for allowlistitem builders. +type AllowListItem func(*sql.Selector) + // Bouncer is the predicate function for bouncer builders. type Bouncer func(*sql.Selector) diff --git a/pkg/database/ent/runtime.go b/pkg/database/ent/runtime.go index 49921a17b03..989e67fda7d 100644 --- a/pkg/database/ent/runtime.go +++ b/pkg/database/ent/runtime.go @@ -6,6 +6,8 @@ import ( "time" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" @@ -56,6 +58,30 @@ func init() { alertDescSimulated := alertFields[21].Descriptor() // alert.DefaultSimulated holds the default value on creation for the simulated field. alert.DefaultSimulated = alertDescSimulated.Default.(bool) + allowlistFields := schema.AllowList{}.Fields() + _ = allowlistFields + // allowlistDescCreatedAt is the schema descriptor for created_at field. + allowlistDescCreatedAt := allowlistFields[0].Descriptor() + // allowlist.DefaultCreatedAt holds the default value on creation for the created_at field. + allowlist.DefaultCreatedAt = allowlistDescCreatedAt.Default.(func() time.Time) + // allowlistDescUpdatedAt is the schema descriptor for updated_at field. + allowlistDescUpdatedAt := allowlistFields[1].Descriptor() + // allowlist.DefaultUpdatedAt holds the default value on creation for the updated_at field. + allowlist.DefaultUpdatedAt = allowlistDescUpdatedAt.Default.(func() time.Time) + // allowlist.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. 
+ allowlist.UpdateDefaultUpdatedAt = allowlistDescUpdatedAt.UpdateDefault.(func() time.Time) + allowlistitemFields := schema.AllowListItem{}.Fields() + _ = allowlistitemFields + // allowlistitemDescCreatedAt is the schema descriptor for created_at field. + allowlistitemDescCreatedAt := allowlistitemFields[0].Descriptor() + // allowlistitem.DefaultCreatedAt holds the default value on creation for the created_at field. + allowlistitem.DefaultCreatedAt = allowlistitemDescCreatedAt.Default.(func() time.Time) + // allowlistitemDescUpdatedAt is the schema descriptor for updated_at field. + allowlistitemDescUpdatedAt := allowlistitemFields[1].Descriptor() + // allowlistitem.DefaultUpdatedAt holds the default value on creation for the updated_at field. + allowlistitem.DefaultUpdatedAt = allowlistitemDescUpdatedAt.Default.(func() time.Time) + // allowlistitem.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + allowlistitem.UpdateDefaultUpdatedAt = allowlistitemDescUpdatedAt.UpdateDefault.(func() time.Time) bouncerFields := schema.Bouncer{}.Fields() _ = bouncerFields // bouncerDescCreatedAt is the schema descriptor for created_at field. diff --git a/pkg/database/ent/schema/allowlist.go b/pkg/database/ent/schema/allowlist.go new file mode 100644 index 00000000000..0e774aa211e --- /dev/null +++ b/pkg/database/ent/schema/allowlist.go @@ -0,0 +1,44 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// Alert holds the schema definition for the Alert entity. +type AllowList struct { + ent.Schema +} + +// Fields of the Alert. +func (AllowList) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(types.UtcNow). + Immutable(), + field.Time("updated_at"). + Default(types.UtcNow). 
+ UpdateDefault(types.UtcNow), + field.String("name"), + field.Bool("from_console"), + field.String("description").Optional(), + field.String("allowlist_id").Optional().Immutable(), + } +} + +func (AllowList) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("id").Unique(), + index.Fields("name").Unique(), + } +} + +func (AllowList) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("allowlist_items", AllowListItem.Type), + } +} diff --git a/pkg/database/ent/schema/allowlist_item.go b/pkg/database/ent/schema/allowlist_item.go new file mode 100644 index 00000000000..7a58387d682 --- /dev/null +++ b/pkg/database/ent/schema/allowlist_item.go @@ -0,0 +1,51 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// AllowListItem holds the schema definition for the AllowListItem entity. +type AllowListItem struct { + ent.Schema +} + +// Fields of the AllowListItem. +func (AllowListItem) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Default(types.UtcNow). + Immutable(), + field.Time("updated_at"). + Default(types.UtcNow). + UpdateDefault(types.UtcNow), + field.Time("expires_at"). + Optional(), + field.String("comment").Optional().Immutable(), + field.String("value").Immutable(), // For textual representation of the IP/range + // Use the same fields as the decision table + field.Int64("start_ip").Optional().Immutable(), + field.Int64("end_ip").Optional().Immutable(), + field.Int64("start_suffix").Optional().Immutable(), + field.Int64("end_suffix").Optional().Immutable(), + field.Int64("ip_size").Optional().Immutable(), + } +} + +func (AllowListItem) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("id"), + index.Fields("start_ip", "end_ip"), + } +} + +func (AllowListItem) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("allowlist", AllowList.Type). 
+ Ref("allowlist_items"), + } +} diff --git a/pkg/database/ent/tx.go b/pkg/database/ent/tx.go index bf8221ce4a5..69983beebc5 100644 --- a/pkg/database/ent/tx.go +++ b/pkg/database/ent/tx.go @@ -14,6 +14,10 @@ type Tx struct { config // Alert is the client for interacting with the Alert builders. Alert *AlertClient + // AllowList is the client for interacting with the AllowList builders. + AllowList *AllowListClient + // AllowListItem is the client for interacting with the AllowListItem builders. + AllowListItem *AllowListItemClient // Bouncer is the client for interacting with the Bouncer builders. Bouncer *BouncerClient // ConfigItem is the client for interacting with the ConfigItem builders. @@ -162,6 +166,8 @@ func (tx *Tx) Client() *Client { func (tx *Tx) init() { tx.Alert = NewAlertClient(tx.config) + tx.AllowList = NewAllowListClient(tx.config) + tx.AllowListItem = NewAllowListItemClient(tx.config) tx.Bouncer = NewBouncerClient(tx.config) tx.ConfigItem = NewConfigItemClient(tx.config) tx.Decision = NewDecisionClient(tx.config) diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 4a3a93a406c..e1b5f9f4471 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -13,6 +13,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" "github.com/crowdsecurity/crowdsec/pkg/database/ent/bouncer" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" @@ -115,6 +116,13 @@ func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.Flush metricsJob.SingletonMode() + allowlistsJob, err := scheduler.Every(flushInterval).Do(c.flushAllowlists, ctx) + if err != nil { + return nil, fmt.Errorf("while starting FlushAllowlists scheduler: %w", err) + } + + allowlistsJob.SingletonMode() + scheduler.StartAsync() return scheduler, nil @@ -309,3 +317,17 
@@ func (c *Client) FlushAlerts(ctx context.Context, maxAge string, maxItems int) e return nil } + +func (c *Client) flushAllowlists(ctx context.Context) { + deleted, err := c.Ent.AllowListItem.Delete().Where( + allowlistitem.ExpiresAtLTE(time.Now().UTC()), + ).Exec(ctx) + if err != nil { + c.Log.Errorf("while flushing allowlists: %s", err) + return + } + + if deleted > 0 { + c.Log.Debugf("flushed %d allowlists", deleted) + } +} diff --git a/pkg/models/allowlist_item.go b/pkg/models/allowlist_item.go new file mode 100644 index 00000000000..3d688d52e5d --- /dev/null +++ b/pkg/models/allowlist_item.go @@ -0,0 +1,100 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AllowlistItem AllowlistItem +// +// swagger:model AllowlistItem +type AllowlistItem struct { + + // creation date of the allowlist item + // Format: date-time + CreatedAt strfmt.DateTime `json:"created_at,omitempty"` + + // description of the allowlist item + Description string `json:"description,omitempty"` + + // expiration date of the allowlist item + // Format: date-time + Expiration strfmt.DateTime `json:"expiration,omitempty"` + + // value of the allowlist item + Value string `json:"value,omitempty"` +} + +// Validate validates this allowlist item +func (m *AllowlistItem) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreatedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateExpiration(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AllowlistItem) validateCreatedAt(formats strfmt.Registry) error { + if swag.IsZero(m.CreatedAt) { // not required + return nil + } + + if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *AllowlistItem) validateExpiration(formats strfmt.Registry) error { + if swag.IsZero(m.Expiration) { // not required + return nil + } + + if err := validate.FormatOf("expiration", "body", "date-time", m.Expiration.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this allowlist item based on context it is used +func (m *AllowlistItem) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *AllowlistItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AllowlistItem) UnmarshalBinary(b []byte) error { + var res AllowlistItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/check_allowlist_response.go b/pkg/models/check_allowlist_response.go new file mode 100644 index 00000000000..e6dbd271abb --- /dev/null +++ b/pkg/models/check_allowlist_response.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// CheckAllowlistResponse CheckAllowlistResponse +// +// swagger:model CheckAllowlistResponse +type CheckAllowlistResponse struct { + + // true if the IP or range is in the allowlist + Allowlisted bool `json:"allowlisted,omitempty"` + + // item that matched the provided value + Reason string `json:"reason,omitempty"` +} + +// Validate validates this check allowlist response +func (m *CheckAllowlistResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this check allowlist response based on context it is used +func (m *CheckAllowlistResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *CheckAllowlistResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CheckAllowlistResponse) UnmarshalBinary(b []byte) error { + var res CheckAllowlistResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/get_allowlist_response.go b/pkg/models/get_allowlist_response.go new file mode 100644 index 00000000000..4459457ecb3 --- /dev/null +++ b/pkg/models/get_allowlist_response.go @@ -0,0 +1,174 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// GetAllowlistResponse GetAllowlistResponse +// +// swagger:model GetAllowlistResponse +type GetAllowlistResponse struct { + + // id of the allowlist + AllowlistID string `json:"allowlist_id,omitempty"` + + // true if the allowlist is managed by the console + ConsoleManaged bool `json:"console_managed,omitempty"` + + // creation date of the allowlist + // Format: date-time + CreatedAt strfmt.DateTime `json:"created_at,omitempty"` + + // description of the allowlist + Description string `json:"description,omitempty"` + + // items in the allowlist + Items []*AllowlistItem `json:"items"` + + // name of the allowlist + Name string `json:"name,omitempty"` + + // last update date of the allowlist + // Format: date-time + UpdatedAt strfmt.DateTime `json:"updated_at,omitempty"` +} + +// Validate validates this get allowlist response +func (m *GetAllowlistResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreatedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateItems(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUpdatedAt(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *GetAllowlistResponse) validateCreatedAt(formats strfmt.Registry) error { + if swag.IsZero(m.CreatedAt) { // not required + return nil + } + + if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *GetAllowlistResponse) validateItems(formats strfmt.Registry) error { + if swag.IsZero(m.Items) { // not required + return nil + } + + for i := 0; i < len(m.Items); i++ { + if swag.IsZero(m.Items[i]) { // not required + continue + } + + if m.Items[i] != nil { + if err := m.Items[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("items" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("items" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *GetAllowlistResponse) validateUpdatedAt(formats strfmt.Registry) error { + if swag.IsZero(m.UpdatedAt) { // not required + return nil + } + + if err := validate.FormatOf("updated_at", "body", "date-time", m.UpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this get allowlist response based on the context it is used +func (m *GetAllowlistResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateItems(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *GetAllowlistResponse) contextValidateItems(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Items); i++ { + + if m.Items[i] != nil { + + if swag.IsZero(m.Items[i]) { // not required + return nil + } + + if err := m.Items[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("items" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("items" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *GetAllowlistResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *GetAllowlistResponse) UnmarshalBinary(b []byte) error { + var res GetAllowlistResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/get_allowlists_response.go b/pkg/models/get_allowlists_response.go new file mode 100644 index 00000000000..dd6a80918c6 --- /dev/null +++ b/pkg/models/get_allowlists_response.go @@ -0,0 +1,78 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GetAllowlistsResponse GetAllowlistsResponse +// +// swagger:model GetAllowlistsResponse +type GetAllowlistsResponse []*GetAllowlistResponse + +// Validate validates this get allowlists response +func (m GetAllowlistsResponse) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +// ContextValidate validate this get allowlists response based on the context it is used +func (m GetAllowlistsResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 01bbe6f8bde..3de9b7351c8 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -719,6 +719,120 @@ paths: security: - APIKeyAuthorizer: [] - JWTAuthorizer: [] + /allowlists: + get: + description: Get a list of all allowlists + summary: getAllowlists + tags: + - watchers + operationId: getAllowlists + produces: + - application/json + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/GetAllowlistsResponse' + headers: {} + /allowlists/{allowlist_name}: + get: + description: Get a specific allowlist + summary: getAllowlist + tags: + - watchers + operationId: getAllowlist + produces: + - application/json + parameters: + - name: allowlist_name + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/GetAllowlistResponse' + headers: {} + '404': + description: "404 response" + schema: + $ref: "#/definitions/ErrorResponse" + head: + description: Get a specific allowlist + summary: getAllowlist + tags: + - watchers + operationId: headAllowlist + produces: + - application/json + parameters: + - name: allowlist_name + in: 
path + required: true + type: string + description: '' + - name: with_content + in: query + required: false + type: boolean + description: 'if true, the content of the allowlist will be returned as well' + responses: + '200': + description: successful operation + headers: {} + '404': + description: "404 response" + /allowlists/check/{ip_or_range}: + get: + description: Check if an IP or range is in an allowlist + summary: checkAllowlist + tags: + - watchers + operationId: checkAllowlist + produces: + - application/json + parameters: + - name: ip_or_range + in: path + required: true + type: string + description: '' + responses: + '200': + description: successful operation + schema: + $ref: '#/definitions/CheckAllowlistResponse' + headers: {} + '400': + description: "missing ip_or_range" + schema: + $ref: "#/definitions/ErrorResponse" + head: + description: Check if an IP or range is in an allowlist + summary: checkAllowlist + tags: + - watchers + operationId: headCheckAllowlist + produces: + - application/json + parameters: + - name: ip_or_range + in: path + required: true + type: string + description: '' + responses: + '200': + description: IP or range is in an allowlist + headers: {} + '204': + description: "IP or range is not in an allowlist" + '400': + description: "missing ip_or_range" + schema: + $ref: "#/definitions/ErrorResponse" definitions: WatcherRegistrationRequest: title: WatcherRegistrationRequest @@ -1220,6 +1334,68 @@ definitions: status: type: string description: status of the hub item (official, custom, tainted, etc.) 
+ GetAllowlistsResponse: + title: GetAllowlistsResponse + type: array + items: + $ref: '#/definitions/GetAllowlistResponse' + GetAllowlistResponse: + title: GetAllowlistResponse + type: object + properties: + name: + type: string + description: name of the allowlist + allowlist_id: + type: string + description: id of the allowlist + description: + type: string + description: description of the allowlist + items: + type: array + items: + $ref: '#/definitions/AllowlistItem' + description: items in the allowlist + created_at: + type: string + format: date-time + description: creation date of the allowlist + updated_at: + type: string + format: date-time + description: last update date of the allowlist + console_managed: + type: boolean + description: true if the allowlist is managed by the console + AllowlistItem: + title: AllowlistItem + type: object + properties: + value: + type: string + description: value of the allowlist item + description: + type: string + description: description of the allowlist item + created_at: + type: string + format: date-time + description: creation date of the allowlist item + expiration: + type: string + format: date-time + description: expiration date of the allowlist item + CheckAllowlistResponse: + title: CheckAllowlistResponse + type: object + properties: + allowlisted: + type: boolean + description: 'true if the IP or range is in the allowlist' + reason: + type: string + description: 'item that matched the provided value' ErrorResponse: type: "object" required: diff --git a/pkg/modelscapi/allowlist_link.go b/pkg/modelscapi/allowlist_link.go new file mode 100644 index 00000000000..ce9fce17357 --- /dev/null +++ b/pkg/modelscapi/allowlist_link.go @@ -0,0 +1,166 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package modelscapi + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// AllowlistLink allowlist link +// +// swagger:model AllowlistLink +type AllowlistLink struct { + + // the creation date of the allowlist + // Required: true + // Format: date-time + CreatedAt *strfmt.DateTime `json:"created_at"` + + // the description of the allowlist + // Required: true + Description *string `json:"description"` + + // the id of the allowlist + // Required: true + ID *string `json:"id"` + + // the name of the allowlist + // Required: true + Name *string `json:"name"` + + // the last update date of the allowlist + // Required: true + // Format: date-time + UpdatedAt *strfmt.DateTime `json:"updated_at"` + + // the url from which the allowlist content can be downloaded + // Required: true + URL *string `json:"url"` +} + +// Validate validates this allowlist link +func (m *AllowlistLink) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCreatedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateDescription(formats); err != nil { + res = append(res, err) + } + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if err := m.validateName(formats); err != nil { + res = append(res, err) + } + + if err := m.validateUpdatedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateURL(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *AllowlistLink) validateCreatedAt(formats strfmt.Registry) error { + + if err := validate.Required("created_at", "body", m.CreatedAt); err != nil { + return err + } + + if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *AllowlistLink) validateDescription(formats strfmt.Registry) error { + + if err := validate.Required("description", "body", m.Description); err != nil { + return err + } + + return nil +} + +func (m *AllowlistLink) validateID(formats strfmt.Registry) error { + + if err := validate.Required("id", "body", m.ID); err != nil { + return err + } + + return nil +} + +func (m *AllowlistLink) validateName(formats strfmt.Registry) error { + + if err := validate.Required("name", "body", m.Name); err != nil { + return err + } + + return nil +} + +func (m *AllowlistLink) validateUpdatedAt(formats strfmt.Registry) error { + + if err := validate.Required("updated_at", "body", m.UpdatedAt); err != nil { + return err + } + + if err := validate.FormatOf("updated_at", "body", "date-time", m.UpdatedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *AllowlistLink) validateURL(formats strfmt.Registry) error { + + if err := validate.Required("url", "body", m.URL); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this allowlist link based on context it is used +func (m *AllowlistLink) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *AllowlistLink) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *AllowlistLink) UnmarshalBinary(b []byte) error { + var res AllowlistLink + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git 
a/pkg/modelscapi/centralapi_swagger.yaml b/pkg/modelscapi/centralapi_swagger.yaml index c75233809c8..6a830ee3820 100644 --- a/pkg/modelscapi/centralapi_swagger.yaml +++ b/pkg/modelscapi/centralapi_swagger.yaml @@ -548,6 +548,37 @@ definitions: description: "the scope of decisions in the blocklist" duration: type: string + AllowlistLink: + type: object + required: + - name + - description + - url + - id + - created_at + - updated_at + properties: + name: + type: string + description: "the name of the allowlist" + description: + type: string + description: "the description of the allowlist" + url: + type: string + description: "the url from which the allowlist content can be downloaded" + id: + type: string + description: "the id of the allowlist" + created_at: + type: string + format: date-time + description: "the creation date of the allowlist" + updated_at: + type: string + format: date-time + description: "the last update date of the allowlist" + AddSignalsRequestItemDecisionsItem: type: "object" required: @@ -885,4 +916,8 @@ definitions: type: array items: $ref: "#/definitions/BlocklistLink" + allowlists: + type: array + items: + $ref: "#/definitions/AllowlistLink" diff --git a/pkg/modelscapi/get_decisions_stream_response_links.go b/pkg/modelscapi/get_decisions_stream_response_links.go index 6b9054574f1..f9e320aee38 100644 --- a/pkg/modelscapi/get_decisions_stream_response_links.go +++ b/pkg/modelscapi/get_decisions_stream_response_links.go @@ -19,6 +19,9 @@ import ( // swagger:model GetDecisionsStreamResponseLinks type GetDecisionsStreamResponseLinks struct { + // allowlists + Allowlists []*AllowlistLink `json:"allowlists"` + // blocklists Blocklists []*BlocklistLink `json:"blocklists"` } @@ -27,6 +30,10 @@ type GetDecisionsStreamResponseLinks struct { func (m *GetDecisionsStreamResponseLinks) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateAllowlists(formats); err != nil { + res = append(res, err) + } + if err := 
m.validateBlocklists(formats); err != nil { res = append(res, err) } @@ -37,6 +44,32 @@ func (m *GetDecisionsStreamResponseLinks) Validate(formats strfmt.Registry) erro return nil } +func (m *GetDecisionsStreamResponseLinks) validateAllowlists(formats strfmt.Registry) error { + if swag.IsZero(m.Allowlists) { // not required + return nil + } + + for i := 0; i < len(m.Allowlists); i++ { + if swag.IsZero(m.Allowlists[i]) { // not required + continue + } + + if m.Allowlists[i] != nil { + if err := m.Allowlists[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("allowlists" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("allowlists" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + func (m *GetDecisionsStreamResponseLinks) validateBlocklists(formats strfmt.Registry) error { if swag.IsZero(m.Blocklists) { // not required return nil @@ -67,6 +100,10 @@ func (m *GetDecisionsStreamResponseLinks) validateBlocklists(formats strfmt.Regi func (m *GetDecisionsStreamResponseLinks) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error + if err := m.contextValidateAllowlists(ctx, formats); err != nil { + res = append(res, err) + } + if err := m.contextValidateBlocklists(ctx, formats); err != nil { res = append(res, err) } @@ -77,6 +114,31 @@ func (m *GetDecisionsStreamResponseLinks) ContextValidate(ctx context.Context, f return nil } +func (m *GetDecisionsStreamResponseLinks) contextValidateAllowlists(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Allowlists); i++ { + + if m.Allowlists[i] != nil { + + if swag.IsZero(m.Allowlists[i]) { // not required + return nil + } + + if err := m.Allowlists[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("allowlists" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("allowlists" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + func (m *GetDecisionsStreamResponseLinks) contextValidateBlocklists(ctx context.Context, formats strfmt.Registry) error { for i := 0; i < len(m.Blocklists); i++ { diff --git a/test/bats/cscli-allowlists.bats b/test/bats/cscli-allowlists.bats new file mode 100644 index 00000000000..73305dd89cb --- /dev/null +++ b/test/bats/cscli-allowlists.bats @@ -0,0 +1,216 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + ./instance-crowdsec start +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli allowlists list (empty)" { + rune -0 cscli allowlists list + assert_output - <<-EOT + --------------------------------------------------------------------- + Name Description Created at Updated at Managed by Console Size + --------------------------------------------------------------------- + --------------------------------------------------------------------- + EOT + + rune -0 cscli allowlists list -o raw + assert_output 'name,description,created_at,updated_at,console_managed,size' + + rune -0 cscli allowlists list -o json + assert_json '[]' + + # sub-command alias, like "decisions", "collections..." 
+ rune -0 cscli allowlist list -o json + assert_json '[]' + + rune -0 cscli allowlist create foo -d 'a foo' + rune -0 cscli allowlist add foo 1.1.1.1 + + rune -0 cscli allowlists list + assert_output - --regexp <<-EOT + --------------------.* + Name Description .* Managed by Console Size + --------------------.* + foo a foo .* no 1 + --------------------.* + EOT + + # requires LAPI + ./instance-crowdsec stop + rune -1 wait-for --err 'error while performing request' "$CSCLI" allowlists list +} + +@test "cscli allowlists create" { + rune -1 cscli allowlist create + assert_stderr 'Error: accepts 1 arg(s), received 0' + + rune -1 cscli allowlist create foo + assert_stderr 'Error: required flag(s) "description" not set' + + rune -0 cscli allowlist create foo -d "A Foo" + assert_output "allowlist 'foo' created successfully" + + rune -1 cscli allowlist create foo -d "Another Foo" + assert_stderr "Error: allowlist 'foo' already exists" + + rune -0 cscli allowlists list -o json + rune -0 jq 'del(.[].created_at) | del(.[].updated_at)' <(output) + assert_json '[{"description":"A Foo","items":[],"name":"foo"}]' + + rune -0 cscli allowlist create Foo -d "Another Foo" + assert_output "allowlist 'Foo' created successfully" +} + +@test "cscli allowlists add" { + rune -1 cscli allowlist add + assert_stderr 'Error: requires at least 2 arg(s), only received 0' + + rune -1 cscli allowlist add foo + assert_stderr 'Error: requires at least 2 arg(s), only received 1' + + rune -1 cscli allowlist add foo bar + assert_stderr "Error: allowlist 'foo' not found" + + rune -0 cscli allowlist create foo -d 'a foo' + + rune -0 cscli allowlist add foo bar + # XXX: here we should return an error? + # and it's currently displayed as ERRO[0000] -- client logger has no formatter? 
+ assert_stderr --partial "level=error msg=\"invalid ip address 'bar'\"" + refute_output + + rune -0 cscli allowlist add foo 1.1.1.256 + assert_stderr --partial "level=error msg=\"invalid ip address '1.1.1.256'\"" + refute_output + + rune -0 cscli allowlist add foo 1.1.1.1/2/3 + assert_stderr --partial "level=error msg=\"invalid ip range '1.1.1.1/2/3': invalid CIDR address: 1.1.1.1/2/3\"" + refute_output + + rune -0 cscli allowlist add foo 1.2.3.4 + refute_stderr + assert_output 'added 1 values to allowlist foo' + + rune -0 cscli allowlist add foo 1.2.3.4 + assert_stderr --partial 'level=warning msg="value 1.2.3.4 already in allowlist"' + assert_output 'no new values for allowlist' + + rune -0 cscli allowlist add foo 5.6.7.8/24 9.10.11.12 + assert_output 'added 2 values to allowlist foo' + + # comment and expiration are applied to all values + rune -1 cscli allowlist add foo 10.10.10.10 10.20.30.40 -d comment -e toto + assert_stderr 'Error: time: invalid duration "toto"' + refute_output + + rune -1 cscli allowlist add foo 10.10.10.10 10.20.30.40 -d comment -e '1 day' + refute_output + assert_stderr 'Error: strconv.Atoi: parsing "1 ": invalid syntax' + + rune -0 cscli allowlist add foo 10.10.10.10 -d comment -e '1d' + assert_output 'added 1 values to allowlist foo' + refute_stderr + + rune -0 cscli allowlist add foo 10.20.30.40 -d comment -e '30m' + assert_output 'added 1 values to allowlist foo' + refute_stderr +} + +@test "cscli allowlists delete" { + rune -1 cscli allowlist delete + assert_stderr 'Error: accepts 1 arg(s), received 0' + + rune -1 cscli allowlist delete does-not-exist + assert_stderr "Error: allowlist 'does-not-exist' not found" + + rune -0 cscli allowlist create foo -d "A Foo" + rune -0 cscli allowlist add foo 1.2.3.4 + + rune -0 cscli allowlist delete foo + assert_output "allowlist 'foo' deleted successfully" + refute_stderr +} + +@test "cscli allowlists inspect" { + rune -1 cscli allowlist inspect + assert_stderr 'Error: accepts 1 arg(s), 
received 0' + + rune -0 cscli allowlist create foo -d "A Foo" + assert_output "allowlist 'foo' created successfully" + + rune -0 cscli allowlist add foo 1.2.3.4 + + rune -0 cscli allowlist inspect foo + assert_output - --regexp <<-EOT + ---------------------.* + Allowlist: foo .* + ---------------------.* + Name foo .* + Description A Foo .* + Created at .* + Updated at .* + Managed by Console no .* + ---------------------.* + ------------------------------------------.* + Value Comment Expiration Created at .* + ------------------------------------------.* + 1.2.3.4 never .* + ------------------------------------------.* + EOT + + rune -0 cscli allowlist inspect foo -o raw + assert_output - --regexp <<-EOT + name,description,value,comment,expiration,created_at,console_managed + foo,A Foo,1.2.3.4,,never,.*,false + EOT + + rune -0 cscli allowlist inspect foo -o json + rune -0 jq 'del(.created_at) | del(.updated_at) | del(.items.[].created_at) | del(.items.[].expiration)' <(output) + assert_json '{"description":"A Foo","items":[{"value":"1.2.3.4"}],"name":"foo"}' +} + +@test "cscli allowlists remove" { + rune -1 cscli allowlist remove + assert_stderr 'Error: requires at least 2 arg(s), only received 0' + + rune -1 cscli allowlist remove foo + assert_stderr 'Error: requires at least 2 arg(s), only received 1' + + rune -1 cscli allowlist remove foo 1.2.3.4 + assert_stderr "Error: allowlist 'foo' not found" + + rune -0 cscli allowlist create foo -d 'a foo' + # no error, should be ok + rune -0 cscli allowlist remove foo 1.2.3.4 + assert_output 'no value to remove from allowlist' + + rune -0 cscli allowlist add foo 1.2.3.4 5.6.7.8 + rune -0 cscli allowlist remove foo 1.2.3.4 + assert_output 'removed 1 values from allowlist foo' + + rune -0 cscli allowlist remove foo 1.2.3.4 5.6.7.8 + refute_stderr + assert_output 'removed 1 values from allowlist foo' + rune -0 cscli allowlist inspect foo -o json + rune -0 jq 'del(.created_at) | del(.updated_at) | del(.items.[].created_at) 
| del(.items.[].expiration)' <(output) + assert_json '{"description":"a foo","items":[],"name":"foo"}' +} From 19eb278104befc51392454755d0a381c4fec30ed Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Feb 2025 21:35:57 +0100 Subject: [PATCH 438/581] update dependencies: color, go-sqlite3, tail, slack, testify (#3474) --- go.mod | 18 +++++++++--------- go.sum | 38 +++++++++++++++++++++----------------- 2 files changed, 30 insertions(+), 26 deletions(-) diff --git a/go.mod b/go.mod index 8c12c029203..51b8aacabcf 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/docker/go-connections v0.5.0 github.com/docker/go-units v0.5.0 // indirect github.com/expr-lang/expr v1.16.9 - github.com/fatih/color v1.16.0 + github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.7.0 github.com/gin-gonic/gin v1.10.0 github.com/go-co-op/gocron v1.37.0 @@ -66,13 +66,13 @@ require ( github.com/klauspost/compress v1.17.9 // indirect github.com/lithammer/dedent v1.1.0 github.com/mattn/go-isatty v0.0.20 - github.com/mattn/go-sqlite3 v1.14.16 + github.com/mattn/go-sqlite3 v1.14.24 github.com/mitchellh/copystructure v1.2.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/morikuni/aec v1.0.0 // indirect - github.com/nxadm/tail v1.4.8 + github.com/nxadm/tail v1.4.11 github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/oschwald/geoip2-golang v1.9.0 @@ -87,10 +87,10 @@ require ( github.com/segmentio/kafka-go v0.4.45 github.com/shirou/gopsutil/v3 v3.23.5 github.com/sirupsen/logrus v1.9.3 - github.com/slack-go/slack v0.12.2 + github.com/slack-go/slack v0.16.0 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/umahmood/haversine 
v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 @@ -154,8 +154,8 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect - github.com/huandu/xstrings v1.3.3 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/huandu/xstrings v1.5.0 // indirect + github.com/imdario/mergo v0.3.11 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.14.3 // indirect @@ -198,8 +198,8 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/shopspring/decimal v1.2.0 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/spf13/cast v1.7.0 // indirect github.com/tetratelabs/wazero v1.8.0 // indirect github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.1.1 // indirect diff --git a/go.sum b/go.sum index 12aa4934b1f..d618d71c8d2 100644 --- a/go.sum +++ b/go.sum @@ -138,13 +138,15 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI= github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.4 
h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA= @@ -363,11 +365,11 @@ github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUq github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog= github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod 
h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -513,8 +515,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= +github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -556,8 +558,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= 
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -642,18 +644,20 @@ github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/slack-go/slack v0.12.2 h1:x3OppyMyGIbbiyFhsBmpf9pwkUzMhthJMRNmNlA4LaQ= -github.com/slack-go/slack v0.12.2/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/slack-go/slack v0.16.0 
h1:khp/WCFv+Hb/B/AJaAwvcxKun0hM6grN0bUZ8xG60P8= +github.com/slack-go/slack v0.16.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= @@ -681,8 +685,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= @@ -859,7 +863,6 @@ golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -876,6 +879,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 45624c6fe5b87ab793dafe8c7fe710d76f7d8a41 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Feb 2025 00:09:11 +0100 Subject: [PATCH 439/581] tests: switch context.Background() -> t.Context() from go 1.24 (#3473) --- pkg/acquisition/acquisition_test.go | 7 +-- .../modules/cloudwatch/cloudwatch_test.go | 48 ++++++++++++++++--- pkg/acquisition/modules/docker/docker_test.go | 29 +++++------ pkg/acquisition/modules/file/file_test.go | 5 +- pkg/acquisition/modules/http/http_test.go | 4 +- .../modules/journalctl/journalctl_test.go | 5 +- pkg/acquisition/modules/kafka/kafka_test.go | 6 ++- .../modules/kinesis/kinesis_test.go | 15 ++++-- .../modules/kubernetesaudit/k8s_audit_test.go | 7 +-- 
pkg/acquisition/modules/loki/loki_test.go | 20 ++++---- pkg/acquisition/modules/s3/s3_test.go | 9 ++-- pkg/acquisition/modules/syslog/syslog_test.go | 3 +- .../modules/victorialogs/victorialogs_test.go | 8 ++-- .../wineventlog/wineventlog_windows_test.go | 8 ++-- pkg/apiclient/alerts_service_test.go | 17 ++++--- pkg/apiclient/auth_key_test.go | 8 ++-- pkg/apiclient/auth_service_test.go | 17 +++---- pkg/apiclient/client_http_test.go | 8 +++- pkg/apiclient/client_test.go | 32 +++++++------ pkg/apiclient/decisions_service_test.go | 30 +++++++----- pkg/apiserver/alerts_test.go | 20 ++++---- pkg/apiserver/api_key_test.go | 3 +- pkg/apiserver/apic_metrics_test.go | 3 +- pkg/apiserver/apic_test.go | 44 +++++++++-------- pkg/apiserver/apiserver_test.go | 17 +++---- pkg/apiserver/decisions_test.go | 18 +++---- pkg/apiserver/heartbeat_test.go | 5 +- pkg/apiserver/jwt_test.go | 3 +- pkg/apiserver/machines_test.go | 17 ++++--- pkg/apiserver/usage_metrics_test.go | 5 +- pkg/csplugin/broker_test.go | 15 +++--- pkg/csplugin/broker_win_test.go | 5 +- pkg/csplugin/watcher_test.go | 4 +- pkg/cwhub/cwhub_test.go | 3 +- pkg/cwhub/download_test.go | 7 +-- pkg/cwhub/hub_test.go | 13 ++--- pkg/exprhelpers/exprlib_test.go | 25 ++++++---- 37 files changed, 271 insertions(+), 222 deletions(-) diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index 1ea8f11c22a..6247c876040 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -301,6 +301,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { } assert.Len(t, dss, tc.ExpectedLen) + if tc.TestName == "from_env" { mock := dss[0].Dump().(*MockSource) assert.Equal(t, "test_value2", mock.Toto) @@ -411,7 +412,7 @@ func (f *MockTail) GetUuid() string { return "" } // func StartAcquisition(sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { func TestStartAcquisitionCat(t *testing.T) { - ctx := context.Background() + ctx := t.Context() sources := 
[]DataSource{ &MockCat{}, } @@ -439,7 +440,7 @@ READLOOP: } func TestStartAcquisitionTail(t *testing.T) { - ctx := context.Background() + ctx := t.Context() sources := []DataSource{ &MockTail{}, } @@ -487,7 +488,7 @@ func (f *MockTailError) StreamingAcquisition(ctx context.Context, out chan types } func TestStartAcquisitionTailError(t *testing.T) { - ctx := context.Background() + ctx := t.Context() sources := []DataSource{ &MockTailError{}, } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 3d638896537..2bc4f5d4a14 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -1,7 +1,6 @@ package cloudwatchacquisition import ( - "context" "errors" "fmt" "net" @@ -79,11 +78,14 @@ func TestMain(m *testing.M) { } func TestWatchLogGroupForStreams(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + log.SetLevel(log.DebugLevel) + tests := []struct { config []byte expectedCfgErr string @@ -434,6 +436,7 @@ stream_name: test_stream`), dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) dbgLogger.Infof("starting test") + cw := CloudwatchSource{} err := cw.Configure(tc.config, dbgLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedCfgErr) @@ -446,16 +449,20 @@ stream_name: test_stream`), if tc.setup != nil { tc.setup(t, &cw) } + out := make(chan types.Event) tmb := tomb.Tomb{} - var rcvdEvts []types.Event + rcvdEvts := []types.Event{} dbgLogger.Infof("running StreamingAcquisition") + actmb := tomb.Tomb{} actmb.Go(func() error { err := cw.StreamingAcquisition(ctx, out, &actmb) + dbgLogger.Infof("acquis done") cstest.RequireErrorContains(t, err, tc.expectedStartErr) + return nil }) @@ -492,24 +499,30 @@ stream_name: test_stream`), if tc.expectedResLen != len(rcvdEvts) { 
t.Fatalf("%s : expected %d results got %d -> %v", tc.name, tc.expectedResLen, len(rcvdEvts), rcvdEvts) } + dbgLogger.Debugf("got %d expected messages", len(rcvdEvts)) } + if len(tc.expectedResMessages) != 0 { res := tc.expectedResMessages for idx, v := range rcvdEvts { if len(res) == 0 { t.Fatalf("result %d/%d : received '%s', didn't expect anything (recvd:%d, expected:%d)", idx, len(rcvdEvts), v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) } + if res[0] != v.Line.Raw { t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvdEvts), res[0], v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) } + dbgLogger.Debugf("got message '%s'", res[0]) res = res[1:] } + if len(res) != 0 { t.Fatalf("leftover unmatched results : %v", res) } } + if tc.teardown != nil { tc.teardown(t, &cw) } @@ -518,11 +531,14 @@ stream_name: test_stream`), } func TestConfiguration(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + log.SetLevel(log.DebugLevel) + tests := []struct { config []byte expectedCfgErr string @@ -565,9 +581,11 @@ stream_name: test_stream`), t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) + cw := CloudwatchSource{} err := cw.Configure(tc.config, dbgLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + if tc.expectedCfgErr != "" { return } @@ -596,7 +614,9 @@ func TestConfigureByDSN(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + log.SetLevel(log.DebugLevel) + tests := []struct { dsn string labels map[string]string @@ -629,6 +649,7 @@ func TestConfigureByDSN(t *testing.T) { t.Run(tc.name, func(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) + cw := CloudwatchSource{} err := cw.ConfigureByDSN(tc.dsn, tc.labels, dbgLogger, "") 
cstest.RequireErrorContains(t, err, tc.expectedCfgErr) @@ -637,12 +658,14 @@ func TestConfigureByDSN(t *testing.T) { } func TestOneShotAcquisition(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + log.SetLevel(log.DebugLevel) + tests := []struct { dsn string expectedCfgErr string @@ -753,9 +776,11 @@ func TestOneShotAcquisition(t *testing.T) { dbgLogger := log.New().WithField("test", tc.name) dbgLogger.Logger.SetLevel(log.DebugLevel) dbgLogger.Infof("starting test") + cw := CloudwatchSource{} err := cw.ConfigureByDSN(tc.dsn, map[string]string{"type": "test"}, dbgLogger, "") cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + if tc.expectedCfgErr != "" { return } @@ -765,14 +790,17 @@ func TestOneShotAcquisition(t *testing.T) { if tc.setup != nil { tc.setup(t, &cw) } + out := make(chan types.Event, 100) tmb := tomb.Tomb{} - var rcvdEvts []types.Event + rcvdEvts := []types.Event{} dbgLogger.Infof("running StreamingAcquisition") + err = cw.OneShotAcquisition(ctx, out, &tmb) - dbgLogger.Infof("acquis done") cstest.RequireErrorContains(t, err, tc.expectedStartErr) + dbgLogger.Infof("acquis done") + close(out) // let's empty output chan for evt := range out { @@ -784,6 +812,7 @@ func TestOneShotAcquisition(t *testing.T) { } else { dbgLogger.Warning("no code to run") } + if tc.expectedResLen != -1 { if tc.expectedResLen != len(rcvdEvts) { t.Fatalf("%s : expected %d results got %d -> %v", tc.name, tc.expectedResLen, len(rcvdEvts), rcvdEvts) @@ -791,22 +820,27 @@ func TestOneShotAcquisition(t *testing.T) { dbgLogger.Debugf("got %d expected messages", len(rcvdEvts)) } } + if len(tc.expectedResMessages) != 0 { res := tc.expectedResMessages for idx, v := range rcvdEvts { if len(res) == 0 { t.Fatalf("result %d/%d : received '%s', didn't expect anything (recvd:%d, expected:%d)", idx, len(rcvdEvts), v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) } + if res[0] != v.Line.Raw { 
t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvdEvts), res[0], v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) } + dbgLogger.Debugf("got message '%s'", res[0]) res = res[1:] } + if len(res) != 0 { t.Fatalf("leftover unmatched results : %v", res) } } + if tc.teardown != nil { tc.teardown(t, &cw) } diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index 73e26b1e497..e44963b58a3 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/client" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -127,7 +128,7 @@ type mockDockerCli struct { } func TestStreamingAcquisition(t *testing.T) { - ctx := context.Background() + ctx := t.Context() log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) @@ -185,9 +186,7 @@ container_name_regexp: dockerSource := DockerSource{} err := dockerSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Unexpected error : %s", err) - } + require.NoError(t, err) dockerSource.Client = new(mockDockerCli) actualLines := 0 @@ -204,28 +203,27 @@ container_name_regexp: select { case <-out: actualLines++ + ticker.Reset(1 * time.Second) case <-ticker.C: log.Infof("no more lines to read") dockerSource.t.Kill(nil) + return nil } } }) cstest.AssertErrorContains(t, err, ts.expectedErr) - if err = readerTomb.Wait(); err != nil { - t.Fatal(err) - } + err = readerTomb.Wait() + require.NoError(t, err) if ts.expectedLines != 0 { assert.Equal(t, ts.expectedLines, actualLines) } err = streamTomb.Wait() - if err != nil { - t.Fatalf("docker acquisition error: %s", err) - } + require.NoError(t, err) } } @@ -276,9 +274,9 @@ func (cli *mockDockerCli) ContainerInspect(ctx 
context.Context, c string) (docke } func TestOneShot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() - log.Infof("Test 'TestOneShot'") + log.Info("Test 'TestOneShot'") tests := []struct { dsn string @@ -326,14 +324,13 @@ func TestOneShot(t *testing.T) { labels := make(map[string]string) labels["type"] = ts.logType - if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger, ""); err != nil { - t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err) - } + err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger, "") + require.NoError(t, err) dockerClient.Client = new(mockDockerCli) out := make(chan types.Event, 100) tomb := tomb.Tomb{} - err := dockerClient.OneShotAcquisition(ctx, out, &tomb) + err = dockerClient.OneShotAcquisition(ctx, out, &tomb) cstest.AssertErrorContains(t, err, ts.expectedErr) // else we do the check before actualLines is incremented ... diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 1f0ab0d98c2..9cfbdbc385a 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -1,7 +1,6 @@ package fileacquisition_test import ( - "context" "fmt" "os" "runtime" @@ -101,7 +100,7 @@ func TestConfigureDSN(t *testing.T) { } func TestOneShot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() permDeniedFile := "/etc/shadow" permDeniedError := "failed opening /etc/shadow: open /etc/shadow: permission denied" @@ -248,7 +247,7 @@ filename: test_files/test_delete.log`, } func TestLiveAcquisition(t *testing.T) { - ctx := context.Background() + ctx := t.Context() permDeniedFile := "/etc/shadow" permDeniedError := "unable to read /etc/shadow : open /etc/shadow: permission denied" testPattern := "test_files/*.log" diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go index b05979c5adf..5f068baa3e4 100644 --- a/pkg/acquisition/modules/http/http_test.go +++ 
b/pkg/acquisition/modules/http/http_test.go @@ -2,7 +2,6 @@ package httpacquisition import ( "compress/gzip" - "context" "crypto/tls" "crypto/x509" "errors" @@ -221,12 +220,13 @@ func TestGetName(t *testing.T) { } func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *prometheus.Registry, *tomb.Tomb) { - ctx := context.Background() + ctx := t.Context() subLogger := log.WithFields(log.Fields{ "type": "http", }) err := h.Configure(config, subLogger, metricLevel) require.NoError(t, err) + tomb := tomb.Tomb{} out := make(chan types.Event) err = h.StreamingAcquisition(ctx, out, &tomb) diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 48b034f41c6..843230e9286 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -1,7 +1,6 @@ package journalctlacquisition import ( - "context" "os" "os/exec" "path/filepath" @@ -108,7 +107,7 @@ func TestConfigureDSN(t *testing.T) { } func TestOneShot(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") @@ -191,7 +190,7 @@ journalctl_filter: } func TestStreaming(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 2f3361c4f6b..1d07159d0e7 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -128,7 +128,8 @@ func createTopic(topic string, broker string) { } func TestStreamingAcquisition(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -201,7 +202,8 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } func 
TestStreamingAcquisitionWithSSL(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 3f6d780b192..4eb3563fdc6 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -3,7 +3,6 @@ package kinesisacquisition import ( "bytes" "compress/gzip" - "context" "encoding/json" "fmt" "net" @@ -157,7 +156,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } func TestReadFromStream(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") @@ -205,7 +204,7 @@ stream_name: stream-1-shard`, } func TestReadFromMultipleShards(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") @@ -232,6 +231,7 @@ stream_name: stream-2-shards`, config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) require.NoError(t, err) + tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) @@ -246,6 +246,7 @@ stream_name: stream-2-shards`, <-out c += 1 } + assert.Equal(t, test.count, c) tomb.Kill(nil) err = tomb.Wait() @@ -254,7 +255,7 @@ stream_name: stream-2-shards`, } func TestFromSubscription(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") @@ -276,11 +277,13 @@ from_subscription: true`, }, } endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) require.NoError(t, err) + tomb := &tomb.Tomb{} 
out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) @@ -288,10 +291,12 @@ from_subscription: true`, // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) WriteToStream(t, f.Config.StreamName, test.count, test.shards, true) + for i := range test.count { e := <-out - assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) + assert.Equal(t, strconv.Itoa(i), e.Line.Raw) } + tomb.Kill(nil) err = tomb.Wait() require.NoError(t, err) diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index bf8a8cea02c..56ca9193a32 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -1,7 +1,6 @@ package kubernetesauditacquisition import ( - "context" "net/http/httptest" "strings" "testing" @@ -53,7 +52,7 @@ listen_addr: 0.0.0.0`, } func TestInvalidConfig(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string config string @@ -91,17 +90,19 @@ webhook_path: /k8s-audit`, time.Sleep(1 * time.Second) tb.Kill(nil) err = tb.Wait() + if test.expectedErr != "" { require.ErrorContains(t, err, test.expectedErr) return } + require.NoError(t, err) }) } } func TestHandler(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string config string diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 643aefad715..2a3789b4364 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -276,7 +276,6 @@ func TestConfigureDSN(t *testing.T) { } assert.Equal(t, test.noReadyCheck, lokiSource.Config.NoReadyCheck) - } } @@ -333,7 +332,7 @@ func feedLoki(ctx context.Context, logger *log.Entry, n int, title string) error } func TestOneShotAcquisition(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS 
== "windows" { t.Skip("Skipping test on windows") @@ -364,13 +363,12 @@ since: 1h logger := log.New() subLogger := logger.WithField("type", "loki") lokiSource := loki.LokiSource{} - err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) - if err != nil { + + if err := lokiSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE); err != nil { t.Fatalf("Unexpected error : %s", err) } - err = feedLoki(ctx, subLogger, 20, title) - if err != nil { + if err := feedLoki(ctx, subLogger, 20, title); err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -387,8 +385,7 @@ since: 1h lokiTomb := tomb.Tomb{} - err = lokiSource.OneShotAcquisition(ctx, out, &lokiTomb) - if err != nil { + if err := lokiSource.OneShotAcquisition(ctx, out, &lokiTomb); err != nil { t.Fatalf("Unexpected error : %s", err) } @@ -397,6 +394,8 @@ since: 1h } func TestStreamingAcquisition(t *testing.T) { + ctx := t.Context() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -441,8 +440,6 @@ query: > }, } - ctx := context.Background() - for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { logger := log.New() @@ -513,7 +510,8 @@ query: > } func TestStopStreaming(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } diff --git a/pkg/acquisition/modules/s3/s3_test.go b/pkg/acquisition/modules/s3/s3_test.go index 367048aa33a..6f4ea4f7e69 100644 --- a/pkg/acquisition/modules/s3/s3_test.go +++ b/pkg/acquisition/modules/s3/s3_test.go @@ -68,10 +68,12 @@ sqs_name: foobar for _, test := range tests { t.Run(test.name, func(t *testing.T) { f := S3Source{} + err := f.Configure([]byte(test.config), nil, configuration.METRICS_NONE) if err == nil { t.Fatalf("expected error, got none") } + if err.Error() != test.expectedErr { t.Fatalf("expected error %s, got %s", test.expectedErr, err.Error()) } @@ -113,6 +115,7 @@ polling_method: list t.Run(test.name, func(t 
*testing.T) { f := S3Source{} logger := log.NewEntry(log.New()) + err := f.Configure([]byte(test.config), logger, configuration.METRICS_NONE) if err != nil { t.Fatalf("unexpected error: %s", err.Error()) @@ -208,7 +211,7 @@ func (msqs mockSQSClientNotif) DeleteMessage(input *sqs.DeleteMessageInput) (*sq } func TestDSNAcquis(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string dsn string @@ -273,7 +276,7 @@ func TestDSNAcquis(t *testing.T) { } func TestListPolling(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string config string @@ -350,7 +353,7 @@ prefix: foo/ } func TestSQSPoll(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string config string diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 3008ba5507b..53c7d77ae13 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -1,7 +1,6 @@ package syslogacquisition import ( - "context" "fmt" "net" "runtime" @@ -82,7 +81,7 @@ func writeToSyslog(logs []string) { } func TestStreamingAcquisition(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string config string diff --git a/pkg/acquisition/modules/victorialogs/victorialogs_test.go b/pkg/acquisition/modules/victorialogs/victorialogs_test.go index 182b009c414..972523cd5c4 100644 --- a/pkg/acquisition/modules/victorialogs/victorialogs_test.go +++ b/pkg/acquisition/modules/victorialogs/victorialogs_test.go @@ -254,7 +254,7 @@ func feedVLogs(ctx context.Context, logger *log.Entry, n int, title string) erro } func TestOneShotAcquisition(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") @@ -319,6 +319,8 @@ since: 1h } func TestStreamingAcquisition(t *testing.T) { + ctx := t.Context() + if 
runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -359,8 +361,6 @@ query: > }, } - ctx := context.Background() - for _, ts := range tests { t.Run(ts.name, func(t *testing.T) { logger := log.New() @@ -431,7 +431,7 @@ query: > } func TestStopStreaming(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index b4998de76c4..1b2420bc941 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -3,7 +3,6 @@ package wineventlogacquisition import ( - "context" "testing" "time" @@ -133,11 +132,11 @@ event_level: bla`, } func TestLiveAcquisition(t *testing.T) { + ctx := t.Context() + err := exprhelpers.Init(nil) require.NoError(t, err) - ctx := context.Background() - tests := []struct { config string expectedLines []string @@ -236,7 +235,8 @@ event_ids: } func TestOneShotAcquisition(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + tests := []struct { name string dsn string diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 9df633fa8be..24b66937f20 100644 --- a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -1,7 +1,6 @@ package apiclient import ( - "context" "fmt" "net/http" "net/url" @@ -18,6 +17,7 @@ import ( ) func TestAlertsListAsMachine(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -183,7 +183,7 @@ func TestAlertsListAsMachine(t *testing.T) { // log.Debugf("resp : -> %s", spew.Sdump(resp)) // log.Debugf("expected : -> %s", spew.Sdump(expected)) // first one returns data - alerts, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) + alerts, resp, err := 
client.Alerts.List(ctx, AlertsListOpts{}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, expected, *alerts) @@ -191,13 +191,14 @@ func TestAlertsListAsMachine(t *testing.T) { // this one doesn't filter := AlertsListOpts{IPEquals: ptr.Of("1.2.3.4")} - alerts, resp, err = client.Alerts.List(context.Background(), filter) + alerts, resp, err = client.Alerts.List(ctx, filter) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Empty(t, *alerts) } func TestAlertsGetAsMachine(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -354,17 +355,18 @@ func TestAlertsGetAsMachine(t *testing.T) { StopAt: ptr.Of("2020-11-28 10:20:46.845621385 +0100 +0100"), } - alerts, resp, err := client.Alerts.GetByID(context.Background(), 1) + alerts, resp, err := client.Alerts.GetByID(ctx, 1) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *alerts) // fail - _, _, err = client.Alerts.GetByID(context.Background(), 2) + _, _, err = client.Alerts.GetByID(ctx, 2) cstest.RequireErrorMessage(t, err, "API error: object not found") } func TestAlertsCreateAsMachine(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -397,7 +399,7 @@ func TestAlertsCreateAsMachine(t *testing.T) { defer teardown() alert := models.AddAlertsRequest{} - alerts, resp, err := client.Alerts.Add(context.Background(), alert) + alerts, resp, err := client.Alerts.Add(ctx, alert) require.NoError(t, err) expected := &models.AddAlertsResponse{"3"} @@ -407,6 +409,7 @@ func TestAlertsCreateAsMachine(t *testing.T) { } func TestAlertsDeleteAsMachine(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -440,7 +443,7 @@ func TestAlertsDeleteAsMachine(t *testing.T) { defer teardown() alert := AlertsDeleteOpts{IPEquals: ptr.Of("1.2.3.4")} - alerts, resp, err 
:= client.Alerts.Delete(context.Background(), alert) + alerts, resp, err := client.Alerts.Delete(ctx, alert) require.NoError(t, err) expected := &models.DeleteAlertsResponse{NbDeleted: ""} diff --git a/pkg/apiclient/auth_key_test.go b/pkg/apiclient/auth_key_test.go index b7cce3e15c9..aa92e03bbae 100644 --- a/pkg/apiclient/auth_key_test.go +++ b/pkg/apiclient/auth_key_test.go @@ -1,7 +1,6 @@ package apiclient import ( - "context" "net/http" "net/url" "testing" @@ -15,6 +14,7 @@ import ( ) func TestApiAuth(t *testing.T) { + ctx := t.Context() log.SetLevel(log.TraceLevel) mux, urlx, teardown := setup() @@ -49,7 +49,7 @@ func TestApiAuth(t *testing.T) { require.NoError(t, err) alert := DecisionsListOpts{IPEquals: ptr.Of("1.2.3.4")} - _, resp, err := newcli.Decisions.List(context.Background(), alert) + _, resp, err := newcli.Decisions.List(ctx, alert) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) @@ -61,7 +61,7 @@ func TestApiAuth(t *testing.T) { newcli, err = NewDefaultClient(apiURL, "v1", "toto", auth.Client()) require.NoError(t, err) - _, resp, err = newcli.Decisions.List(context.Background(), alert) + _, resp, err = newcli.Decisions.List(ctx, alert) log.Infof("--> %s", err) @@ -75,7 +75,7 @@ func TestApiAuth(t *testing.T) { newcli, err = NewDefaultClient(apiURL, "v1", "toto", auth.Client()) require.NoError(t, err) - _, _, err = newcli.Decisions.List(context.Background(), alert) + _, _, err = newcli.Decisions.List(ctx, alert) require.Error(t, err) log.Infof("--> %s", err) diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index d22c9394014..11bc68fb7ff 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -2,7 +2,6 @@ package apiclient import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -72,6 +71,7 @@ func initBasicMuxMock(t *testing.T, mux *http.ServeMux, path string) { * 400, 409, 500 => Error */ func TestWatcherRegister(t *testing.T) { + ctx := 
t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -92,8 +92,6 @@ func TestWatcherRegister(t *testing.T) { VersionPrefix: "v1", } - ctx := context.Background() - client, err := RegisterClient(ctx, &clientconfig, &http.Client{}) require.NoError(t, err) @@ -111,6 +109,7 @@ func TestWatcherRegister(t *testing.T) { } func TestWatcherAuth(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -135,7 +134,7 @@ func TestWatcherAuth(t *testing.T) { client, err := NewClient(clientConfig) require.NoError(t, err) - _, _, err = client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + _, _, err = client.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &clientConfig.MachineID, Password: &clientConfig.Password, Scenarios: clientConfig.Scenarios, @@ -151,7 +150,7 @@ func TestWatcherAuth(t *testing.T) { client, err := NewClient(clientConfig) require.NoError(t, err) - _, resp, err := client.Auth.AuthenticateWatcher(context.Background(), models.WatcherAuthRequest{ + _, resp, err := client.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &clientConfig.MachineID, Password: &clientConfig.Password, }) @@ -171,6 +170,7 @@ func TestWatcherAuth(t *testing.T) { } func TestWatcherUnregister(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -216,13 +216,14 @@ func TestWatcherUnregister(t *testing.T) { client, err := NewClient(mycfg) require.NoError(t, err) - _, err = client.Auth.UnregisterWatcher(context.Background()) + _, err = client.Auth.UnregisterWatcher(ctx) require.NoError(t, err) log.Printf("->%T", client) } func TestWatcherEnroll(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -270,9 +271,9 @@ func TestWatcherEnroll(t *testing.T) { client, err := NewClient(mycfg) require.NoError(t, err) - _, err = client.Auth.EnrollWatcher(context.Background(), "goodkey", 
"", []string{}, false) + _, err = client.Auth.EnrollWatcher(ctx, "goodkey", "", []string{}, false) require.NoError(t, err) - _, err = client.Auth.EnrollWatcher(context.Background(), "badkey", "", []string{}, false) + _, err = client.Auth.EnrollWatcher(ctx, "badkey", "", []string{}, false) assert.Contains(t, err.Error(), "the attachment key provided is not valid", "got %s", err.Error()) } diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index 0d6cf3d993e..93722d61c3e 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -14,6 +14,8 @@ import ( ) func TestNewRequestInvalid(t *testing.T) { + ctx := t.Context() + mux, urlx, teardown := setup() defer teardown() @@ -41,11 +43,13 @@ func TestNewRequestInvalid(t *testing.T) { w.WriteHeader(http.StatusOK) }) - _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + _, _, err = client.Alerts.List(ctx, AlertsListOpts{}) cstest.RequireErrorContains(t, err, "building request: BaseURL must have a trailing slash, but ") } func TestNewRequestTimeout(t *testing.T) { + ctx := t.Context() + mux, urlx, teardown := setup() defer teardown() @@ -66,7 +70,7 @@ func TestNewRequestTimeout(t *testing.T) { time.Sleep(2 * time.Second) }) - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() _, _, err = client.Alerts.List(ctx, AlertsListOpts{}) diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index c172849c21e..4dc4c00a73a 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -1,7 +1,6 @@ package apiclient import ( - "context" "fmt" "net" "net/http" @@ -84,6 +83,7 @@ func testMethod(t *testing.T, r *http.Request, want string) { } func TestNewClientOk(t *testing.T) { + ctx := t.Context() mux, urlx, teardown := setup() defer teardown() @@ -110,12 +110,13 @@ func TestNewClientOk(t *testing.T) { w.WriteHeader(http.StatusOK) 
}) - _, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) + _, resp, err := client.Alerts.List(ctx, AlertsListOpts{}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) } func TestNewClientOk_UnixSocket(t *testing.T) { + ctx := t.Context() tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") @@ -148,7 +149,7 @@ func TestNewClientOk_UnixSocket(t *testing.T) { w.WriteHeader(http.StatusOK) }) - _, resp, err := client.Alerts.List(context.Background(), AlertsListOpts{}) + _, resp, err := client.Alerts.List(ctx, AlertsListOpts{}) if err != nil { t.Fatalf("test Unable to list alerts : %+v", err) } @@ -159,6 +160,8 @@ func TestNewClientOk_UnixSocket(t *testing.T) { } func TestNewClientKo(t *testing.T) { + ctx := t.Context() + mux, urlx, teardown := setup() defer teardown() @@ -185,13 +188,15 @@ func TestNewClientKo(t *testing.T) { w.WriteHeader(http.StatusOK) }) - _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + _, _, err = client.Alerts.List(ctx, AlertsListOpts{}) cstest.RequireErrorContains(t, err, `API error: bad login/password`) log.Printf("err-> %s", err) } func TestNewDefaultClient(t *testing.T) { + ctx := t.Context() + mux, urlx, teardown := setup() defer teardown() @@ -207,13 +212,15 @@ func TestNewDefaultClient(t *testing.T) { assert.NoError(t, err) }) - _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + _, _, err = client.Alerts.List(ctx, AlertsListOpts{}) cstest.RequireErrorMessage(t, err, "performing request: API error: brr") log.Printf("err-> %s", err) } func TestNewDefaultClient_UnixSocket(t *testing.T) { + ctx := t.Context() + tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") @@ -236,17 +243,17 @@ func TestNewDefaultClient_UnixSocket(t *testing.T) { assert.NoError(t, err) }) - _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) + _, _, err = client.Alerts.List(ctx, AlertsListOpts{}) assert.Contains(t, err.Error(), `performing 
request: API error: brr`) log.Printf("err-> %s", err) } func TestNewClientRegisterKO(t *testing.T) { + ctx := t.Context() + apiURL, err := url.Parse("http://127.0.0.1:4242/") require.NoError(t, err) - ctx := context.Background() - _, err = RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", @@ -262,6 +269,7 @@ func TestNewClientRegisterKO(t *testing.T) { } func TestNewClientRegisterOK(t *testing.T) { + ctx := t.Context() log.SetLevel(log.TraceLevel) mux, urlx, teardown := setup() @@ -278,8 +286,6 @@ func TestNewClientRegisterOK(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - ctx := context.Background() - client, err := RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", @@ -292,6 +298,7 @@ func TestNewClientRegisterOK(t *testing.T) { } func TestNewClientRegisterOK_UnixSocket(t *testing.T) { + ctx := t.Context() log.SetLevel(log.TraceLevel) tmpDir := t.TempDir() @@ -313,8 +320,6 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { t.Fatalf("parsing api url: %s", apiURL) } - ctx := context.Background() - client, err := RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", @@ -329,6 +334,7 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { } func TestNewClientBadAnswer(t *testing.T) { + ctx := t.Context() log.SetLevel(log.TraceLevel) mux, urlx, teardown := setup() @@ -345,8 +351,6 @@ func TestNewClientBadAnswer(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) - ctx := context.Background() - _, err = RegisterClient(ctx, &Config{ MachineID: "test_login", Password: "test_password", diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index b8bc327a7d7..8bab7e7b74f 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -1,7 +1,6 @@ package apiclient import ( - "context" "net/http" "net/url" "strings" @@ -19,6 +18,7 @@ import ( ) 
func TestDecisionsList(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -65,20 +65,21 @@ func TestDecisionsList(t *testing.T) { // OK decisions decisionsFilter := DecisionsListOpts{IPEquals: ptr.Of("1.2.3.4")} - decisions, resp, err := newcli.Decisions.List(context.Background(), decisionsFilter) + decisions, resp, err := newcli.Decisions.List(ctx, decisionsFilter) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) // Empty return decisionsFilter = DecisionsListOpts{IPEquals: ptr.Of("1.2.3.5")} - decisions, resp, err = newcli.Decisions.List(context.Background(), decisionsFilter) + decisions, resp, err = newcli.Decisions.List(ctx, decisionsFilter) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Empty(t, *decisions) } func TestDecisionsStream(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -135,25 +136,26 @@ func TestDecisionsStream(t *testing.T) { }, } - decisions, resp, err := newcli.Decisions.GetStream(context.Background(), DecisionsStreamOpts{Startup: true}) + decisions, resp, err := newcli.Decisions.GetStream(ctx, DecisionsStreamOpts{Startup: true}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) // and second call, we get empty lists - decisions, resp, err = newcli.Decisions.GetStream(context.Background(), DecisionsStreamOpts{Startup: false}) + decisions, resp, err = newcli.Decisions.GetStream(ctx, DecisionsStreamOpts{Startup: false}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Empty(t, decisions.New) assert.Empty(t, decisions.Deleted) // delete stream - resp, err = newcli.Decisions.StopStream(context.Background()) + resp, err = newcli.Decisions.StopStream(ctx) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) 
} func TestDecisionsStreamV3Compatibility(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setupWithPrefix("v3") @@ -214,13 +216,14 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { } // GetStream is supposed to consume v3 payload and return v2 response - decisions, resp, err := newcli.Decisions.GetStream(context.Background(), DecisionsStreamOpts{Startup: true}) + decisions, resp, err := newcli.Decisions.GetStream(ctx, DecisionsStreamOpts{Startup: true}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) } func TestDecisionsStreamV3(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setupWithPrefix("v3") @@ -286,13 +289,14 @@ func TestDecisionsStreamV3(t *testing.T) { } // GetStream is supposed to consume v3 payload and return v2 response - decisions, resp, err := newcli.Decisions.GetStreamV3(context.Background(), DecisionsStreamOpts{Startup: true}) + decisions, resp, err := newcli.Decisions.GetStreamV3(ctx, DecisionsStreamOpts{Startup: true}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Equal(t, *expected, *decisions) } func TestDecisionsFromBlocklist(t *testing.T) { + ctx := t.Context() log.SetLevel(log.DebugLevel) mux, urlx, teardown := setupWithPrefix("v3") @@ -349,7 +353,7 @@ func TestDecisionsFromBlocklist(t *testing.T) { Origin: &torigin, }, } - decisions, isModified, err := newcli.Decisions.GetDecisionsFromBlocklist(context.Background(), &modelscapi.BlocklistLink{ + decisions, isModified, err := newcli.Decisions.GetDecisionsFromBlocklist(ctx, &modelscapi.BlocklistLink{ URL: &turlBlocklist, Scope: &tscopeBlocklist, Remediation: &tremediationBlocklist, @@ -368,7 +372,7 @@ func TestDecisionsFromBlocklist(t *testing.T) { assert.Equal(t, expected, decisions) // test cache control - _, isModified, err = newcli.Decisions.GetDecisionsFromBlocklist(context.Background(), 
&modelscapi.BlocklistLink{ + _, isModified, err = newcli.Decisions.GetDecisionsFromBlocklist(ctx, &modelscapi.BlocklistLink{ URL: &turlBlocklist, Scope: &tscopeBlocklist, Remediation: &tremediationBlocklist, @@ -379,7 +383,7 @@ func TestDecisionsFromBlocklist(t *testing.T) { require.NoError(t, err) assert.False(t, isModified) - _, isModified, err = newcli.Decisions.GetDecisionsFromBlocklist(context.Background(), &modelscapi.BlocklistLink{ + _, isModified, err = newcli.Decisions.GetDecisionsFromBlocklist(ctx, &modelscapi.BlocklistLink{ URL: &turlBlocklist, Scope: &tscopeBlocklist, Remediation: &tremediationBlocklist, @@ -392,6 +396,8 @@ func TestDecisionsFromBlocklist(t *testing.T) { } func TestDeleteDecisions(t *testing.T) { + ctx := t.Context() + mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) @@ -424,7 +430,7 @@ func TestDeleteDecisions(t *testing.T) { filters := DecisionsDeleteOpts{IPEquals: new(string)} *filters.IPEquals = "1.2.3.4" - deleted, _, err := client.Decisions.Delete(context.Background(), filters) + deleted, _, err := client.Decisions.Delete(ctx, filters) require.NoError(t, err) assert.Equal(t, "1", deleted.NbDeleted) diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index 8cb012f2a81..0821cab7b0a 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -106,7 +106,7 @@ func AddAuthHeaders(request *http.Request, authResponse models.WatcherAuthRespon } func TestSimulatedAlert(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) lapi.InsertAlertFromFile(t, ctx, "./tests/alert_minibulk+simul.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_minibulk+simul.json") @@ -125,7 +125,7 @@ func TestSimulatedAlert(t *testing.T) { } func TestCreateAlert(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // Create Alert with 
invalid format @@ -201,7 +201,7 @@ func TestCreateAllowlistedAlert(t *testing.T) { } func TestCreateAlertChannels(t *testing.T) { - ctx := context.Background() + ctx := t.Context() apiServer, config := NewAPIServer(t, ctx) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) err := apiServer.InitController() @@ -230,7 +230,7 @@ func TestCreateAlertChannels(t *testing.T) { } func TestAlertListFilters(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) lapi.InsertAlertFromFile(t, ctx, "./tests/alert_ssh-bf.json") alertContent := GetAlertReaderFromFile(t, "./tests/alert_ssh-bf.json") @@ -399,7 +399,7 @@ func TestAlertListFilters(t *testing.T) { } func TestAlertBulkInsert(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // insert a bulk of 20 alerts to trigger bulk insert lapi.InsertAlertFromFile(t, ctx, "./tests/alert_bulk.json") @@ -410,7 +410,7 @@ func TestAlertBulkInsert(t *testing.T) { } func TestListAlert(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") // List Alert with invalid filter @@ -427,7 +427,7 @@ func TestListAlert(t *testing.T) { } func TestCreateAlertErrors(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) alertContent := GetAlertReaderFromFile(t, "./tests/alert_sample.json") @@ -449,7 +449,7 @@ func TestCreateAlertErrors(t *testing.T) { } func TestDeleteAlert(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") @@ -473,7 +473,7 @@ func TestDeleteAlert(t *testing.T) { } func TestDeleteAlertByID(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") @@ -497,7 +497,7 @@ func 
TestDeleteAlertByID(t *testing.T) { } func TestDeleteAlertTrustedIPS(t *testing.T) { - ctx := context.Background() + ctx := t.Context() cfg := LoadTestConfig(t) // IPv6 mocking doesn't seem to work. // cfg.API.Server.TrustedIPs = []string{"1.2.3.4", "1.2.4.0/24", "::"} diff --git a/pkg/apiserver/api_key_test.go b/pkg/apiserver/api_key_test.go index 89e37cd3852..1aae1c9b8ae 100644 --- a/pkg/apiserver/api_key_test.go +++ b/pkg/apiserver/api_key_test.go @@ -1,7 +1,6 @@ package apiserver import ( - "context" "net/http" "net/http/httptest" "strings" @@ -11,7 +10,7 @@ import ( ) func TestAPIKey(t *testing.T) { - ctx := context.Background() + ctx := t.Context() router, config := NewAPITest(t, ctx) APIKey, _ := CreateTestBouncer(t, ctx, config.API.Server.DbConfig) diff --git a/pkg/apiserver/apic_metrics_test.go b/pkg/apiserver/apic_metrics_test.go index d81af03f710..688a204f0fa 100644 --- a/pkg/apiserver/apic_metrics_test.go +++ b/pkg/apiserver/apic_metrics_test.go @@ -1,7 +1,6 @@ package apiserver import ( - "context" "net/url" "testing" "time" @@ -14,7 +13,7 @@ import ( ) func TestAPICSendMetrics(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index a72bc4dd3a9..053439f6d18 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -92,9 +92,10 @@ func assertTotalDecisionCount(t *testing.T, ctx context.Context, dbClient *datab } func assertTotalValidDecisionCount(t *testing.T, dbClient *database.Client, count int) { + ctx := t.Context() d := dbClient.Ent.Decision.Query().Where( decision.UntilGT(time.Now()), - ).AllX(context.Background()) + ).AllX(ctx) assert.Len(t, d, count) } @@ -108,12 +109,13 @@ func jsonMarshalX(v interface{}) []byte { } func assertTotalAlertCount(t *testing.T, dbClient *database.Client, count int) { - d := dbClient.Ent.Alert.Query().AllX(context.Background()) + ctx := t.Context() + d := 
dbClient.Ent.Alert.Query().AllX(ctx) assert.Len(t, d, count) } func TestAPICCAPIPullIsOld(t *testing.T) { - ctx := context.Background() + ctx := t.Context() api := getAPIC(t, ctx) isOld, err := api.CAPIPullIsOld(ctx) @@ -144,7 +146,7 @@ func TestAPICCAPIPullIsOld(t *testing.T) { } func TestAPICFetchScenariosListFromDB(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string @@ -193,7 +195,7 @@ func TestAPICFetchScenariosListFromDB(t *testing.T) { } func TestNewAPIC(t *testing.T) { - ctx := context.Background() + ctx := t.Context() var testConfig *csconfig.OnlineApiClientCfg @@ -265,7 +267,7 @@ func TestNewAPIC(t *testing.T) { } func TestAPICGetMetrics(t *testing.T) { - ctx := context.Background() + ctx := t.Context() cleanUp := func(api *apic) { api.dbClient.Ent.Bouncer.Delete().ExecX(ctx) @@ -523,7 +525,7 @@ func TestFillAlertsWithDecisions(t *testing.T) { } func TestAPICWhitelists(t *testing.T) { - ctx := context.Background() + ctx := t.Context() api := getAPIC(t, ctx) // one whitelist on IP, one on CIDR api.whitelists = &csconfig.CapiWhitelist{} @@ -546,7 +548,7 @@ func TestAPICWhitelists(t *testing.T) { SetScope("Ip"). SetScenario("crowdsecurity/ssh-bf"). SetUntil(time.Now().Add(time.Hour)). - ExecX(context.Background()) + ExecX(ctx) assertTotalDecisionCount(t, ctx, api.dbClient, 1) assertTotalValidDecisionCount(t, api.dbClient, 1) httpmock.Activate() @@ -696,10 +698,10 @@ func TestAPICWhitelists(t *testing.T) { assertTotalDecisionCount(t, ctx, api.dbClient, 5) // 2 from FIRE + 2 from bl + 1 existing assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. - alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) + alerts := api.dbClient.Ent.Alert.Query().AllX(ctx) validDecisions := api.dbClient.Ent.Decision.Query().Where( decision.UntilGT(time.Now())). 
- AllX(context.Background()) + AllX(ctx) decisionScenarioFreq := make(map[string]int) decisionIP := make(map[string]int) @@ -745,7 +747,7 @@ func TestAPICWhitelists(t *testing.T) { } func TestAPICPullTop(t *testing.T) { - ctx := context.Background() + ctx := t.Context() api := getAPIC(t, ctx) api.dbClient.Ent.Decision.Create(). SetOrigin(types.CAPIOrigin). @@ -842,10 +844,10 @@ func TestAPICPullTop(t *testing.T) { assertTotalDecisionCount(t, ctx, api.dbClient, 5) assertTotalValidDecisionCount(t, api.dbClient, 4) assertTotalAlertCount(t, api.dbClient, 3) // 2 for list sub , 1 for community list. - alerts := api.dbClient.Ent.Alert.Query().AllX(context.Background()) + alerts := api.dbClient.Ent.Alert.Query().AllX(ctx) validDecisions := api.dbClient.Ent.Decision.Query().Where( decision.UntilGT(time.Now())). - AllX(context.Background()) + AllX(ctx) decisionScenarioFreq := make(map[string]int) alertScenario := make(map[string]int) @@ -870,7 +872,7 @@ func TestAPICPullTop(t *testing.T) { } func TestAPICPullTopBLCacheFirstCall(t *testing.T) { - ctx := context.Background() + ctx := t.Context() // no decision in db, no last modified parameter. api := getAPIC(t, ctx) @@ -946,7 +948,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { } func TestAPICPullTopBLCacheForceCall(t *testing.T) { - ctx := context.Background() + ctx := t.Context() api := getAPIC(t, ctx) httpmock.Activate() @@ -958,7 +960,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { SetScenario("update list"). SetSourceScope("list:blocklist1"). SetSourceValue("list:blocklist1"). - SaveX(context.Background()) + SaveX(ctx) api.dbClient.Ent.Decision.Create(). SetOrigin(types.ListOrigin). @@ -968,7 +970,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { SetScenario("blocklist1"). SetUntil(time.Now().Add(time.Hour)). SetOwnerID(alertInstance.ID). 
- ExecX(context.Background()) + ExecX(ctx) httpmock.RegisterResponder("GET", "http://api.crowdsec.net/api/decisions/stream", httpmock.NewBytesResponder( 200, jsonMarshalX( @@ -1022,7 +1024,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { } func TestAPICPullBlocklistCall(t *testing.T) { - ctx := context.Background() + ctx := t.Context() api := getAPIC(t, ctx) httpmock.Activate() @@ -1056,7 +1058,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { } func TestAPICPush(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string alerts []*models.Alert @@ -1148,7 +1150,7 @@ func TestAPICPush(t *testing.T) { } func TestAPICPull(t *testing.T) { - ctx := context.Background() + ctx := t.Context() api := getAPIC(t, ctx) tests := []struct { name string @@ -1169,7 +1171,7 @@ func TestAPICPull(t *testing.T) { SetPassword(testPassword.String()). SetIpAddress("1.2.3.4"). SetScenarios("crowdsecurity/ssh-bf"). - ExecX(context.Background()) + ExecX(ctx) }, expectedDecisionCount: 1, }, diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 6b0052a8929..fb5c0d5e389 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -162,7 +162,8 @@ func NewAPITest(t *testing.T, ctx context.Context) (*gin.Engine, csconfig.Config return router, config } -func NewAPITestForwardedFor(t *testing.T, ctx context.Context) (*gin.Engine, csconfig.Config) { +func NewAPITestForwardedFor(t *testing.T) (*gin.Engine, csconfig.Config) { + ctx := t.Context() config := LoadTestConfigForwardedFor(t) os.Remove("./ent") @@ -191,7 +192,7 @@ func ValidateMachine(t *testing.T, ctx context.Context, machineID string, config } func GetMachineIP(t *testing.T, machineID string, config *csconfig.DatabaseCfg) string { - ctx := context.Background() + ctx := t.Context() dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) @@ -209,7 +210,7 @@ func GetMachineIP(t *testing.T, machineID string, config 
*csconfig.DatabaseCfg) } func GetBouncers(t *testing.T, config *csconfig.DatabaseCfg) []*ent.Bouncer { - ctx := context.Background() + ctx := t.Context() dbClient, err := database.NewClient(ctx, config) require.NoError(t, err) @@ -311,7 +312,7 @@ func CreateTestBouncer(t *testing.T, ctx context.Context, config *csconfig.Datab } func TestWithWrongDBConfig(t *testing.T) { - ctx := context.Background() + ctx := t.Context() config := LoadTestConfig(t) config.API.Server.DbConfig.Type = "test" apiServer, err := NewServer(ctx, config.API.Server) @@ -321,7 +322,7 @@ func TestWithWrongDBConfig(t *testing.T) { } func TestWithWrongFlushConfig(t *testing.T) { - ctx := context.Background() + ctx := t.Context() config := LoadTestConfig(t) maxItems := -1 config.API.Server.DbConfig.Flush.MaxItems = &maxItems @@ -332,7 +333,7 @@ func TestWithWrongFlushConfig(t *testing.T) { } func TestUnknownPath(t *testing.T) { - ctx := context.Background() + ctx := t.Context() router, _ := NewAPITest(t, ctx) w := httptest.NewRecorder() @@ -359,7 +360,7 @@ ListenURI string `yaml:"listen_uri,omitempty"` //127.0 */ func TestLoggingDebugToFileConfig(t *testing.T) { - ctx := context.Background() + ctx := t.Context() /*declare settings*/ maxAge := "1h" @@ -412,7 +413,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { } func TestLoggingErrorToFileConfig(t *testing.T) { - ctx := context.Background() + ctx := t.Context() /*declare settings*/ maxAge := "1h" diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index cb5d2e1c4f1..9c4b54f8e2e 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -1,7 +1,6 @@ package apiserver import ( - "context" "testing" "github.com/stretchr/testify/assert" @@ -13,7 +12,7 @@ const ( ) func TestDeleteDecisionRange(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // Create Valid Alert @@ -38,7 +37,7 @@ func TestDeleteDecisionRange(t *testing.T) { } func 
TestDeleteDecisionFilter(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // Create Valid Alert @@ -64,7 +63,7 @@ func TestDeleteDecisionFilter(t *testing.T) { } func TestDeleteDecisionFilterByScenario(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // Create Valid Alert @@ -84,7 +83,7 @@ func TestDeleteDecisionFilterByScenario(t *testing.T) { } func TestGetDecisionFilters(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // Create Valid Alert @@ -160,7 +159,7 @@ func TestGetDecisionFilters(t *testing.T) { } func TestGetDecision(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // Create Valid Alert @@ -192,7 +191,7 @@ func TestGetDecision(t *testing.T) { } func TestDeleteDecisionByID(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // Create Valid Alert @@ -239,7 +238,7 @@ func TestDeleteDecisionByID(t *testing.T) { } func TestDeleteDecision(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) // Create Valid Alert @@ -259,7 +258,8 @@ func TestDeleteDecision(t *testing.T) { } func TestStreamStartDecisionDedup(t *testing.T) { - ctx := context.Background() + ctx := t.Context() + // Ensure that at stream startup we only get the longest decision lapi := SetupLAPITest(t, ctx) diff --git a/pkg/apiserver/heartbeat_test.go b/pkg/apiserver/heartbeat_test.go index db051566f75..e72c6ade133 100644 --- a/pkg/apiserver/heartbeat_test.go +++ b/pkg/apiserver/heartbeat_test.go @@ -1,7 +1,6 @@ package apiserver import ( - "context" "net/http" "testing" @@ -9,12 +8,12 @@ import ( ) func TestHeartBeat(t *testing.T) { - ctx := context.Background() + ctx := t.Context() lapi := SetupLAPITest(t, ctx) w := lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/heartbeat", emptyBody, "password") assert.Equal(t, 200, 
w.Code) - w = lapi.RecordResponse(t, ctx, "POST", "/v1/heartbeat", emptyBody, "password") + w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/heartbeat", emptyBody, "password") assert.Equal(t, 405, w.Code) } diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index 72ae0302ae4..a42c705bea1 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -1,7 +1,6 @@ package apiserver import ( - "context" "net/http" "net/http/httptest" "strings" @@ -11,7 +10,7 @@ import ( ) func TestLogin(t *testing.T) { - ctx := context.Background() + ctx := t.Context() router, config := NewAPITest(t, ctx) body := CreateTestMachine(t, ctx, router, "") diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 57b96f54ddd..3eecc39962f 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -1,7 +1,6 @@ package apiserver import ( - "context" "encoding/json" "net/http" "net/http/httptest" @@ -15,7 +14,7 @@ import ( ) func TestCreateMachine(t *testing.T) { - ctx := context.Background() + ctx := t.Context() router, _ := NewAPITest(t, ctx) // Create machine with invalid format @@ -52,8 +51,8 @@ func TestCreateMachine(t *testing.T) { } func TestCreateMachineWithForwardedFor(t *testing.T) { - ctx := context.Background() - router, config := NewAPITestForwardedFor(t, ctx) + ctx := t.Context() + router, config := NewAPITestForwardedFor(t) router.TrustedPlatform = "X-Real-IP" // Create machine @@ -77,7 +76,7 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { } func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { - ctx := context.Background() + ctx := t.Context() router, config := NewAPITest(t, ctx) // Create machine @@ -103,8 +102,8 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { } func TestCreateMachineWithoutForwardedFor(t *testing.T) { - ctx := context.Background() - router, config := NewAPITestForwardedFor(t, ctx) + ctx := t.Context() + router, config := 
NewAPITestForwardedFor(t) // Create machine b, err := json.Marshal(MachineTest) @@ -128,7 +127,7 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { } func TestCreateMachineAlreadyExist(t *testing.T) { - ctx := context.Background() + ctx := t.Context() router, _ := NewAPITest(t, ctx) body := CreateTestMachine(t, ctx, router, "") @@ -148,7 +147,7 @@ func TestCreateMachineAlreadyExist(t *testing.T) { } func TestAutoRegistration(t *testing.T) { - ctx := context.Background() + ctx := t.Context() router, _ := NewAPITest(t, ctx) // Invalid registration token / valid source IP diff --git a/pkg/apiserver/usage_metrics_test.go b/pkg/apiserver/usage_metrics_test.go index 32aeb7d9a5a..82b5fc17172 100644 --- a/pkg/apiserver/usage_metrics_test.go +++ b/pkg/apiserver/usage_metrics_test.go @@ -1,7 +1,6 @@ package apiserver import ( - "context" "net/http" "strings" "testing" @@ -13,7 +12,7 @@ import ( ) func TestLPMetrics(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string @@ -216,7 +215,7 @@ func TestLPMetrics(t *testing.T) { } func TestRCMetrics(t *testing.T) { - ctx := context.Background() + ctx := t.Context() tests := []struct { name string diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index ae5a615b489..63ba65d2ddf 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -4,7 +4,6 @@ package csplugin import ( "bytes" - "context" "encoding/json" "io" "os" @@ -54,7 +53,7 @@ func (s *PluginSuite) writeconfig(config PluginConfig) { } func (s *PluginSuite) TestBrokerInit() { - ctx := context.Background() + ctx := s.T().Context() tests := []struct { name string action func(*testing.T) @@ -144,7 +143,7 @@ func (s *PluginSuite) TestBrokerInit() { } func (s *PluginSuite) TestBrokerNoThreshold() { - ctx := context.Background() + ctx := s.T().Context() var alerts []models.Alert @@ -191,7 +190,7 @@ func (s *PluginSuite) TestBrokerNoThreshold() { } func (s *PluginSuite) 
TestBrokerRunGroupAndTimeThreshold_TimeFirst() { - ctx := context.Background() + ctx := s.T().Context() // test grouping by "time" DefaultEmptyTicker = 50 * time.Millisecond @@ -230,7 +229,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { } func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { - ctx := context.Background() + ctx := s.T().Context() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() @@ -271,7 +270,7 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { } func (s *PluginSuite) TestBrokerRunGroupThreshold() { - ctx := context.Background() + ctx := s.T().Context() // test grouping by "size" DefaultEmptyTicker = 50 * time.Millisecond @@ -326,7 +325,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { } func (s *PluginSuite) TestBrokerRunTimeThreshold() { - ctx := context.Background() + ctx := s.T().Context() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() @@ -362,7 +361,7 @@ func (s *PluginSuite) TestBrokerRunTimeThreshold() { } func (s *PluginSuite) TestBrokerRunSimple() { - ctx := context.Background() + ctx := s.T().Context() DefaultEmptyTicker = 50 * time.Millisecond t := s.T() diff --git a/pkg/csplugin/broker_win_test.go b/pkg/csplugin/broker_win_test.go index 570f23e5015..e28056ae626 100644 --- a/pkg/csplugin/broker_win_test.go +++ b/pkg/csplugin/broker_win_test.go @@ -4,7 +4,6 @@ package csplugin import ( "bytes" - "context" "encoding/json" "io" "os" @@ -27,7 +26,7 @@ not if it will actually reject plugins with invalid permissions */ func (s *PluginSuite) TestBrokerInit() { - ctx := context.Background() + ctx := s.T().Context() tests := []struct { name string action func(*testing.T) @@ -68,8 +67,8 @@ func (s *PluginSuite) TestBrokerInit() { } func (s *PluginSuite) TestBrokerRun() { - ctx := context.Background() t := s.T() + ctx := t.Context() pb, err := s.InitBroker(ctx, nil) require.NoError(t, err) diff --git a/pkg/csplugin/watcher_test.go 
b/pkg/csplugin/watcher_test.go index 9868b8433c3..2414612ab41 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -49,7 +49,7 @@ func listenChannelWithTimeout(ctx context.Context, channel chan string) error { } func TestPluginWatcherInterval(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows because timing is not reliable") @@ -85,7 +85,7 @@ func TestPluginWatcherInterval(t *testing.T) { } func TestPluginAlertCountWatcher(t *testing.T) { - ctx := context.Background() + ctx := t.Context() if runtime.GOOS == "windows" { t.Skip("Skipping test on windows because timing is not reliable") diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index befd279ff65..f12ac8f3437 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -1,7 +1,6 @@ package cwhub import ( - "context" "fmt" "io" "net/http" @@ -31,6 +30,7 @@ var responseByPath map[string]string // testHubOld initializes a temporary hub with an empty json file, optionally updating it. 
func testHubOld(t *testing.T, update bool) *Hub { + ctx := t.Context() tmpDir := t.TempDir() local := &csconfig.LocalHubCfg{ @@ -61,7 +61,6 @@ func testHubOld(t *testing.T, update bool) *Hub { URLTemplate: mockURLTemplate, } - ctx := context.Background() err = hub.Update(ctx, indexProvider, false) require.NoError(t, err) } diff --git a/pkg/cwhub/download_test.go b/pkg/cwhub/download_test.go index 7b0b99c28d8..52a69d29908 100644 --- a/pkg/cwhub/download_test.go +++ b/pkg/cwhub/download_test.go @@ -1,7 +1,6 @@ package cwhub import ( - "context" "io" "net/http" "net/http/httptest" @@ -18,8 +17,7 @@ import ( ) func TestFetchIndex(t *testing.T) { - ctx := context.Background() - + ctx := t.Context() mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/main/.index.json" { w.WriteHeader(http.StatusNotFound) @@ -100,8 +98,7 @@ func TestFetchIndex(t *testing.T) { } func TestFetchContent(t *testing.T) { - ctx := context.Background() - + ctx := t.Context() wantContent := "{'description':'linux'}" wantHash := "e557cb9e1cb051bc3b6a695e4396c5f8e0eff4b7b0d2cc09f7684e1d52ea2224" remotePath := "collections/crowdsecurity/linux.yaml" diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 461b59de78b..20277621a53 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -1,7 +1,6 @@ package cwhub import ( - "context" "net/http" "net/http/httptest" "os" @@ -94,6 +93,7 @@ func TestIndexUnknownItemType(t *testing.T) { } func TestHubUpdate(t *testing.T) { + ctx := t.Context() // update an empty hub with a index containing a parser. 
hub, err := testHub(t, nil, "{}") require.NoError(t, err) @@ -125,8 +125,6 @@ func TestHubUpdate(t *testing.T) { })) defer mockServer.Close() - ctx := context.Background() - downloader := &Downloader{ Branch: "main", URLTemplate: mockServer.URL + "/%s/%s", @@ -144,11 +142,10 @@ func TestHubUpdate(t *testing.T) { } func TestHubUpdateInvalidTemplate(t *testing.T) { + ctx := t.Context() hub, err := testHub(t, nil, "{}") require.NoError(t, err) - ctx := context.Background() - downloader := &Downloader{ Branch: "main", URLTemplate: "x", @@ -159,6 +156,7 @@ func TestHubUpdateInvalidTemplate(t *testing.T) { } func TestHubUpdateCannotWrite(t *testing.T) { + ctx := t.Context() hub, err := testHub(t, nil, "{}") require.NoError(t, err) @@ -189,8 +187,6 @@ func TestHubUpdateCannotWrite(t *testing.T) { })) defer mockServer.Close() - ctx := context.Background() - downloader := &Downloader{ Branch: "main", URLTemplate: mockServer.URL + "/%s/%s", @@ -203,6 +199,7 @@ func TestHubUpdateCannotWrite(t *testing.T) { } func TestHubUpdateAfterLoad(t *testing.T) { + ctx := t.Context() // Update() can't be called after Load() if the hub is not completely empty. 
index1 := ` { @@ -250,8 +247,6 @@ func TestHubUpdateAfterLoad(t *testing.T) { })) defer mockServer.Close() - ctx := context.Background() - downloader := &Downloader{ Branch: "main", URLTemplate: mockServer.URL + "/%s/%s", diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index 932db4b7da4..e449115c120 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -1,7 +1,6 @@ package exprhelpers import ( - "context" "errors" "testing" "time" @@ -25,7 +24,7 @@ const TestFolder = "tests" func getDBClient(t *testing.T) *database.Client { t.Helper() - ctx := context.Background() + ctx := t.Context() testDBClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ Type: "sqlite", @@ -906,6 +905,8 @@ func TestLower(t *testing.T) { } func TestGetDecisionsCount(t *testing.T) { + ctx := t.Context() + existingIP := "1.2.3.4" unknownIP := "1.2.3.5" @@ -929,7 +930,7 @@ func TestGetDecisionsCount(t *testing.T) { SetScope("IP"). SetValue(existingIP). SetOrigin("CAPI"). - SaveX(context.Background()) + SaveX(ctx) if decision == nil { require.Error(t, errors.New("Failed to create sample decision")) @@ -994,6 +995,8 @@ func TestGetDecisionsCount(t *testing.T) { } func TestGetDecisionsSinceCount(t *testing.T) { + ctx := t.Context() + existingIP := "1.2.3.4" unknownIP := "1.2.3.5" @@ -1016,7 +1019,7 @@ func TestGetDecisionsSinceCount(t *testing.T) { SetScope("IP"). SetValue(existingIP). SetOrigin("CAPI"). - SaveX(context.Background()) + SaveX(ctx) if decision == nil { require.Error(t, errors.New("Failed to create sample decision")) } @@ -1034,7 +1037,7 @@ func TestGetDecisionsSinceCount(t *testing.T) { SetScope("IP"). SetValue(existingIP). SetOrigin("CAPI"). 
- SaveX(context.Background()) + SaveX(ctx) if decision2 == nil { require.Error(t, errors.New("Failed to create sample decision")) @@ -1117,6 +1120,8 @@ func TestGetDecisionsSinceCount(t *testing.T) { } func TestGetActiveDecisionsCount(t *testing.T) { + ctx := t.Context() + existingIP := "1.2.3.4" unknownIP := "1.2.3.5" @@ -1140,7 +1145,7 @@ func TestGetActiveDecisionsCount(t *testing.T) { SetScope("IP"). SetValue(existingIP). SetOrigin("CAPI"). - SaveX(context.Background()) + SaveX(ctx) if decision == nil { require.Error(t, errors.New("Failed to create sample decision")) @@ -1158,7 +1163,7 @@ func TestGetActiveDecisionsCount(t *testing.T) { SetScope("IP"). SetValue(existingIP). SetOrigin("CAPI"). - SaveX(context.Background()) + SaveX(ctx) if expiredDecision == nil { require.Error(t, errors.New("Failed to create sample decision")) @@ -1223,6 +1228,8 @@ func TestGetActiveDecisionsCount(t *testing.T) { } func TestGetActiveDecisionsTimeLeft(t *testing.T) { + ctx := t.Context() + existingIP := "1.2.3.4" unknownIP := "1.2.3.5" @@ -1246,7 +1253,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { SetScope("IP"). SetValue(existingIP). SetOrigin("CAPI"). - SaveX(context.Background()) + SaveX(ctx) if decision == nil { require.Error(t, errors.New("Failed to create sample decision")) @@ -1264,7 +1271,7 @@ func TestGetActiveDecisionsTimeLeft(t *testing.T) { SetScope("IP"). SetValue(existingIP). SetOrigin("CAPI"). 
- SaveX(context.Background()) + SaveX(ctx) if longerDecision == nil { require.Error(t, errors.New("Failed to create sample decision")) From 2b70dbf3e5713c657c44405d2d67ad751a66d1a4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Feb 2025 00:17:01 +0100 Subject: [PATCH 440/581] cscli hub/items: always show action plan; fix --interactive in pipes (#3451) --- cmd/crowdsec-cli/clihub/hub.go | 12 ++++-- cmd/crowdsec-cli/cliitem/cmdinstall.go | 5 ++- cmd/crowdsec-cli/cliitem/cmdremove.go | 5 ++- cmd/crowdsec-cli/cliitem/cmdupgrade.go | 5 ++- cmd/crowdsec-cli/clisetup/setup.go | 19 +++++----- pkg/cwhub/cwhub_test.go | 4 +- pkg/cwhub/download.go | 1 + pkg/cwhub/hub.go | 17 ++------- pkg/cwhub/hub_test.go | 12 ++++-- pkg/hubops/plan.go | 39 +++++++++++--------- pkg/setup/install.go | 4 +- test/bats/20_hub.bats | 22 ++++++++++- test/bats/cscli-hubtype-install.bats | 50 +++++++++++++++++++++---- test/bats/cscli-hubtype-remove.bats | 20 +++++++++- test/bats/cscli-hubtype-upgrade.bats | 51 +++++++++++++++++++------- test/bats/hub-index.bats | 9 +++++ 16 files changed, 196 insertions(+), 79 deletions(-) diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 4dce6295c2d..98be6a79e68 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -118,10 +118,15 @@ func (cli *cliHub) update(ctx context.Context, withContent bool) error { indexProvider := require.HubDownloader(ctx, cli.cfg()) - if err := hub.Update(ctx, indexProvider, withContent); err != nil { + updated, err := hub.Update(ctx, indexProvider, withContent) + if err != nil { return fmt.Errorf("failed to update hub: %w", err) } + if !updated && (log.StandardLogger().Level >= log.InfoLevel) { + fmt.Println("Nothing to do, the hub index is up to date.") + } + if err := hub.Load(); err != nil { return fmt.Errorf("failed to load hub: %w", err) } @@ -187,9 +192,10 @@ func (cli *cliHub) upgrade(ctx context.Context, interactive bool, 
dryRun bool, f return err } - verbose := (cfg.Cscli.Output == "raw") + showPlan := (log.StandardLogger().Level >= log.InfoLevel) + verbosePlan := (cfg.Cscli.Output == "raw") - if err := plan.Execute(ctx, interactive, dryRun, verbose); err != nil { + if err := plan.Execute(ctx, interactive, dryRun, showPlan, verbosePlan); err != nil { return err } diff --git a/cmd/crowdsec-cli/cliitem/cmdinstall.go b/cmd/crowdsec-cli/cliitem/cmdinstall.go index ab09507995e..b2846716fda 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinstall.go +++ b/cmd/crowdsec-cli/cliitem/cmdinstall.go @@ -78,9 +78,10 @@ func (cli cliItem) install(ctx context.Context, args []string, interactive bool, } } - verbose := (cfg.Cscli.Output == "raw") + showPlan := (log.StandardLogger().Level >= log.InfoLevel) + verbosePlan := (cfg.Cscli.Output == "raw") - if err := plan.Execute(ctx, interactive, dryRun, verbose); err != nil { + if err := plan.Execute(ctx, interactive, dryRun, showPlan, verbosePlan); err != nil { if !ignoreError { return err } diff --git a/cmd/crowdsec-cli/cliitem/cmdremove.go b/cmd/crowdsec-cli/cliitem/cmdremove.go index 5472e4aebbb..42f72f25ca9 100644 --- a/cmd/crowdsec-cli/cliitem/cmdremove.go +++ b/cmd/crowdsec-cli/cliitem/cmdremove.go @@ -98,9 +98,10 @@ func (cli cliItem) remove(ctx context.Context, args []string, interactive bool, return err } - verbose := (cfg.Cscli.Output == "raw") + showPlan := (log.StandardLogger().Level >= log.InfoLevel) + verbosePlan := (cfg.Cscli.Output == "raw") - if err := plan.Execute(ctx, interactive, dryRun, verbose); err != nil { + if err := plan.Execute(ctx, interactive, dryRun, showPlan, verbosePlan); err != nil { return err } diff --git a/cmd/crowdsec-cli/cliitem/cmdupgrade.go b/cmd/crowdsec-cli/cliitem/cmdupgrade.go index f9c85a1dcd1..8dcbe3531d4 100644 --- a/cmd/crowdsec-cli/cliitem/cmdupgrade.go +++ b/cmd/crowdsec-cli/cliitem/cmdupgrade.go @@ -60,9 +60,10 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, interactive bool, return err } - 
verbose := (cfg.Cscli.Output == "raw") + showPlan := (log.StandardLogger().Level >= log.InfoLevel) + verbosePlan := (cfg.Cscli.Output == "raw") - if err := plan.Execute(ctx, interactive, dryRun, verbose); err != nil { + if err := plan.Execute(ctx, interactive, dryRun, showPlan, verbosePlan); err != nil { return err } diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go index 77c357e7251..7a0860182e8 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -95,8 +95,8 @@ func (cli *cliSetup) newDetectCmd() *cobra.Command { func (cli *cliSetup) newInstallHubCmd() *cobra.Command { var ( - yes bool - dryRun bool + interactive bool + dryRun bool ) cmd := &cobra.Command{ @@ -105,14 +105,14 @@ func (cli *cliSetup) newInstallHubCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), yes, dryRun, args[0]) + return cli.install(cmd.Context(), interactive, dryRun, args[0]) }, } flags := cmd.Flags() - flags.BoolVarP(&yes, "yes", "y", false, "confirm execution without prompt") + flags.BoolVarP(&interactive, "interactive", "i", false, "Ask for confirmation before proceeding") flags.BoolVar(&dryRun, "dry-run", false, "don't install anything; print out what would have been") - cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + cmd.MarkFlagsMutuallyExclusive("interactive", "dry-run") return cmd } @@ -281,7 +281,7 @@ func (cli *cliSetup) dataSources(fromFile string, toDir string) error { return nil } -func (cli *cliSetup) install(ctx context.Context, yes bool, dryRun bool, fromFile string) error { +func (cli *cliSetup) install(ctx context.Context, interactive bool, dryRun bool, fromFile string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading file %s: %w", fromFile, err) @@ -294,11 +294,12 @@ func (cli *cliSetup) install(ctx context.Context, yes bool, dryRun bool, 
fromFil return err } - verbose := (cfg.Cscli.Output == "raw") - contentProvider := require.HubDownloader(ctx, cfg) - return setup.InstallHubItems(ctx, hub, contentProvider, input, yes, dryRun, verbose) + showPlan := (log.StandardLogger().Level >= log.InfoLevel) + verbosePlan := (cfg.Cscli.Output == "raw") + + return setup.InstallHubItems(ctx, hub, contentProvider, input, interactive, dryRun, showPlan, verbosePlan) } func (cli *cliSetup) validate(fromFile string) error { diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index f12ac8f3437..e39c57f1e2e 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -10,6 +10,7 @@ import ( "testing" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -61,8 +62,9 @@ func testHubOld(t *testing.T, update bool) *Hub { URLTemplate: mockURLTemplate, } - err = hub.Update(ctx, indexProvider, false) + updated, err := hub.Update(ctx, indexProvider, false) require.NoError(t, err) + assert.True(t, updated) } err = hub.Load() diff --git a/pkg/cwhub/download.go b/pkg/cwhub/download.go index fa92e9960de..87ea3390441 100644 --- a/pkg/cwhub/download.go +++ b/pkg/cwhub/download.go @@ -62,6 +62,7 @@ func addURLParam(rawURL string, param string, value string) (string, error) { // FetchIndex downloads the index from the hub and writes it to the filesystem. // It uses a temporary file to avoid partial downloads, and won't overwrite the original // if it has not changed. +// Return true if the file has been updated, false if already up to date. 
func (d *Downloader) FetchIndex(ctx context.Context, destPath string, withContent bool, logger *logrus.Logger) (bool, error) { url, err := d.urlTo(".index.json") if err != nil { diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index 2d432df67c3..b75c173bc9b 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -153,24 +153,13 @@ var ErrUpdateAfterSync = errors.New("cannot update hub index after load/sync") // Update downloads the latest version of the index and writes it to disk if it changed. // It cannot be called after Load() unless the index was completely empty. -func (h *Hub) Update(ctx context.Context, indexProvider IndexProvider, withContent bool) error { +func (h *Hub) Update(ctx context.Context, indexProvider IndexProvider, withContent bool) (bool, error) { if len(h.items) > 0 { // if this happens, it's a bug. - return ErrUpdateAfterSync + return false, ErrUpdateAfterSync } - downloaded, err := indexProvider.FetchIndex(ctx, h.local.HubIndexFile, withContent, h.logger) - if err != nil { - return err - } - - if !downloaded { - // use logger and the message will be silenced in the cron job - // (no mail if nothing happened) - h.logger.Info("Nothing to do, the hub index is up to date.") - } - - return nil + return indexProvider.FetchIndex(ctx, h.local.HubIndexFile, withContent, h.logger) } // addItem adds an item to the hub. It silently replaces an existing item with the same type and name. 
diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 20277621a53..1f3358bafa2 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -130,8 +130,9 @@ func TestHubUpdate(t *testing.T) { URLTemplate: mockServer.URL + "/%s/%s", } - err = hub.Update(ctx, downloader, true) + updated, err := hub.Update(ctx, downloader, true) require.NoError(t, err) + assert.True(t, updated) err = hub.Load() require.NoError(t, err) @@ -151,8 +152,9 @@ func TestHubUpdateInvalidTemplate(t *testing.T) { URLTemplate: "x", } - err = hub.Update(ctx, downloader, true) + updated, err := hub.Update(ctx, downloader, true) cstest.RequireErrorMessage(t, err, "failed to build hub index request: invalid URL template 'x'") + assert.False(t, updated) } func TestHubUpdateCannotWrite(t *testing.T) { @@ -194,8 +196,9 @@ func TestHubUpdateCannotWrite(t *testing.T) { hub.local.HubIndexFile = "/proc/foo/bar/baz/.index.json" - err = hub.Update(ctx, downloader, true) + updated, err := hub.Update(ctx, downloader, true) cstest.RequireErrorContains(t, err, "failed to create temporary download file for /proc/foo/bar/baz/.index.json") + assert.False(t, updated) } func TestHubUpdateAfterLoad(t *testing.T) { @@ -252,6 +255,7 @@ func TestHubUpdateAfterLoad(t *testing.T) { URLTemplate: mockServer.URL + "/%s/%s", } - err = hub.Update(ctx, downloader, true) + updated, err := hub.Update(ctx, downloader, true) require.ErrorIs(t, err, ErrUpdateAfterSync) + assert.False(t, updated) } diff --git a/pkg/hubops/plan.go b/pkg/hubops/plan.go index 6fb11da7215..b1691e71183 100644 --- a/pkg/hubops/plan.go +++ b/pkg/hubops/plan.go @@ -11,7 +11,6 @@ import ( "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" - isatty "github.com/mattn/go-isatty" "github.com/crowdsecurity/go-cs-lib/slicetools" @@ -192,11 +191,6 @@ func (p *ActionPlan) compactDescription() string { } func (p *ActionPlan) Confirm(verbose bool) (bool, error) { - // user provided an --interactive flag, but we go with the defaults if it's not a 
tty - if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) { - return true, nil - } - fmt.Println("The following actions will be performed:\n" + p.Description(verbose)) var answer bool @@ -206,9 +200,15 @@ func (p *ActionPlan) Confirm(verbose bool) (bool, error) { Default: true, } + tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0) + if err != nil { + return prompt.Default, nil + } + defer tty.Close() + // in case of EOF, it's likely stdin has been closed in a script or package manager, // we can't do anything but go with the default - if err := survey.AskOne(prompt, &answer); err != nil { + if err := survey.AskOne(prompt, &answer, survey.WithStdio(tty, tty, tty)); err != nil { if errors.Is(err, io.EOF) { return prompt.Default, nil } @@ -221,22 +221,18 @@ func (p *ActionPlan) Confirm(verbose bool) (bool, error) { return answer, nil } -func (p *ActionPlan) Execute(ctx context.Context, interactive bool, dryRun bool, verbose bool) error { +func (p *ActionPlan) Execute(ctx context.Context, interactive bool, dryRun bool, alwaysShowPlan bool, verbosePlan bool) error { + // interactive: show action plan, ask for confirm + // dry-run: show action plan, no prompt, no action + // alwaysShowPlan: print plan even if interactive and dry-run are false + // verbosePlan: plan summary is displaying each step in order if len(p.commands) == 0 { - // XXX: show skipped commands, warnings? 
fmt.Println("Nothing to do.") return nil } - if dryRun { - fmt.Println("Action plan:\n" + p.Description(verbose)) - fmt.Println("Dry run, no action taken.") - - return nil - } - if interactive { - answer, err := p.Confirm(verbose) + answer, err := p.Confirm(verbosePlan) if err != nil { return err } @@ -245,6 +241,15 @@ func (p *ActionPlan) Execute(ctx context.Context, interactive bool, dryRun bool, fmt.Println("Operation canceled.") return nil } + } else { + if dryRun || alwaysShowPlan { + fmt.Println("Action plan:\n" + p.Description(verbosePlan)) + } + + if dryRun { + fmt.Println("Dry run, no action taken.") + return nil + } } for _, c := range p.commands { diff --git a/pkg/setup/install.go b/pkg/setup/install.go index 556ddab4c9a..4f9155ebfac 100644 --- a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -48,7 +48,7 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { } // InstallHubItems installs the objects recommended in a setup file. -func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, input []byte, yes, dryRun, verbose bool) error { +func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, input []byte, interactive, dryRun, showPlan, verbosePlan bool) error { setupEnvelope, err := decodeSetup(input, false) if err != nil { return err @@ -134,7 +134,7 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub. } } - return plan.Execute(ctx, yes, dryRun, verbose) + return plan.Execute(ctx, interactive, dryRun, showPlan, verbosePlan) } // marshalAcquisDocuments creates the monolithic file, or itemized files (if a directory is provided) with the acquisition documents. 
diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index 07b8be626df..f4d9bb2eb4d 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -109,17 +109,33 @@ teardown() { rune -0 cscli hub update assert_output "Downloading $INDEX_PATH" rune -0 cscli hub update + assert_output "Nothing to do, the hub index is up to date." + + # hub update must honor the --error flag to be silent in noop cron jobs + rune -0 cscli hub update --error refute_output - assert_stderr 'level=info msg="Nothing to do, the hub index is up to date."' + refute_stderr } @test "cscli hub upgrade (up to date)" { rune -0 cscli hub upgrade - refute_output + assert_output - <<-EOT + Action plan: + 🔄 check & update data files + EOT rune -0 cscli parsers install crowdsecurity/syslog-logs rune -0 cscli hub upgrade --force + assert_output - <<-EOT + Action plan: + 🔄 check & update data files + EOT + + # hub upgrade must honor the --error flag to be silent in noop cron jobs + rune -0 cscli hub upgrade --error refute_output + refute_stderr + skip "todo: data files are re-downloaded with --force" } @@ -129,6 +145,8 @@ teardown() { rune -0 cscli hub upgrade assert_output - <<-EOT collections:foo.yaml - not downloading local item + Action plan: + 🔄 check & update data files EOT } diff --git a/test/bats/cscli-hubtype-install.bats b/test/bats/cscli-hubtype-install.bats index 58c16dd968d..fe346c24e0e 100644 --- a/test/bats/cscli-hubtype-install.bats +++ b/test/bats/cscli-hubtype-install.bats @@ -34,6 +34,15 @@ teardown() { ./instance-crowdsec stop } +get_latest_version() { + local hubtype=$1 + shift + local item_name=$1 + shift + + cscli "$hubtype" inspect "$item_name" -o json | jq -r '.version' +} + #---------- @test "cscli install (no argument)" { @@ -55,10 +64,11 @@ teardown() { @test "install an item (dry run)" { rune -0 cscli parsers install crowdsecurity/whitelists --dry-run - assert_output - --regexp <<-EOT + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) + 
assert_output - <<-EOT Action plan: 📥 download - parsers: crowdsecurity/whitelists \([0-9]+.[0-9]+\) + parsers: crowdsecurity/whitelists ($latest_whitelists) ✅ enable parsers: crowdsecurity/whitelists @@ -71,9 +81,10 @@ teardown() { @test "install an item (dry-run, de-duplicate commands)" { rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/whitelists --dry-run --output raw - assert_output - --regexp <<-EOT + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) + assert_output - <<-EOT Action plan: - 📥 download parsers:crowdsecurity/whitelists \([0-9]+.[0-9]+\) + 📥 download parsers:crowdsecurity/whitelists ($latest_whitelists) ✅ enable parsers:crowdsecurity/whitelists Dry run, no action taken. @@ -83,7 +94,14 @@ teardown() { @test "install an item" { rune -0 cscli parsers install crowdsecurity/whitelists + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists ($latest_whitelists) + ✅ enable + parsers: crowdsecurity/whitelists + downloading parsers:crowdsecurity/whitelists enabling parsers:crowdsecurity/whitelists @@ -105,7 +123,12 @@ teardown() { @test "install an item (download only)" { assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" rune -0 cscli parsers install crowdsecurity/whitelists --download-only + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists ($latest_whitelists) + downloading parsers:crowdsecurity/whitelists $RELOAD_MESSAGE @@ -157,7 +180,12 @@ teardown() { refute_stderr rune -0 cscli parsers install crowdsecurity/whitelists --force + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists (? 
-> $latest_whitelists) + downloading parsers:crowdsecurity/whitelists $RELOAD_MESSAGE @@ -178,10 +206,11 @@ teardown() { @test "install multiple items (some already installed)" { rune -0 cscli parsers install crowdsecurity/pgsql-logs rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs --dry-run - assert_output - --regexp <<-EOT + latest_postfix=$(get_latest_version parsers crowdsecurity/postfix-logs) + assert_output - <<-EOT Action plan: 📥 download - parsers: crowdsecurity/postfix-logs \([0-9]+.[0-9]+\) + parsers: crowdsecurity/postfix-logs ($latest_postfix) ✅ enable parsers: crowdsecurity/postfix-logs @@ -209,11 +238,18 @@ teardown() { # error on one item, should still install the others rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/pgsql-logs --ignore refute_stderr + latest_pgsql=$(get_latest_version parsers crowdsecurity/pgsql-logs) assert_output - <<-EOT WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Action plan: + 📥 download + parsers: crowdsecurity/pgsql-logs ($latest_pgsql) + ✅ enable + parsers: crowdsecurity/pgsql-logs + downloading parsers:crowdsecurity/pgsql-logs enabling parsers:crowdsecurity/pgsql-logs - + $RELOAD_MESSAGE EOT rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics -o json diff --git a/test/bats/cscli-hubtype-remove.bats b/test/bats/cscli-hubtype-remove.bats index 32db8efe788..6e7f151ab90 100644 --- a/test/bats/cscli-hubtype-remove.bats +++ b/test/bats/cscli-hubtype-remove.bats @@ -121,8 +121,12 @@ teardown() { rune -0 cscli parsers remove crowdsecurity/whitelists assert_output - <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists + disabling parsers:crowdsecurity/whitelists - + $RELOAD_MESSAGE EOT refute_stderr @@ -139,6 +143,12 @@ teardown() { rune -0 cscli parsers remove crowdsecurity/whitelists --purge assert_output - <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists + 🗑 purge (delete source) + 
parsers: crowdsecurity/whitelists + disabling parsers:crowdsecurity/whitelists purging parsers:crowdsecurity/whitelists @@ -208,6 +218,10 @@ teardown() { rune -0 cscli parsers remove crowdsecurity/whitelists --force assert_output - <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists + disabling parsers:crowdsecurity/whitelists $RELOAD_MESSAGE @@ -229,6 +243,10 @@ teardown() { rune -0 cscli parsers remove crowdsecurity/sshd-logs --force assert_output - <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/sshd-logs + disabling parsers:crowdsecurity/sshd-logs $RELOAD_MESSAGE diff --git a/test/bats/cscli-hubtype-upgrade.bats b/test/bats/cscli-hubtype-upgrade.bats index 4244e611cf6..8faec90a870 100644 --- a/test/bats/cscli-hubtype-upgrade.bats +++ b/test/bats/cscli-hubtype-upgrade.bats @@ -57,6 +57,15 @@ install_v0() { printf "%s" "v0.0" > "$(jq -r '.local_path' <(cscli "$hubtype" inspect "$item_name" --no-metrics -o json))" } +get_latest_version() { + local hubtype=$1 + shift + local item_name=$1 + shift + + cscli "$hubtype" inspect "$item_name" -o json | jq -r '.version' +} + #---------- @test "cscli upgrade (no argument)" { @@ -78,9 +87,15 @@ install_v0() { @test "upgrade an item (non installed)" { rune -0 cscli parsers upgrade crowdsecurity/whitelists + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) + assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists ($latest_whitelists) + downloading parsers:crowdsecurity/whitelists - + $RELOAD_MESSAGE EOT refute_stderr @@ -115,15 +130,6 @@ install_v0() { refute_stderr } -get_latest_version() { - local hubtype=$1 - shift - local item_name=$1 - shift - - cscli "$hubtype" inspect "$item_name" -o json | jq -r '.version' -} - @test "upgrade an item" { hub_inject_v0 install_v0 parsers crowdsecurity/whitelists @@ -132,9 +138,14 @@ get_latest_version() { rune -0 jq -e '.local_version=="0.0"' <(output) rune -0 cscli parsers upgrade 
crowdsecurity/whitelists + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists (0.0 -> $latest_whitelists) + downloading parsers:crowdsecurity/whitelists - + $RELOAD_MESSAGE EOT refute_stderr @@ -168,8 +179,12 @@ get_latest_version() { rune -0 cscli parsers upgrade crowdsecurity/whitelists --force assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists (? -> 0.2) + downloading parsers:crowdsecurity/whitelists - + $RELOAD_MESSAGE EOT refute_stderr @@ -202,7 +217,13 @@ get_latest_version() { refute_stderr rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/sshd-logs + latest_sshd=$(get_latest_version parsers crowdsecurity/sshd-logs) + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/sshd-logs (0.0 -> $latest_sshd), crowdsecurity/whitelists (0.0 -> $latest_whitelists) + downloading parsers:crowdsecurity/whitelists downloading parsers:crowdsecurity/sshd-logs @@ -226,10 +247,14 @@ get_latest_version() { rune -0 cscli parsers upgrade --all assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/sshd-logs (0.0 -> 2.9), crowdsecurity/whitelists (0.0 -> 0.2), crowdsecurity/windows-auth (0.0 -> 0.2) + downloading parsers:crowdsecurity/sshd-logs downloading parsers:crowdsecurity/whitelists downloading parsers:crowdsecurity/windows-auth - + $RELOAD_MESSAGE EOT refute_stderr diff --git a/test/bats/hub-index.bats b/test/bats/hub-index.bats index a609974d67a..dca7792f7e2 100644 --- a/test/bats/hub-index.bats +++ b/test/bats/hub-index.bats @@ -215,6 +215,15 @@ teardown() { rune -0 cscli hub list -o raw rune -0 cscli collections upgrade author/coll1 assert_output - <<-EOT + Action plan: + 📥 download + collections: author/coll1 (0.0 -> 0.1) + parsers: author/pars2 (0.0) + ✅ enable + parsers: author/pars2 + ❌ 
disable + parsers: author/pars1 + downloading parsers:author/pars2 enabling parsers:author/pars2 disabling parsers:author/pars1 From 105801d1f9dddce56ab7bf61db8116d893dda66f Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Feb 2025 12:44:30 +0100 Subject: [PATCH 441/581] cscli: allow non-local symlinks to have a different name than hub items (#3475) --- pkg/cwhub/item.go | 17 +++++++---------- pkg/cwhub/sync.go | 23 ++++++++--------------- pkg/hubops/datarefresh.go | 11 +++-------- pkg/hubops/disable.go | 11 +++-------- test/bats/20_hub.bats | 30 ++++++++++++++++++++++++++++++ 5 files changed, 51 insertions(+), 41 deletions(-) diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 38385d9399d..f0b447c6c4e 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -113,10 +113,13 @@ type Item struct { Dependencies } -// InstallPath returns the location of the symlink to the item in the hub, or the path of the item itself if it's local -// (eg. /etc/crowdsec/collections/xyz.yaml). -// Raises an error if the path goes outside of the install dir. +// InstallPath returns the path to use for the install symlink. +// Returns an error if an item is already installed or if the path goes outside of the install dir. 
func (i *Item) InstallPath() (string, error) { + if i.State.Installed { + return "", fmt.Errorf("%s is already installed at %s", i.FQName(), i.State.LocalPath) + } + p := i.Type if i.Stage != "" { p = filepath.Join(p, i.Stage) @@ -205,13 +208,7 @@ func (i *Item) CurrentDependencies() Dependencies { return i.Dependencies } - contentPath, err := i.InstallPath() - if err != nil { - i.hub.logger.Warningf("can't access dependencies for %s, using index", i.FQName()) - return i.Dependencies - } - - currentContent, err := os.ReadFile(contentPath) + currentContent, err := os.ReadFile(i.State.LocalPath) if errors.Is(err, fs.ErrNotExist) { return i.Dependencies } diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index ed99f4806d5..5de548a521a 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/Masterminds/semver/v3" - "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/downloader" @@ -86,7 +85,7 @@ type itemSpec struct { local bool // is this a spec for a local item? 
} -func newHubItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpec, error) { +func newHubItemSpec(path string, subs []string) (*itemSpec, error) { // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml // .../hub/scenarios/crowdsecurity/ssh_bf.yaml // .../hub/profiles/crowdsecurity/linux.yaml @@ -126,9 +125,7 @@ func newHubItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpe return &spec, nil } -func newInstallItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpec, error) { - logger.Tracef("%s in install dir", path) - +func newInstallItemSpec(path string, subs []string) (*itemSpec, error) { // .../config/parser/stage/file.yaml // .../config/postoverflow/stage/file.yaml // .../config/scenarios/scenar.yaml @@ -161,19 +158,19 @@ func newInstallItemSpec(path string, subs []string, logger *logrus.Logger) (*ite return &spec, nil } -func newItemSpec(path, hubDir, installDir string, logger *logrus.Logger) (*itemSpec, error) { +func newItemSpec(path, hubDir, installDir string) (*itemSpec, error) { var ( spec *itemSpec err error ) if subs := relativePathComponents(path, hubDir); len(subs) > 0 { - spec, err = newHubItemSpec(path, subs, logger) + spec, err = newHubItemSpec(path, subs) if err != nil { return nil, err } } else if subs := relativePathComponents(path, installDir); len(subs) > 0 { - spec, err = newInstallItemSpec(path, subs, logger) + spec, err = newInstallItemSpec(path, subs) if err != nil { return nil, err } @@ -306,7 +303,7 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) (*itemSpec, error return nil, ErrSkipPath } - spec, err := newItemSpec(path, h.local.HubDir, h.local.InstallDir, h.logger) + spec, err := newItemSpec(path, h.local.HubDir, h.local.InstallDir) if err != nil { h.logger.Warningf("Ignoring file %s: %s", path, err) return nil, ErrSkipPath @@ -323,10 +320,6 @@ func updateNonLocalItem(h *Hub, path string, spec *itemSpec, symlinkTarget strin } for _, item := range 
h.GetItemMap(spec.ftype) { - if spec.fname != item.FileName { - continue - } - if item.Stage != spec.stage { continue } @@ -405,6 +398,8 @@ func (h *Hub) addItemFromSpec(spec *itemSpec) error { if err != nil { return err } + + item.State.LocalPath = spec.path } if item == nil { @@ -653,7 +648,6 @@ func (i *Item) setVersionState(path string, inhub bool) error { i.hub.logger.Tracef("got tainted match for %s: %s", i.Name, path) if !inhub { - i.State.LocalPath = path i.State.Installed = true } @@ -669,7 +663,6 @@ func (i *Item) setVersionState(path string, inhub bool) error { if !inhub { i.hub.logger.Tracef("found exact match for %s, version is %s, latest is %s", i.Name, i.State.LocalVersion, i.Version) - i.State.LocalPath = path i.State.Tainted = false // if we're walking the hub, present file doesn't means installed file i.State.Installed = true diff --git a/pkg/hubops/datarefresh.go b/pkg/hubops/datarefresh.go index 985db8c1a11..43a10686ae5 100644 --- a/pkg/hubops/datarefresh.go +++ b/pkg/hubops/datarefresh.go @@ -11,21 +11,16 @@ import ( // XXX: TODO: temporary for hubtests, but will have to go. // DownloadDataIfNeeded downloads the data set for the item. 
func DownloadDataIfNeeded(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, force bool) (bool, error) { - itemFilePath, err := item.InstallPath() + itemFile, err := os.Open(item.State.LocalPath) if err != nil { - return false, err - } - - itemFile, err := os.Open(itemFilePath) - if err != nil { - return false, fmt.Errorf("while opening %s: %w", itemFilePath, err) + return false, fmt.Errorf("while opening %s: %w", item.State.LocalPath, err) } defer itemFile.Close() needReload, err := downloadDataSet(ctx, hub.GetDataDir(), force, itemFile) if err != nil { - return needReload, fmt.Errorf("while downloading data for %s: %w", itemFilePath, err) + return needReload, fmt.Errorf("while downloading data for %s: %w", item.State.LocalPath, err) } return needReload, nil diff --git a/pkg/hubops/disable.go b/pkg/hubops/disable.go index b6368e85036..7340920c249 100644 --- a/pkg/hubops/disable.go +++ b/pkg/hubops/disable.go @@ -10,12 +10,7 @@ import ( // RemoveInstallLink removes the item's symlink between the installation directory and the local hub. 
func RemoveInstallLink(i *cwhub.Item) error { - syml, err := i.InstallPath() - if err != nil { - return err - } - - stat, err := os.Lstat(syml) + stat, err := os.Lstat(i.State.LocalPath) if err != nil { return err } @@ -25,7 +20,7 @@ func RemoveInstallLink(i *cwhub.Item) error { return fmt.Errorf("%s isn't managed by hub", i.Name) } - hubpath, err := os.Readlink(syml) + hubpath, err := os.Readlink(i.State.LocalPath) if err != nil { return fmt.Errorf("while reading symlink: %w", err) } @@ -39,7 +34,7 @@ func RemoveInstallLink(i *cwhub.Item) error { return fmt.Errorf("%s isn't managed by hub", i.Name) } - if err := os.Remove(syml); err != nil { + if err := os.Remove(i.State.LocalPath); err != nil { return fmt.Errorf("while removing symlink: %w", err) } diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index f4d9bb2eb4d..b7f34fc709f 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -104,6 +104,36 @@ teardown() { assert_stderr --partial "crowdsecurity/sshd is tainted by missing parsers:crowdsecurity/sshd-logs" } +@test "an install symlink can have a different name than the items it points to" { + rune -0 cscli scenarios install crowdsecurity/ssh-bf + rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json + rune -0 jq -r '.local_path' <(output) + rune -0 mv "$output" "$CONFIG_DIR/scenarios/newname.yaml" + rune -0 cscli hub list -o json + rune -0 jq -r '.scenarios.[].name' <(output) + assert_output 'crowdsecurity/ssh-bf' + + rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json + rune -0 jq -r '.installed' <(output) + assert_output true + + rune -0 cscli scenarios remove crowdsecurity/ssh-bf + assert_output - <<-EOT + Action plan: + ❌ disable + scenarios: crowdsecurity/ssh-bf + + disabling scenarios:crowdsecurity/ssh-bf + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json + rune -0 jq -r '.installed' <(output) + assert_output false +} + @test "cscli hub update" { rm -f 
"$INDEX_PATH" rune -0 cscli hub update From 8da6a4dc928991f795f7ec1f4e54857d595fd17e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Feb 2025 12:55:02 +0100 Subject: [PATCH 442/581] CI: use go 1.24 for windows (#3479) --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index bcf327bdf38..59aaa7db035 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.23.3' + version: '1.24.0' - pwsh: | choco install -y make From a3187d6f2c0fd56afd6fbb792c061fa59535b265 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 21 Feb 2025 13:23:39 +0100 Subject: [PATCH 443/581] refact: context propagation (apiclient, cticlient...) (#3477) --- pkg/acquisition/modules/appsec/appsec.go | 17 ++++----- pkg/acquisition/modules/http/http_test.go | 46 +++++++++++++++++------ pkg/apiclient/alerts_service.go | 10 ++--- pkg/apiclient/allowlists_service.go | 8 ++-- pkg/apiclient/auth_jwt.go | 6 ++- pkg/apiclient/auth_service.go | 8 ++-- pkg/apiclient/client_http.go | 4 +- pkg/apiclient/decisions_service.go | 21 +++++------ pkg/apiclient/decisions_sync_service.go | 2 +- pkg/apiclient/heartbeat.go | 6 ++- pkg/apiclient/metrics.go | 2 +- pkg/apiclient/signal.go | 2 +- pkg/apiclient/usagemetrics.go | 2 +- pkg/apiserver/middlewares/v1/ocsp.go | 9 +++-- pkg/apiserver/middlewares/v1/tls_auth.go | 14 ++++--- pkg/cticlient/client.go | 42 ++++++++++++++++++--- 16 files changed, 131 insertions(+), 68 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index e41ace98b19..5edf708a15e 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -194,19 +194,16 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLe // let's load 
the associated appsec_config: if w.config.AppsecConfigPath != "" { - err := appsecCfg.LoadByPath(w.config.AppsecConfigPath) - if err != nil { + if err = appsecCfg.LoadByPath(w.config.AppsecConfigPath); err != nil { return fmt.Errorf("unable to load appsec_config: %w", err) } } else if w.config.AppsecConfig != "" { - err := appsecCfg.Load(w.config.AppsecConfig) - if err != nil { + if err = appsecCfg.Load(w.config.AppsecConfig); err != nil { return fmt.Errorf("unable to load appsec_config: %w", err) } } else if len(w.config.AppsecConfigs) > 0 { for _, appsecConfig := range w.config.AppsecConfigs { - err := appsecCfg.Load(appsecConfig) - if err != nil { + if err = appsecCfg.Load(appsecConfig); err != nil { return fmt.Errorf("unable to load appsec_config: %w", err) } } @@ -233,6 +230,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLe if err != nil { return fmt.Errorf("unable to get authenticated LAPI client: %w", err) } + w.appsecAllowlistClient = allowlists.NewAppsecAllowlist(w.apiClient, w.logger) for nbRoutine := range w.config.Routines { @@ -371,12 +369,12 @@ func (w *AppsecSource) Dump() interface{} { return w } -func (w *AppsecSource) IsAuth(apiKey string) bool { +func (w *AppsecSource) IsAuth(ctx context.Context, apiKey string) bool { client := &http.Client{ Timeout: 200 * time.Millisecond, } - req, err := http.NewRequest(http.MethodHead, w.lapiURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, w.lapiURL, nil) if err != nil { log.Errorf("Error creating request: %s", err) return false @@ -397,6 +395,7 @@ func (w *AppsecSource) IsAuth(apiKey string) bool { // should this be in the runner ? 
func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() w.logger.Debugf("Received request from '%s' on %s", r.RemoteAddr, r.URL.Path) apiKey := r.Header.Get(appsec.APIKeyHeaderName) @@ -413,7 +412,7 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { expiration, exists := w.AuthCache.Get(apiKey) // if the apiKey is not in cache or has expired, just recheck the auth if !exists || time.Now().After(expiration) { - if !w.IsAuth(apiKey) { + if !w.IsAuth(ctx, apiKey) { rw.WriteHeader(http.StatusUnauthorized) w.logger.Errorf("Unauthorized request from '%s' (real IP = %s)", remoteIP, clientIP) diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go index 5f068baa3e4..6422498b543 100644 --- a/pkg/acquisition/modules/http/http_test.go +++ b/pkg/acquisition/modules/http/http_test.go @@ -288,6 +288,7 @@ basic_auth: } func TestStreamingAcquisitionBasicAuth(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -306,7 +307,7 @@ basic_auth: require.NoError(t, err) assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("test")) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("test")) require.NoError(t, err) req.SetBasicAuth("test", "WrongPassword") @@ -321,6 +322,7 @@ basic_auth: } func TestStreamingAcquisitionBadHeaders(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -334,7 +336,7 @@ headers: client := &http.Client{} - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("test")) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), 
strings.NewReader("test")) require.NoError(t, err) req.Header.Add("Key", "wrong") @@ -349,6 +351,7 @@ headers: } func TestStreamingAcquisitionMaxBodySize(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -362,7 +365,7 @@ max_body_size: 5`), 0) time.Sleep(1 * time.Second) client := &http.Client{} - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("testtest")) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("testtest")) require.NoError(t, err) req.Header.Add("Key", "test") @@ -378,6 +381,7 @@ max_body_size: 5`), 0) } func TestStreamingAcquisitionSuccess(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -388,13 +392,14 @@ headers: key: test`), 2) time.Sleep(1 * time.Second) + rawEvt := `{"test": "test"}` errChan := make(chan error) go assertEvents(out, []string{rawEvt}, errChan) client := &http.Client{} - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(rawEvt)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(rawEvt)) require.NoError(t, err) req.Header.Add("Key", "test") @@ -414,6 +419,7 @@ headers: } func TestStreamingAcquisitionCustomStatusCodeAndCustomHeaders(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -430,9 +436,10 @@ custom_headers: rawEvt := `{"test": "test"}` errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(rawEvt)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), 
strings.NewReader(rawEvt)) require.NoError(t, err) req.Header.Add("Key", "test") @@ -463,9 +470,11 @@ func (sr *slowReader) Read(p []byte) (int, error) { if sr.index >= len(sr.body) { return 0, io.EOF } + time.Sleep(sr.delay) // Simulate a delay in reading n := copy(p, sr.body[sr.index:]) sr.index += n + return n, nil } @@ -492,10 +501,12 @@ func assertEvents(out chan types.Event, expected []string, errChan chan error) { errChan <- fmt.Errorf(`expected %s, got '%+v'`, expected, evt.Line.Raw) return } + if evt.Line.Src != "127.0.0.1" { errChan <- fmt.Errorf("expected '127.0.0.1', got '%s'", evt.Line.Src) return } + if evt.Line.Module != "http" { errChan <- fmt.Errorf("expected 'http', got '%s'", evt.Line.Module) return @@ -505,6 +516,7 @@ func assertEvents(out chan types.Event, expected []string, errChan chan error) { } func TestStreamingAcquisitionTimeout(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -522,7 +534,7 @@ timeout: 1s`), 0) body: []byte(`{"test": "delayed_payload"}`), } - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), slow) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), slow) require.NoError(t, err) req.Header.Add("Key", "test") @@ -566,6 +578,7 @@ tls: } func TestStreamingAcquisitionTLSWithHeadersAuthSuccess(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -599,9 +612,10 @@ tls: rawEvt := `{"test": "test"}` errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddrTLS), strings.NewReader(rawEvt)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddrTLS), strings.NewReader(rawEvt)) require.NoError(t, err) req.Header.Add("Key", "test") @@ -622,6 
+636,7 @@ tls: } func TestStreamingAcquisitionMTLS(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -658,9 +673,10 @@ tls: rawEvt := `{"test": "test"}` errChan := make(chan error) + go assertEvents(out, []string{rawEvt}, errChan) - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddrTLS), strings.NewReader(rawEvt)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddrTLS), strings.NewReader(rawEvt)) require.NoError(t, err) resp, err := client.Do(req) @@ -680,6 +696,7 @@ tls: } func TestStreamingAcquisitionGzipData(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -693,6 +710,7 @@ headers: rawEvt := `{"test": "test"}` errChan := make(chan error) + go assertEvents(out, []string{rawEvt, rawEvt}, errChan) var b strings.Builder @@ -709,7 +727,7 @@ headers: // send gzipped compressed data client := &http.Client{} - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(b.String())) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(b.String())) require.NoError(t, err) req.Header.Add("Key", "test") @@ -733,6 +751,7 @@ headers: } func TestStreamingAcquisitionNDJson(t *testing.T) { + ctx := t.Context() h := &HTTPSource{} out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -743,13 +762,14 @@ headers: key: test`), 2) time.Sleep(1 * time.Second) - rawEvt := `{"test": "test"}` + rawEvt := `{"test": "test"}` errChan := make(chan error) + go assertEvents(out, []string{rawEvt, rawEvt}, errChan) client := &http.Client{} - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(fmt.Sprintf("%s\n%s\n", rawEvt, rawEvt))) + req, err := 
http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(fmt.Sprintf("%s\n%s\n", rawEvt, rawEvt))) require.NoError(t, err) @@ -776,10 +796,13 @@ func assertMetrics(t *testing.T, reg *prometheus.Registry, metrics []prometheus. require.NoError(t, err) isExist := false + for _, metricFamily := range promMetrics { if metricFamily.GetName() == "cs_httpsource_hits_total" { isExist = true + assert.Len(t, metricFamily.GetMetric(), 1) + for _, metric := range metricFamily.GetMetric() { assert.InDelta(t, float64(expected), metric.GetCounter().GetValue(), 0.000001) labels := metric.GetLabel() @@ -791,6 +814,7 @@ func assertMetrics(t *testing.T, reg *prometheus.Registry, metrics []prometheus. } } } + if !isExist && expected > 0 { t.Fatalf("expected metric cs_httpsource_hits_total not found") } diff --git a/pkg/apiclient/alerts_service.go b/pkg/apiclient/alerts_service.go index a3da84d306e..5869ecc3753 100644 --- a/pkg/apiclient/alerts_service.go +++ b/pkg/apiclient/alerts_service.go @@ -49,7 +49,7 @@ type AlertsDeleteOpts struct { func (s *AlertsService) Add(ctx context.Context, alerts models.AddAlertsRequest) (*models.AddAlertsResponse, *Response, error) { u := fmt.Sprintf("%s/alerts", s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodPost, u, &alerts) + req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &alerts) if err != nil { return nil, nil, err } @@ -78,7 +78,7 @@ func (s *AlertsService) List(ctx context.Context, opts AlertsListOpts) (*models. 
URI = fmt.Sprintf("%s?%s", URI, params.Encode()) } - req, err := s.client.NewRequest(http.MethodGet, URI, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, URI, nil) if err != nil { return nil, nil, fmt.Errorf("building request: %w", err) } @@ -102,7 +102,7 @@ func (s *AlertsService) Delete(ctx context.Context, opts AlertsDeleteOpts) (*mod u := fmt.Sprintf("%s/alerts?%s", s.client.URLPrefix, params.Encode()) - req, err := s.client.NewRequest(http.MethodDelete, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) if err != nil { return nil, nil, err } @@ -120,7 +120,7 @@ func (s *AlertsService) Delete(ctx context.Context, opts AlertsDeleteOpts) (*mod func (s *AlertsService) DeleteOne(ctx context.Context, alertID string) (*models.DeleteAlertsResponse, *Response, error) { u := fmt.Sprintf("%s/alerts/%s", s.client.URLPrefix, alertID) - req, err := s.client.NewRequest(http.MethodDelete, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) if err != nil { return nil, nil, err } @@ -138,7 +138,7 @@ func (s *AlertsService) DeleteOne(ctx context.Context, alertID string) (*models. func (s *AlertsService) GetByID(ctx context.Context, alertID int) (*models.Alert, *Response, error) { u := fmt.Sprintf("%s/alerts/%d", s.client.URLPrefix, alertID) - req, err := s.client.NewRequest(http.MethodGet, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } diff --git a/pkg/apiclient/allowlists_service.go b/pkg/apiclient/allowlists_service.go index 0dc69a587fd..c81970d218d 100644 --- a/pkg/apiclient/allowlists_service.go +++ b/pkg/apiclient/allowlists_service.go @@ -27,7 +27,7 @@ func (s *AllowlistsService) List(ctx context.Context, opts AllowlistListOpts) (* u += "?" 
+ params.Encode() - req, err := s.client.NewRequest(http.MethodGet, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } @@ -58,7 +58,7 @@ func (s *AllowlistsService) Get(ctx context.Context, name string, opts Allowlist log.Debugf("GET %s", u) - req, err := s.client.NewRequest(http.MethodGet, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } @@ -76,7 +76,7 @@ func (s *AllowlistsService) Get(ctx context.Context, name string, opts Allowlist func (s *AllowlistsService) CheckIfAllowlisted(ctx context.Context, value string) (bool, *Response, error) { u := s.client.URLPrefix + "/allowlists/check/" + value - req, err := s.client.NewRequest(http.MethodHead, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodHead, u, nil) if err != nil { return false, nil, err } @@ -94,7 +94,7 @@ func (s *AllowlistsService) CheckIfAllowlisted(ctx context.Context, value string func (s *AllowlistsService) CheckIfAllowlistedWithReason(ctx context.Context, value string) (*models.CheckAllowlistResponse, *Response, error) { u := s.client.URLPrefix + "/allowlists/check/" + value - req, err := s.client.NewRequest(http.MethodGet, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 7ab3450c39f..1ea64e2881b 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -67,7 +67,7 @@ func (t *JWTTransport) refreshJwtToken() error { return fmt.Errorf("could not encode jwt auth body: %w", err) } - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s/watchers/login", t.URL, t.VersionPrefix), buf) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s%s/watchers/login", t.URL, t.VersionPrefix), buf) if err != nil { return fmt.Errorf("could not create request: %w", 
err) } @@ -170,6 +170,7 @@ func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) // RoundTrip implements the RoundTripper interface. func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { var resp *http.Response + attemptsCount := make(map[int]int) for { @@ -213,6 +214,7 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { } log.Debugf("retrying request to %s", req.URL.String()) + attemptsCount[resp.StatusCode]++ log.Infof("attempt %d out of %d", attemptsCount[resp.StatusCode], config.MaxAttempts) @@ -222,6 +224,7 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { time.Sleep(time.Duration(backoff) * time.Second) } } + return resp, nil } @@ -242,5 +245,6 @@ func (t *JWTTransport) transport() http.RoundTripper { if t.Transport != nil { return t.Transport } + return http.DefaultTransport } diff --git a/pkg/apiclient/auth_service.go b/pkg/apiclient/auth_service.go index e7a423cfd95..47d3daaaaaa 100644 --- a/pkg/apiclient/auth_service.go +++ b/pkg/apiclient/auth_service.go @@ -21,7 +21,7 @@ type enrollRequest struct { func (s *AuthService) UnregisterWatcher(ctx context.Context) (*Response, error) { u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodDelete, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) if err != nil { return nil, err } @@ -37,7 +37,7 @@ func (s *AuthService) UnregisterWatcher(ctx context.Context) (*Response, error) func (s *AuthService) RegisterWatcher(ctx context.Context, registration models.WatcherRegistrationRequest) (*Response, error) { u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodPost, u, ®istration) + req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, ®istration) if err != nil { return nil, err } @@ -55,7 +55,7 @@ func (s *AuthService) AuthenticateWatcher(ctx context.Context, auth 
models.Watch u := fmt.Sprintf("%s/watchers/login", s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodPost, u, &auth) + req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &auth) if err != nil { return authResp, nil, err } @@ -71,7 +71,7 @@ func (s *AuthService) AuthenticateWatcher(ctx context.Context, auth models.Watch func (s *AuthService) EnrollWatcher(ctx context.Context, enrollKey string, name string, tags []string, overwrite bool) (*Response, error) { u := fmt.Sprintf("%s/watchers/enroll", s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodPost, u, &enrollRequest{EnrollKey: enrollKey, Name: name, Tags: tags, Overwrite: overwrite}) + req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &enrollRequest{EnrollKey: enrollKey, Name: name, Tags: tags, Overwrite: overwrite}) if err != nil { return nil, err } diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go index c64404dc7ee..a2b1ff1490f 100644 --- a/pkg/apiclient/client_http.go +++ b/pkg/apiclient/client_http.go @@ -15,7 +15,7 @@ import ( log "github.com/sirupsen/logrus" ) -func (c *ApiClient) NewRequest(method, url string, body interface{}) (*http.Request, error) { +func (c *ApiClient) NewRequestWithContext(ctx context.Context, method, url string, body interface{}) (*http.Request, error) { if !strings.HasSuffix(c.BaseURL.Path, "/") { return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) } @@ -36,7 +36,7 @@ func (c *ApiClient) NewRequest(method, url string, body interface{}) (*http.Requ } } - req, err := http.NewRequest(method, u.String(), buf) + req, err := http.NewRequestWithContext(ctx, method, u.String(), buf) if err != nil { return nil, err } diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go index fea2f39072d..0f3f4468c65 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -45,8 +45,8 @@ func (o *DecisionsStreamOpts) 
addQueryParamsToURL(url string) (string, error) { return "", err } - //Those 2 are a bit different - //They default to true, and we only want to include them if they are false + // Those 2 are a bit different + // They default to true, and we only want to include them if they are false if params.Get("community_pull") == "true" { params.Del("community_pull") @@ -81,7 +81,7 @@ func (s *DecisionsService) List(ctx context.Context, opts DecisionsListOpts) (*m u := fmt.Sprintf("%s/decisions?%s", s.client.URLPrefix, params.Encode()) - req, err := s.client.NewRequest(http.MethodGet, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } @@ -97,7 +97,7 @@ func (s *DecisionsService) List(ctx context.Context, opts DecisionsListOpts) (*m } func (s *DecisionsService) FetchV2Decisions(ctx context.Context, url string) (*models.DecisionsStreamResponse, *Response, error) { - req, err := s.client.NewRequest(http.MethodGet, url, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, nil, err } @@ -138,7 +138,7 @@ func (s *DecisionsService) FetchV3Decisions(ctx context.Context, url string) (*m scenarioDeleted := "deleted" durationDeleted := "1h" - req, err := s.client.NewRequest(http.MethodGet, url, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, nil, err } @@ -183,7 +183,7 @@ func (s *DecisionsService) GetDecisionsFromBlocklist(ctx context.Context, blockl client := http.Client{} - req, err := http.NewRequest(http.MethodGet, *blocklist.URL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, *blocklist.URL, nil) if err != nil { return nil, false, err } @@ -192,7 +192,6 @@ func (s *DecisionsService) GetDecisionsFromBlocklist(ctx context.Context, blockl req.Header.Set("If-Modified-Since", *lastPullTimestamp) } - req = req.WithContext(ctx) log.Debugf("[URL] %s %s", req.Method, req.URL) // we 
don't use client_http Do method because we need the reader and is not provided. @@ -272,7 +271,7 @@ func (s *DecisionsService) GetStreamV3(ctx context.Context, opts DecisionsStream return nil, nil, err } - req, err := s.client.NewRequest(http.MethodGet, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } @@ -290,7 +289,7 @@ func (s *DecisionsService) GetStreamV3(ctx context.Context, opts DecisionsStream func (s *DecisionsService) StopStream(ctx context.Context) (*Response, error) { u := fmt.Sprintf("%s/decisions", s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodDelete, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) if err != nil { return nil, err } @@ -311,7 +310,7 @@ func (s *DecisionsService) Delete(ctx context.Context, opts DecisionsDeleteOpts) u := fmt.Sprintf("%s/decisions?%s", s.client.URLPrefix, params.Encode()) - req, err := s.client.NewRequest(http.MethodDelete, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) if err != nil { return nil, nil, err } @@ -329,7 +328,7 @@ func (s *DecisionsService) Delete(ctx context.Context, opts DecisionsDeleteOpts) func (s *DecisionsService) DeleteOne(ctx context.Context, decisionID string) (*models.DeleteDecisionResponse, *Response, error) { u := fmt.Sprintf("%s/decisions/%s", s.client.URLPrefix, decisionID) - req, err := s.client.NewRequest(http.MethodDelete, u, nil) + req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) if err != nil { return nil, nil, err } diff --git a/pkg/apiclient/decisions_sync_service.go b/pkg/apiclient/decisions_sync_service.go index 25e33a8e29d..1efe2d7c756 100644 --- a/pkg/apiclient/decisions_sync_service.go +++ b/pkg/apiclient/decisions_sync_service.go @@ -16,7 +16,7 @@ type DecisionDeleteService service func (d *DecisionDeleteService) Add(ctx context.Context, deletedDecisions *models.DecisionsDeleteRequest) 
(interface{}, *Response, error) { u := fmt.Sprintf("%s/decisions/delete", d.client.URLPrefix) - req, err := d.client.NewRequest(http.MethodPost, u, &deletedDecisions) + req, err := d.client.NewRequestWithContext(ctx, http.MethodPost, u, &deletedDecisions) if err != nil { return nil, nil, fmt.Errorf("while building request: %w", err) } diff --git a/pkg/apiclient/heartbeat.go b/pkg/apiclient/heartbeat.go index c6b3d0832ba..7a5fdfd6cc4 100644 --- a/pkg/apiclient/heartbeat.go +++ b/pkg/apiclient/heartbeat.go @@ -17,7 +17,7 @@ type HeartBeatService service func (h *HeartBeatService) Ping(ctx context.Context) (bool, *Response, error) { u := fmt.Sprintf("%s/heartbeat", h.client.URLPrefix) - req, err := h.client.NewRequest(http.MethodGet, u, nil) + req, err := h.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) if err != nil { return false, nil, err } @@ -33,7 +33,9 @@ func (h *HeartBeatService) Ping(ctx context.Context) (bool, *Response, error) { func (h *HeartBeatService) StartHeartBeat(ctx context.Context, t *tomb.Tomb) { t.Go(func() error { defer trace.CatchPanic("crowdsec/apiClient/heartbeat") + hbTimer := time.NewTicker(1 * time.Minute) + for { select { case <-hbTimer.C: @@ -46,6 +48,7 @@ func (h *HeartBeatService) StartHeartBeat(ctx context.Context, t *tomb.Tomb) { } resp.Response.Body.Close() + if resp.Response.StatusCode != http.StatusOK { log.Errorf("heartbeat unexpected return code: %d", resp.Response.StatusCode) continue @@ -58,6 +61,7 @@ func (h *HeartBeatService) StartHeartBeat(ctx context.Context, t *tomb.Tomb) { case <-t.Dying(): log.Debugf("heartbeat: stopping") hbTimer.Stop() + return nil } } diff --git a/pkg/apiclient/metrics.go b/pkg/apiclient/metrics.go index 7f8d095a2df..1ae073e47a3 100644 --- a/pkg/apiclient/metrics.go +++ b/pkg/apiclient/metrics.go @@ -13,7 +13,7 @@ type MetricsService service func (s *MetricsService) Add(ctx context.Context, metrics *models.Metrics) (interface{}, *Response, error) { u := fmt.Sprintf("%s/metrics/", 
s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodPost, u, &metrics) + req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &metrics) if err != nil { return nil, nil, err } diff --git a/pkg/apiclient/signal.go b/pkg/apiclient/signal.go index 613ce70bbfb..128af4e2566 100644 --- a/pkg/apiclient/signal.go +++ b/pkg/apiclient/signal.go @@ -15,7 +15,7 @@ type SignalService service func (s *SignalService) Add(ctx context.Context, signals *models.AddSignalsRequest) (interface{}, *Response, error) { u := fmt.Sprintf("%s/signals", s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodPost, u, &signals) + req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &signals) if err != nil { return nil, nil, fmt.Errorf("while building request: %w", err) } diff --git a/pkg/apiclient/usagemetrics.go b/pkg/apiclient/usagemetrics.go index 1d822bb5c1e..482987a7f7f 100644 --- a/pkg/apiclient/usagemetrics.go +++ b/pkg/apiclient/usagemetrics.go @@ -13,7 +13,7 @@ type UsageMetricsService service func (s *UsageMetricsService) Add(ctx context.Context, metrics *models.AllMetrics) (interface{}, *Response, error) { u := fmt.Sprintf("%s/usage-metrics", s.client.URLPrefix) - req, err := s.client.NewRequest(http.MethodPost, u, &metrics) + req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &metrics) if err != nil { return nil, nil, err } diff --git a/pkg/apiserver/middlewares/v1/ocsp.go b/pkg/apiserver/middlewares/v1/ocsp.go index 0b6406ad0e7..32b406430d9 100644 --- a/pkg/apiserver/middlewares/v1/ocsp.go +++ b/pkg/apiserver/middlewares/v1/ocsp.go @@ -2,6 +2,7 @@ package v1 import ( "bytes" + "context" "crypto" "crypto/x509" "io" @@ -22,14 +23,14 @@ func NewOCSPChecker(logger *log.Entry) *OCSPChecker { } } -func (oc *OCSPChecker) query(server string, cert *x509.Certificate, issuer *x509.Certificate) (*ocsp.Response, error) { +func (oc *OCSPChecker) query(ctx context.Context, server string, cert *x509.Certificate, issuer 
*x509.Certificate) (*ocsp.Response, error) { req, err := ocsp.CreateRequest(cert, issuer, &ocsp.RequestOptions{Hash: crypto.SHA256}) if err != nil { oc.logger.Errorf("TLSAuth: error creating OCSP request: %s", err) return nil, err } - httpRequest, err := http.NewRequest(http.MethodPost, server, bytes.NewBuffer(req)) + httpRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, server, bytes.NewBuffer(req)) if err != nil { oc.logger.Error("TLSAuth: cannot create HTTP request for OCSP") return nil, err @@ -69,14 +70,14 @@ func (oc *OCSPChecker) query(server string, cert *x509.Certificate, issuer *x509 // isRevokedBy checks if the client certificate is revoked by the issuer via any of the OCSP servers present in the certificate. // It returns a boolean indicating if the certificate is revoked and a boolean indicating // if the OCSP check was successful and could be cached. -func (oc *OCSPChecker) isRevokedBy(cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { +func (oc *OCSPChecker) isRevokedBy(ctx context.Context, cert *x509.Certificate, issuer *x509.Certificate) (bool, bool) { if len(cert.OCSPServer) == 0 { oc.logger.Infof("TLSAuth: no OCSP Server present in client certificate, skipping OCSP verification") return false, true } for _, server := range cert.OCSPServer { - ocspResponse, err := oc.query(server, cert, issuer) + ocspResponse, err := oc.query(ctx, server, cert, issuer) if err != nil { oc.logger.Errorf("TLSAuth: error querying OCSP server %s: %s", server, err) continue diff --git a/pkg/apiserver/middlewares/v1/tls_auth.go b/pkg/apiserver/middlewares/v1/tls_auth.go index 673c8d0cdce..fadda5309fe 100644 --- a/pkg/apiserver/middlewares/v1/tls_auth.go +++ b/pkg/apiserver/middlewares/v1/tls_auth.go @@ -1,6 +1,7 @@ package v1 import ( + "context" "crypto/x509" "errors" "fmt" @@ -36,7 +37,7 @@ func (ta *TLSAuth) isExpired(cert *x509.Certificate) bool { } // checkRevocationPath checks a single chain against OCSP and CRL -func (ta *TLSAuth) 
checkRevocationPath(chain []*x509.Certificate) (error, bool) { //nolint:revive +func (ta *TLSAuth) checkRevocationPath(ctx context.Context, chain []*x509.Certificate) (error, bool) { //nolint:revive // if we ever fail to check OCSP or CRL, we should not cache the result couldCheck := true @@ -46,7 +47,7 @@ func (ta *TLSAuth) checkRevocationPath(chain []*x509.Certificate) (error, bool) cert := chain[i-1] issuer := chain[i] - revokedByOCSP, checkedByOCSP := ta.ocspChecker.isRevokedBy(cert, issuer) + revokedByOCSP, checkedByOCSP := ta.ocspChecker.isRevokedBy(ctx, cert, issuer) couldCheck = couldCheck && checkedByOCSP if revokedByOCSP && checkedByOCSP { @@ -130,12 +131,13 @@ func (ta *TLSAuth) ValidateCert(c *gin.Context) (string, error) { okToCache := true - var validErr error - - var couldCheck bool + var ( + validErr error + couldCheck bool + ) for _, chain := range c.Request.TLS.VerifiedChains { - validErr, couldCheck = ta.checkRevocationPath(chain) + validErr, couldCheck = ta.checkRevocationPath(c.Request.Context(), chain) okToCache = okToCache && couldCheck if validErr != nil { diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index 90112d80abf..b15766dd99c 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -1,6 +1,7 @@ package cticlient import ( + "context" "encoding/json" "errors" "fmt" @@ -33,7 +34,7 @@ type CrowdsecCTIClient struct { Logger *log.Entry } -func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map[string]string) ([]byte, error) { +func (c *CrowdsecCTIClient) doRequest(ctx context.Context, method string, endpoint string, params map[string]string) ([]byte, error) { url := CTIBaseUrl + endpoint if len(params) > 0 { url += "?" 
@@ -41,7 +42,8 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map url += fmt.Sprintf("%s=%s&", k, v) } } - req, err := http.NewRequest(method, url, nil) + + req, err := http.NewRequestWithContext(ctx, method, url, nil) if err != nil { return nil, err } @@ -53,78 +55,103 @@ func (c *CrowdsecCTIClient) doRequest(method string, endpoint string, params map if err != nil { return nil, err } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { if resp.StatusCode == http.StatusForbidden { return nil, ErrUnauthorized } + if resp.StatusCode == http.StatusTooManyRequests { return nil, ErrLimit } + if resp.StatusCode == http.StatusNotFound { return nil, ErrNotFound } + return nil, fmt.Errorf("unexpected http code : %s", resp.Status) } + respBody, err := io.ReadAll(resp.Body) if err != nil { return nil, err } + return respBody, nil } func (c *CrowdsecCTIClient) GetIPInfo(ip string) (*SmokeItem, error) { - body, err := c.doRequest(http.MethodGet, smokeEndpoint+"/"+ip, nil) + ctx := context.TODO() + + body, err := c.doRequest(ctx, http.MethodGet, smokeEndpoint+"/"+ip, nil) if err != nil { if errors.Is(err, ErrNotFound) { return &SmokeItem{}, nil } + return nil, err } + item := SmokeItem{} + err = json.Unmarshal(body, &item) if err != nil { return nil, err } + return &item, nil } func (c *CrowdsecCTIClient) SearchIPs(ips []string) (*SearchIPResponse, error) { + ctx := context.TODO() params := make(map[string]string) params["ips"] = strings.Join(ips, ",") - body, err := c.doRequest(http.MethodGet, smokeEndpoint, params) + + body, err := c.doRequest(ctx, http.MethodGet, smokeEndpoint, params) if err != nil { return nil, err } + searchIPResponse := SearchIPResponse{} + err = json.Unmarshal(body, &searchIPResponse) if err != nil { return nil, err } + return &searchIPResponse, nil } func (c *CrowdsecCTIClient) Fire(params FireParams) (*FireResponse, error) { + ctx := context.TODO() paramsMap := make(map[string]string) + if params.Page != 
nil { paramsMap["page"] = fmt.Sprintf("%d", *params.Page) } + if params.Since != nil { paramsMap["since"] = *params.Since } + if params.Limit != nil { paramsMap["limit"] = fmt.Sprintf("%d", *params.Limit) } - body, err := c.doRequest(http.MethodGet, fireEndpoint, paramsMap) + body, err := c.doRequest(ctx, http.MethodGet, fireEndpoint, paramsMap) if err != nil { return nil, err } + fireResponse := FireResponse{} + err = json.Unmarshal(body, &fireResponse) if err != nil { return nil, err } + return &fireResponse, nil } @@ -133,13 +160,16 @@ func NewCrowdsecCTIClient(options ...func(*CrowdsecCTIClient)) *CrowdsecCTIClien for _, option := range options { option(client) } + if client.httpClient == nil { client.httpClient = &http.Client{} } - // we cannot return with a ni logger, so we set a default one + + // we cannot return with a nil logger, so we set a default one if client.Logger == nil { client.Logger = log.NewEntry(log.New()) } + return client } From ce5b4b435bde65a8d4f20a19c672ae2d3281557f Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 24 Feb 2025 15:20:33 +0100 Subject: [PATCH 444/581] add JA4H expr helper (#3401) --- pkg/alertcontext/alertcontext_test.go | 36 +++ pkg/appsec/ja4h/ja4h.go | 185 ++++++++++++++++ pkg/appsec/ja4h/ja4h_test.go | 306 ++++++++++++++++++++++++++ pkg/appsec/request.go | 36 ++- pkg/exprhelpers/expr_lib.go | 8 + pkg/exprhelpers/waf.go | 13 ++ pkg/exprhelpers/waf_test.go | 78 +++++++ 7 files changed, 656 insertions(+), 6 deletions(-) create mode 100644 pkg/appsec/ja4h/ja4h.go create mode 100644 pkg/appsec/ja4h/ja4h_test.go create mode 100644 pkg/exprhelpers/waf.go create mode 100644 pkg/exprhelpers/waf_test.go diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index 9d9373bcd36..c0955afcbec 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -347,6 +347,42 @@ func TestAppsecEventToContext(t *testing.T) { }, expectedErrLen: 0, }, + { + name: "test JA4H - 
appsec event", + contextToSend: map[string][]string{ + "ja4h": {"JA4H(req)"}, + }, + match: types.AppsecEvent{ + MatchedRules: types.MatchedRules{ + { + "id": "test", + }, + }, + }, + req: &http.Request{ + Header: map[string][]string{ + "User-Agent": {"test"}, + "Foobar": {"test1", "test2"}, + }, + ProtoMajor: 1, + ProtoMinor: 1, + Method: http.MethodGet, + }, + expectedResult: []*models.MetaItems0{ + { + Key: "ja4h", + Value: "[\"ge11nn020000_3a31a0f8fbf9_000000000000_000000000000\"]", + }, + }, + }, + { + name: "test JA4H - no appsec event", + contextToSend: map[string][]string{ + "ja4h": {"JA4H(req)"}, + }, + req: nil, + expectedResult: []*models.MetaItems0{}, + }, } for _, test := range tests { diff --git a/pkg/appsec/ja4h/ja4h.go b/pkg/appsec/ja4h/ja4h.go new file mode 100644 index 00000000000..62d89d5548b --- /dev/null +++ b/pkg/appsec/ja4h/ja4h.go @@ -0,0 +1,185 @@ +package ja4h + +import ( + "crypto/sha256" + "fmt" + "net/http" + "slices" + "sort" + "strings" +) + +// see: https://github.com/FoxIO-LLC/ja4/blob/main/technical_details/JA4H.png +// [JA4H_a]_[JA4H_b]_[JA4H_c]_[JA4H_d] + +// JA4H_a: +// [httpMethod] [httpVersion] [hasCookie] [hasReferer] [countHeaders] [primaryLanguage] +// 2 2 1 1 2 4 12 + +// JA4H_b: [headers hash] + +// JA4H_c: [cookie name hash] + +const ( + truncatedHashLength = 12 + ja4hFullHashLength = 51 + ja4hSubHashLength = 12 + defaultLang = "0000" + emptyCookiesHash = "000000000000" +) + +// httpMethod extracts the first two lowercase characters of the HTTP method. +func httpMethod(method string) string { + l := min(len(method), 2) + return strings.ToLower(method[:l]) +} + +// httpVersion extracts the version number from the HTTP protocol. +// The version is truncated to one digit each, but I believe the http server will control this anyway. +func httpVersion(major int, minor int) string { + return fmt.Sprintf("%d%d", major%10, minor%10) +} + +// hasCookie checks if the request has any cookies. 
+func hasCookie(req *http.Request) string { + if len(req.Cookies()) > 0 { + return "c" + } + return "n" +} + +// hasReferer checks if the Referer header is set. +func hasReferer(referer string) string { + if referer != "" { + return "r" + } + return "n" +} + +// countHeaders counts the headers, excluding specific ones like Cookie and Referer. +func countHeaders(headers http.Header) string { + count := len(headers) + if headers.Get("Cookie") != "" { + count-- + } + if headers.Get("Referer") != "" { + count-- + } + //header len needs to be on 2 chars: 3 -> 03 // 100 -> 99 + return fmt.Sprintf("%02d", min(count, 99)) +} + +// primaryLanguage extracts the first four characters of the primary Accept-Language header. +func primaryLanguage(headers http.Header) string { + lang := strings.ToLower(headers.Get("Accept-Language")) + if lang == "" { + return defaultLang + } + //cf. https://github.com/FoxIO-LLC/ja4/blob/main/python/ja4h.py#L13 + lang = strings.ReplaceAll(lang, "-", "") + lang = strings.ReplaceAll(lang, ";", ",") + + value := strings.Split(lang, ",")[0] + value = value[:min(len(value), 4)] + return value + strings.Repeat("0", 4-len(value)) +} + +// jA4H_a generates a summary fingerprint for the HTTP request. +func jA4H_a(req *http.Request) string { + var builder strings.Builder + + builder.Grow(ja4hSubHashLength) + builder.WriteString(httpMethod(req.Method)) + builder.WriteString(httpVersion(req.ProtoMajor, req.ProtoMinor)) + builder.WriteString(hasCookie(req)) + builder.WriteString(hasReferer(req.Referer())) + builder.WriteString(countHeaders(req.Header)) + builder.WriteString(primaryLanguage(req.Header)) + return builder.String() +} + +// jA4H_b computes a truncated SHA256 hash of sorted header names. 
+func jA4H_b(req *http.Request) string { + + // The reference implementation (https://github.com/FoxIO-LLC/ja4/blob/main/python/ja4h.py#L27) + // discards referer and headers **starting with "cookie"** + // If there's no headers, it hashes the empty string, instead of returning 0s + // like what is done for cookies. Not sure if it's intended or an oversight in the spec. + headers := make([]string, 0, len(req.Header)) + for name := range req.Header { + if strings.HasPrefix(strings.ToLower(name), "cookie") || strings.ToLower(name) == "referer" { + continue + } + headers = append(headers, name) + } + sort.Strings(headers) + + return hashTruncated(strings.Join(headers, ",")) +} + +// hashTruncated computes a truncated SHA256 hash for the given input. +func hashTruncated(input string) string { + hash := sha256.Sum256([]byte(input)) + return fmt.Sprintf("%x", hash)[:truncatedHashLength] +} + +// jA4H_c computes a truncated SHA256 hash of sorted cookie names. +func jA4H_c(cookies []*http.Cookie) string { + if len(cookies) == 0 { + return emptyCookiesHash + } + var builder strings.Builder + for i, cookie := range cookies { + builder.WriteString(cookie.Name) + if i < len(cookies)-1 { + builder.WriteString(",") + } + } + return hashTruncated(builder.String()) +} + +// jA4H_d computes a truncated SHA256 hash of cookie name-value pairs. +func jA4H_d(cookies []*http.Cookie) string { + if len(cookies) == 0 { + return emptyCookiesHash + } + var builder strings.Builder + for i, cookie := range cookies { + builder.WriteString(cookie.Name) + builder.WriteString("=") + builder.WriteString(cookie.Value) + if i < len(cookies)-1 { + builder.WriteString(",") + } + } + return hashTruncated(builder.String()) +} + +// JA4H computes the complete HTTP client fingerprint based on the request. 
+func JA4H(req *http.Request) string { + JA4H_a := jA4H_a(req) + JA4H_b := jA4H_b(req) + + cookies := req.Cookies() + + slices.SortFunc(cookies, func(a, b *http.Cookie) int { + return strings.Compare(a.Name, b.Name) + }) + + JA4H_c := jA4H_c(cookies) + JA4H_d := jA4H_d(cookies) + + var builder strings.Builder + + //JA4H is a fixed size, allocated it all at once + builder.Grow(ja4hFullHashLength) + builder.WriteString(JA4H_a) + builder.WriteString("_") + builder.WriteString(JA4H_b) + builder.WriteString("_") + builder.WriteString(JA4H_c) + builder.WriteString("_") + builder.WriteString(JA4H_d) + + return builder.String() +} diff --git a/pkg/appsec/ja4h/ja4h_test.go b/pkg/appsec/ja4h/ja4h_test.go new file mode 100644 index 00000000000..76d265d8a42 --- /dev/null +++ b/pkg/appsec/ja4h/ja4h_test.go @@ -0,0 +1,306 @@ +package ja4h + +import ( + "net/http" + "slices" + "strings" + "testing" +) + +/* + +The various hashes used comes from the python reference implementation: https://github.com/FoxIO-LLC/ja4/tree/main/python + +They are generated by: + - running a packet capture locally: sudo tshark -i lo -f "port 80" -w /tmp/foo.pcapng + - make a curl request: curl -b foo=bar -b baz=qux localhost + - generate the hash with the reference implementation: python ja4.py /tmp/foo.pcapng -r + +For the JA4H_B hash, the value we use is *not* the one returned by the reference implementation, as we cannot know the order of the headers. +For those hashes, the value used was the one returned by our code (because we deviate from the spec, as long as we are consistent, it's fine). 
+*/ + +func TestJA4H_A(t *testing.T) { + tests := []struct { + name string + request func() *http.Request + expectedResult string + }{ + { + name: "basic GET request - HTTP1.1 - no accept-language header", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + return req + }, + expectedResult: "ge11nn000000", + }, + { + name: "basic GET request - HTTP1.1 - with accept-language header", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.Header.Set("Accept-Language", "en-US") + return req + }, + expectedResult: "ge11nn01enus", + }, + { + name: "basic POST request - HTTP1.1 - no accept-language header - cookies - referer", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodPost, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) + req.Header.Set("Referer", "http://example.com") + return req + }, + expectedResult: "po11cr000000", + }, + { + name: "bad accept-language header", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.Header.Set("Accept-Language", "aksjdhaslkdhalkjsd") + return req + }, + expectedResult: "ge11nn01aksj", + }, + { + name: "bad accept-language header 2", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.Header.Set("Accept-Language", ",") + return req + }, + expectedResult: "ge11nn010000", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := jA4H_a(tt.request()) + if result != tt.expectedResult { + t.Errorf("expected %s, got %s", tt.expectedResult, result) + } + }) + } +} + +func TestJA4H_B(t *testing.T) { + // This test is only for non-regression + // Because go does not keep headers order, we just want to make sure our code always process the headers in the same order + tests := []struct { + name string + request func() 
*http.Request + expectedResult string + }{ + { + name: "no headers", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + return req + }, + expectedResult: "e3b0c44298fc", + }, + { + name: "header with arbitrary content", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.Header.Set("X-Custom-Header", "some value") + return req + }, + expectedResult: "0a15aba5bbd6", + }, + { + name: "header with multiple headers", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.Header.Set("X-Custom-Header", "some value") + req.Header.Set("Authorization", "Bearer token") + return req + }, + expectedResult: "bbfc6cf16ecb", + }, + { + name: "curl-like request", + request: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://localhost", nil) + req.Header.Set("Host", "localhost") + req.Header.Set("User-Agent", "curl/8.12.1") + req.Header.Set("Accept", "*/*") + return req + }, + expectedResult: "4722709a6f34", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := jA4H_b(tt.request()) + if result != tt.expectedResult { + t.Errorf("expected %s, got %s", tt.expectedResult, result) + } + }) + } +} + +func TestJA4H_C(t *testing.T) { + tests := []struct { + name string + cookies func() []*http.Cookie + expectedResult string + }{ + { + name: "no cookies", + cookies: func() []*http.Cookie { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + return req.Cookies() + }, + expectedResult: "000000000000", + }, + { + name: "one cookie", + cookies: func() []*http.Cookie { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) + return req.Cookies() + }, + expectedResult: "2c26b46b68ff", + }, + { + name: "duplicate cookies", + cookies: func() []*http.Cookie { + req, _ := 
http.NewRequest(http.MethodGet, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar2"}) + return req.Cookies() + }, + expectedResult: "8990ce24137b", + }, + { + name: "multiple cookies", + cookies: func() []*http.Cookie { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) + req.AddCookie(&http.Cookie{Name: "bar", Value: "foo"}) + cookies := req.Cookies() + slices.SortFunc(cookies, func(a, b *http.Cookie) int { + return strings.Compare(a.Name, b.Name) + }) + return cookies + }, + expectedResult: "41557db67d60", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := jA4H_c(tt.cookies()) + if result != tt.expectedResult { + t.Errorf("expected %s, got %s", tt.expectedResult, result) + } + }) + } +} + +func TestJA4H_D(t *testing.T) { + tests := []struct { + name string + cookies func() []*http.Cookie + expectedResult string + }{ + { + name: "no cookies", + cookies: func() []*http.Cookie { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + return req.Cookies() + }, + expectedResult: "000000000000", + }, + { + name: "one cookie", + cookies: func() []*http.Cookie { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) + return req.Cookies() + }, + expectedResult: "3ba8907e7a25", + }, + { + name: "duplicate cookies", + cookies: func() []*http.Cookie { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar2"}) + return req.Cookies() + }, + expectedResult: "975821a3a881", + }, + { + name: "multiple cookies", + cookies: func() []*http.Cookie { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "foo", Value: 
"bar"}) + req.AddCookie(&http.Cookie{Name: "bar", Value: "foo"}) + cookies := req.Cookies() + slices.SortFunc(cookies, func(a, b *http.Cookie) int { + return strings.Compare(a.Name, b.Name) + }) + return cookies + }, + expectedResult: "70f8bee1efb8", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := jA4H_d(tt.cookies()) + if result != tt.expectedResult { + t.Errorf("expected %s, got %s", tt.expectedResult, result) + } + }) + } +} + +func TestJA4H(t *testing.T) { + tests := []struct { + name string + req func() *http.Request + expectedHash string + }{ + { + name: "Basic GET - No cookies", + req: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + return req + }, + expectedHash: "ge11nn000000_e3b0c44298fc_000000000000_000000000000", + }, + { + name: "Basic GET - With cookies", + req: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "session", Value: "12345"}) + return req + }, + expectedHash: "ge11cn000000_e3b0c44298fc_3f3af1ecebbd_86a3f0069fcd", + }, + { + name: "Basic GET - Multiple cookies", + req: func() *http.Request { + req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) + req.AddCookie(&http.Cookie{Name: "baz", Value: "qux"}) + return req + }, + expectedHash: "ge11cn000000_e3b0c44298fc_bd87575d11f6_d401f362552e", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hash := JA4H(test.req()) + if hash != test.expectedHash { + t.Errorf("expected %s, got %s", test.expectedHash, hash) + } + }) + } + +} diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index ccd7a9f9cc8..66ca13d55bb 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -16,12 +16,13 @@ import ( ) const ( - URIHeaderName = "X-Crowdsec-Appsec-Uri" - VerbHeaderName = "X-Crowdsec-Appsec-Verb" - HostHeaderName = 
"X-Crowdsec-Appsec-Host" - IPHeaderName = "X-Crowdsec-Appsec-Ip" - APIKeyHeaderName = "X-Crowdsec-Appsec-Api-Key" - UserAgentHeaderName = "X-Crowdsec-Appsec-User-Agent" + URIHeaderName = "X-Crowdsec-Appsec-Uri" + VerbHeaderName = "X-Crowdsec-Appsec-Verb" + HostHeaderName = "X-Crowdsec-Appsec-Host" + IPHeaderName = "X-Crowdsec-Appsec-Ip" + APIKeyHeaderName = "X-Crowdsec-Appsec-Api-Key" + UserAgentHeaderName = "X-Crowdsec-Appsec-User-Agent" + HTTPVersionHeaderName = "X-Crowdsec-Appsec-Http-Version" ) type ParsedRequest struct { @@ -313,6 +314,28 @@ func NewParsedRequestFromRequest(r *http.Request, logger *log.Entry) (ParsedRequ userAgent := r.Header.Get(UserAgentHeaderName) //This one is optional + httpVersion := r.Header.Get(HTTPVersionHeaderName) + if httpVersion == "" { + logger.Debugf("missing '%s' header", HTTPVersionHeaderName) + } + + if httpVersion != "" && len(httpVersion) == 2 && + httpVersion[0] >= '0' && httpVersion[0] <= '9' && + httpVersion[1] >= '0' && httpVersion[1] <= '9' { + major := httpVersion[0] + minor := httpVersion[1] + + r.ProtoMajor = int(major - '0') + r.ProtoMinor = int(minor - '0') + if r.ProtoMajor == 2 && r.ProtoMinor == 0 { + r.Proto = "HTTP/2" + } else { + r.Proto = "HTTP/" + string(major) + "." 
+ string(minor) + } + } else { + logger.Warnf("Invalid value %s for HTTP version header", httpVersion) + } + // delete those headers before coraza process the request delete(r.Header, IPHeaderName) delete(r.Header, HostHeaderName) @@ -320,6 +343,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *log.Entry) (ParsedRequ delete(r.Header, VerbHeaderName) delete(r.Header, UserAgentHeaderName) delete(r.Header, APIKeyHeaderName) + delete(r.Header, HTTPVersionHeaderName) originalHTTPRequest := r.Clone(r.Context()) originalHTTPRequest.Body = io.NopCloser(bytes.NewBuffer(body)) diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index b90c1986153..8c49bdc5f80 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -2,6 +2,7 @@ package exprhelpers import ( "net" + "net/http" "time" "github.com/oschwald/geoip2-golang" @@ -493,6 +494,13 @@ var exprFuncs = []exprCustomFunc{ new(func(string) *net.IPNet), }, }, + { + name: "JA4H", + function: JA4H, + signature: []interface{}{ + new(func(*http.Request) string), + }, + }, } //go 1.20 "CutPrefix": strings.CutPrefix, diff --git a/pkg/exprhelpers/waf.go b/pkg/exprhelpers/waf.go new file mode 100644 index 00000000000..0648f7ffcf3 --- /dev/null +++ b/pkg/exprhelpers/waf.go @@ -0,0 +1,13 @@ +package exprhelpers + +import ( + "net/http" + + "github.com/crowdsecurity/crowdsec/pkg/appsec/ja4h" +) + +// JA4H(req *http.Request) string +func JA4H(params ...any) (any, error) { + req := params[0].(*http.Request) + return ja4h.JA4H(req), nil +} diff --git a/pkg/exprhelpers/waf_test.go b/pkg/exprhelpers/waf_test.go new file mode 100644 index 00000000000..594488fb785 --- /dev/null +++ b/pkg/exprhelpers/waf_test.go @@ -0,0 +1,78 @@ +package exprhelpers + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestJA4H(t *testing.T) { + + tests := []struct { + name string + method string + url string + cookies map[string]string + headers map[string]string + 
expectedHash string + }{ + { + name: "Basic GET - No cookies", + method: "GET", + url: "http://example.com", + cookies: map[string]string{}, + headers: map[string]string{}, + expectedHash: "ge11nn000000_e3b0c44298fc_000000000000_000000000000", + }, + { + name: "Basic POST - No cookies", + method: "POST", + url: "http://example.com", + cookies: map[string]string{}, + headers: map[string]string{}, + expectedHash: "po11nn000000_e3b0c44298fc_000000000000_000000000000", + }, + { + name: "GET - With cookies", + method: "GET", + url: "http://example.com/foobar", + cookies: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + headers: map[string]string{ + "User-Agent": "Mozilla/5.0", + }, + expectedHash: "ge11cn010000_b8bcd45ac095_bd87575d11f6_d401f362552e", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + req, err := http.NewRequest(test.method, test.url, nil) + if err != nil { + t.Fatalf("Failed to create request: %s", err) + } + + for key, value := range test.cookies { + req.AddCookie(&http.Cookie{ + Name: key, + Value: value, + }) + } + + for key, value := range test.headers { + req.Header.Add(key, value) + } + + hash, err := JA4H(req) + require.NoError(t, err) + + if hash != test.expectedHash { + t.Fatalf("JA4H returned unexpected hash: %s", hash) + } + }) + } + +} From c161eb270bfcbf130c38bb76a516151ef5208229 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 25 Feb 2025 10:09:29 +0100 Subject: [PATCH 445/581] pkg/cwhub: refact Item.State.(Downloaded | Installed) (#3476) --- cmd/crowdsec-cli/clihub/items.go | 2 +- cmd/crowdsec-cli/cliitem/cmdinspect.go | 6 +- cmd/crowdsec-cli/cliitem/cmdremove.go | 2 +- cmd/crowdsec-cli/clisimulation/simulation.go | 2 +- pkg/appsec/appsec.go | 72 ++++++++++++++++++-- pkg/cwhub/fetch.go | 10 ++- pkg/cwhub/hub.go | 22 ++---- pkg/cwhub/item.go | 44 +++++++----- pkg/cwhub/state.go | 32 +++++---- pkg/cwhub/state_test.go | 42 ++++++------ pkg/cwhub/sync.go | 33 
++++----- pkg/hubops/disable.go | 16 ++--- pkg/hubops/download.go | 11 ++- pkg/hubops/enable.go | 14 ++-- pkg/hubops/purge.go | 12 ++-- 15 files changed, 189 insertions(+), 131 deletions(-) diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index 87cb10b1f93..483691fc5f9 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -43,7 +43,7 @@ func SelectItems(hub *cwhub.Hub, itemType string, args []string, installedOnly b for _, itemName := range itemNames { item := hub.GetItem(itemType, itemName) - if installedOnly && !item.State.Installed { + if installedOnly && !item.State.IsInstalled() { continue } diff --git a/cmd/crowdsec-cli/cliitem/cmdinspect.go b/cmd/crowdsec-cli/cliitem/cmdinspect.go index b5ee0816d72..25906c30c7a 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinspect.go +++ b/cmd/crowdsec-cli/cliitem/cmdinspect.go @@ -71,7 +71,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff // return the diff between the installed version and the latest version func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { - if !item.State.Installed { + if !item.State.IsInstalled() { return "", fmt.Errorf("'%s' is not installed", item.FQName()) } @@ -113,7 +113,7 @@ func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvid } func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, item *cwhub.Item, reverse bool) string { - if !item.State.Installed { + if !item.State.IsInstalled() { return fmt.Sprintf("# %s is not installed", item.FQName()) } @@ -203,7 +203,7 @@ func inspectItem(hub *cwhub.Hub, item *cwhub.Item, wantMetrics bool, output stri enc.SetIndent(2) if err := enc.Encode(item); err != nil { - return fmt.Errorf("unable to encode item: %w", err) + return fmt.Errorf("unable to serialize item: %w", err) } case "json": b, err := 
json.MarshalIndent(*item, "", " ") diff --git a/cmd/crowdsec-cli/cliitem/cmdremove.go b/cmd/crowdsec-cli/cliitem/cmdremove.go index 42f72f25ca9..506599f3efd 100644 --- a/cmd/crowdsec-cli/cliitem/cmdremove.go +++ b/cmd/crowdsec-cli/cliitem/cmdremove.go @@ -77,7 +77,7 @@ func installedParentNames(item *cwhub.Item) []string { ret := make([]string, 0) for _, parent := range item.Ancestors() { - if parent.State.Installed { + if parent.State.IsInstalled() { ret = append(ret, parent.Name) } } diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index 1b46c70c90a..1189f3f4ba3 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -83,7 +83,7 @@ func (cli *cliSimulation) newEnableCmd() *cobra.Command { log.Errorf("'%s' doesn't exist or is not a scenario", scenario) continue } - if !item.State.Installed { + if !item.State.IsInstalled() { log.Warningf("'%s' isn't enabled", scenario) } isExcluded := slices.Contains(cli.cfg().Cscli.SimulationConfig.Exclusions, scenario) diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 5f01f76d993..feb84af109e 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -40,6 +40,7 @@ const ( func (h *Hook) Build(hookStage int) error { ctx := map[string]interface{}{} + switch hookStage { case hookOnLoad: ctx = GetOnLoadEnv(&AppsecRuntimeConfig{}) @@ -50,21 +51,26 @@ func (h *Hook) Build(hookStage int) error { case hookOnMatch: ctx = GetOnMatchEnv(&AppsecRuntimeConfig{}, &ParsedRequest{}, types.Event{}) } + opts := exprhelpers.GetExprOptions(ctx) if h.Filter != "" { program, err := expr.Compile(h.Filter, opts...) // FIXME: opts if err != nil { return fmt.Errorf("unable to compile filter %s : %w", h.Filter, err) } + h.FilterExpr = program } + for _, apply := range h.Apply { program, err := expr.Compile(apply, opts...) 
if err != nil { return fmt.Errorf("unable to compile apply %s : %w", apply, err) } + h.ApplyExpr = append(h.ApplyExpr, program) } + return nil } @@ -165,45 +171,51 @@ func (wc *AppsecConfig) LoadByPath(file string) error { yamlFile, err := os.ReadFile(file) if err != nil { - return fmt.Errorf("unable to read file %s : %s", file, err) + return fmt.Errorf("unable to read file %s : %w", file, err) } - //as LoadByPath can be called several time, we append rules/hooks, but override other options + // as LoadByPath can be called several time, we append rules/hooks, but override other options var tmp AppsecConfig err = yaml.UnmarshalStrict(yamlFile, &tmp) if err != nil { - return fmt.Errorf("unable to parse yaml file %s : %s", file, err) + return fmt.Errorf("unable to parse yaml file %s : %w", file, err) } if wc.Name == "" && tmp.Name != "" { wc.Name = tmp.Name } - //We can append rules/hooks + // We can append rules/hooks if tmp.OutOfBandRules != nil { wc.OutOfBandRules = append(wc.OutOfBandRules, tmp.OutOfBandRules...) } + if tmp.InBandRules != nil { wc.InBandRules = append(wc.InBandRules, tmp.InBandRules...) } + if tmp.OnLoad != nil { wc.OnLoad = append(wc.OnLoad, tmp.OnLoad...) } + if tmp.PreEval != nil { wc.PreEval = append(wc.PreEval, tmp.PreEval...) } + if tmp.PostEval != nil { wc.PostEval = append(wc.PostEval, tmp.PostEval...) } + if tmp.OnMatch != nil { wc.OnMatch = append(wc.OnMatch, tmp.OnMatch...) } + if tmp.VariablesTracking != nil { wc.VariablesTracking = append(wc.VariablesTracking, tmp.VariablesTracking...) 
} - //override other options + // override other options wc.LogLevel = tmp.LogLevel wc.DefaultRemediation = tmp.DefaultRemediation @@ -216,12 +228,15 @@ func (wc *AppsecConfig) LoadByPath(file string) error { if tmp.InbandOptions.DisableBodyInspection { wc.InbandOptions.DisableBodyInspection = true } + if tmp.InbandOptions.RequestBodyInMemoryLimit != nil { wc.InbandOptions.RequestBodyInMemoryLimit = tmp.InbandOptions.RequestBodyInMemoryLimit } + if tmp.OutOfBandOptions.DisableBodyInspection { wc.OutOfBandOptions.DisableBodyInspection = true } + if tmp.OutOfBandOptions.RequestBodyInMemoryLimit != nil { wc.OutOfBandOptions.RequestBodyInMemoryLimit = tmp.OutOfBandOptions.RequestBodyInMemoryLimit } @@ -232,12 +247,14 @@ func (wc *AppsecConfig) LoadByPath(file string) error { func (wc *AppsecConfig) Load(configName string) error { item := hub.GetItem(cwhub.APPSEC_CONFIGS, configName) - if item != nil && item.State.Installed { + if item != nil && item.State.IsInstalled() { wc.Logger.Infof("loading %s", item.State.LocalPath) + err := wc.LoadByPath(item.State.LocalPath) if err != nil { return fmt.Errorf("unable to load appsec-config %s : %s", item.State.LocalPath, err) } + return nil } @@ -254,6 +271,7 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { if wc.BouncerBlockedHTTPCode == 0 { wc.BouncerBlockedHTTPCode = http.StatusForbidden } + if wc.BouncerPassedHTTPCode == 0 { wc.BouncerPassedHTTPCode = http.StatusOK } @@ -261,12 +279,15 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { if wc.UserBlockedHTTPCode == 0 { wc.UserBlockedHTTPCode = http.StatusForbidden } + if wc.UserPassedHTTPCode == 0 { wc.UserPassedHTTPCode = http.StatusOK } + if wc.DefaultPassAction == "" { wc.DefaultPassAction = AllowRemediation } + if wc.DefaultRemediation == "" { wc.DefaultRemediation = BanRemediation } @@ -287,20 +308,25 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { // load rules for _, rule := range wc.OutOfBandRules { 
wc.Logger.Infof("loading outofband rule %s", rule) + collections, err := LoadCollection(rule, wc.Logger.WithField("component", "appsec_collection_loader")) if err != nil { return nil, fmt.Errorf("unable to load outofband rule %s : %s", rule, err) } + ret.OutOfBandRules = append(ret.OutOfBandRules, collections...) } wc.Logger.Infof("Loaded %d outofband rules", len(ret.OutOfBandRules)) + for _, rule := range wc.InBandRules { wc.Logger.Infof("loading inband rule %s", rule) + collections, err := LoadCollection(rule, wc.Logger.WithField("component", "appsec_collection_loader")) if err != nil { return nil, fmt.Errorf("unable to load inband rule %s : %s", rule, err) } + ret.InBandRules = append(ret.InBandRules, collections...) } @@ -311,10 +337,12 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { return nil, fmt.Errorf("invalid 'on_success' for on_load hook : %s", hook.OnSuccess) } + err := hook.Build(hookOnLoad) if err != nil { return nil, fmt.Errorf("unable to build on_load hook : %s", err) } + ret.CompiledOnLoad = append(ret.CompiledOnLoad, hook) } @@ -322,10 +350,12 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { return nil, fmt.Errorf("invalid 'on_success' for pre_eval hook : %s", hook.OnSuccess) } + err := hook.Build(hookPreEval) if err != nil { return nil, fmt.Errorf("unable to build pre_eval hook : %s", err) } + ret.CompiledPreEval = append(ret.CompiledPreEval, hook) } @@ -333,10 +363,12 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { return nil, fmt.Errorf("invalid 'on_success' for post_eval hook : %s", hook.OnSuccess) } + err := hook.Build(hookPostEval) if err != nil { return nil, fmt.Errorf("unable to build post_eval hook : %s", err) } + 
ret.CompiledPostEval = append(ret.CompiledPostEval, hook) } @@ -344,10 +376,12 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { if hook.OnSuccess != "" && hook.OnSuccess != "continue" && hook.OnSuccess != "break" { return nil, fmt.Errorf("invalid 'on_success' for on_match hook : %s", hook.OnSuccess) } + err := hook.Build(hookOnMatch) if err != nil { return nil, fmt.Errorf("unable to build on_match hook : %s", err) } + ret.CompiledOnMatch = append(ret.CompiledOnMatch, hook) } @@ -357,19 +391,23 @@ func (wc *AppsecConfig) Build() (*AppsecRuntimeConfig, error) { if err != nil { return nil, fmt.Errorf("cannot compile variable regexp %s: %w", variable, err) } + ret.CompiledVariablesTracking = append(ret.CompiledVariablesTracking, compiledVariableRule) } + return ret, nil } func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { has_match := false + for _, rule := range w.CompiledOnLoad { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetOnLoadEnv(w), w.Logger, w.Logger.Level >= log.DebugLevel) if err != nil { return fmt.Errorf("unable to run appsec on_load filter %s : %w", rule.Filter, err) } + switch t := output.(type) { case bool: if !t { @@ -380,14 +418,17 @@ func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } + for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetOnLoadEnv(w), w.Logger, w.Logger.Level >= log.DebugLevel) if err != nil { w.Logger.Errorf("unable to apply appsec on_load expr: %s", err) continue } + switch t := o.(type) { case error: w.Logger.Errorf("unable to apply appsec on_load expr: %s", t) @@ -395,21 +436,25 @@ func (w *AppsecRuntimeConfig) ProcessOnLoadRules() error { default: } } + if has_match && rule.OnSuccess == "break" { break } } + return nil } func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt types.Event) error { has_match := false + 
for _, rule := range w.CompiledOnMatch { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetOnMatchEnv(w, request, evt), w.Logger, w.Logger.Level >= log.DebugLevel) if err != nil { return fmt.Errorf("unable to run appsec on_match filter %s : %w", rule.Filter, err) } + switch t := output.(type) { case bool: if !t { @@ -420,14 +465,17 @@ func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt ty w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } + for _, applyExpr := range rule.ApplyExpr { o, err := exprhelpers.Run(applyExpr, GetOnMatchEnv(w, request, evt), w.Logger, w.Logger.Level >= log.DebugLevel) if err != nil { w.Logger.Errorf("unable to apply appsec on_match expr: %s", err) continue } + switch t := o.(type) { case error: w.Logger.Errorf("unable to apply appsec on_match expr: %s", t) @@ -435,21 +483,25 @@ func (w *AppsecRuntimeConfig) ProcessOnMatchRules(request *ParsedRequest, evt ty default: } } + if has_match && rule.OnSuccess == "break" { break } } + return nil } func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error { has_match := false + for _, rule := range w.CompiledPreEval { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetPreEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) if err != nil { return fmt.Errorf("unable to run appsec pre_eval filter %s : %w", rule.Filter, err) } + switch t := output.(type) { case bool: if !t { @@ -460,6 +512,7 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } // here means there is no filter or the filter matched @@ -469,6 +522,7 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error w.Logger.Errorf("unable to apply appsec pre_eval expr: %s", err) continue } + switch t := o.(type) { case error: 
w.Logger.Errorf("unable to apply appsec pre_eval expr: %s", t) @@ -476,6 +530,7 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error default: } } + if has_match && rule.OnSuccess == "break" { break } @@ -486,12 +541,14 @@ func (w *AppsecRuntimeConfig) ProcessPreEvalRules(request *ParsedRequest) error func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error { has_match := false + for _, rule := range w.CompiledPostEval { if rule.FilterExpr != nil { output, err := exprhelpers.Run(rule.FilterExpr, GetPostEvalEnv(w, request), w.Logger, w.Logger.Level >= log.DebugLevel) if err != nil { return fmt.Errorf("unable to run appsec post_eval filter %s : %w", rule.Filter, err) } + switch t := output.(type) { case bool: if !t { @@ -502,6 +559,7 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error w.Logger.Errorf("Filter must return a boolean, can't filter") continue } + has_match = true } // here means there is no filter or the filter matched @@ -519,6 +577,7 @@ func (w *AppsecRuntimeConfig) ProcessPostEvalRules(request *ParsedRequest) error default: } } + if has_match && rule.OnSuccess == "break" { break } @@ -562,6 +621,7 @@ func (w *AppsecRuntimeConfig) RemoveOutbandRuleByName(name string) error { func (w *AppsecRuntimeConfig) CancelEvent() error { w.Logger.Debugf("canceling event") w.Response.SendEvent = false + return nil } diff --git a/pkg/cwhub/fetch.go b/pkg/cwhub/fetch.go index e8dacad4a6d..fc5cd9d6230 100644 --- a/pkg/cwhub/fetch.go +++ b/pkg/cwhub/fetch.go @@ -63,8 +63,16 @@ func (i *Item) FetchContentTo(ctx context.Context, contentProvider ContentProvid return false, "", err } + i.State.DownloadPath = destPath + return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil } - return contentProvider.FetchContent(ctx, i.RemotePath, destPath, wantHash, i.hub.logger) + downloaded, _, err := contentProvider.FetchContent(ctx, i.RemotePath, destPath, wantHash, i.hub.logger) + 
+ if err == nil && downloaded { + i.State.DownloadPath = destPath + } + + return downloaded, destPath, err } diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index b75c173bc9b..5c0caad1473 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -19,11 +19,10 @@ import ( // Hub is the main structure for the package. type Hub struct { - items HubItems // Items read from HubDir and InstallDir - pathIndex map[string]*Item - local *csconfig.LocalHubCfg - logger *logrus.Logger - Warnings []string // Warnings encountered during sync + items HubItems // Items read from HubDir and InstallDir + local *csconfig.LocalHubCfg + logger *logrus.Logger + Warnings []string // Warnings encountered during sync } // GetDataDir returns the data directory, where data sets are installed. @@ -45,9 +44,8 @@ func NewHub(local *csconfig.LocalHubCfg, logger *logrus.Logger) (*Hub, error) { } hub := &Hub{ - local: local, - logger: logger, - pathIndex: make(map[string]*Item, 0), + local: local, + logger: logger, } return hub, nil @@ -169,7 +167,6 @@ func (h *Hub) addItem(item *Item) { } h.items[item.Type][item.Name] = item - h.pathIndex[item.State.LocalPath] = item } // GetItemMap returns the map of items for a given type. @@ -182,11 +179,6 @@ func (h *Hub) GetItem(itemType string, itemName string) *Item { return h.GetItemMap(itemType)[itemName] } -// GetItemByPath returns an item from hub based on its (absolute) local path. -func (h *Hub) GetItemByPath(itemPath string) *Item { - return h.pathIndex[itemPath] -} - // GetItemFQ returns an item from hub based on its type and name (type:author/name). 
func (h *Hub) GetItemFQ(itemFQName string) (*Item, error) { // type and name are separated by a colon @@ -240,7 +232,7 @@ func (h *Hub) GetInstalledByType(itemType string, sorted bool) []*Item { ret := make([]*Item, 0) for _, item := range h.GetItemsByType(itemType, sorted) { - if item.State.Installed { + if item.State.IsInstalled() { ret = append(ret, item) } } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index f0b447c6c4e..41087af1ac9 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -113,10 +113,11 @@ type Item struct { Dependencies } -// InstallPath returns the path to use for the install symlink. +// PathForInstall returns the path to use for the install symlink +// (eg. /etc/crowdsec/collections/xyz.yaml). // Returns an error if an item is already installed or if the path goes outside of the install dir. -func (i *Item) InstallPath() (string, error) { - if i.State.Installed { +func (i *Item) PathForInstall() (string, error) { + if i.State.IsInstalled() { return "", fmt.Errorf("%s is already installed at %s", i.FQName(), i.State.LocalPath) } @@ -128,16 +129,21 @@ func (i *Item) InstallPath() (string, error) { return SafePath(i.hub.local.InstallDir, filepath.Join(p, i.FileName)) } -// DownloadPath returns the location of the actual config file in the hub +// PathForDownload returns the path to use to store the item's file from the hub // (eg. /etc/crowdsec/hub/collections/author/xyz.yaml). // Raises an error if the path goes outside of the hub dir. -func (i *Item) DownloadPath() (string, error) { - ret, err := SafePath(i.hub.local.HubDir, i.RemotePath) - if err != nil { - return "", err +func (i *Item) PathForDownload() (string, error) { + path, err := SafePath(i.hub.local.HubDir, i.RemotePath) + + if i.State.IsDownloaded() && path != i.State.DownloadPath { + // A hub item with the same name is at a different location. + // This should not happen. 
+ // user is downloading with --force so we are allowed to overwrite but + // should we remove the old location from here? Error, warning, more tests? + return "", fmt.Errorf("%s is already downloaded at %s", i.FQName(), i.State.DownloadPath) } - return ret, nil + return path, err } // HasSubItems returns true if items of this type can have sub-items. Currently only collections. @@ -167,8 +173,8 @@ func (i Item) MarshalJSON() ([]byte, error) { LocalPath: i.State.LocalPath, LocalVersion: i.State.LocalVersion, LocalHash: i.State.LocalHash, - Installed: i.State.Installed, - Downloaded: i.State.Downloaded, + Installed: i.State.IsInstalled(), + Downloaded: i.State.IsDownloaded(), UpToDate: i.State.UpToDate, Tainted: i.State.Tainted, BelongsToCollections: i.State.BelongsToCollections, @@ -182,13 +188,15 @@ func (i Item) MarshalYAML() (interface{}, error) { type Alias Item return &struct { - Alias `yaml:",inline"` - State ItemState `yaml:",inline"` - Local bool `yaml:"local"` + Alias `yaml:",inline"` + State ItemState `yaml:",inline"` + Installed bool `yaml:"installed"` + Local bool `yaml:"local"` }{ - Alias: Alias(i), - State: i.State, - Local: i.State.IsLocal(), + Alias: Alias(i), + State: i.State, + Installed: i.State.IsInstalled(), + Local: i.State.IsLocal(), }, nil } @@ -275,7 +283,7 @@ func (i *Item) SafeToRemoveDeps() ([]*Item, error) { // if the sub depends on a collection that is not a direct or indirect dependency // of the current item, it is not removed for _, subParent := range sub.Ancestors() { - if !subParent.State.Installed { + if !subParent.State.IsInstalled() { continue } diff --git a/pkg/cwhub/state.go b/pkg/cwhub/state.go index 63a433151cd..3e1876712b3 100644 --- a/pkg/cwhub/state.go +++ b/pkg/cwhub/state.go @@ -7,16 +7,16 @@ import ( // ItemState is used to keep the local state (i.e. at runtime) of an item. // This data is not stored in the index, but is displayed with "cscli ... inspect". 
type ItemState struct { - LocalPath string `json:"local_path,omitempty" yaml:"local_path,omitempty"` - LocalVersion string `json:"local_version,omitempty" yaml:"local_version,omitempty"` - LocalHash string `json:"local_hash,omitempty" yaml:"local_hash,omitempty"` - Installed bool `json:"installed"` + // Path to the install link or local file -- keep LocalPath for compatibility + LocalPath string `yaml:"local_path,omitempty"` + LocalVersion string `yaml:"local_version,omitempty"` + LocalHash string `yaml:"local_hash,omitempty"` + DownloadPath string local bool - Downloaded bool `json:"downloaded"` - UpToDate bool `json:"up_to_date"` - Tainted bool `json:"tainted"` - TaintedBy []string `json:"tainted_by,omitempty" yaml:"tainted_by,omitempty"` - BelongsToCollections []string `json:"belongs_to_collections,omitempty" yaml:"belongs_to_collections,omitempty"` + UpToDate bool `yaml:"up_to_date"` + Tainted bool `yaml:"tainted"` + TaintedBy []string `yaml:"tainted_by,omitempty"` + BelongsToCollections []string `yaml:"belongs_to_collections,omitempty"` } // IsLocal returns true if the item has been create by a user (not downloaded from the hub). 
@@ -28,7 +28,7 @@ func (s *ItemState) IsLocal() bool { func (s *ItemState) Text() string { ret := "disabled" - if s.Installed { + if s.IsInstalled() { ret = "enabled" } @@ -50,13 +50,21 @@ func (s *ItemState) Emoji() string { switch { case s.IsLocal(): return emoji.House - case !s.Installed: + case !s.IsInstalled(): return emoji.Prohibited case s.Tainted || (!s.UpToDate && !s.IsLocal()): return emoji.Warning - case s.Installed: + case s.IsInstalled(): return emoji.CheckMark default: return emoji.QuestionMark } } + +func (s *ItemState) IsDownloaded() bool { + return s.DownloadPath != "" +} + +func (s *ItemState) IsInstalled() bool { + return s.LocalPath != "" +} diff --git a/pkg/cwhub/state_test.go b/pkg/cwhub/state_test.go index 20741809ae2..78e68140143 100644 --- a/pkg/cwhub/state_test.go +++ b/pkg/cwhub/state_test.go @@ -20,47 +20,47 @@ func TestItemStateText(t *testing.T) { tests := []test{ { ItemState{ - Installed: true, - UpToDate: false, - Tainted: false, - Downloaded: true, + LocalPath: "path/to/install", + UpToDate: false, + Tainted: false, + DownloadPath: "path/to/download", }, "enabled,update-available", emoji.Warning, }, { ItemState{ - Installed: true, - UpToDate: true, - Tainted: false, - Downloaded: true, + LocalPath: "path/to/install", + UpToDate: true, + Tainted: false, + DownloadPath: "path/to/download", }, "enabled", emoji.CheckMark, }, { ItemState{ - Installed: true, - UpToDate: false, - local: true, - Tainted: false, - Downloaded: false, + LocalPath: "path/to/install", + UpToDate: false, + local: true, + Tainted: false, + DownloadPath: "", }, "enabled,local", emoji.House, }, { ItemState{ - Installed: false, - UpToDate: false, - Tainted: false, - Downloaded: true, + LocalPath: "", + UpToDate: false, + Tainted: false, + DownloadPath: "path/to/download", }, "disabled,update-available", emoji.Prohibited, }, { ItemState{ - Installed: true, - UpToDate: false, - Tainted: true, - Downloaded: true, + LocalPath: "path/to/install", + UpToDate: false, + 
Tainted: true, + DownloadPath: "path/to/download", }, "enabled,tainted", emoji.Warning, diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index 5de548a521a..65c81d2c333 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -130,7 +130,6 @@ func newInstallItemSpec(path string, subs []string) (*itemSpec, error) { // .../config/postoverflow/stage/file.yaml // .../config/scenarios/scenar.yaml // .../config/collections/linux.yaml //file is empty - if len(subs) < 2 { return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) } @@ -239,7 +238,6 @@ func newLocalItem(h *Hub, path string, spec *itemSpec) (*Item, error) { State: ItemState{ LocalPath: path, local: true, - Installed: true, UpToDate: true, }, } @@ -331,14 +329,14 @@ func updateNonLocalItem(h *Hub, path string, spec *itemSpec, symlinkTarget strin continue } - src, err := item.DownloadPath() + src, err := item.PathForDownload() if err != nil { return nil, err } if spec.path == src { h.logger.Tracef("marking %s as downloaded", item.Name) - item.State.Downloaded = true + item.State.DownloadPath = src } } else if !hasPathSuffix(symlinkTarget, item.RemotePath) { // wrong file @@ -389,7 +387,7 @@ func (h *Hub) addItemFromSpec(spec *itemSpec) error { // see if there's another installed item of the same name theOtherItem := h.GetItem(spec.ftype, item.Name) if theOtherItem != nil { - if theOtherItem.State.Installed { + if theOtherItem.State.IsInstalled() { h.logger.Warnf("multiple %s named %s: ignoring %s", spec.ftype, item.Name, theOtherItem.State.LocalPath) } } @@ -398,12 +396,10 @@ func (h *Hub) addItemFromSpec(spec *itemSpec) error { if err != nil { return err } - - item.State.LocalPath = spec.path } if item == nil { - h.logger.Infof("Ignoring file %s of type %s", spec.path, spec.ftype) + h.logger.Warningf("Ignoring file %s of type %s", spec.path, spec.ftype) return nil } @@ -426,12 +422,12 @@ func (i *Item) checkSubItemVersions() []string { } // ensure all the sub-items are installed, or tag the 
parent as tainted - i.hub.logger.Tracef("checking submembers of %s installed:%t", i.Name, i.State.Installed) + i.hub.logger.Tracef("checking submembers of %s installed:%t", i.Name, i.State.IsInstalled()) for sub := range i.CurrentDependencies().SubItems(i.hub) { - i.hub.logger.Tracef("check %s installed:%t", sub.Name, sub.State.Installed) + i.hub.logger.Tracef("check %s installed:%t", sub.Name, sub.State.IsInstalled()) - if !i.State.Installed { + if !i.State.IsInstalled() { continue } @@ -453,7 +449,7 @@ func (i *Item) checkSubItemVersions() []string { continue } - if !sub.State.Installed && i.State.Installed { + if !sub.State.IsInstalled() && i.State.IsInstalled() { i.addTaint(sub) warn = append(warn, fmt.Sprintf("%s is tainted by missing %s", i.Name, sub.FQName())) @@ -588,7 +584,7 @@ func (h *Hub) localSync() error { sub.State.BelongsToCollections = insertInOrderNoCase(sub.State.BelongsToCollections, item.Name) } - if !item.State.Installed { + if !item.State.IsInstalled() { continue } @@ -619,6 +615,10 @@ func (h *Hub) localSync() error { func (i *Item) setVersionState(path string, inhub bool) error { var err error + if !inhub { + i.State.LocalPath = path + } + i.State.LocalHash, err = downloader.SHA256(path) if err != nil { return fmt.Errorf("failed to get sha256 of %s: %w", path, err) @@ -647,10 +647,6 @@ func (i *Item) setVersionState(path string, inhub bool) error { if i.State.LocalVersion == "?" 
{ i.hub.logger.Tracef("got tainted match for %s: %s", i.Name, path) - if !inhub { - i.State.Installed = true - } - i.State.UpToDate = false i.addTaint(i) @@ -659,13 +655,10 @@ func (i *Item) setVersionState(path string, inhub bool) error { // we got an exact match, update struct - i.State.Downloaded = true - if !inhub { i.hub.logger.Tracef("found exact match for %s, version is %s, latest is %s", i.Name, i.State.LocalVersion, i.Version) i.State.Tainted = false // if we're walking the hub, present file doesn't means installed file - i.State.Installed = true } if i.State.LocalVersion == i.Version { diff --git a/pkg/hubops/disable.go b/pkg/hubops/disable.go index 7340920c249..5ac959319cd 100644 --- a/pkg/hubops/disable.go +++ b/pkg/hubops/disable.go @@ -20,17 +20,12 @@ func RemoveInstallLink(i *cwhub.Item) error { return fmt.Errorf("%s isn't managed by hub", i.Name) } - hubpath, err := os.Readlink(i.State.LocalPath) + target, err := os.Readlink(i.State.LocalPath) if err != nil { return fmt.Errorf("while reading symlink: %w", err) } - src, err := i.DownloadPath() - if err != nil { - return err - } - - if hubpath != src { + if target != i.State.DownloadPath { return fmt.Errorf("%s isn't managed by hub", i.Name) } @@ -38,6 +33,8 @@ func RemoveInstallLink(i *cwhub.Item) error { return fmt.Errorf("while removing symlink: %w", err) } + i.State.LocalPath = "" + return nil } @@ -64,7 +61,7 @@ func (c *DisableCommand) Prepare(plan *ActionPlan) (bool, error) { return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) } - if !i.State.Installed { + if !i.State.IsInstalled() { return false, nil } @@ -74,7 +71,7 @@ func (c *DisableCommand) Prepare(plan *ActionPlan) (bool, error) { } for _, sub := range subsToRemove { - if !sub.State.Installed { + if !sub.State.IsInstalled() { continue } @@ -97,7 +94,6 @@ func (c *DisableCommand) Run(ctx context.Context, plan *ActionPlan) error { plan.ReloadNeeded = true - i.State.Installed = false i.State.Tainted = false return nil 
diff --git a/pkg/hubops/download.go b/pkg/hubops/download.go index 552fddc775c..c4ee4e5b017 100644 --- a/pkg/hubops/download.go +++ b/pkg/hubops/download.go @@ -52,7 +52,7 @@ func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { var disableKeys []*cwhub.Item - if i.State.Installed { + if i.State.IsInstalled() { for sub := range i.CurrentDependencies().SubItems(plan.hub) { disableKeys = append(disableKeys, sub) toDisable[sub] = struct{}{} @@ -64,7 +64,7 @@ func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { return false, err } - if i.State.Installed { + if i.State.IsInstalled() { // ensure the _new_ dependencies are installed too if err := plan.AddCommand(NewEnableCommand(sub, c.Force)); err != nil { return false, err @@ -84,7 +84,7 @@ func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { } } - if i.State.Downloaded && i.State.UpToDate { + if i.State.IsDownloaded() && i.State.UpToDate { return false, nil } @@ -157,7 +157,7 @@ func (c *DownloadCommand) Run(ctx context.Context, plan *ActionPlan) error { fmt.Printf("downloading %s\n", colorizeItemName(i.FQName())) // ensure that target file is within target dir - finalPath, err := i.DownloadPath() + finalPath, err := i.PathForDownload() if err != nil { return err } @@ -171,7 +171,6 @@ func (c *DownloadCommand) Run(ctx context.Context, plan *ActionPlan) error { plan.ReloadNeeded = true } - i.State.Downloaded = true i.State.Tainted = false i.State.UpToDate = true @@ -208,7 +207,7 @@ func (c *DownloadCommand) Detail() string { version := color.YellowString(i.Version) - if i.State.Downloaded { + if i.State.IsDownloaded() { version = c.Item.State.LocalVersion + " -> " + color.YellowString(i.Version) } diff --git a/pkg/hubops/enable.go b/pkg/hubops/enable.go index 40de40c8662..af581883b69 100644 --- a/pkg/hubops/enable.go +++ b/pkg/hubops/enable.go @@ -40,7 +40,7 @@ func (c *EnableCommand) Prepare(plan *ActionPlan) (bool, error) { } } - if i.State.Installed { + if 
i.State.IsInstalled() { return false, nil } @@ -49,7 +49,7 @@ func (c *EnableCommand) Prepare(plan *ActionPlan) (bool, error) { // CreateInstallLink creates a symlink between the actual config file at hub.HubDir and hub.ConfigDir. func CreateInstallLink(i *cwhub.Item) error { - dest, err := i.InstallPath() + dest, err := i.PathForInstall() if err != nil { return err } @@ -66,15 +66,14 @@ func CreateInstallLink(i *cwhub.Item) error { return fmt.Errorf("failed to stat %s: %w", dest, err) } - src, err := i.DownloadPath() - if err != nil { - return err - } + src := i.State.DownloadPath if err = os.Symlink(src, dest); err != nil { return fmt.Errorf("while creating symlink from %s to %s: %w", src, dest, err) } + i.State.LocalPath = dest + return nil } @@ -83,7 +82,7 @@ func (c *EnableCommand) Run(ctx context.Context, plan *ActionPlan) error { fmt.Println("enabling " + colorizeItemName(i.FQName())) - if !i.State.Downloaded { + if !i.State.IsDownloaded() { // XXX: this a warning? return fmt.Errorf("can't enable %s: not downloaded", i.FQName()) } @@ -94,7 +93,6 @@ func (c *EnableCommand) Run(ctx context.Context, plan *ActionPlan) error { plan.ReloadNeeded = true - i.State.Installed = true i.State.Tainted = false return nil diff --git a/pkg/hubops/purge.go b/pkg/hubops/purge.go index 3b415b27428..f6cdf0fdc3e 100644 --- a/pkg/hubops/purge.go +++ b/pkg/hubops/purge.go @@ -43,7 +43,7 @@ func (c *PurgeCommand) Prepare(plan *ActionPlan) (bool, error) { } } - if !i.State.Downloaded { + if !i.State.IsDownloaded() { return false, nil } @@ -55,20 +55,16 @@ func (c *PurgeCommand) Run(ctx context.Context, plan *ActionPlan) error { fmt.Println("purging " + colorizeItemName(i.FQName())) - src, err := i.DownloadPath() - if err != nil { - return err - } - - if err := os.Remove(src); err != nil { + if err := os.Remove(i.State.DownloadPath); err != nil { if os.IsNotExist(err) { + i.State.DownloadPath = "" return nil } return fmt.Errorf("while removing file: %w", err) } - i.State.Downloaded = 
false + i.State.DownloadPath = "" i.State.Tainted = false i.State.UpToDate = false From 0bdb1f7f27a0c07095a4ce93998f89050ba54f05 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 27 Feb 2025 11:19:43 +0100 Subject: [PATCH 446/581] cron: avoid spamming stdout when the hub index is updated (#3485) --- config/crowdsec.cron.daily | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/crowdsec.cron.daily b/config/crowdsec.cron.daily index 9c488d29884..6b419a8b43d 100644 --- a/config/crowdsec.cron.daily +++ b/config/crowdsec.cron.daily @@ -5,7 +5,7 @@ test -x /usr/bin/cscli || exit 0 # splay hub upgrade and crowdsec reload sleep "$(seq 1 300 | shuf -n 1)" -/usr/bin/cscli --error hub update +/usr/bin/cscli --error hub update >/dev/null upgraded=$(/usr/bin/cscli --error hub upgrade) if [ -n "$upgraded" ]; then From c5f5896625abc3db49b9427f1ce60e9fc8792e3c Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 27 Feb 2025 14:26:38 +0100 Subject: [PATCH 447/581] crowdsec: allow -t to work if using appsec and allowlists (#3484) --- pkg/acquisition/modules/appsec/appsec.go | 19 +++++---- pkg/acquisition/modules/appsec/appsec_test.go | 7 ++-- pkg/appsec/allowlists/allowlists.go | 39 +++++++++++-------- pkg/appsec/allowlists/allowlists_test.go | 10 +++-- test/bats/crowdsec-acquisition.bats | 16 ++++++++ 5 files changed, 60 insertions(+), 31 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 5edf708a15e..4b6a52d5456 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -63,7 +63,6 @@ type AppsecSource struct { lapiURL string AuthCache AuthCache AppsecRunners []AppsecRunner // one for each go-routine - apiClient *apiclient.ApiClient appsecAllowlistClient *allowlists.AppsecAllowlist } @@ -226,12 +225,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLe w.AppsecRunners = 
make([]AppsecRunner, w.config.Routines) - w.apiClient, err = apiclient.GetLAPIClient() - if err != nil { - return fmt.Errorf("unable to get authenticated LAPI client: %w", err) - } - - w.appsecAllowlistClient = allowlists.NewAppsecAllowlist(w.apiClient, w.logger) + w.appsecAllowlistClient = allowlists.NewAppsecAllowlist(w.logger) for nbRoutine := range w.config.Routines { appsecRunnerUUID := uuid.New().String() @@ -282,7 +276,16 @@ func (w *AppsecSource) OneShotAcquisition(_ context.Context, _ chan types.Event, func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { w.outChan = out - w.appsecAllowlistClient.StartRefresh(t) + apiClient, err := apiclient.GetLAPIClient() + if err != nil { + return fmt.Errorf("unable to get authenticated LAPI client: %w", err) + } + + err = w.appsecAllowlistClient.Start(ctx, apiClient) + if err != nil { + return fmt.Errorf("failed to fetch allowlists: %w", err) + } + w.appsecAllowlistClient.StartRefresh(ctx, t) t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live") diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index a640e7ed526..60960830f39 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -146,10 +146,9 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { assert.NoError(t, err) }) - allowlistClient := allowlists.NewAppsecAllowlist(client, logger) - // In real life, allowlists updater is started by the acquisition - // Do it manually here as we are simulating the appsec itself - err = allowlistClient.FetchAllowlists() + allowlistClient := allowlists.NewAppsecAllowlist(logger) + + err = allowlistClient.Start(t.Context(), client) require.NoError(t, err) runner := AppsecRunner{ inChan: InChan, diff --git a/pkg/appsec/allowlists/allowlists.go b/pkg/appsec/allowlists/allowlists.go index 640164b6787..61ea52b990e 100644 --- 
a/pkg/appsec/allowlists/allowlists.go +++ b/pkg/appsec/allowlists/allowlists.go @@ -36,25 +36,26 @@ type AppsecAllowlist struct { tomb *tomb.Tomb } -func NewAppsecAllowlist(client *apiclient.ApiClient, logger *log.Entry) *AppsecAllowlist { +func NewAppsecAllowlist(logger *log.Entry) *AppsecAllowlist { a := &AppsecAllowlist{ - LAPIClient: client, - logger: logger.WithField("component", "appsec-allowlist"), - ips: []ipAllowlist{}, - ranges: []rangeAllowlist{}, - } - - if err := a.FetchAllowlists(); err != nil { - a.logger.Errorf("failed to fetch allowlists: %s", err) + logger: logger.WithField("component", "appsec-allowlist"), + ips: []ipAllowlist{}, + ranges: []rangeAllowlist{}, } return a } -func (a *AppsecAllowlist) FetchAllowlists() error { +func (a *AppsecAllowlist) Start(ctx context.Context, client *apiclient.ApiClient) error { + a.LAPIClient = client + err := a.FetchAllowlists(ctx) + return err +} + +func (a *AppsecAllowlist) FetchAllowlists(ctx context.Context) error { a.logger.Debug("fetching allowlists") - allowlists, _, err := a.LAPIClient.Allowlists.List(context.TODO(), apiclient.AllowlistListOpts{WithContent: true}) + allowlists, _, err := a.LAPIClient.Allowlists.List(ctx, apiclient.AllowlistListOpts{WithContent: true}) if err != nil { return err } @@ -92,6 +93,9 @@ func (a *AppsecAllowlist) FetchAllowlists() error { } } + if len(a.ips) != 0 || len(a.ranges) != 0 { + a.logger.Infof("fetched %d IPs and %d ranges", len(a.ips), len(a.ranges)) + } a.logger.Debugf("fetched %d IPs and %d ranges", len(a.ips), len(a.ranges)) a.logger.Tracef("allowlisted ips: %+v", a.ips) a.logger.Tracef("allowlisted ranges: %+v", a.ranges) @@ -99,25 +103,28 @@ func (a *AppsecAllowlist) FetchAllowlists() error { return nil } -func (a *AppsecAllowlist) updateAllowlists() error { +func (a *AppsecAllowlist) updateAllowlists(ctx context.Context) { ticker := time.NewTicker(allowlistRefreshInterval) for { select { case <-ticker.C: - if err := a.FetchAllowlists(); err != nil { + if err 
:= a.FetchAllowlists(ctx); err != nil { a.logger.Errorf("failed to fetch allowlists: %s", err) } case <-a.tomb.Dying(): ticker.Stop() - return nil + return } } } -func (a *AppsecAllowlist) StartRefresh(t *tomb.Tomb) { +func (a *AppsecAllowlist) StartRefresh(ctx context.Context, t *tomb.Tomb) { a.tomb = t - a.tomb.Go(a.updateAllowlists) + a.tomb.Go(func() error { + a.updateAllowlists(ctx) + return nil + }) } func (a *AppsecAllowlist) IsAllowlisted(sourceIP string) (bool, string) { diff --git a/pkg/appsec/allowlists/allowlists_test.go b/pkg/appsec/allowlists/allowlists_test.go index 19fb289b90f..5614e657058 100644 --- a/pkg/appsec/allowlists/allowlists_test.go +++ b/pkg/appsec/allowlists/allowlists_test.go @@ -64,9 +64,13 @@ func TestAppsecAllowlist(t *testing.T) { assert.NoError(t, err) }) - allowlistClient := NewAppsecAllowlist(client, log.NewEntry(log.StandardLogger())) + ctx := t.Context() + allowlistClient := NewAppsecAllowlist(log.NewEntry(log.StandardLogger())) - err = allowlistClient.FetchAllowlists() + err = allowlistClient.Start(ctx, client) + require.NoError(t, err) + + err = allowlistClient.FetchAllowlists(ctx) require.NoError(t, err) res, reason := allowlistClient.IsAllowlisted("1.2.3.4") @@ -84,7 +88,7 @@ func TestAppsecAllowlist(t *testing.T) { assert.Len(t, allowlistClient.ips, 1) assert.Len(t, allowlistClient.ranges, 1) - err = allowlistClient.FetchAllowlists() + err = allowlistClient.FetchAllowlists(ctx) require.NoError(t, err) // No duplicates should be added diff --git a/test/bats/crowdsec-acquisition.bats b/test/bats/crowdsec-acquisition.bats index 1a92624b4c4..28fdb8af31e 100644 --- a/test/bats/crowdsec-acquisition.bats +++ b/test/bats/crowdsec-acquisition.bats @@ -76,3 +76,19 @@ teardown() { assert_stderr --partial "datasource type missing in $ACQUIS_DIR/journal.yaml (position 0): detected 'source=journalctl'" assert_stderr --partial "datasource type mismatch in $ACQUIS_DIR/bad.yaml (position 0): found 'docker' but should probably be 
'journalctl'" } + +@test "test mode does not fail because of appsec and allowlists" { + rune -0 cscli collections install crowdsecurity/appsec-virtual-patching + cat >"$ACQUIS_DIR/appsec.yaml" <<-EOT + source: appsec + appsec_config: crowdsecurity/virtual-patching + labels: + type: appsec + EOT + + config_set '.common.log_level="debug" | .common.log_media="stdout"' + + rune -0 "$CROWDSEC" -t --trace + + assert_stderr --partial "Configuration test done" +} From 970954f2c09a9cfdd5121ef554e42ba2dc5a681c Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 28 Feb 2025 14:20:17 +0000 Subject: [PATCH 448/581] enhance: add option to disable magic syslog RFC parsers (#3435) --- pkg/acquisition/modules/syslog/syslog.go | 95 +++++++++++++------ pkg/acquisition/modules/syslog/syslog_test.go | 20 ++++ 2 files changed, 86 insertions(+), 29 deletions(-) diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index df805d08cae..901e38c15d0 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -1,6 +1,7 @@ package syslogacquisition import ( + "bytes" "context" "errors" "fmt" @@ -27,6 +28,7 @@ type SyslogConfiguration struct { Port int `yaml:"listen_port,omitempty"` Addr string `yaml:"listen_addr,omitempty"` MaxMessageLen int `yaml:"max_message_len,omitempty"` + DisableRFCParser bool `yaml:"disable_rfc_parser,omitempty"` // if true, we don't try to be smart and just remove the PRI configuration.DataSourceCommonCfg `yaml:",inline"` } @@ -182,6 +184,66 @@ func (s *SyslogSource) buildLogFromSyslog(ts time.Time, hostname string, return ret } +func (s *SyslogSource) parseLine(syslogLine syslogserver.SyslogMessage) string { + var line string + + logger := s.logger.WithField("client", syslogLine.Client) + logger.Tracef("raw: %s", syslogLine) + if s.metricsLevel != configuration.METRICS_NONE { + linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc() + } + if 
!s.config.DisableRFCParser { + p := rfc3164.NewRFC3164Parser(rfc3164.WithCurrentYear()) + err := p.Parse(syslogLine.Message) + if err != nil { + logger.Debugf("could not parse as RFC3164 (%s)", err) + p2 := rfc5424.NewRFC5424Parser() + err = p2.Parse(syslogLine.Message) + if err != nil { + logger.Errorf("could not parse message: %s", err) + logger.Debugf("could not parse as RFC5424 (%s) : %s", err, syslogLine.Message) + return "" + } + line = s.buildLogFromSyslog(p2.Timestamp, p2.Hostname, p2.Tag, p2.PID, p2.Message) + if s.metricsLevel != configuration.METRICS_NONE { + linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc5424"}).Inc() + } + } else { + line = s.buildLogFromSyslog(p.Timestamp, p.Hostname, p.Tag, p.PID, p.Message) + if s.metricsLevel != configuration.METRICS_NONE { + linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc3164"}).Inc() + } + } + } else { + if len(syslogLine.Message) < 3 { + logger.Errorf("malformated message, missing PRI (message too short)") + return "" + } + if syslogLine.Message[0] != '<' { + logger.Errorf("malformated message, missing PRI beginning") + return "" + } + priEnd := bytes.Index(syslogLine.Message, []byte(">")) + if priEnd == -1 { + logger.Errorf("malformated message, missing PRI end") + return "" + } + if priEnd > 4 { + logger.Errorf("malformated message, PRI too long") + return "" + } + for i := 1; i < priEnd; i++ { + if syslogLine.Message[i] < '0' || syslogLine.Message[i] > '9' { + logger.Errorf("malformated message, PRI not a number") + return "" + } + } + line = string(syslogLine.Message[priEnd+1:]) + } + + return strings.TrimSuffix(line, "\n") +} + func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c chan syslogserver.SyslogMessage) error { killed := false for { @@ -196,37 +258,12 @@ func (s *SyslogSource) handleSyslogMsg(out chan types.Event, t *tomb.Tomb, c cha s.logger.Info("Syslog server has exited") return nil case syslogLine := <-c: - var 
line string - var ts time.Time - - logger := s.logger.WithField("client", syslogLine.Client) - logger.Tracef("raw: %s", syslogLine) - if s.metricsLevel != configuration.METRICS_NONE { - linesReceived.With(prometheus.Labels{"source": syslogLine.Client}).Inc() - } - p := rfc3164.NewRFC3164Parser(rfc3164.WithCurrentYear()) - err := p.Parse(syslogLine.Message) - if err != nil { - logger.Debugf("could not parse as RFC3164 (%s)", err) - p2 := rfc5424.NewRFC5424Parser() - err = p2.Parse(syslogLine.Message) - if err != nil { - logger.Errorf("could not parse message: %s", err) - logger.Debugf("could not parse as RFC5424 (%s) : %s", err, syslogLine.Message) - continue - } - line = s.buildLogFromSyslog(p2.Timestamp, p2.Hostname, p2.Tag, p2.PID, p2.Message) - if s.metricsLevel != configuration.METRICS_NONE { - linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc5424"}).Inc() - } - } else { - line = s.buildLogFromSyslog(p.Timestamp, p.Hostname, p.Tag, p.PID, p.Message) - if s.metricsLevel != configuration.METRICS_NONE { - linesParsed.With(prometheus.Labels{"source": syslogLine.Client, "type": "rfc3164"}).Inc() - } + line := s.parseLine(syslogLine) + if line == "" { + continue } - line = strings.TrimSuffix(line, "\n") + var ts time.Time l := types.Line{} l.Raw = line diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 53c7d77ae13..fdd891fa14f 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -120,6 +120,26 @@ listen_addr: 127.0.0.1`, `<13>May 18 12:37:56 mantis sshd`, }, }, + { + name: "RFC3164 - no parsing", + config: `source: syslog +listen_port: 4242 +listen_addr: 127.0.0.1 +disable_rfc_parser: true`, + expectedLines: 5, + logs: []string{ + `<13>May 18 12:37:56 mantis sshd[49340]: blabla2[foobar]`, + `<13>May 18 12:37:56 mantis sshd[49340]: blabla2`, + `<13>May 18 12:37:56 mantis sshd: blabla2`, + `<13>May 18 12:37:56 mantis 
sshd`, + `<999>May 18 12:37:56 mantis sshd`, + `<1000>May 18 12:37:56 mantis sshd`, + `>?> asd`, + `asdasd`, + `<1a asd`, + `<123123>asdasd`, + }, + }, } if runtime.GOOS != "windows" { tests = append(tests, struct { From 02d4793657a5ce14aa60c15dd9e1b7cf3edc7c88 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 28 Feb 2025 15:24:13 +0100 Subject: [PATCH 449/581] tests: remove modeline (#3486) --- .github/workflows/publish-tarball-release.yml | 1 - .golangci.yml | 4 ---- test/bats-detect/apache2-deb.bats | 1 - test/bats-detect/apache2-rpm.bats | 1 - test/bats-detect/asterisk-deb.bats | 1 - test/bats-detect/asterisk-rpm.bats | 1 - test/bats-detect/caddy-deb.bats | 1 - test/bats-detect/caddy-rpm.bats | 1 - test/bats-detect/dovecot-deb.bats | 1 - test/bats-detect/dovecot-rpm.bats | 1 - test/bats-detect/emby-deb.bats | 1 - test/bats-detect/emby-rpm.bats | 1 - test/bats-detect/endlessh-deb.bats | 1 - test/bats-detect/endlessh-rpm.bats | 1 - test/bats-detect/gitea.bats | 1 - test/bats-detect/haproxy-deb.bats | 1 - test/bats-detect/haproxy-rpm.bats | 1 - test/bats-detect/lemonldap-deb.bats | 1 - test/bats-detect/lemonldap-rpm.bats | 1 - test/bats-detect/litespeed.bats | 1 - test/bats-detect/mariadb-deb.bats | 1 - test/bats-detect/mariadb-rpm.bats | 1 - test/bats-detect/mysql-deb.bats | 1 - test/bats-detect/mysql-rpm.bats | 1 - test/bats-detect/nginx-deb.bats | 1 - test/bats-detect/nginx-rpm.bats | 1 - test/bats-detect/odoo-deb.bats | 1 - test/bats-detect/odoo-rpm.bats | 1 - test/bats-detect/ombi-deb.bats | 1 - test/bats-detect/openresty-deb.bats | 1 - test/bats-detect/openresty-rpm.bats | 1 - test/bats-detect/pgsql-deb.bats | 1 - test/bats-detect/pgsql-rpm.bats | 1 - test/bats-detect/postfix-deb.bats | 1 - test/bats-detect/postfix-rpm.bats | 1 - test/bats-detect/proftpd-deb.bats | 1 - test/bats-detect/proftpd-rpm.bats | 1 - test/bats-detect/proxmox-deb.bats | 1 - test/bats-detect/pureftpd-deb.bats | 1 - 
test/bats-detect/pureftpd-rpm.bats | 1 - test/bats-detect/smb-deb.bats | 1 - test/bats-detect/smb-rpm.bats | 1 - test/bats-detect/sshd-deb.bats | 1 - test/bats-detect/sshd-rpm.bats | 1 - test/bats-detect/suricata-deb.bats | 1 - test/bats-detect/suricata-rpm.bats | 1 - test/bats-detect/vsftpd-deb.bats | 1 - test/bats-detect/vsftpd-rpm.bats | 1 - test/bats/00_wait_for.bats | 1 - test/bats/01_crowdsec_lapi.bats | 1 - test/bats/01_cscli.bats | 1 - test/bats/01_cscli_lapi.bats | 1 - test/bats/02_nolapi.bats | 1 - test/bats/03_noagent.bats | 1 - test/bats/04_capi.bats | 1 - test/bats/04_nocapi.bats | 1 - test/bats/05_config_yaml_local.bats | 1 - test/bats/07_setup.bats | 1 - test/bats/08_metrics.bats | 1 - test/bats/08_metrics_bouncer.bats | 1 - test/bats/08_metrics_machines.bats | 1 - test/bats/09_console.bats | 1 - test/bats/09_context.bats | 1 - test/bats/09_socket.bats | 1 - test/bats/10_bouncers.bats | 1 - test/bats/11_bouncers_tls.bats | 1 - test/bats/12_notifications.bats | 1 - test/bats/13_capi_whitelists.bats | 1 - test/bats/20_hub.bats | 1 - test/bats/20_hub_collections_dep.bats | 1 - test/bats/20_hub_items.bats | 1 - test/bats/30_machines.bats | 1 - test/bats/30_machines_tls.bats | 1 - test/bats/40_cold-logs.bats | 1 - test/bats/40_live-ban.bats | 1 - test/bats/50_simulation.bats | 1 - test/bats/70_plugin_http.bats | 1 - test/bats/71_plugin_dummy.bats | 1 - test/bats/72_plugin_badconfig.bats | 1 - test/bats/73_plugin_formatting.bats | 1 - test/bats/80_alerts.bats | 1 - test/bats/81_alert_context.bats | 1 - test/bats/90_decisions.bats | 1 - test/bats/97_ipv4_single.bats | 1 - test/bats/97_ipv6_single.bats | 1 - test/bats/98_ipv4_range.bats | 1 - test/bats/98_ipv6_range.bats | 1 - test/bats/99_lapi-stream-mode-scenario.bats | 1 - test/bats/99_lapi-stream-mode-scopes.bats | 1 - test/bats/99_lapi-stream-mode.bats | 1 - 90 files changed, 93 deletions(-) diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 
18541f86e41..7a2ac84076e 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -1,4 +1,3 @@ -# .github/workflows/build-docker-image.yml name: Release on: diff --git a/.golangci.yml b/.golangci.yml index ede7de421f3..cda97f7fdaa 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -403,10 +403,6 @@ issues: path: pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" - - linters: - - revive - path: cmd/crowdsec-cli/copyfile.go - - linters: - revive path: pkg/hubtest/hubtest_item.go diff --git a/test/bats-detect/apache2-deb.bats b/test/bats-detect/apache2-deb.bats index 2c6e1deaf44..5fa73de9435 100644 --- a/test/bats-detect/apache2-deb.bats +++ b/test/bats-detect/apache2-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/apache2-rpm.bats b/test/bats-detect/apache2-rpm.bats index 9b0fda87d9f..efd6926fa2c 100644 --- a/test/bats-detect/apache2-rpm.bats +++ b/test/bats-detect/apache2-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/asterisk-deb.bats b/test/bats-detect/asterisk-deb.bats index 7087cd735d5..d261fc7383e 100644 --- a/test/bats-detect/asterisk-deb.bats +++ b/test/bats-detect/asterisk-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/asterisk-rpm.bats b/test/bats-detect/asterisk-rpm.bats index 081195f2ad5..e282b9735e9 100644 --- a/test/bats-detect/asterisk-rpm.bats +++ b/test/bats-detect/asterisk-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/caddy-deb.bats b/test/bats-detect/caddy-deb.bats index b2b35496cc5..4cbb0efc7cb 100644 --- 
a/test/bats-detect/caddy-deb.bats +++ b/test/bats-detect/caddy-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/caddy-rpm.bats b/test/bats-detect/caddy-rpm.bats index 5ac225f2466..0340d353154 100644 --- a/test/bats-detect/caddy-rpm.bats +++ b/test/bats-detect/caddy-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/dovecot-deb.bats b/test/bats-detect/dovecot-deb.bats index bc14bd4d1c2..94006feda7b 100644 --- a/test/bats-detect/dovecot-deb.bats +++ b/test/bats-detect/dovecot-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/dovecot-rpm.bats b/test/bats-detect/dovecot-rpm.bats index 5a17f11a57d..4579589b8a2 100644 --- a/test/bats-detect/dovecot-rpm.bats +++ b/test/bats-detect/dovecot-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/emby-deb.bats b/test/bats-detect/emby-deb.bats index 9554af304c3..d840bff54dc 100644 --- a/test/bats-detect/emby-deb.bats +++ b/test/bats-detect/emby-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/emby-rpm.bats b/test/bats-detect/emby-rpm.bats index 72c9a01cbcb..a44cb85744b 100644 --- a/test/bats-detect/emby-rpm.bats +++ b/test/bats-detect/emby-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/endlessh-deb.bats b/test/bats-detect/endlessh-deb.bats index 55a8da8ff3e..007f08be424 100644 --- a/test/bats-detect/endlessh-deb.bats +++ b/test/bats-detect/endlessh-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/endlessh-rpm.bats b/test/bats-detect/endlessh-rpm.bats index 812d627bbed..52c0ade9074 
100644 --- a/test/bats-detect/endlessh-rpm.bats +++ b/test/bats-detect/endlessh-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/gitea.bats b/test/bats-detect/gitea.bats index b2e094ed684..e170e79073a 100644 --- a/test/bats-detect/gitea.bats +++ b/test/bats-detect/gitea.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/haproxy-deb.bats b/test/bats-detect/haproxy-deb.bats index 173ff5fcb12..3b89be81461 100644 --- a/test/bats-detect/haproxy-deb.bats +++ b/test/bats-detect/haproxy-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/haproxy-rpm.bats b/test/bats-detect/haproxy-rpm.bats index d29aeb9df3b..8e2b3125b1d 100644 --- a/test/bats-detect/haproxy-rpm.bats +++ b/test/bats-detect/haproxy-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/lemonldap-deb.bats b/test/bats-detect/lemonldap-deb.bats index c77c0ae2054..e221ff4724d 100644 --- a/test/bats-detect/lemonldap-deb.bats +++ b/test/bats-detect/lemonldap-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/lemonldap-rpm.bats b/test/bats-detect/lemonldap-rpm.bats index 319c7c55ebd..7bc517dfe9d 100644 --- a/test/bats-detect/lemonldap-rpm.bats +++ b/test/bats-detect/lemonldap-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/litespeed.bats b/test/bats-detect/litespeed.bats index ee6ba205f2d..be6af856edc 100644 --- a/test/bats-detect/litespeed.bats +++ b/test/bats-detect/litespeed.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/mariadb-deb.bats b/test/bats-detect/mariadb-deb.bats index 
1d3546f4edc..731bb442293 100644 --- a/test/bats-detect/mariadb-deb.bats +++ b/test/bats-detect/mariadb-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/mariadb-rpm.bats b/test/bats-detect/mariadb-rpm.bats index 54365d179db..5f4540e432c 100644 --- a/test/bats-detect/mariadb-rpm.bats +++ b/test/bats-detect/mariadb-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/mysql-deb.bats b/test/bats-detect/mysql-deb.bats index a12ea025d2f..d562d68bf0a 100644 --- a/test/bats-detect/mysql-deb.bats +++ b/test/bats-detect/mysql-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/mysql-rpm.bats b/test/bats-detect/mysql-rpm.bats index 0ab911010cb..97a833c971c 100644 --- a/test/bats-detect/mysql-rpm.bats +++ b/test/bats-detect/mysql-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/nginx-deb.bats b/test/bats-detect/nginx-deb.bats index b269a6bc35a..9fa24e63696 100644 --- a/test/bats-detect/nginx-deb.bats +++ b/test/bats-detect/nginx-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/nginx-rpm.bats b/test/bats-detect/nginx-rpm.bats index f2c93bb610f..70e09a36509 100644 --- a/test/bats-detect/nginx-rpm.bats +++ b/test/bats-detect/nginx-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/odoo-deb.bats b/test/bats-detect/odoo-deb.bats index e57e53d8308..ff9ae141df0 100644 --- a/test/bats-detect/odoo-deb.bats +++ b/test/bats-detect/odoo-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/odoo-rpm.bats b/test/bats-detect/odoo-rpm.bats index 
e2577daeb35..371eab4afa0 100644 --- a/test/bats-detect/odoo-rpm.bats +++ b/test/bats-detect/odoo-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/ombi-deb.bats b/test/bats-detect/ombi-deb.bats index d02c0d89ea4..d06fd6ea1db 100644 --- a/test/bats-detect/ombi-deb.bats +++ b/test/bats-detect/ombi-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/openresty-deb.bats b/test/bats-detect/openresty-deb.bats index 0c8bc3c9a0b..ca2e2fbeda8 100644 --- a/test/bats-detect/openresty-deb.bats +++ b/test/bats-detect/openresty-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/openresty-rpm.bats b/test/bats-detect/openresty-rpm.bats index d4c3661bc76..23d450fd5de 100644 --- a/test/bats-detect/openresty-rpm.bats +++ b/test/bats-detect/openresty-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/pgsql-deb.bats b/test/bats-detect/pgsql-deb.bats index a8781968ac2..6b4143194ab 100644 --- a/test/bats-detect/pgsql-deb.bats +++ b/test/bats-detect/pgsql-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/pgsql-rpm.bats b/test/bats-detect/pgsql-rpm.bats index b2fba8af47d..009756a679e 100644 --- a/test/bats-detect/pgsql-rpm.bats +++ b/test/bats-detect/pgsql-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/postfix-deb.bats b/test/bats-detect/postfix-deb.bats index c67285d75a1..7f404022bb2 100644 --- a/test/bats-detect/postfix-deb.bats +++ b/test/bats-detect/postfix-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/postfix-rpm.bats 
b/test/bats-detect/postfix-rpm.bats index dc6b42a63a1..c7a60d6395d 100644 --- a/test/bats-detect/postfix-rpm.bats +++ b/test/bats-detect/postfix-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/proftpd-deb.bats b/test/bats-detect/proftpd-deb.bats index fce556cafee..5a06bc6162b 100644 --- a/test/bats-detect/proftpd-deb.bats +++ b/test/bats-detect/proftpd-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/proftpd-rpm.bats b/test/bats-detect/proftpd-rpm.bats index 2c9df2b5545..1798f41ab3c 100644 --- a/test/bats-detect/proftpd-rpm.bats +++ b/test/bats-detect/proftpd-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/proxmox-deb.bats b/test/bats-detect/proxmox-deb.bats index ae02375c381..90b56f5a4bb 100644 --- a/test/bats-detect/proxmox-deb.bats +++ b/test/bats-detect/proxmox-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/pureftpd-deb.bats b/test/bats-detect/pureftpd-deb.bats index 3b7aa68c9e4..7d40a361b35 100644 --- a/test/bats-detect/pureftpd-deb.bats +++ b/test/bats-detect/pureftpd-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/pureftpd-rpm.bats b/test/bats-detect/pureftpd-rpm.bats index d4d0b7ad9f5..cb7d9a55cdf 100644 --- a/test/bats-detect/pureftpd-rpm.bats +++ b/test/bats-detect/pureftpd-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/smb-deb.bats b/test/bats-detect/smb-deb.bats index bb0ec5156e3..3197c44ea06 100644 --- a/test/bats-detect/smb-deb.bats +++ b/test/bats-detect/smb-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git 
a/test/bats-detect/smb-rpm.bats b/test/bats-detect/smb-rpm.bats index 1866f540cfe..cdafdabfdd9 100644 --- a/test/bats-detect/smb-rpm.bats +++ b/test/bats-detect/smb-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/sshd-deb.bats b/test/bats-detect/sshd-deb.bats index 32a363d46c7..d4552f0cc58 100644 --- a/test/bats-detect/sshd-deb.bats +++ b/test/bats-detect/sshd-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/sshd-rpm.bats b/test/bats-detect/sshd-rpm.bats index f6e0d5be1d1..c2e9c4c7181 100644 --- a/test/bats-detect/sshd-rpm.bats +++ b/test/bats-detect/sshd-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/suricata-deb.bats b/test/bats-detect/suricata-deb.bats index 13207b35696..5a952f2af2b 100644 --- a/test/bats-detect/suricata-deb.bats +++ b/test/bats-detect/suricata-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/suricata-rpm.bats b/test/bats-detect/suricata-rpm.bats index c3c48036f7e..d97fd87050f 100644 --- a/test/bats-detect/suricata-rpm.bats +++ b/test/bats-detect/suricata-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/vsftpd-deb.bats b/test/bats-detect/vsftpd-deb.bats index f0cd4f73371..131dd52f9b5 100644 --- a/test/bats-detect/vsftpd-deb.bats +++ b/test/bats-detect/vsftpd-deb.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats-detect/vsftpd-rpm.bats b/test/bats-detect/vsftpd-rpm.bats index 54b1fd99a8c..eb822f325bf 100644 --- a/test/bats-detect/vsftpd-rpm.bats +++ b/test/bats-detect/vsftpd-rpm.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git 
a/test/bats/00_wait_for.bats b/test/bats/00_wait_for.bats index 94c65033bb4..b8530602cce 100644 --- a/test/bats/00_wait_for.bats +++ b/test/bats/00_wait_for.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index 21e1d7a093e..c1222d7d2c9 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ b/test/bats/01_crowdsec_lapi.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 9af3c841759..77c128568c9 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/01_cscli_lapi.bats b/test/bats/01_cscli_lapi.bats index 005eb15e141..1503825497e 100644 --- a/test/bats/01_cscli_lapi.bats +++ b/test/bats/01_cscli_lapi.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/02_nolapi.bats b/test/bats/02_nolapi.bats index 70495a0ed91..08c1fa8f472 100644 --- a/test/bats/02_nolapi.bats +++ b/test/bats/02_nolapi.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index 972b84977ad..fc61165b687 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index 7ba6bfa4428..8d0217d39bd 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index eaeb0939112..29723af5e11 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -1,5 +1,4 @@ 
#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/05_config_yaml_local.bats b/test/bats/05_config_yaml_local.bats index ec7a4201964..da3d0e5f6d1 100644 --- a/test/bats/05_config_yaml_local.bats +++ b/test/bats/05_config_yaml_local.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index 72a8b64a57a..936ea0ac893 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index f3be9c60a95..bc6ffc85b11 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index 1c9e9395782..b9586ac57e3 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/08_metrics_machines.bats b/test/bats/08_metrics_machines.bats index 3b73839e753..072e11f9dd6 100644 --- a/test/bats/08_metrics_machines.bats +++ b/test/bats/08_metrics_machines.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/09_console.bats b/test/bats/09_console.bats index 2e2f9bf058d..d203c933e0e 100644 --- a/test/bats/09_console.bats +++ b/test/bats/09_console.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/09_context.bats b/test/bats/09_context.bats index 71aabc68d29..fcaf1709a92 100644 --- a/test/bats/09_context.bats +++ b/test/bats/09_context.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git 
a/test/bats/09_socket.bats b/test/bats/09_socket.bats index f861d8a40dc..4c8f653de1c 100644 --- a/test/bats/09_socket.bats +++ b/test/bats/09_socket.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index c9ee1b0cd0c..382205796c8 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/11_bouncers_tls.bats b/test/bats/11_bouncers_tls.bats index 554308ae962..19a2a23ede9 100644 --- a/test/bats/11_bouncers_tls.bats +++ b/test/bats/11_bouncers_tls.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/12_notifications.bats b/test/bats/12_notifications.bats index 86032bf8212..441f749ec88 100644 --- a/test/bats/12_notifications.bats +++ b/test/bats/12_notifications.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/13_capi_whitelists.bats b/test/bats/13_capi_whitelists.bats index ed7ef2ac560..c48b73be2e2 100644 --- a/test/bats/13_capi_whitelists.bats +++ b/test/bats/13_capi_whitelists.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index b7f34fc709f..ae7da4dcd9f 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/20_hub_collections_dep.bats b/test/bats/20_hub_collections_dep.bats index 94a984709a8..01f1ea5bedd 100644 --- a/test/bats/20_hub_collections_dep.bats +++ b/test/bats/20_hub_collections_dep.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 
4ddaf387488..62162577ae7 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 3d73bd096ae..989cd5fe646 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/30_machines_tls.bats b/test/bats/30_machines_tls.bats index ef02d1b57c3..eada450fb67 100644 --- a/test/bats/30_machines_tls.bats +++ b/test/bats/30_machines_tls.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/40_cold-logs.bats b/test/bats/40_cold-logs.bats index 070a9eac5f1..4b03e35e58f 100644 --- a/test/bats/40_cold-logs.bats +++ b/test/bats/40_cold-logs.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/40_live-ban.bats b/test/bats/40_live-ban.bats index fb5fd1fd435..31728db9288 100644 --- a/test/bats/40_live-ban.bats +++ b/test/bats/40_live-ban.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/50_simulation.bats b/test/bats/50_simulation.bats index bffa50cbccc..682b0dbce66 100644 --- a/test/bats/50_simulation.bats +++ b/test/bats/50_simulation.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/70_plugin_http.bats b/test/bats/70_plugin_http.bats index 462fc7c9406..917883f3461 100644 --- a/test/bats/70_plugin_http.bats +++ b/test/bats/70_plugin_http.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/71_plugin_dummy.bats b/test/bats/71_plugin_dummy.bats index c242d7ec4bc..632aa689ee8 100644 --- a/test/bats/71_plugin_dummy.bats +++ b/test/bats/71_plugin_dummy.bats @@ 
-1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/72_plugin_badconfig.bats b/test/bats/72_plugin_badconfig.bats index 7be16c6cf8e..216b29f4db0 100644 --- a/test/bats/72_plugin_badconfig.bats +++ b/test/bats/72_plugin_badconfig.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/73_plugin_formatting.bats b/test/bats/73_plugin_formatting.bats index 9ed64837403..5153946b061 100644 --- a/test/bats/73_plugin_formatting.bats +++ b/test/bats/73_plugin_formatting.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/80_alerts.bats b/test/bats/80_alerts.bats index f01e918925c..f931048c5f2 100644 --- a/test/bats/80_alerts.bats +++ b/test/bats/80_alerts.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/81_alert_context.bats b/test/bats/81_alert_context.bats index 69fb4158ffd..3d0b484b00c 100644 --- a/test/bats/81_alert_context.bats +++ b/test/bats/81_alert_context.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index 3c3ab9987ca..64edea8f997 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/97_ipv4_single.bats b/test/bats/97_ipv4_single.bats index b709930e2e5..38c78781c19 100644 --- a/test/bats/97_ipv4_single.bats +++ b/test/bats/97_ipv4_single.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/97_ipv6_single.bats b/test/bats/97_ipv6_single.bats index c7aea030f9c..7f38f3df334 100644 --- a/test/bats/97_ipv6_single.bats +++ b/test/bats/97_ipv6_single.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: 
ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/98_ipv4_range.bats b/test/bats/98_ipv4_range.bats index c85e40267f3..ced1a597e40 100644 --- a/test/bats/98_ipv4_range.bats +++ b/test/bats/98_ipv4_range.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/98_ipv6_range.bats b/test/bats/98_ipv6_range.bats index 531122a5533..216e25ec891 100644 --- a/test/bats/98_ipv6_range.bats +++ b/test/bats/98_ipv6_range.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/99_lapi-stream-mode-scenario.bats b/test/bats/99_lapi-stream-mode-scenario.bats index 32c346061d1..4eb860a9dc9 100644 --- a/test/bats/99_lapi-stream-mode-scenario.bats +++ b/test/bats/99_lapi-stream-mode-scenario.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/99_lapi-stream-mode-scopes.bats b/test/bats/99_lapi-stream-mode-scopes.bats index 67badebea0e..fe52d9674ac 100644 --- a/test/bats/99_lapi-stream-mode-scopes.bats +++ b/test/bats/99_lapi-stream-mode-scopes.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u diff --git a/test/bats/99_lapi-stream-mode.bats b/test/bats/99_lapi-stream-mode.bats index b3ee8a434ff..dc1fb70609f 100644 --- a/test/bats/99_lapi-stream-mode.bats +++ b/test/bats/99_lapi-stream-mode.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u From 61c7de1331eec41cbc19ca2c641f0ba57978990a Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Mon, 3 Mar 2025 16:09:09 +0100 Subject: [PATCH 450/581] enable/disable options for console enroll - make alert context a default (#3487) * enable/disable options for console enroll - make alert context a default * extract method and refact --------- Co-authored-by: marco --- cmd/crowdsec-cli/cliconsole/console.go | 121 ++++++++++++++++--------- 1 
file changed, 77 insertions(+), 44 deletions(-) diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index fcc128bd5b5..ddd0527b36b 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -10,6 +10,7 @@ import ( "net/url" "os" "strconv" + "slices" "strings" "github.com/fatih/color" @@ -19,6 +20,7 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/go-cs-lib/slicetools" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" @@ -75,45 +77,6 @@ func (cli *cliConsole) enroll(ctx context.Context, key string, name string, over return fmt.Errorf("could not parse CAPI URL: %w", err) } - enableOpts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS} - - if len(opts) != 0 { - for _, opt := range opts { - valid := false - - if opt == "all" { - enableOpts = csconfig.CONSOLE_CONFIGS - break - } - - for _, availableOpt := range csconfig.CONSOLE_CONFIGS { - if opt != availableOpt { - continue - } - - valid = true - enable := true - - for _, enabledOpt := range enableOpts { - if opt == enabledOpt { - enable = false - continue - } - } - - if enable { - enableOpts = append(enableOpts, opt) - } - - break - } - - if !valid { - return fmt.Errorf("option %s doesn't exist", opt) - } - } - } - hub, err := require.Hub(cfg, nil) if err != nil { return err @@ -137,11 +100,11 @@ func (cli *cliConsole) enroll(ctx context.Context, key string, name string, over return nil } - if err := cli.setConsoleOpts(enableOpts, true); err != nil { + if err := cli.setConsoleOpts(opts, true); err != nil { return err } - for _, opt := range enableOpts { + for _, opt := range opts { log.Infof("Enabled %s : %s", opt, csconfig.CONSOLE_CONFIGS_HELP[opt]) } @@ -151,11 +114,67 @@ func (cli *cliConsole) enroll(ctx context.Context, key string, name string, over return nil } +func 
optionFilterEnable(opts []string, enableOpts []string) ([]string, error) { + if len(enableOpts) == 0 { + return opts, nil + } + + for _, opt := range enableOpts { + if opt == "all" { + opts = append(opts, csconfig.CONSOLE_CONFIGS...) + // keep validating the rest of the option names + continue + } + + if !slices.Contains(csconfig.CONSOLE_CONFIGS, opt) { + return nil, fmt.Errorf("option %s doesn't exist", opt) + } + + opts = append(opts, opt) + } + + opts = slicetools.Deduplicate(opts) + + return opts, nil +} + +func optionFilterDisable(opts []string, disableOpts []string) ([]string, error) { + if len(disableOpts) == 0 { + return opts, nil + } + + for _, opt := range disableOpts { + if opt == "all" { + opts = []string{} + // keep validating the rest of the option names + continue + } + + if !slices.Contains(csconfig.CONSOLE_CONFIGS, opt) { + return nil, fmt.Errorf("option %s doesn't exist", opt) + } + + // discard all elements == opt + + j := 0 + for _, o := range opts { + if o != opt { + opts[j] = o + j++ + } + } + opts = opts[:j] + } + + return opts, nil +} + func (cli *cliConsole) newEnrollCmd() *cobra.Command { name := "" overwrite := false tags := []string{} - opts := []string{} + enableOpts := []string{} + disableOpts := []string{} cmd := &cobra.Command{ Use: "enroll [enroll-key]", @@ -168,12 +187,25 @@ After running this command your will need to validate the enrollment in the weba Example: fmt.Sprintf(`cscli console enroll YOUR-ENROLL-KEY cscli console enroll --name [instance_name] YOUR-ENROLL-KEY cscli console enroll --name [instance_name] --tags [tag_1] --tags [tag_2] YOUR-ENROLL-KEY - cscli console enroll --enable context,manual YOUR-ENROLL-KEY + cscli console enroll --enable console_management YOUR-ENROLL-KEY + cscli console enroll --disable context YOUR-ENROLL-KEY valid options are : %s,all (see 'cscli console status' for details)`, strings.Join(csconfig.CONSOLE_CONFIGS, ",")), Args: cobra.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd 
*cobra.Command, args []string) error { + opts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS, csconfig.SEND_CONTEXT} + + opts, err := optionFilterEnable(opts, enableOpts) + if err != nil { + return err + } + + opts, err = optionFilterDisable(opts, disableOpts) + if err != nil { + return err + } + return cli.enroll(cmd.Context(), args[0], name, overwrite, tags, opts) }, } @@ -182,7 +214,8 @@ After running this command your will need to validate the enrollment in the weba flags.StringVarP(&name, "name", "n", "", "Name to display in the console") flags.BoolVarP(&overwrite, "overwrite", "", false, "Force enroll the instance") flags.StringSliceVarP(&tags, "tags", "t", tags, "Tags to display in the console") - flags.StringSliceVarP(&opts, "enable", "e", opts, "Enable console options") + flags.StringSliceVarP(&enableOpts, "enable", "e", enableOpts, "Enable console options") + flags.StringSliceVarP(&disableOpts, "disable", "d", disableOpts, "Disable console options") return cmd } From f49e1e28d20fa91692c20510a2e1045df241f9ce Mon Sep 17 00:00:00 2001 From: "Thibault \"bui\" Koechlin" Date: Tue, 4 Mar 2025 10:41:11 +0100 Subject: [PATCH 451/581] move ParseQuery to expr helpers, add ExtractQueryParam (#3491) * move ParseQuery to expr helpers, add ExtractQueryParam --- pkg/appsec/query_utils.go | 78 ---------- pkg/appsec/query_utils_test.go | 207 ------------------------- pkg/appsec/request.go | 3 +- pkg/exprhelpers/expr_lib.go | 15 ++ pkg/exprhelpers/exprlib_test.go | 63 ++++++++ pkg/exprhelpers/waf.go | 111 ++++++++++++++ pkg/exprhelpers/waf_test.go | 264 ++++++++++++++++++++++++++++++++ 7 files changed, 455 insertions(+), 286 deletions(-) delete mode 100644 pkg/appsec/query_utils.go delete mode 100644 pkg/appsec/query_utils_test.go diff --git a/pkg/appsec/query_utils.go b/pkg/appsec/query_utils.go deleted file mode 100644 index 0c886e0ea51..00000000000 --- a/pkg/appsec/query_utils.go +++ /dev/null @@ -1,78 +0,0 @@ -package appsec - -// This file 
is mostly stolen from net/url package, but with some modifications to allow less strict parsing of query strings - -import ( - "net/url" - "strings" -) - -// parseQuery and parseQuery are copied net/url package, but allow semicolon in values -func ParseQuery(query string) url.Values { - m := make(url.Values) - parseQuery(m, query) - return m -} - -func parseQuery(m url.Values, query string) { - for query != "" { - var key string - key, query, _ = strings.Cut(query, "&") - - if key == "" { - continue - } - key, value, _ := strings.Cut(key, "=") - //for now we'll just ignore the errors, but ideally we want to fire some "internal" rules when we see invalid query strings - key = unescape(key) - value = unescape(value) - m[key] = append(m[key], value) - } -} - -func hexDigitToByte(digit byte) (byte, bool) { - switch { - case digit >= '0' && digit <= '9': - return digit - '0', true - case digit >= 'a' && digit <= 'f': - return digit - 'a' + 10, true - case digit >= 'A' && digit <= 'F': - return digit - 'A' + 10, true - default: - return 0, false - } -} - -func unescape(input string) string { - ilen := len(input) - res := strings.Builder{} - res.Grow(ilen) - for i := 0; i < ilen; i++ { - ci := input[i] - if ci == '+' { - res.WriteByte(' ') - continue - } - if ci == '%' { - if i+2 >= ilen { - res.WriteByte(ci) - continue - } - hi, ok := hexDigitToByte(input[i+1]) - if !ok { - res.WriteByte(ci) - continue - } - lo, ok := hexDigitToByte(input[i+2]) - if !ok { - res.WriteByte(ci) - continue - } - res.WriteByte(hi<<4 | lo) - i += 2 - continue - } - res.WriteByte(ci) - } - return res.String() -} diff --git a/pkg/appsec/query_utils_test.go b/pkg/appsec/query_utils_test.go deleted file mode 100644 index 2ad7927968d..00000000000 --- a/pkg/appsec/query_utils_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package appsec - -import ( - "net/url" - "reflect" - "testing" -) - -func TestParseQuery(t *testing.T) { - tests := []struct { - name string - query string - expected url.Values - }{ - { 
- name: "Simple query", - query: "foo=bar", - expected: url.Values{ - "foo": []string{"bar"}, - }, - }, - { - name: "Multiple values", - query: "foo=bar&foo=baz", - expected: url.Values{ - "foo": []string{"bar", "baz"}, - }, - }, - { - name: "Empty value", - query: "foo=", - expected: url.Values{ - "foo": []string{""}, - }, - }, - { - name: "Empty key", - query: "=bar", - expected: url.Values{ - "": []string{"bar"}, - }, - }, - { - name: "Empty query", - query: "", - expected: url.Values{}, - }, - { - name: "Multiple keys", - query: "foo=bar&baz=qux", - expected: url.Values{ - "foo": []string{"bar"}, - "baz": []string{"qux"}, - }, - }, - { - name: "Multiple keys with empty value", - query: "foo=bar&baz=qux&quux=", - expected: url.Values{ - "foo": []string{"bar"}, - "baz": []string{"qux"}, - "quux": []string{""}, - }, - }, - { - name: "Multiple keys with empty value and empty key", - query: "foo=bar&baz=qux&quux=&=quuz", - expected: url.Values{ - "foo": []string{"bar"}, - "baz": []string{"qux"}, - "quux": []string{""}, - "": []string{"quuz"}, - }, - }, - { - name: "Multiple keys with empty value and empty key and multiple values", - query: "foo=bar&baz=qux&quux=&=quuz&foo=baz", - expected: url.Values{ - "foo": []string{"bar", "baz"}, - "baz": []string{"qux"}, - "quux": []string{""}, - "": []string{"quuz"}, - }, - }, - { - name: "Multiple keys with empty value and empty key and multiple values and escaped characters", - query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz", - expected: url.Values{ - "foo": []string{"bar", "baz", "bar baz"}, - "baz": []string{"qux"}, - "quux": []string{""}, - "": []string{"quuz"}, - }, - }, - { - name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon", - query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz", - expected: url.Values{ - "foo": []string{"bar", "baz", "bar baz", "bar;baz"}, - "baz": []string{"qux"}, - "quux": []string{""}, - "": []string{"quuz"}, 
- }, - }, - { - name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand", - query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz", - expected: url.Values{ - "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz"}, - "baz": []string{"qux"}, - "quux": []string{""}, - "": []string{"quuz"}, - }, - }, - { - name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand and equals", - query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz&foo=bar%3Dbaz", - expected: url.Values{ - "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz", "bar=baz"}, - "baz": []string{"qux"}, - "quux": []string{""}, - "": []string{"quuz"}, - }, - }, - { - name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand and equals and question mark", - query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz&foo=bar%3Dbaz&foo=bar%3Fbaz", - expected: url.Values{ - "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz", "bar=baz", "bar?baz"}, - "baz": []string{"qux"}, - "quux": []string{""}, - "": []string{"quuz"}, - }, - }, - { - name: "keys with escaped characters", - query: "foo=ba;r&baz=qu;;x&quux=x\\&ww&xx=qu?uz&", - expected: url.Values{ - "foo": []string{"ba;r"}, - "baz": []string{"qu;;x"}, - "quux": []string{"x\\"}, - "ww": []string{""}, - "xx": []string{"qu?uz"}, - }, - }, - { - name: "hexadecimal characters", - query: "foo=bar%20baz", - expected: url.Values{ - "foo": []string{"bar baz"}, - }, - }, - { - name: "hexadecimal characters upper and lower case", - query: "foo=Ba%42%42&bar=w%2f%2F", - expected: url.Values{ - "foo": []string{"BaBB"}, - "bar": []string{"w//"}, - }, - }, - { - name: "hexadecimal characters with invalid characters", - query: "foo=bar%20baz%2", - expected: url.Values{ - 
"foo": []string{"bar baz%2"}, - }, - }, - { - name: "hexadecimal characters with invalid hex characters", - query: "foo=bar%xx", - expected: url.Values{ - "foo": []string{"bar%xx"}, - }, - }, - { - name: "hexadecimal characters with invalid 2nd hex character", - query: "foo=bar%2x", - expected: url.Values{ - "foo": []string{"bar%2x"}, - }, - }, - { - name: "url +", - query: "foo=bar+x", - expected: url.Values{ - "foo": []string{"bar x"}, - }, - }, - { - name: "url &&", - query: "foo=bar&&lol=bur", - expected: url.Values{ - "foo": []string{"bar"}, - "lol": []string{"bur"}, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - res := ParseQuery(test.query) - if !reflect.DeepEqual(res, test.expected) { - t.Fatalf("unexpected result: %v", res) - } - }) - } -} diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index 66ca13d55bb..e0e17bf9907 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -11,6 +11,7 @@ import ( "os" "regexp" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/google/uuid" log "github.com/sirupsen/logrus" ) @@ -396,7 +397,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *log.Entry) (ParsedRequ URL: parsedURL, Proto: r.Proto, Body: body, - Args: ParseQuery(parsedURL.RawQuery), + Args: exprhelpers.ParseQuery(parsedURL.RawQuery), TransferEncoding: r.TransferEncoding, ResponseChannel: make(chan AppsecTempResponse), RemoteAddrNormalized: remoteAddrNormalized, diff --git a/pkg/exprhelpers/expr_lib.go b/pkg/exprhelpers/expr_lib.go index 8c49bdc5f80..e0d7f6d97df 100644 --- a/pkg/exprhelpers/expr_lib.go +++ b/pkg/exprhelpers/expr_lib.go @@ -3,6 +3,7 @@ package exprhelpers import ( "net" "net/http" + "net/url" "time" "github.com/oschwald/geoip2-golang" @@ -151,6 +152,20 @@ var exprFuncs = []exprCustomFunc{ new(func(string) map[string][]string), }, }, + { + name: "ParseQuery", + function: ExprWrapParseQuery, + signature: []interface{}{ + new(func(string) url.Values), + }, + }, + 
{ + name: "ExtractQueryParam", + function: ExprWrapExtractQueryParam, + signature: []interface{}{ + new(func(string, string) []string), + }, + }, { name: "PathUnescape", function: PathUnescape, diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index e449115c120..5829b01efd8 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -2,6 +2,7 @@ package exprhelpers import ( "errors" + "net/url" "testing" "time" @@ -160,6 +161,68 @@ func TestMatch(t *testing.T) { } } +// just to verify that the function is available, real tests are in TestExtractQueryParam +func TestExtractQueryParamExpr(t *testing.T) { + err := Init(nil) + require.NoError(t, err) + tests := []struct { + name string + env map[string]interface{} + code string + result []string + err string + }{ + { + name: "ExtractQueryParam() test: basic test", + env: map[string]interface{}{ + "query": "/foo?a=1&b=2", + }, + code: "ExtractQueryParam(query, 'a')", + result: []string{"1"}, + }, + } + for _, test := range tests { + program, err := expr.Compile(test.code, GetExprOptions(test.env)...) + require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } + +} + +// just to verify that the function is available, real tests are in TestParseQuery +func TestParseQueryInExpr(t *testing.T) { + err := Init(nil) + require.NoError(t, err) + tests := []struct { + name string + env map[string]interface{} + code string + result url.Values + err string + }{ + { + name: "ParseQuery() test: basic test", + env: map[string]interface{}{ + "query": "a=1&b=2", + "ParseQuery": ParseQuery, + }, + code: "ParseQuery(query)", + result: url.Values{"a": {"1"}, "b": {"2"}}, + }, + } + for _, test := range tests { + program, err := expr.Compile(test.code, GetExprOptions(test.env)...) 
+ require.NoError(t, err) + output, err := expr.Run(program, test.env) + require.NoError(t, err) + require.Equal(t, test.result, output) + log.Printf("test '%s' : OK", test.name) + } +} + func TestDistanceHelper(t *testing.T) { err := Init(nil) require.NoError(t, err) diff --git a/pkg/exprhelpers/waf.go b/pkg/exprhelpers/waf.go index 0648f7ffcf3..8fca317688c 100644 --- a/pkg/exprhelpers/waf.go +++ b/pkg/exprhelpers/waf.go @@ -2,6 +2,8 @@ package exprhelpers import ( "net/http" + "net/url" + "strings" "github.com/crowdsecurity/crowdsec/pkg/appsec/ja4h" ) @@ -11,3 +13,112 @@ func JA4H(params ...any) (any, error) { req := params[0].(*http.Request) return ja4h.JA4H(req), nil } + +// just a expr wrapper for ParseQuery +func ExprWrapParseQuery(params ...any) (any, error) { + query := params[0].(string) + return ParseQuery(query), nil +} + +// parseQuery and parseQuery are copied net/url package, but allow semicolon in values +func ParseQuery(query string) url.Values { + m := make(url.Values) + ParseQueryIntoValues(m, query) + return m +} + +func ParseQueryIntoValues(m url.Values, query string) { + for query != "" { + var key string + key, query, _ = strings.Cut(query, "&") + + if key == "" { + continue + } + key, value, _ := strings.Cut(key, "=") + //for now we'll just ignore the errors, but ideally we want to fire some "internal" rules when we see invalid query strings + key = unescape(key) + value = unescape(value) + m[key] = append(m[key], value) + } +} + +func hexDigitToByte(digit byte) (byte, bool) { + switch { + case digit >= '0' && digit <= '9': + return digit - '0', true + case digit >= 'a' && digit <= 'f': + return digit - 'a' + 10, true + case digit >= 'A' && digit <= 'F': + return digit - 'A' + 10, true + default: + return 0, false + } +} + +func unescape(input string) string { + ilen := len(input) + res := strings.Builder{} + res.Grow(ilen) + for i := 0; i < ilen; i++ { + ci := input[i] + if ci == '+' { + res.WriteByte(' ') + continue + } + if ci == '%' { + 
if i+2 >= ilen { + res.WriteByte(ci) + continue + } + hi, ok := hexDigitToByte(input[i+1]) + if !ok { + res.WriteByte(ci) + continue + } + lo, ok := hexDigitToByte(input[i+2]) + if !ok { + res.WriteByte(ci) + continue + } + res.WriteByte(hi<<4 | lo) + i += 2 + continue + } + res.WriteByte(ci) + } + return res.String() +} + +// just a expr wrapper for ExtractQueryParam +func ExprWrapExtractQueryParam(params ...any) (any, error) { + uri := params[0].(string) + param := params[1].(string) + return ExtractQueryParam(uri, param), nil +} + +// ExtractQueryParam extracts values for a given query parameter from a raw URI string. +func ExtractQueryParam(uri, param string) []string { + // Find the first occurrence of "?" + idx := strings.Index(uri, "?") + if idx == -1 { + // No query string present + return []string{} + } + + // Extract the query string part + queryString := uri[idx+1:] + + // Parse the query string using a function that supports both `&` and `;` + values := ParseQuery(queryString) + + if values == nil { + // No query string present + return []string{} + } + // Retrieve the values for the specified parameter + if _, ok := values[param]; !ok { + return []string{} + } + return values[param] +} diff --git a/pkg/exprhelpers/waf_test.go b/pkg/exprhelpers/waf_test.go index 594488fb785..bfab22bb2c7 100644 --- a/pkg/exprhelpers/waf_test.go +++ b/pkg/exprhelpers/waf_test.go @@ -2,11 +2,275 @@ package exprhelpers import ( "net/http" + "net/url" + "reflect" "testing" "github.com/stretchr/testify/require" ) +func TestParseQuery(t *testing.T) { + tests := []struct { + name string + query string + expected url.Values + }{ + { + name: "Full URI", + query: "/foobar/toto?ab=cd&ef=gh", + expected: url.Values{ + "/foobar/toto?ab": []string{"cd"}, + "ef": []string{"gh"}, + }, + }, + { + name: "Simple query", + query: "foo=bar", + expected: url.Values{ + "foo": []string{"bar"}, + }, + }, + { + name: "Multiple values", + query: "foo=bar&foo=baz", + expected: url.Values{ + "foo": 
[]string{"bar", "baz"}, + }, + }, + { + name: "Empty value", + query: "foo=", + expected: url.Values{ + "foo": []string{""}, + }, + }, + { + name: "Empty key", + query: "=bar", + expected: url.Values{ + "": []string{"bar"}, + }, + }, + { + name: "Empty query", + query: "", + expected: url.Values{}, + }, + { + name: "Multiple keys", + query: "foo=bar&baz=qux", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + }, + }, + { + name: "Multiple keys with empty value", + query: "foo=bar&baz=qux&quux=", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + "quux": []string{""}, + }, + }, + { + name: "Multiple keys with empty value and empty key", + query: "foo=bar&baz=qux&quux=&=quuz", + expected: url.Values{ + "foo": []string{"bar"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz", + expected: url.Values{ + "foo": []string{"bar", "baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand", + query: 
"foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand and equals", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz&foo=bar%3Dbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz", "bar=baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "Multiple keys with empty value and empty key and multiple values and escaped characters and semicolon and ampersand and equals and question mark", + query: "foo=bar&baz=qux&quux=&=quuz&foo=baz&foo=bar%20baz&foo=bar%3Bbaz&foo=bar%26baz&foo=bar%3Dbaz&foo=bar%3Fbaz", + expected: url.Values{ + "foo": []string{"bar", "baz", "bar baz", "bar;baz", "bar&baz", "bar=baz", "bar?baz"}, + "baz": []string{"qux"}, + "quux": []string{""}, + "": []string{"quuz"}, + }, + }, + { + name: "keys with escaped characters", + query: "foo=ba;r&baz=qu;;x&quux=x\\&ww&xx=qu?uz&", + expected: url.Values{ + "foo": []string{"ba;r"}, + "baz": []string{"qu;;x"}, + "quux": []string{"x\\"}, + "ww": []string{""}, + "xx": []string{"qu?uz"}, + }, + }, + { + name: "hexadecimal characters", + query: "foo=bar%20baz", + expected: url.Values{ + "foo": []string{"bar baz"}, + }, + }, + { + name: "hexadecimal characters upper and lower case", + query: "foo=Ba%42%42&bar=w%2f%2F", + expected: url.Values{ + "foo": []string{"BaBB"}, + "bar": []string{"w//"}, + }, + }, + { + name: "hexadecimal characters with invalid characters", + query: "foo=bar%20baz%2", + expected: url.Values{ + "foo": []string{"bar baz%2"}, + }, + }, + { + name: "hexadecimal characters with invalid hex characters", + query: "foo=bar%xx", + expected: 
url.Values{ + "foo": []string{"bar%xx"}, + }, + }, + { + name: "hexadecimal characters with invalid 2nd hex character", + query: "foo=bar%2x", + expected: url.Values{ + "foo": []string{"bar%2x"}, + }, + }, + { + name: "url +", + query: "foo=bar+x", + expected: url.Values{ + "foo": []string{"bar x"}, + }, + }, + { + name: "url &&", + query: "foo=bar&&lol=bur", + expected: url.Values{ + "foo": []string{"bar"}, + "lol": []string{"bur"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res := ParseQuery(test.query) + if !reflect.DeepEqual(res, test.expected) { + t.Fatalf("unexpected result: %v", res) + } + }) + } +} + +func TestExtractQueryParam(t *testing.T) { + tests := []struct { + name string + query string + param string + expected []string + }{ + { + name: "Simple uri", + query: "/foobar/toto?ab=cd&ef=gh", + param: "ab", + expected: []string{"cd"}, + }, + { + name: "Simple uri, repeating param", + query: "/foobar?foo=bar&foo=baz", + param: "foo", + expected: []string{"bar", "baz"}, + }, + { + name: "Simple uri with semicolon", + query: "/foobar/toto?ab=cd;ef=gh", + param: "ab", + expected: []string{"cd;ef=gh"}, + }, + { + name: "Simple query no uri", + query: "foo=bar", + param: "foo", + expected: []string{}, + }, + { + name: "No QS", + query: "/foobar", + param: "foo", + expected: []string{}, + }, + { + name: "missing param", + query: "/foobar/toto?ab=cd&ef=gh", + param: "baz", + expected: []string{}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res := ExtractQueryParam(test.query, test.param) + if !reflect.DeepEqual(res, test.expected) { + t.Fatalf("unexpected result: %v", res) + } + }) + } +} + func TestJA4H(t *testing.T) { tests := []struct { From b12ade27f475e510dd6cee5f1a3557d2ab1cc215 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 4 Mar 2025 12:21:27 +0100 Subject: [PATCH 452/581] cscli: review/update argument number checking (#3490) * 
cscli: remove unused Command.Args setting * cscli: review/update argument number checking cscli will consistently print the help text if the number of arguments is wrong for the command, but not for other types of errors. * fix func tests * lint --- cmd/crowdsec-cli/args/args.go | 31 +++++++++++++++++-- cmd/crowdsec-cli/clialert/alerts.go | 11 +++---- cmd/crowdsec-cli/cliallowlists/allowlists.go | 3 +- cmd/crowdsec-cli/clibouncer/add.go | 3 +- cmd/crowdsec-cli/clibouncer/bouncers.go | 1 - cmd/crowdsec-cli/clibouncer/delete.go | 3 +- cmd/crowdsec-cli/clibouncer/inspect.go | 3 +- cmd/crowdsec-cli/clibouncer/list.go | 3 +- cmd/crowdsec-cli/clibouncer/prune.go | 3 +- cmd/crowdsec-cli/clicapi/capi.go | 6 ++-- cmd/crowdsec-cli/cliconfig/config.go | 1 - cmd/crowdsec-cli/cliconfig/feature_flags.go | 3 +- cmd/crowdsec-cli/cliconfig/show.go | 3 +- cmd/crowdsec-cli/cliconfig/showyaml.go | 4 ++- cmd/crowdsec-cli/cliconsole/console.go | 10 ++++-- cmd/crowdsec-cli/clidecision/decisions.go | 7 +++-- cmd/crowdsec-cli/clidecision/import.go | 3 +- cmd/crowdsec-cli/cliexplain/explain.go | 3 +- cmd/crowdsec-cli/clihub/hub.go | 10 +++--- cmd/crowdsec-cli/clihubtest/clean.go | 4 ++- cmd/crowdsec-cli/clihubtest/coverage.go | 2 ++ cmd/crowdsec-cli/clihubtest/create.go | 3 +- cmd/crowdsec-cli/clihubtest/eval.go | 6 ++-- cmd/crowdsec-cli/clihubtest/explain.go | 4 ++- cmd/crowdsec-cli/clihubtest/hubtest.go | 1 - cmd/crowdsec-cli/clihubtest/info.go | 3 +- cmd/crowdsec-cli/clihubtest/list.go | 3 ++ cmd/crowdsec-cli/cliitem/cmdinspect.go | 3 +- cmd/crowdsec-cli/cliitem/cmdinstall.go | 3 +- cmd/crowdsec-cli/cliitem/item.go | 1 - cmd/crowdsec-cli/clilapi/context.go | 3 ++ cmd/crowdsec-cli/clilapi/lapi.go | 1 - cmd/crowdsec-cli/clilapi/register.go | 3 +- cmd/crowdsec-cli/clilapi/status.go | 3 +- cmd/crowdsec-cli/climachine/add.go | 2 ++ cmd/crowdsec-cli/climachine/delete.go | 3 +- cmd/crowdsec-cli/climachine/inspect.go | 3 +- cmd/crowdsec-cli/climachine/list.go | 3 +-
cmd/crowdsec-cli/climachine/prune.go | 3 +- cmd/crowdsec-cli/climachine/validate.go | 4 ++- cmd/crowdsec-cli/climetrics/list.go | 3 +- cmd/crowdsec-cli/climetrics/metrics.go | 1 - .../clinotifications/notifications.go | 10 +++--- cmd/crowdsec-cli/clipapi/papi.go | 6 ++-- cmd/crowdsec-cli/clisetup/setup.go | 9 +++--- cmd/crowdsec-cli/clisimulation/simulation.go | 2 ++ cmd/crowdsec-cli/clisupport/support.go | 4 +-- cmd/crowdsec-cli/completion.go | 4 ++- cmd/crowdsec-cli/dashboard.go | 12 +++---- cmd/crowdsec-cli/doc.go | 4 ++- cmd/crowdsec-cli/version.go | 3 +- test/bats/80_alerts.bats | 4 +-- test/bats/cscli-hubtype-install.bats | 4 +-- 53 files changed, 155 insertions(+), 83 deletions(-) diff --git a/cmd/crowdsec-cli/args/args.go b/cmd/crowdsec-cli/args/args.go index 7e827f2c78b..36500bc9dcb 100644 --- a/cmd/crowdsec-cli/args/args.go +++ b/cmd/crowdsec-cli/args/args.go @@ -6,22 +6,49 @@ import ( "github.com/spf13/cobra" ) +// MinimumNArgs is a drop-in replacement for cobra.MinimumNArgs that prints the usage in case of wrong number of arguments, but not other errors. func MinimumNArgs(n int) cobra.PositionalArgs { return func(cmd *cobra.Command, args []string) error { if len(args) < n { - cmd.Help() //nolint:errcheck + _ = cmd.Help() + fmt.Fprintln(cmd.OutOrStdout(), "") return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) } return nil } } +// MaximumNArgs is a drop-in replacement for cobra.MaximumNArgs that prints the usage in case of wrong number of arguments, but not other errors. +func MaximumNArgs(n int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) > n { + _ = cmd.Help() + fmt.Fprintln(cmd.OutOrStdout(), "") + return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// ExactArgs is a drop-in replacement for cobra.ExactArgs that prints the usage in case of wrong number of arguments, but not other errors. 
func ExactArgs(n int) cobra.PositionalArgs { return func(cmd *cobra.Command, args []string) error { if len(args) != n { - cmd.Help() //nolint:errcheck + _ = cmd.Help() + fmt.Fprintln(cmd.OutOrStdout(), "") return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) } return nil } } + +// NoArgs is a drop-in replacement for cobra.NoArgs that prints the usage in case of wrong number of arguments, but not other errors. +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + _ = cmd.Help() + fmt.Fprintln(cmd.OutOrStdout(), "") + return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) + } + return nil +} + diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 891a720d169..3e69acfec04 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -21,6 +21,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" @@ -200,7 +201,6 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "alerts [action]", Short: "Manage alerts", - Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, Aliases: []string{"alert"}, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { @@ -352,6 +352,7 @@ cscli alerts list --origin lists cscli alerts list -s crowdsecurity/ssh-bf cscli alerts list --type ban`, Long: `List alerts with optional filters`, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.list(cmd.Context(), alertListFilter, limit, contained, printMachine) @@ -465,7 +466,7 @@ cscli alerts delete --range 1.2.3.0/24 cscli alerts delete -s crowdsecurity/ssh-bf"`, DisableAutoGenTag: true, Aliases: []string{"remove"}, - Args: cobra.NoArgs, + Args: 
args.NoArgs, PreRunE: func(cmd *cobra.Command, _ []string) error { if deleteAll { return nil @@ -545,12 +546,9 @@ func (cli *cliAlerts) newInspectCmd() *cobra.Command { Use: `inspect "alert_id"`, Short: `Show info about an alert`, Example: `cscli alerts inspect 123`, + Args: args.MinimumNArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - _ = cmd.Help() - return errors.New("missing alert_id") - } return cli.inspect(cmd.Context(), details, args...) }, } @@ -572,6 +570,7 @@ func (cli *cliAlerts) newFlushCmd() *cobra.Command { Short: `Flush alerts /!\ This command can be used only on the same machine than the local API`, Example: `cscli alerts flush --max-items 1000 --max-age 7d`, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() diff --git a/cmd/crowdsec-cli/cliallowlists/allowlists.go b/cmd/crowdsec-cli/cliallowlists/allowlists.go index 18cfe06c27f..be4b966be9f 100644 --- a/cmd/crowdsec-cli/cliallowlists/allowlists.go +++ b/cmd/crowdsec-cli/cliallowlists/allowlists.go @@ -228,7 +228,6 @@ func (cli *cliAllowLists) NewCommand() *cobra.Command { Use: "allowlists [action]", Short: "Manage centralized allowlists", Aliases: []string{"allowlist"}, - Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, } @@ -294,7 +293,7 @@ func (cli *cliAllowLists) newListCmd() *cobra.Command { Use: "list", Example: `cscli allowlists list`, Short: "List all allowlists", - Args: cobra.NoArgs, + Args: args.NoArgs, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() if err := cfg.LoadAPIClient(); err != nil { diff --git a/cmd/crowdsec-cli/clibouncer/add.go b/cmd/crowdsec-cli/clibouncer/add.go index 7cc74e45fba..a8065479e39 100644 --- a/cmd/crowdsec-cli/clibouncer/add.go +++ b/cmd/crowdsec-cli/clibouncer/add.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" middlewares 
"github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -56,7 +57,7 @@ func (cli *cliBouncers) newAddCmd() *cobra.Command { Short: "add a single bouncer to the database", Example: `cscli bouncers add MyBouncerName cscli bouncers add MyBouncerName --key `, - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return cli.add(cmd.Context(), args[0], key) diff --git a/cmd/crowdsec-cli/clibouncer/bouncers.go b/cmd/crowdsec-cli/clibouncer/bouncers.go index 2b0a3556873..6610d3a66e8 100644 --- a/cmd/crowdsec-cli/clibouncer/bouncers.go +++ b/cmd/crowdsec-cli/clibouncer/bouncers.go @@ -34,7 +34,6 @@ func (cli *cliBouncers) NewCommand() *cobra.Command { Long: `To list/add/delete/prune bouncers. Note: This command requires database direct access, so is intended to be run on Local API/master. `, - Args: cobra.MinimumNArgs(1), Aliases: []string{"bouncer"}, DisableAutoGenTag: true, PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { diff --git a/cmd/crowdsec-cli/clibouncer/delete.go b/cmd/crowdsec-cli/clibouncer/delete.go index 33419f483b6..7559990a9b9 100644 --- a/cmd/crowdsec-cli/clibouncer/delete.go +++ b/cmd/crowdsec-cli/clibouncer/delete.go @@ -9,6 +9,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -83,7 +84,7 @@ func (cli *cliBouncers) newDeleteCmd() *cobra.Command { Use: "delete MyBouncerName", Short: "delete bouncer(s) from the database", Example: `cscli bouncers delete "bouncer1" "bouncer2"`, - Args: cobra.MinimumNArgs(1), + Args: args.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, ValidArgsFunction: cli.validBouncerID, diff --git a/cmd/crowdsec-cli/clibouncer/inspect.go b/cmd/crowdsec-cli/clibouncer/inspect.go 
index 9f1d56124d8..38371e29081 100644 --- a/cmd/crowdsec-cli/clibouncer/inspect.go +++ b/cmd/crowdsec-cli/clibouncer/inspect.go @@ -10,6 +10,7 @@ import ( "github.com/jedib0t/go-pretty/v6/table" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/database/ent" @@ -78,7 +79,7 @@ func (cli *cliBouncers) newInspectCmd() *cobra.Command { Use: "inspect [bouncer_name]", Short: "inspect a bouncer by name", Example: `cscli bouncers inspect "bouncer1"`, - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, ValidArgsFunction: cli.validBouncerID, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/crowdsec-cli/clibouncer/list.go b/cmd/crowdsec-cli/clibouncer/list.go index 4ed22ce752f..67752077b99 100644 --- a/cmd/crowdsec-cli/clibouncer/list.go +++ b/cmd/crowdsec-cli/clibouncer/list.go @@ -13,6 +13,7 @@ import ( "github.com/jedib0t/go-pretty/v6/table" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/database/ent" @@ -105,7 +106,7 @@ func (cli *cliBouncers) newListCmd() *cobra.Command { Use: "list", Short: "list all bouncers within the database", Example: `cscli bouncers list`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.List(cmd.Context(), color.Output, cli.db) diff --git a/cmd/crowdsec-cli/clibouncer/prune.go b/cmd/crowdsec-cli/clibouncer/prune.go index 754e0898a3b..3c27efe394d 100644 --- a/cmd/crowdsec-cli/clibouncer/prune.go +++ b/cmd/crowdsec-cli/clibouncer/prune.go @@ -9,6 +9,7 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" ) @@ -68,7 +69,7 @@ func (cli *cliBouncers) newPruneCmd() *cobra.Command { cmd := &cobra.Command{ Use: "prune", Short: "prune multiple bouncers from the database", - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, Example: `cscli bouncers prune -d 45m cscli bouncers prune -d 45m --force`, diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 14637a26e1a..864da56e8a4 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -14,6 +14,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" @@ -40,7 +41,6 @@ func (cli *cliCapi) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "capi [action]", Short: "Manage interaction with Central API (CAPI)", - Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -139,7 +139,7 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { cmd := &cobra.Command{ Use: "register", Short: "Register to Central API (CAPI)", - Args: cobra.MinimumNArgs(0), + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.register(cmd.Context(), capiUserPrefix, outputFile) @@ -260,7 +260,7 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Check status with the Central API (CAPI)", - Args: cobra.MinimumNArgs(0), + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), nil) diff --git a/cmd/crowdsec-cli/cliconfig/config.go b/cmd/crowdsec-cli/cliconfig/config.go 
index 22095ac7d5b..c142b795384 100644 --- a/cmd/crowdsec-cli/cliconfig/config.go +++ b/cmd/crowdsec-cli/cliconfig/config.go @@ -24,7 +24,6 @@ func (cli *cliConfig) NewCommand(mergedConfigGetter mergedConfigGetter) *cobra.C cmd := &cobra.Command{ Use: "config [command]", Short: "Allows to view current config", - Args: cobra.NoArgs, DisableAutoGenTag: true, } diff --git a/cmd/crowdsec-cli/cliconfig/feature_flags.go b/cmd/crowdsec-cli/cliconfig/feature_flags.go index c03db10ccce..14ca450bd9d 100644 --- a/cmd/crowdsec-cli/cliconfig/feature_flags.go +++ b/cmd/crowdsec-cli/cliconfig/feature_flags.go @@ -7,6 +7,7 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/fflag" ) @@ -121,7 +122,7 @@ func (cli *cliConfig) newFeatureFlagsCmd() *cobra.Command { Use: "feature-flags", Short: "Displays feature flag status", Long: `Displays the supported feature flags and their current status.`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.featureFlags(showRetired) diff --git a/cmd/crowdsec-cli/cliconfig/show.go b/cmd/crowdsec-cli/cliconfig/show.go index 90c0ab71069..cff214e49e4 100644 --- a/cmd/crowdsec-cli/cliconfig/show.go +++ b/cmd/crowdsec-cli/cliconfig/show.go @@ -12,6 +12,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" ) @@ -235,7 +236,7 @@ func (cli *cliConfig) newShowCmd() *cobra.Command { Use: "show", Short: "Displays current config", Long: `Displays the current cli configuration.`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { if err := cli.cfg().LoadAPIClient(); err != nil { diff --git 
a/cmd/crowdsec-cli/cliconfig/showyaml.go b/cmd/crowdsec-cli/cliconfig/showyaml.go index 2e46a0171ab..0215152b81e 100644 --- a/cmd/crowdsec-cli/cliconfig/showyaml.go +++ b/cmd/crowdsec-cli/cliconfig/showyaml.go @@ -4,6 +4,8 @@ import ( "fmt" "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" ) func (cli *cliConfig) showYAML(mergedConfig string) error { @@ -15,7 +17,7 @@ func (cli *cliConfig) newShowYAMLCmd(mergedConfigGetter mergedConfigGetter) *cob cmd := &cobra.Command{ Use: "show-yaml", Short: "Displays merged config.yaml + config.yaml.local", - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.showYAML(mergedConfigGetter()) diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index ddd0527b36b..5dc83fe5554 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -22,6 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/slicetools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" @@ -45,7 +46,6 @@ func (cli *cliConsole) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "console [action]", Short: "Manage interaction with Crowdsec console (https://app.crowdsec.net)", - Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -191,7 +191,7 @@ After running this command your will need to validate the enrollment in the weba cscli console enroll --disable context YOUR-ENROLL-KEY valid options are : %s,all (see 'cscli console status' for details)`, strings.Join(csconfig.CONSOLE_CONFIGS, ",")), - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, 
RunE: func(cmd *cobra.Command, args []string) error { opts := []string{csconfig.SEND_MANUAL_SCENARIOS, csconfig.SEND_TAINTED_SCENARIOS, csconfig.SEND_CONTEXT} @@ -224,7 +224,7 @@ func (cli *cliConsole) newEnableCmd() *cobra.Command { var enableAll bool cmd := &cobra.Command{ - Use: "enable [option]", + Use: "enable [option]...", Short: "Enable a console option", Example: "sudo cscli console enable tainted", Long: ` @@ -277,6 +277,9 @@ Disable given information push to the central API.`, } log.Infof("All features have been disabled") } else { + if len(args) == 0 { + return errors.New("you must specify at least one feature to disable") + } if err := cli.setConsoleOpts(args, false); err != nil { return err } @@ -300,6 +303,7 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command { Use: "status", Short: "Shows status of the console options", Example: `sudo cscli console status`, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index 822ad4bf3a8..91f39f421e5 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -17,6 +17,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -136,7 +137,6 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { Example: `cscli decisions [action] [filter]`, Aliases: []string{"decision"}, /*TBD example*/ - Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -290,7 +290,7 @@ cscli decisions list -r 1.2.3.0/24 cscli decisions list -s crowdsecurity/ssh-bf cscli decisions list --origin lists --scenario list_name 
`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.list(cmd.Context(), filter, NoSimu, contained, printMachine) @@ -427,7 +427,7 @@ cscli decisions add --ip 1.2.3.4 --duration 24h --type captcha cscli decisions add --scope username --value foobar `, /*TBD : fix long and example*/ - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.add(cmd.Context(), addIP, addRange, addDuration, addValue, addScope, addReason, addType, bypassAllowlist) @@ -532,6 +532,7 @@ func (cli *cliDecisions) newDeleteCmd() *cobra.Command { cmd := &cobra.Command{ Use: "delete [options]", Short: "Delete decisions", + Args: args.NoArgs, DisableAutoGenTag: true, Aliases: []string{"remove"}, Example: `cscli decisions delete -r 1.2.3.0/24 diff --git a/cmd/crowdsec-cli/clidecision/import.go b/cmd/crowdsec-cli/clidecision/import.go index 5b34b74a250..317fa5d62cd 100644 --- a/cmd/crowdsec-cli/clidecision/import.go +++ b/cmd/crowdsec-cli/clidecision/import.go @@ -19,6 +19,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/slicetools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -216,7 +217,7 @@ func (cli *cliDecisions) newImportCmd() *cobra.Command { Long: "expected format:\n" + "csv : any of duration,reason,scope,type,value, with a header line\n" + "json :" + "`{" + `"duration": "24h", "reason": "my_scenario", "scope": "ip", "type": "ban", "value": "x.y.z.z"` + "}`", - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, Example: `decisions.csv: duration,scope,value diff --git a/cmd/crowdsec-cli/cliexplain/explain.go b/cmd/crowdsec-cli/cliexplain/explain.go index d6e821e4e6c..6a261d99432 100644 --- a/cmd/crowdsec-cli/cliexplain/explain.go +++ b/cmd/crowdsec-cli/cliexplain/explain.go 
@@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/dumps" "github.com/crowdsecurity/crowdsec/pkg/hubtest" @@ -80,7 +81,7 @@ cscli explain --log "Sep 19 18:33:22 scw-d95986 sshd[24347]: pam_unix(sshd:auth) cscli explain --dsn "file://myfile.log" --type nginx tail -n 5 myfile.log | cscli explain --type nginx -f - `, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.run() diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 98be6a79e68..87950810219 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -12,6 +12,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -42,7 +43,6 @@ The Hub is managed by cscli, to get the latest hub files from [Crowdsec Hub](htt Example: `cscli hub list cscli hub update cscli hub upgrade`, - Args: cobra.NoArgs, DisableAutoGenTag: true, } @@ -90,7 +90,7 @@ func (cli *cliHub) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list [-a]", Short: "List all installed configurations", - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), log.StandardLogger()) @@ -152,7 +152,7 @@ cscli hub update # Download a 4x bigger version with all item contents (effectively pre-caching item downloads, but not data files). 
cscli hub update --with-content`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { if cmd.Flags().Changed("with-content") { @@ -228,7 +228,7 @@ cscli hub upgrade --force # Prompt for confirmation if running in an interactive terminal; otherwise, the option is ignored. cscli hub upgrade --interactive cscli hub upgrade -i`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.upgrade(cmd.Context(), interactive, dryRun, force) @@ -276,7 +276,7 @@ func (cli *cliHub) newTypesCmd() *cobra.Command { Long: ` List the types of supported hub items. `, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.types() diff --git a/cmd/crowdsec-cli/clihubtest/clean.go b/cmd/crowdsec-cli/clihubtest/clean.go index e3b40b6bd57..912b8838b5b 100644 --- a/cmd/crowdsec-cli/clihubtest/clean.go +++ b/cmd/crowdsec-cli/clihubtest/clean.go @@ -4,13 +4,15 @@ import ( "fmt" "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" ) func (cli *cliHubTest) newCleanCmd() *cobra.Command { cmd := &cobra.Command{ Use: "clean", Short: "clean [test_name]", - Args: cobra.MinimumNArgs(1), + Args: args.MinimumNArgs(1), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { for _, testName := range args { diff --git a/cmd/crowdsec-cli/clihubtest/coverage.go b/cmd/crowdsec-cli/clihubtest/coverage.go index 5a4f231caf5..ee840f81e5b 100644 --- a/cmd/crowdsec-cli/clihubtest/coverage.go +++ b/cmd/crowdsec-cli/clihubtest/coverage.go @@ -9,6 +9,7 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) @@ -151,6 +152,7 @@ func (cli *cliHubTest) newCoverageCmd() *cobra.Command { cmd := &cobra.Command{ Use: "coverage", Short: 
"coverage", + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.coverage(showScenarioCov, showParserCov, showAppsecCov, showOnlyPercent) diff --git a/cmd/crowdsec-cli/clihubtest/create.go b/cmd/crowdsec-cli/clihubtest/create.go index 3822bed8903..f16541e4c81 100644 --- a/cmd/crowdsec-cli/clihubtest/create.go +++ b/cmd/crowdsec-cli/clihubtest/create.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) @@ -30,7 +31,7 @@ func (cli *cliHubTest) newCreateCmd() *cobra.Command { Example: `cscli hubtest create my-awesome-test --type syslog cscli hubtest create my-nginx-custom-test --type nginx cscli hubtest create my-scenario-test --parsers crowdsecurity/nginx --scenarios crowdsecurity/http-probing`, - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { testName := args[0] diff --git a/cmd/crowdsec-cli/clihubtest/eval.go b/cmd/crowdsec-cli/clihubtest/eval.go index 83e9eae9c15..09725c2c700 100644 --- a/cmd/crowdsec-cli/clihubtest/eval.go +++ b/cmd/crowdsec-cli/clihubtest/eval.go @@ -4,6 +4,8 @@ import ( "fmt" "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" ) func (cli *cliHubTest) newEvalCmd() *cobra.Command { @@ -11,8 +13,8 @@ func (cli *cliHubTest) newEvalCmd() *cobra.Command { cmd := &cobra.Command{ Use: "eval", - Short: "eval [test_name]", - Args: cobra.ExactArgs(1), + Short: "eval [test_name]...", + Args: args.MinimumNArgs(1), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { for _, testName := range args { diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index 877aec98a37..6217e44e2a3 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -6,6 +6,8 @@ import 
( "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/pkg/dumps" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" ) func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error { @@ -58,7 +60,7 @@ func (cli *cliHubTest) newExplainCmd() *cobra.Command { cmd := &cobra.Command{ Use: "explain", Short: "explain [test_name]", - Args: cobra.ExactArgs(1), + Args: args.MinimumNArgs(1), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { for _, testName := range args { diff --git a/cmd/crowdsec-cli/clihubtest/hubtest.go b/cmd/crowdsec-cli/clihubtest/hubtest.go index f4cfed2e1cb..f177abc5ad4 100644 --- a/cmd/crowdsec-cli/clihubtest/hubtest.go +++ b/cmd/crowdsec-cli/clihubtest/hubtest.go @@ -39,7 +39,6 @@ func (cli *cliHubTest) NewCommand() *cobra.Command { Use: "hubtest", Short: "Run functional tests on hub configurations", Long: "Run functional tests on hub configurations (parsers, scenarios, collections...)", - Args: cobra.NoArgs, DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { var err error diff --git a/cmd/crowdsec-cli/clihubtest/info.go b/cmd/crowdsec-cli/clihubtest/info.go index a5d760eea01..31833d20e11 100644 --- a/cmd/crowdsec-cli/clihubtest/info.go +++ b/cmd/crowdsec-cli/clihubtest/info.go @@ -7,6 +7,7 @@ import ( "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) @@ -14,7 +15,7 @@ func (cli *cliHubTest) newInfoCmd() *cobra.Command { cmd := &cobra.Command{ Use: "info", Short: "info [test_name]", - Args: cobra.MinimumNArgs(1), + Args: args.MinimumNArgs(1), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { for _, testName := range args { diff --git a/cmd/crowdsec-cli/clihubtest/list.go b/cmd/crowdsec-cli/clihubtest/list.go index 3e76824a18e..82854e4c486 100644 --- a/cmd/crowdsec-cli/clihubtest/list.go +++ b/cmd/crowdsec-cli/clihubtest/list.go @@ -7,12 +7,15 
@@ import ( "github.com/fatih/color" "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" ) func (cli *cliHubTest) newListCmd() *cobra.Command { cmd := &cobra.Command{ Use: "list", Short: "list", + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() diff --git a/cmd/crowdsec-cli/cliitem/cmdinspect.go b/cmd/crowdsec-cli/cliitem/cmdinspect.go index 25906c30c7a..7b5bd73530e 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinspect.go +++ b/cmd/crowdsec-cli/cliitem/cmdinspect.go @@ -17,6 +17,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -171,7 +172,7 @@ func (cli cliItem) newInspectCmd() *cobra.Command { Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), Example: cli.inspectHelp.example, - Args: cobra.MinimumNArgs(1), + Args: args.MinimumNArgs(1), DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compInstalledItems(cli.name, args, toComplete, cli.cfg) diff --git a/cmd/crowdsec-cli/cliitem/cmdinstall.go b/cmd/crowdsec-cli/cliitem/cmdinstall.go index b2846716fda..3bd208f597c 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinstall.go +++ b/cmd/crowdsec-cli/cliitem/cmdinstall.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -129,7 +130,7 @@ func (cli cliItem) newInstallCmd() *cobra.Command { Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), 
Long: cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), Example: cli.installHelp.example, - Args: cobra.MinimumNArgs(1), + Args: args.MinimumNArgs(1), DisableAutoGenTag: true, ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { return compAllItems(cli.name, args, toComplete, cli.cfg) diff --git a/cmd/crowdsec-cli/cliitem/item.go b/cmd/crowdsec-cli/cliitem/item.go index 3dcc0665a89..4737d7057e9 100644 --- a/cmd/crowdsec-cli/cliitem/item.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -46,7 +46,6 @@ func (cli cliItem) NewCommand() *cobra.Command { Short: cmp.Or(cli.help.short, "Manage hub "+cli.name), Long: cli.help.long, Example: cli.help.example, - Args: cobra.MinimumNArgs(1), Aliases: []string{cli.singular}, DisableAutoGenTag: true, } diff --git a/cmd/crowdsec-cli/clilapi/context.go b/cmd/crowdsec-cli/clilapi/context.go index 0730ba2b2a9..ca981f7e55d 100644 --- a/cmd/crowdsec-cli/clilapi/context.go +++ b/cmd/crowdsec-cli/clilapi/context.go @@ -11,6 +11,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -57,6 +58,7 @@ func (cli *cliLapi) newContextAddCmd() *cobra.Command { cscli lapi context add --key file_source --value evt.Line.Src cscli lapi context add --value evt.Meta.source_ip --value evt.Meta.target_user `, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), nil) @@ -98,6 +100,7 @@ func (cli *cliLapi) newContextStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "List context to send with alerts", + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() diff --git 
a/cmd/crowdsec-cli/clilapi/lapi.go b/cmd/crowdsec-cli/clilapi/lapi.go index 01341330ae8..41c3dd34a0f 100644 --- a/cmd/crowdsec-cli/clilapi/lapi.go +++ b/cmd/crowdsec-cli/clilapi/lapi.go @@ -24,7 +24,6 @@ func (cli *cliLapi) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "lapi [action]", Short: "Manage interaction with Local API (LAPI)", - Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { if err := cli.cfg().LoadAPIClient(); err != nil { diff --git a/cmd/crowdsec-cli/clilapi/register.go b/cmd/crowdsec-cli/clilapi/register.go index 7430c73c3c8..e3a6991544b 100644 --- a/cmd/crowdsec-cli/clilapi/register.go +++ b/cmd/crowdsec-cli/clilapi/register.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/pkg/apiclient" @@ -107,7 +108,7 @@ func (cli *cliLapi) newRegisterCmd() *cobra.Command { Short: "Register a machine to Local API (LAPI)", Long: `Register your machine to the Local API (LAPI). 
Keep in mind the machine needs to be validated by an administrator on LAPI side to be effective.`, - Args: cobra.MinimumNArgs(0), + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.register(cmd.Context(), apiURL, outputFile, machine, token) diff --git a/cmd/crowdsec-cli/clilapi/status.go b/cmd/crowdsec-cli/clilapi/status.go index 039c75e585d..7fac775d55b 100644 --- a/cmd/crowdsec-cli/clilapi/status.go +++ b/cmd/crowdsec-cli/clilapi/status.go @@ -12,6 +12,7 @@ import ( "github.com/go-openapi/strfmt" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -99,7 +100,7 @@ func (cli *cliLapi) newStatusCmd() *cobra.Command { cmdLapiStatus := &cobra.Command{ Use: "status", Short: "Check authentication to Local API (LAPI)", - Args: cobra.MinimumNArgs(0), + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { hub, err := require.Hub(cli.cfg(), nil) diff --git a/cmd/crowdsec-cli/climachine/add.go b/cmd/crowdsec-cli/climachine/add.go index b2595583823..dfc3e568410 100644 --- a/cmd/crowdsec-cli/climachine/add.go +++ b/cmd/crowdsec-cli/climachine/add.go @@ -11,6 +11,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -134,6 +135,7 @@ func (cli *cliMachines) newAddCmd() *cobra.Command { cmd := &cobra.Command{ Use: "add", Short: "add a single machine to the database", + Args: args.MaximumNArgs(1), DisableAutoGenTag: true, Long: `Register a new machine in the database. 
cscli should be on the same machine as LAPI.`, Example: `cscli machines add --auto diff --git a/cmd/crowdsec-cli/climachine/delete.go b/cmd/crowdsec-cli/climachine/delete.go index 644ce93c642..eeadc66561f 100644 --- a/cmd/crowdsec-cli/climachine/delete.go +++ b/cmd/crowdsec-cli/climachine/delete.go @@ -7,6 +7,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/database" ) @@ -36,7 +37,7 @@ func (cli *cliMachines) newDeleteCmd() *cobra.Command { Use: "delete [machine_name]...", Short: "delete machine(s) by name", Example: `cscli machines delete "machine1" "machine2"`, - Args: cobra.MinimumNArgs(1), + Args: args.MinimumNArgs(1), Aliases: []string{"remove"}, DisableAutoGenTag: true, ValidArgsFunction: cli.validMachineID, diff --git a/cmd/crowdsec-cli/climachine/inspect.go b/cmd/crowdsec-cli/climachine/inspect.go index e973d07e96b..1bd85903dc1 100644 --- a/cmd/crowdsec-cli/climachine/inspect.go +++ b/cmd/crowdsec-cli/climachine/inspect.go @@ -11,6 +11,7 @@ import ( "github.com/jedib0t/go-pretty/v6/table" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -156,7 +157,7 @@ func (cli *cliMachines) newInspectCmd() *cobra.Command { Use: "inspect [machine_name]", Short: "inspect a machine by name", Example: `cscli machines inspect "machine1"`, - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, ValidArgsFunction: cli.validMachineID, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/crowdsec-cli/climachine/list.go b/cmd/crowdsec-cli/climachine/list.go index 6fb45166aa2..c20de115980 100644 --- a/cmd/crowdsec-cli/climachine/list.go +++ b/cmd/crowdsec-cli/climachine/list.go @@ -13,6 +13,7 @@ import ( 
"github.com/jedib0t/go-pretty/v6/table" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clientinfo" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -125,7 +126,7 @@ func (cli *cliMachines) newListCmd() *cobra.Command { Short: "list all machines in the database", Long: `list all machines in the database with their status and last heartbeat`, Example: `cscli machines list`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.List(cmd.Context(), color.Output, cli.db) diff --git a/cmd/crowdsec-cli/climachine/prune.go b/cmd/crowdsec-cli/climachine/prune.go index ed41ef0a736..4054305f48f 100644 --- a/cmd/crowdsec-cli/climachine/prune.go +++ b/cmd/crowdsec-cli/climachine/prune.go @@ -9,6 +9,7 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" "github.com/crowdsecurity/crowdsec/pkg/database/ent" ) @@ -80,7 +81,7 @@ func (cli *cliMachines) newPruneCmd() *cobra.Command { Example: `cscli machines prune cscli machines prune --duration 1h cscli machines prune --not-validated-only --force`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.prune(cmd.Context(), duration, notValidOnly, force) diff --git a/cmd/crowdsec-cli/climachine/validate.go b/cmd/crowdsec-cli/climachine/validate.go index cba872aa05d..3f2c9b95c0f 100644 --- a/cmd/crowdsec-cli/climachine/validate.go +++ b/cmd/crowdsec-cli/climachine/validate.go @@ -6,6 +6,8 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" ) func (cli *cliMachines) validate(ctx context.Context, machineID string) error { @@ -24,7 
+26,7 @@ func (cli *cliMachines) newValidateCmd() *cobra.Command { Short: "validate a machine to access the local API", Long: `validate a machine to access the local API.`, Example: `cscli machines validate "machine_name"`, - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return cli.validate(cmd.Context(), args[0]) diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index 32e2f8e0a80..940695744e3 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -11,6 +11,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" ) @@ -83,7 +84,7 @@ func (cli *cliMetrics) newListCmd() *cobra.Command { Use: "list", Short: "List available types of metrics.", Long: `List available types of metrics.`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { return cli.list() diff --git a/cmd/crowdsec-cli/climetrics/metrics.go b/cmd/crowdsec-cli/climetrics/metrics.go index 67bd7b6ad93..15a852d0a41 100644 --- a/cmd/crowdsec-cli/climetrics/metrics.go +++ b/cmd/crowdsec-cli/climetrics/metrics.go @@ -36,7 +36,6 @@ cscli metrics --url http://lapi.local:6060/metrics show acquisition parsers # List available metric types cscli metrics list`, - Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { return cli.show(cmd.Context(), nil, url, noUnit) diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 80ffebeaa23..9259f7594e1 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -24,6 +24,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" + 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -56,7 +57,6 @@ func (cli *cliNotifications) NewCommand() *cobra.Command { Use: "notifications [action]", Short: "Helper for notification plugin configuration", Long: "To list/inspect/test notification template", - Args: cobra.MinimumNArgs(1), Aliases: []string{"notifications", "notification"}, DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { @@ -158,7 +158,7 @@ func (cli *cliNotifications) newListCmd() *cobra.Command { Short: "list notifications plugins", Long: `list notifications plugins and their status (active or not)`, Example: `cscli notifications list`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -207,7 +207,7 @@ func (cli *cliNotifications) newInspectCmd() *cobra.Command { Short: "Inspect notifications plugin", Long: `Inspect notifications plugin and show configuration`, Example: `cscli notifications inspect `, - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), ValidArgsFunction: cli.notificationConfigFilter, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { @@ -272,7 +272,7 @@ func (cli *cliNotifications) newTestCmd() *cobra.Command { Short: "send a generic test alert to notification plugin", Long: `send a generic test alert to a notification plugin even if it is not active in profiles`, Example: `cscli notifications test [plugin_name]`, - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, ValidArgsFunction: cli.notificationConfigFilter, PreRunE: func(cmd *cobra.Command, args []string) error { @@ -367,7 +367,7 @@ cscli notifications reinject cscli notifications reinject -a '{"remediation": false,"scenario":"notification/test"}' cscli notifications 
reinject -a '{"remediation": true,"scenario":"notification/test"}' `, - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, PreRunE: func(cmd *cobra.Command, args []string) error { var err error diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index c954e3ab996..c2be87f8a04 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -13,6 +13,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiserver" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -35,7 +36,6 @@ func (cli *cliPapi) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "papi [action]", Short: "Manage interaction with Polling API (PAPI)", - Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, PersistentPreRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -100,7 +100,7 @@ func (cli *cliPapi) newStatusCmd() *cobra.Command { cmd := &cobra.Command{ Use: "status", Short: "Get status of the Polling API", - Args: cobra.MinimumNArgs(0), + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() @@ -155,7 +155,7 @@ func (cli *cliPapi) newSyncCmd() *cobra.Command { cmd := &cobra.Command{ Use: "sync", Short: "Sync with the Polling API, pulling all non-expired orders for the instance", - Args: cobra.MinimumNArgs(0), + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { cfg := cli.cfg() diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go index 7a0860182e8..7c7e10b4599 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/setup" @@ -35,7 +36,6 @@ func (cli *cliSetup) NewCommand() *cobra.Command { Use: "setup", Short: "Tools to configure crowdsec", Long: "Manage hub configuration and service detection", - Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, } @@ -82,6 +82,7 @@ func (cli *cliSetup) newDetectCmd() *cobra.Command { cmd := &cobra.Command{ Use: "detect", Short: "detect running services, generate a setup file", + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { return cli.detect(f) @@ -102,7 +103,7 @@ func (cli *cliSetup) newInstallHubCmd() *cobra.Command { cmd := &cobra.Command{ Use: "install-hub [setup_file] [flags]", Short: "install items from a setup file", - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return cli.install(cmd.Context(), interactive, dryRun, args[0]) @@ -123,7 +124,7 @@ func (cli *cliSetup) newDataSourcesCmd() *cobra.Command { cmd := &cobra.Command{ Use: "datasources [setup_file] [flags]", Short: "generate datasource (acquisition) configuration from a setup file", - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return cli.dataSources(args[0], toDir) @@ -140,7 +141,7 @@ func (cli *cliSetup) newValidateCmd() *cobra.Command { cmd := &cobra.Command{ Use: "validate [setup_file]", Short: "validate a setup file", - Args: cobra.ExactArgs(1), + Args: args.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { return cli.validate(args[0]) diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index 1189f3f4ba3..155a101f18a 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ 
b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -174,6 +175,7 @@ func (cli *cliSimulation) newStatusCmd() *cobra.Command { Use: "status", Short: "Show simulation mode status", Example: `cscli simulation status`, + Args: args.NoArgs, DisableAutoGenTag: true, Run: func(_ *cobra.Command, _ []string) { cli.status() diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index ed52e3792f0..c87e359db10 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -22,6 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" @@ -409,7 +410,6 @@ func (cli *cliSupport) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "support [action]", Short: "Provide commands to help during support", - Args: cobra.MinimumNArgs(1), DisableAutoGenTag: true, } @@ -625,7 +625,7 @@ func (cli *cliSupport) NewDumpCmd() *cobra.Command { Example: `cscli support dump cscli support dump -f /tmp/crowdsec-support.zip `, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { output := cli.cfg().Cscli.Output diff --git a/cmd/crowdsec-cli/completion.go b/cmd/crowdsec-cli/completion.go index fb60f9afab0..3c0411f29a9 100644 --- a/cmd/crowdsec-cli/completion.go +++ b/cmd/crowdsec-cli/completion.go @@ -4,6 +4,8 @@ import ( "os" "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" ) func 
NewCompletionCmd() *cobra.Command { @@ -67,7 +69,7 @@ func NewCompletionCmd() *cobra.Command { DisableFlagsInUseLine: true, DisableAutoGenTag: true, ValidArgs: []string{"bash", "zsh", "powershell", "fish"}, - Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs), + Args: cobra.MatchAll(args.ExactArgs(1), cobra.OnlyValidArgs), Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index c3c974eb9b8..ca41ef65fb6 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -22,6 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/version" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/idgen" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/metabase" @@ -64,7 +65,6 @@ func (cli *cliDashboard) NewCommand() *cobra.Command { Long: `Install/Start/Stop/Remove a metabase container exposing dashboard and metrics. Note: This command requires database direct access, so is intended to be run on Local API/master. 
`, - Args: cobra.ExactArgs(1), DisableAutoGenTag: true, Example: ` cscli dashboard setup @@ -130,7 +130,7 @@ func (cli *cliDashboard) newSetupCmd() *cobra.Command { Use: "setup", Short: "Setup a metabase container.", Long: `Perform a metabase docker setup, download standard dashboards, create a fresh user and start the container`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, Example: ` cscli dashboard setup @@ -206,7 +206,7 @@ func (cli *cliDashboard) newStartCmd() *cobra.Command { Use: "start", Short: "Start the metabase container.", Long: `Stats the metabase container using docker.`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { mb, err := metabase.NewMetabase(metabaseConfigPath, metabaseContainerID) @@ -237,7 +237,7 @@ func (cli *cliDashboard) newStopCmd() *cobra.Command { Use: "stop", Short: "Stops the metabase container.", Long: `Stops the metabase container using docker.`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { if err := metabase.StopContainer(metabaseContainerID); err != nil { @@ -254,7 +254,7 @@ func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { cmd := &cobra.Command{ Use: "show-password", Short: "displays password of metabase.", - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { m := metabase.Metabase{} @@ -277,7 +277,7 @@ func (cli *cliDashboard) newRemoveCmd() *cobra.Command { Use: "remove", Short: "removes the metabase container.", Long: `removes the metabase container using docker.`, - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, Example: ` cscli dashboard remove diff --git a/cmd/crowdsec-cli/doc.go b/cmd/crowdsec-cli/doc.go index f68d535db03..9f42763d3c5 100644 --- a/cmd/crowdsec-cli/doc.go +++ b/cmd/crowdsec-cli/doc.go @@ -7,6 +7,8 @@ import ( "github.com/spf13/cobra" 
"github.com/spf13/cobra/doc" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" ) type cliDoc struct{} @@ -23,7 +25,7 @@ func (cli cliDoc) NewCommand(rootCmd *cobra.Command) *cobra.Command { cmd := &cobra.Command{ Use: "doc", Short: "Generate the documentation related to cscli commands. Target directory must exist.", - Args: cobra.NoArgs, + Args: args.NoArgs, Hidden: true, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { diff --git a/cmd/crowdsec-cli/version.go b/cmd/crowdsec-cli/version.go index 7ec5c459968..8d8007ef02c 100644 --- a/cmd/crowdsec-cli/version.go +++ b/cmd/crowdsec-cli/version.go @@ -5,6 +5,7 @@ import ( "github.com/spf13/cobra" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/pkg/cwversion" ) @@ -18,7 +19,7 @@ func (cliVersion) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: "version", Short: "Display version", - Args: cobra.NoArgs, + Args: args.NoArgs, DisableAutoGenTag: true, Run: func(_ *cobra.Command, _ []string) { _, _ = os.Stdout.WriteString(cwversion.FullString()) diff --git a/test/bats/80_alerts.bats b/test/bats/80_alerts.bats index f931048c5f2..78c4e67a704 100644 --- a/test/bats/80_alerts.bats +++ b/test/bats/80_alerts.bats @@ -66,10 +66,10 @@ teardown() { @test "cscli alerts inspect" { rune -1 cscli alerts inspect - assert_stderr --partial 'missing alert_id' + assert_stderr 'Error: requires at least 1 arg(s), only received 0' rune -0 cscli decisions add -i 10.20.30.40 -t ban - rune -0 cscli alerts list -o raw <(output) + rune -0 cscli alerts list -o raw rune -0 grep 10.20.30.40 <(output) rune -0 cut -d, -f1 <(output) ALERT_ID="$output" diff --git a/test/bats/cscli-hubtype-install.bats b/test/bats/cscli-hubtype-install.bats index fe346c24e0e..f4243f6fa6a 100644 --- a/test/bats/cscli-hubtype-install.bats +++ b/test/bats/cscli-hubtype-install.bats @@ -47,13 +47,13 @@ get_latest_version() { @test "cscli install (no argument)" { rune -1 cscli parsers 
install - refute_output + assert_output --partial 'Usage:' assert_stderr --partial 'requires at least 1 arg(s), only received 0' } @test "cscli install (aliased)" { rune -1 cscli parser install - refute_output + assert_output --partial 'Usage:' assert_stderr --partial 'requires at least 1 arg(s), only received 0' } From a203d8ebbf904dd030b502d20fb2cfba7f2b4c8b Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 5 Mar 2025 16:04:16 +0100 Subject: [PATCH 453/581] appsec: handle SendAlert() properly for out of band matches (#3497) --- .../modules/appsec/appsec_hooks_test.go | 35 +++++++++++++++++++ .../modules/appsec/appsec_runner.go | 5 +-- pkg/acquisition/modules/appsec/utils.go | 4 +-- 3 files changed, 40 insertions(+), 4 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index 46b2ed4d68d..ae8498df7b4 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -332,6 +332,41 @@ func TestAppsecOnMatchHooks(t *testing.T) { require.Equal(t, appsec.CaptchaRemediation, responses[0].Action) }, }, + { + name: "on_match: SendAlert() with out-of-band rule", + expected_load_ok: true, + outofband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + DefaultRemediation: appsec.AllowRemediation, + on_match: []appsec.Hook{ + {Filter: "IsInBand == false", Apply: []string{"SendAlert()"}}, + }, + input_request: appsec.ParsedRequest{ + ClientIP: "1.2.3.4", + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Equal(t, appsec.AllowRemediation, appsecResponse.Action) + 
require.Equal(t, http.StatusOK, appsecResponse.HTTPStatus) + require.Equal(t, http.StatusOK, statusCode) + // We have both an event an overflow + require.Len(t, events, 2) + require.Equal(t, types.LOG, events[0].Type) + require.Equal(t, types.APPSEC, events[1].Type) + require.Nil(t, events[0].Overflow.Alert) + require.NotNil(t, events[1].Overflow.Alert) + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index a21a16598d7..ad4bda6eae6 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -286,7 +286,6 @@ func (r *AppsecRunner) handleInBandInterrupt(request *appsec.ParsedRequest) { r.outChan <- *appsecOvlfw } } - // Should the in band match trigger an event ? if r.AppsecRuntime.Response.SendEvent { r.outChan <- evt @@ -332,7 +331,9 @@ func (r *AppsecRunner) handleOutBandInterrupt(request *appsec.ParsedRequest) { r.logger.Errorf("unable to generate appsec event : %s", err) return } - r.outChan <- *appsecOvlfw + if appsecOvlfw != nil { + r.outChan <- *appsecOvlfw + } } } } diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 65bb4601c21..fece953b0d6 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -60,8 +60,8 @@ func AppsecEventGenerationGeoIPEnrich(src *models.Source) error { } func AppsecEventGeneration(inEvt types.Event, request *http.Request) (*types.Event, error) { - // if the request didnd't trigger inband rules, we don't want to generate an event to LAPI/CAPI - if !inEvt.Appsec.HasInBandMatches { + // if the request didn't trigger inband rules or out-of-band rules, we don't want to generate an event to LAPI/CAPI + if !inEvt.Appsec.HasInBandMatches && !inEvt.Appsec.HasOutBandMatches { return nil, nil } From 12a3c7086056e512a27f5256d68f1ff7fa2fe35a Mon Sep 17 
00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 7 Mar 2025 14:35:25 +0100 Subject: [PATCH 454/581] lint: gocritic/httpNoBody (#3493) * lint: gocritic/httpNoBody --- .golangci.yml | 1 - cmd/crowdsec-cli/clisupport/support.go | 4 +- cmd/crowdsec-cli/require/branch.go | 2 +- pkg/acquisition/modules/appsec/appsec.go | 2 +- .../loki/internal/lokiclient/loki_client.go | 2 +- .../internal/vlclient/vl_client.go | 2 +- pkg/apiclient/decisions_service.go | 2 +- pkg/apiserver/apiserver_test.go | 6 +-- pkg/apiserver/papi.go | 2 +- pkg/appsec/ja4h/ja4h_test.go | 40 +++++++++---------- pkg/cticlient/client.go | 3 +- pkg/exprhelpers/waf_test.go | 2 +- pkg/longpollclient/client.go | 2 +- 13 files changed, 34 insertions(+), 36 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index cda97f7fdaa..e8251d697f3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -214,7 +214,6 @@ linters-settings: enable-all: true disabled-checks: - paramTypeCombine - - httpNoBody - ifElseChain - importShadow - hugeParam diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index c87e359db10..03545d15b85 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -113,7 +113,7 @@ func (cli *cliSupport) dumpMetrics(ctx context.Context, db *database.Client, zw return fmt.Errorf("could not format prometheus metrics: %w", err) } - req, err := http.NewRequestWithContext(ctx, http.MethodGet, cfg.Cscli.PrometheusUrl, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, cfg.Cscli.PrometheusUrl, http.NoBody) if err != nil { return fmt.Errorf("could not create request to prometheus endpoint: %w", err) } @@ -328,7 +328,7 @@ func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, prometheus ), endpoint, ), - nil, + http.NoBody, ) if err != nil { return fmt.Errorf("could not create request to pprof endpoint: %w", err) diff --git a/cmd/crowdsec-cli/require/branch.go 
b/cmd/crowdsec-cli/require/branch.go index ab9b8e50bdc..b0744a6b4ff 100644 --- a/cmd/crowdsec-cli/require/branch.go +++ b/cmd/crowdsec-cli/require/branch.go @@ -25,7 +25,7 @@ func lookupLatest(ctx context.Context) (string, error) { url := "https://version.crowdsec.net/latest" - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) if err != nil { return "", fmt.Errorf("unable to create request for %s: %w", url, err) } diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 4b6a52d5456..389704b5afb 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -377,7 +377,7 @@ func (w *AppsecSource) IsAuth(ctx context.Context, apiKey string) bool { Timeout: 200 * time.Millisecond, } - req, err := http.NewRequestWithContext(ctx, http.MethodHead, w.lapiURL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodHead, w.lapiURL, http.NoBody) if err != nil { log.Errorf("Error creating request: %s", err) return false diff --git a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go index 5996518e191..f34d96b2129 100644 --- a/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go +++ b/pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go @@ -302,7 +302,7 @@ func (lc *LokiClient) QueryRange(ctx context.Context, infinite bool) chan *LokiQ // Create a wrapper for http.Get to be able to set headers and auth func (lc *LokiClient) Get(ctx context.Context, url string) (*http.Response, error) { - request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) if err != nil { return nil, err } diff --git a/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go 
b/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go index 402754a1307..b091b25c163 100644 --- a/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go +++ b/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go @@ -370,7 +370,7 @@ func (lc *VLClient) QueryRange(ctx context.Context, infinite bool) chan *Log { } func (lc *VLClient) Get(ctx context.Context, url string) (*http.Response, error) { - request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody) if err != nil { return nil, err } diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go index 0f3f4468c65..531b0cdac5b 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -183,7 +183,7 @@ func (s *DecisionsService) GetDecisionsFromBlocklist(ctx context.Context, blockl client := http.Client{} - req, err := http.NewRequestWithContext(ctx, http.MethodGet, *blocklist.URL, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, *blocklist.URL, http.NoBody) if err != nil { return nil, false, err } diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index fb5c0d5e389..fe250aa370d 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -337,7 +337,7 @@ func TestUnknownPath(t *testing.T) { router, _ := NewAPITest(t, ctx) w := httptest.NewRecorder() - req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test", http.NoBody) req.Header.Set("User-Agent", UserAgent) router.ServeHTTP(w, req) @@ -396,7 +396,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { require.NotNil(t, api) w := httptest.NewRecorder() - req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", http.NoBody) 
req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) @@ -448,7 +448,7 @@ func TestLoggingErrorToFileConfig(t *testing.T) { require.NotNil(t, api) w := httptest.NewRecorder() - req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "/test42", http.NoBody) req.Header.Set("User-Agent", UserAgent) api.router.ServeHTTP(w, req) assert.Equal(t, http.StatusNotFound, w.Code) diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index cddaabb87cc..6f50a08fa04 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -160,7 +160,7 @@ func (p *Papi) GetPermissions(ctx context.Context) (PapiPermCheckSuccess, error) httpClient := p.apiClient.GetClient() papiCheckUrl := fmt.Sprintf("%s%s%s", p.URL, types.PAPIVersion, types.PAPIPermissionsUrl) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, papiCheckUrl, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, papiCheckUrl, http.NoBody) if err != nil { return PapiPermCheckSuccess{}, fmt.Errorf("failed to create request: %w", err) } diff --git a/pkg/appsec/ja4h/ja4h_test.go b/pkg/appsec/ja4h/ja4h_test.go index 76d265d8a42..434525b0c94 100644 --- a/pkg/appsec/ja4h/ja4h_test.go +++ b/pkg/appsec/ja4h/ja4h_test.go @@ -29,7 +29,7 @@ func TestJA4H_A(t *testing.T) { { name: "basic GET request - HTTP1.1 - no accept-language header", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) return req }, expectedResult: "ge11nn000000", @@ -37,7 +37,7 @@ func TestJA4H_A(t *testing.T) { { name: "basic GET request - HTTP1.1 - with accept-language header", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) 
req.Header.Set("Accept-Language", "en-US") return req }, @@ -46,7 +46,7 @@ func TestJA4H_A(t *testing.T) { { name: "basic POST request - HTTP1.1 - no accept-language header - cookies - referer", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodPost, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodPost, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) req.Header.Set("Referer", "http://example.com") return req @@ -56,7 +56,7 @@ func TestJA4H_A(t *testing.T) { { name: "bad accept-language header", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("Accept-Language", "aksjdhaslkdhalkjsd") return req }, @@ -65,7 +65,7 @@ func TestJA4H_A(t *testing.T) { { name: "bad accept-language header 2", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("Accept-Language", ",") return req }, @@ -94,7 +94,7 @@ func TestJA4H_B(t *testing.T) { { name: "no headers", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) return req }, expectedResult: "e3b0c44298fc", @@ -102,7 +102,7 @@ func TestJA4H_B(t *testing.T) { { name: "header with arbitrary content", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("X-Custom-Header", "some value") return req }, @@ -111,7 +111,7 @@ func TestJA4H_B(t *testing.T) { { name: "header with multiple headers", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ 
:= http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("X-Custom-Header", "some value") req.Header.Set("Authorization", "Bearer token") return req @@ -121,7 +121,7 @@ func TestJA4H_B(t *testing.T) { { name: "curl-like request", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://localhost", nil) + req, _ := http.NewRequest(http.MethodGet, "http://localhost", http.NoBody) req.Header.Set("Host", "localhost") req.Header.Set("User-Agent", "curl/8.12.1") req.Header.Set("Accept", "*/*") @@ -150,7 +150,7 @@ func TestJA4H_C(t *testing.T) { { name: "no cookies", cookies: func() []*http.Cookie { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) return req.Cookies() }, expectedResult: "000000000000", @@ -158,7 +158,7 @@ func TestJA4H_C(t *testing.T) { { name: "one cookie", cookies: func() []*http.Cookie { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) return req.Cookies() }, @@ -167,7 +167,7 @@ func TestJA4H_C(t *testing.T) { { name: "duplicate cookies", cookies: func() []*http.Cookie { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar2"}) return req.Cookies() @@ -177,7 +177,7 @@ func TestJA4H_C(t *testing.T) { { name: "multiple cookies", cookies: func() []*http.Cookie { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) req.AddCookie(&http.Cookie{Name: "bar", Value: "foo"}) cookies := req.Cookies() @@ 
-209,7 +209,7 @@ func TestJA4H_D(t *testing.T) { { name: "no cookies", cookies: func() []*http.Cookie { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) return req.Cookies() }, expectedResult: "000000000000", @@ -217,7 +217,7 @@ func TestJA4H_D(t *testing.T) { { name: "one cookie", cookies: func() []*http.Cookie { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) return req.Cookies() }, @@ -226,7 +226,7 @@ func TestJA4H_D(t *testing.T) { { name: "duplicate cookies", cookies: func() []*http.Cookie { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar2"}) return req.Cookies() @@ -236,7 +236,7 @@ func TestJA4H_D(t *testing.T) { { name: "multiple cookies", cookies: func() []*http.Cookie { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) req.AddCookie(&http.Cookie{Name: "bar", Value: "foo"}) cookies := req.Cookies() @@ -268,7 +268,7 @@ func TestJA4H(t *testing.T) { { name: "Basic GET - No cookies", req: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) return req }, expectedHash: "ge11nn000000_e3b0c44298fc_000000000000_000000000000", @@ -276,7 +276,7 @@ func TestJA4H(t *testing.T) { { name: "Basic GET - With cookies", req: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := 
http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "session", Value: "12345"}) return req }, @@ -285,7 +285,7 @@ func TestJA4H(t *testing.T) { { name: "Basic GET - Multiple cookies", req: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil) + req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) req.AddCookie(&http.Cookie{Name: "baz", Value: "qux"}) return req diff --git a/pkg/cticlient/client.go b/pkg/cticlient/client.go index b15766dd99c..56f35b8a583 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -42,8 +42,7 @@ func (c *CrowdsecCTIClient) doRequest(ctx context.Context, method string, endpoi url += fmt.Sprintf("%s=%s&", k, v) } } - - req, err := http.NewRequestWithContext(ctx, method, url, nil) + req, err := http.NewRequestWithContext(ctx, method, url, http.NoBody) if err != nil { return nil, err } diff --git a/pkg/exprhelpers/waf_test.go b/pkg/exprhelpers/waf_test.go index bfab22bb2c7..39696cea616 100644 --- a/pkg/exprhelpers/waf_test.go +++ b/pkg/exprhelpers/waf_test.go @@ -314,7 +314,7 @@ func TestJA4H(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - req, err := http.NewRequest(test.method, test.url, nil) + req, err := http.NewRequest(test.method, test.url, http.NoBody) if err != nil { t.Fatalf("Failed to create request: %s", err) } diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 6a668e07d84..eddcc904f92 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -60,7 +60,7 @@ func (c *LongPollClient) doQuery(ctx context.Context) (*http.Response, error) { logger.Debugf("Query parameters: %s", c.url.RawQuery) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.url.String(), nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.url.String(), http.NoBody) if err 
!= nil { logger.Errorf("failed to create request: %s", err) return nil, err From bcce4afe5eb32e6b81d0e385c4f5d3f8aa1c91b2 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Fri, 7 Mar 2025 13:42:08 +0000 Subject: [PATCH 455/581] enhance: Flags now superceed all log levels (#3496) * enhance: Flags now superceed all log levels * enhance: remove global var for local scope * test --------- Co-authored-by: marco --- cmd/crowdsec/main.go | 15 +++++++++------ pkg/acquisition/acquisition.go | 6 +----- pkg/alertcontext/alertcontext.go | 2 +- pkg/apiserver/apiserver.go | 6 +----- pkg/apiserver/apiserver_test.go | 4 ++-- pkg/apiserver/papi.go | 4 +--- pkg/cache/cache.go | 3 +-- pkg/csplugin/broker.go | 4 ++-- pkg/csprofiles/csprofiles.go | 4 ++-- pkg/database/database.go | 6 +----- pkg/exprhelpers/crowdsec_cti.go | 5 +---- pkg/leakybucket/manager_load.go | 4 ++-- pkg/parser/node.go | 4 ++-- pkg/types/utils.go | 16 +++++++++++----- test/bats/01_crowdsec.bats | 18 ++++++++++++++++++ 15 files changed, 55 insertions(+), 46 deletions(-) diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 02220e15216..188116fb2f2 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -186,9 +186,10 @@ func (f *Flags) Parse() { flag.Parse() } -func newLogLevel(curLevelPtr *log.Level, f *Flags) *log.Level { +func newLogLevel(curLevelPtr *log.Level, f *Flags) (*log.Level, bool) { // mother of all defaults ret := log.InfoLevel + logLevelViaFlag := true // keep if already set if curLevelPtr != nil { @@ -210,14 +211,16 @@ func newLogLevel(curLevelPtr *log.Level, f *Flags) *log.Level { case f.LogLevelFatal: ret = log.FatalLevel default: + // We set logLevelViaFlag to false in default cause no flag was provided + logLevelViaFlag = false } if curLevelPtr != nil && ret == *curLevelPtr { // avoid returning a new ptr to the same value - return curLevelPtr + return curLevelPtr, logLevelViaFlag } - return &ret + return &ret, logLevelViaFlag } // LoadConfig returns a configuration parsed from 
configuration file @@ -230,8 +233,8 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if err := trace.Init(filepath.Join(cConfig.ConfigPaths.DataDir, "trace")); err != nil { return nil, fmt.Errorf("while setting up trace directory: %w", err) } - - cConfig.Common.LogLevel = newLogLevel(cConfig.Common.LogLevel, flags) + var logLevelViaFlag bool + cConfig.Common.LogLevel, logLevelViaFlag = newLogLevel(cConfig.Common.LogLevel, flags) if dumpFolder != "" { parser.ParseDump = true @@ -250,7 +253,7 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo cConfig.Common.LogDir, *cConfig.Common.LogLevel, cConfig.Common.LogMaxSize, cConfig.Common.LogMaxFiles, cConfig.Common.LogMaxAge, cConfig.Common.LogFormat, cConfig.Common.CompressLogs, - cConfig.Common.ForceColorLogs); err != nil { + cConfig.Common.ForceColorLogs, logLevelViaFlag); err != nil { return nil, err } diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index d3928270598..4462d8df021 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -92,14 +92,10 @@ func registerDataSource(dataSourceType string, dsGetter func() DataSource) { // setupLogger creates a logger for the datasource to use at runtime. 
func setupLogger(source, name string, level *log.Level) (*log.Entry, error) { clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err := types.ConfigureLogger(clog, level); err != nil { return nil, fmt.Errorf("while configuring datasource logger: %w", err) } - if level != nil { - clog.SetLevel(*level) - } - fields := log.Fields{ "type": source, } diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 0b38336a698..3c5a3e10c20 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -45,7 +45,7 @@ func ValidateContextExpr(key string, expressions []string) error { func NewAlertContext(contextToSend map[string][]string, valueLength int) error { clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err := types.ConfigureLogger(clog, nil); err != nil { return fmt.Errorf("couldn't create logger for alert context: %w", err) } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index a14e656fa19..a14aeac098f 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -118,14 +118,10 @@ func CustomRecoveryWithWriter() gin.HandlerFunc { func newGinLogger(config *csconfig.LocalApiServerCfg) (*log.Logger, string, error) { clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err := types.ConfigureLogger(clog, config.LogLevel); err != nil { return nil, "", fmt.Errorf("while configuring gin logger: %w", err) } - if config.LogLevel != nil { - clog.SetLevel(*config.LogLevel) - } - if config.LogMedia != "file" { return clog, "", nil } diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index fe250aa370d..01a8588dfd4 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -388,7 +388,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { cfg.LogLevel = ptr.Of(log.DebugLevel) // Configure logging - err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, 
*cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false) + err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false, false) require.NoError(t, err) api, err := NewServer(ctx, &cfg) @@ -440,7 +440,7 @@ func TestLoggingErrorToFileConfig(t *testing.T) { cfg.LogLevel = ptr.Of(log.ErrorLevel) // Configure logging - err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false) + err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false, false) require.NoError(t, err) api, err := NewServer(ctx, &cfg) diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 6f50a08fa04..442c5729554 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -83,12 +83,10 @@ type PapiPermCheckSuccess struct { func NewPAPI(apic *apic, dbClient *database.Client, consoleConfig *csconfig.ConsoleConfig, logLevel log.Level) (*Papi, error) { logger := log.New() - if err := types.ConfigureLogger(logger); err != nil { + if err := types.ConfigureLogger(logger, &logLevel); err != nil { return &Papi{}, fmt.Errorf("creating papi logger: %w", err) } - logger.SetLevel(logLevel) - papiUrl := *apic.apiClient.PapiURL papiUrl.Path = fmt.Sprintf("%s%s", types.PAPIVersion, types.PAPIPollUrl) diff --git a/pkg/cache/cache.go b/pkg/cache/cache.go index 8a696caf1f4..3885294c5bb 100644 --- a/pkg/cache/cache.go +++ b/pkg/cache/cache.go @@ -59,11 +59,10 @@ func CacheInit(cfg CacheCfg) error { clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err := types.ConfigureLogger(clog, cfg.LogLevel); err != nil { return fmt.Errorf("while creating cache logger: %w", err) } - clog.SetLevel(*cfg.LogLevel) cfg.Logger = 
clog.WithField("cache", cfg.Name) tmpCache := gcache.New(cfg.Size) diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index 3d040459638..df78d258da5 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -20,6 +20,7 @@ import ( "gopkg.in/yaml.v2" "github.com/crowdsecurity/go-cs-lib/csstring" + "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/slicetools" "github.com/crowdsecurity/crowdsec/pkg/csconfig" @@ -321,13 +322,12 @@ func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) ( pb.pluginMap[name] = &NotifierPlugin{} l := log.New() - err = types.ConfigureLogger(l) + err = types.ConfigureLogger(l, ptr.Of(log.TraceLevel)) if err != nil { return nil, err } // We set the highest level to permit plugins to set their own log level // without that, crowdsec log level is controlling plugins level - l.SetLevel(log.TraceLevel) logger := NewHCLogAdapter(l, "") c := plugin.NewClient(&plugin.ClientConfig{ HandshakeConfig: handshake, diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index c509fb448e3..c9a7cda7961 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -12,6 +12,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/go-cs-lib/ptr" ) type Runtime struct { @@ -34,11 +35,10 @@ func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { runtime := &Runtime{} xlog := log.New() - if err := types.ConfigureLogger(xlog); err != nil { + if err := types.ConfigureLogger(xlog, ptr.Of(log.InfoLevel)); err != nil { return nil, fmt.Errorf("while configuring profiles-specific logger: %w", err) } - xlog.SetLevel(log.InfoLevel) runtime.Logger = xlog.WithFields(log.Fields{ "type": "profile", "name": profile.Name, diff --git a/pkg/database/database.go b/pkg/database/database.go index 80479710751..d5186a76d25 100644 --- 
a/pkg/database/database.go +++ b/pkg/database/database.go @@ -52,14 +52,10 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro } /*The logger that will be used by db operations*/ clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err := types.ConfigureLogger(clog, config.LogLevel); err != nil { return nil, fmt.Errorf("while configuring db logger: %w", err) } - if config.LogLevel != nil { - clog.SetLevel(*config.LogLevel) - } - entLogger := clog.WithField("context", "ent") entOpt := ent.Log(entLogger.Debug) diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index 900bd7824a8..4d720dc1bc6 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -44,12 +44,9 @@ func InitCrowdsecCTI(key *string, ttl *time.Duration, size *int, logLevel *log.L *ttl = 5 * time.Minute } clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err := types.ConfigureLogger(clog, logLevel); err != nil { return fmt.Errorf("while configuring datasource logger: %w", err) } - if logLevel != nil { - clog.SetLevel(*logLevel) - } subLogger := clog.WithField("type", "crowdsec-cti") CrowdsecCTIInitCache(*size, *ttl) ctiClient = cticlient.NewCrowdsecCTIClient(cticlient.WithAPIKey(CTIApiKey), cticlient.WithLogger(subLogger)) diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 9216c7f6724..474f0fe5ef9 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -24,6 +24,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwversion/constraint" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/types" + "github.com/crowdsecurity/go-cs-lib/ptr" ) // BucketFactory struct holds all fields for any bucket configuration. 
This is to have a @@ -345,11 +346,10 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Debug { clog := log.New() - if err = types.ConfigureLogger(clog); err != nil { + if err = types.ConfigureLogger(clog, ptr.Of(log.DebugLevel)); err != nil { return fmt.Errorf("while creating bucket-specific logger: %w", err) } - clog.SetLevel(log.DebugLevel) bucketFactory.logger = clog.WithFields(log.Fields{ "cfg": bucketFactory.BucketName, "name": bucketFactory.Name, diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 1229a0f4470..7332356b9f3 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -14,6 +14,7 @@ import ( log "github.com/sirupsen/logrus" yaml "gopkg.in/yaml.v2" + "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/grokky" "github.com/crowdsecurity/crowdsec/pkg/cache" @@ -462,11 +463,10 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { that will be used only for processing this node ;) */ if n.Debug { clog := log.New() - if err = types.ConfigureLogger(clog); err != nil { + if err = types.ConfigureLogger(clog, ptr.Of(log.DebugLevel)); err != nil { return fmt.Errorf("while creating bucket-specific logger: %w", err) } - clog.SetLevel(log.DebugLevel) n.Logger = clog.WithField("id", n.rn) n.Logger.Infof("%s has debug enabled", n.Name) } else { diff --git a/pkg/types/utils.go b/pkg/types/utils.go index d5e4ac6f986..6b1506f0de2 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -11,12 +11,13 @@ import ( ) var ( - logFormatter log.Formatter - LogOutput *lumberjack.Logger // io.Writer - logLevel log.Level + logFormatter log.Formatter + LogOutput *lumberjack.Logger // io.Writer + logLevel log.Level + logLevelViaFlag bool ) -func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, format string, compress *bool, forceColors bool) error { +func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel 
log.Level, maxSize int, maxFiles int, maxAge int, format string, compress *bool, forceColors bool, levelViaFlag bool) error { if format == "" { format = "text" } @@ -68,13 +69,14 @@ func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level } logLevel = cfgLevel + logLevelViaFlag = levelViaFlag log.SetLevel(logLevel) log.SetFormatter(logFormatter) return nil } -func ConfigureLogger(clog *log.Logger) error { +func ConfigureLogger(clog *log.Logger, level *log.Level) error { /*Configure logs*/ if LogOutput != nil { clog.SetOutput(LogOutput) @@ -86,6 +88,10 @@ func ConfigureLogger(clog *log.Logger) error { clog.SetLevel(logLevel) + if level != nil && !logLevelViaFlag { + clog.SetLevel(*level) + } + return nil } diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 3df0b42a0f2..2d2807b3980 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -101,6 +101,24 @@ teardown() { assert_stderr --regexp 'FATAL.* you must run at least the API Server or crowdsec$' } +@test "crowdsec - pass log level flag to apiserver" { + LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') + config_set "$LOCAL_API_CREDENTIALS" '.password="badpassword"' + + config_set '.common.log_media="stdout"' + rune -1 "$CROWDSEC" + + # info + assert_stderr --partial "/v1/watchers/login" + # fatal + assert_stderr --partial "incorrect Username or Password" + + config_set '.common.log_media="stdout"' + rune -1 "$CROWDSEC" -error + + refute_stderr --partial "/v1/watchers/login" +} + @test "CS_LAPI_SECRET not strong enough" { CS_LAPI_SECRET=foo rune -1 wait-for "$CROWDSEC" assert_stderr --partial "api server init: unable to run local API: controller init: CS_LAPI_SECRET not strong enough" From 388dd629970f915b1de7beafde4d39f33b782eb7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 10 Mar 2025 10:50:52 +0100 Subject: [PATCH 456/581] use go 1.24.1 (#3501) --- azure-pipelines.yml | 2 +- go.mod | 2 
+- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 59aaa7db035..3faea6cbdcb 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.24.0' + version: '1.24.1' - pwsh: | choco install -y make diff --git a/go.mod b/go.mod index 51b8aacabcf..c136c6485aa 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.24.0 +go 1.24.1 require ( entgo.io/ent v0.14.2 From 9bb7ad8c3ad1d658cf34db77dcc01abc01b02d18 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 11 Mar 2025 09:56:47 +0000 Subject: [PATCH 457/581] enhancement: Add additional ssl options to db configuration (#3387) --- pkg/csconfig/database.go | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 4ca582cf576..29e8e4c3312 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -26,7 +26,10 @@ type DatabaseCfg struct { User string `yaml:"user"` Password string `yaml:"password"` DbName string `yaml:"db_name"` - Sslmode string `yaml:"sslmode"` + SSLMode string `yaml:"sslmode"` + SSLCACert string `yaml:"ssl_ca_cert"` + SSLClientCert string `yaml:"ssl_client_cert"` + SSLClientKey string `yaml:"ssl_client_key"` Host string `yaml:"host"` Port int `yaml:"port"` DbPath string `yaml:"db_path"` @@ -136,14 +139,34 @@ func (d *DatabaseCfg) ConnectionString() string { connString = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=True", d.User, d.Password, d.Host, d.Port, d.DbName) } - if d.Sslmode != "" { - connString = fmt.Sprintf("%s&tls=%s", connString, d.Sslmode) + if d.SSLMode != "" { + connString = fmt.Sprintf("%s&tls=%s", connString, d.SSLMode) + } + + if d.SSLCACert != "" { + connString = fmt.Sprintf("%s&tls-ca=%s", connString, d.SSLCACert) + } + + if d.SSLClientCert != "" && d.SSLClientKey != "" { + connString = 
fmt.Sprintf("%s&tls-cert=%s&tls-key=%s", connString, d.SSLClientCert, d.SSLClientKey) } case "postgres", "postgresql", "pgx": if d.isSocketConfig() { connString = fmt.Sprintf("host=%s user=%s dbname=%s password=%s", d.DbPath, d.User, d.DbName, d.Password) } else { - connString = fmt.Sprintf("host=%s port=%d user=%s dbname=%s password=%s sslmode=%s", d.Host, d.Port, d.User, d.DbName, d.Password, d.Sslmode) + connString = fmt.Sprintf("host=%s port=%d user=%s dbname=%s password=%s", d.Host, d.Port, d.User, d.DbName, d.Password) + } + + if d.SSLMode != "" { + connString = fmt.Sprintf("%s sslmode=%s", connString, d.SSLMode) + } + + if d.SSLCACert != "" { + connString = fmt.Sprintf("%s sslrootcert=%s", connString, d.SSLCACert) + } + + if d.SSLClientCert != "" && d.SSLClientKey != "" { + connString = fmt.Sprintf("%s sslcert=%s sslkey=%s", connString, d.SSLClientCert, d.SSLClientKey) } } From a432a6352db353619b0e67c541c9ad60e601b8cd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Mar 2025 09:33:21 +0100 Subject: [PATCH 458/581] appsec: support custom CA for lapi (#3503) * apisever, appsec: refact listenAndServe..() * RemoveAll() -> Remove() * configure CA for tls auth request * ignore error from os.Remove(socket) when there's no file * appsec functional test * lint --- pkg/acquisition/modules/appsec/appsec.go | 217 ++++++++++++++++------- pkg/apiserver/apiserver.go | 51 +++--- test/bats/appsec.bats | 174 ++++++++++++++++++ 3 files changed, 354 insertions(+), 88 deletions(-) create mode 100644 test/bats/appsec.bats diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 389704b5afb..fc4378cc7c2 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -2,9 +2,12 @@ package appsecacquisition import ( "context" + "crypto/tls" + "crypto/x509" "encoding/json" "errors" "fmt" + "io/fs" "net" "net/http" "os" @@ -64,6 +67,7 @@ type AppsecSource 
struct { AuthCache AuthCache AppsecRunners []AppsecRunner // one for each go-routine appsecAllowlistClient *allowlists.AppsecAllowlist + lapiCACertPool *x509.CertPool } // Struct to handle cache of authentication @@ -158,6 +162,28 @@ func (w *AppsecSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{AppsecReqCounter, AppsecBlockCounter, AppsecRuleHits, AppsecOutbandParsingHistogram, AppsecInbandParsingHistogram, AppsecGlobalParsingHistogram} } +func loadCertPool(caCertPath string, logger log.FieldLogger) (*x509.CertPool, error) { + caCertPool, err := x509.SystemCertPool() + if err != nil { + logger.Warnf("Error loading system CA certificates: %s", err) + } + + if caCertPool == nil { + caCertPool = x509.NewCertPool() + } + + if caCertPath != "" { + caCert, err := os.ReadFile(caCertPath) + if err != nil { + return nil, fmt.Errorf("while opening cert file: %w", err) + } + + caCertPool.AppendCertsFromPEM(caCert) + } + + return caCertPool, nil +} + func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := w.UnmarshalConfig(yamlConfig) if err != nil { @@ -241,8 +267,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLe appsecAllowlistsClient: w.appsecAllowlistClient, } - err := runner.Init(appsecCfg.GetDataDir()) - if err != nil { + if err = runner.Init(appsecCfg.GetDataDir()); err != nil { return fmt.Errorf("unable to initialize runner: %w", err) } @@ -254,6 +279,19 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLe // We don´t use the wrapper provided by coraza because we want to fully control what happens when a rule match to send the information in crowdsec w.mux.HandleFunc(w.config.Path, w.appsecHandler) + csConfig := csconfig.GetConfig() + + caCertPath := "" + + if csConfig.API.Server.TLS != nil { + caCertPath = csConfig.API.Server.TLS.CACertPath + } + + w.lapiCACertPool, err = loadCertPool(caCertPath, w.logger) + if err != 
nil { + return fmt.Errorf("unable to load LAPI CA cert pool: %w", err) + } + return nil } @@ -273,6 +311,103 @@ func (w *AppsecSource) OneShotAcquisition(_ context.Context, _ chan types.Event, return errors.New("AppSec datasource does not support command line acquisition") } +func (w *AppsecSource) listenAndServe(ctx context.Context, t *tomb.Tomb) error { + defer trace.CatchPanic("crowdsec/acquis/appsec/listenAndServe") + + w.logger.Infof("%d appsec runner to start", len(w.AppsecRunners)) + + serverError := make(chan error, 2) + + startServer := func(listener net.Listener, canTLS bool) { + var err error + + if canTLS && (w.config.CertFilePath != "" || w.config.KeyFilePath != "") { + if w.config.KeyFilePath == "" { + serverError <- errors.New("missing TLS key file") + return + } + + if w.config.CertFilePath == "" { + serverError <- errors.New("missing TLS cert file") + return + } + + err = w.server.ServeTLS(listener, w.config.CertFilePath, w.config.KeyFilePath) + } else { + err = w.server.Serve(listener) + } + + switch { + case errors.Is(err, http.ErrServerClosed): + break + case err != nil: + serverError <- err + } + } + + // Starting Unix socket listener + go func(socket string) { + if socket == "" { + return + } + + if err := os.Remove(w.config.ListenSocket); err != nil { + if !errors.Is(err, fs.ErrNotExist) { + w.logger.Errorf("can't remove socket %s: %s", socket, err) + } + } + + w.logger.Infof("creating unix socket %s", socket) + + listener, err := net.Listen("unix", socket) + if err != nil { + serverError <- fmt.Errorf("appsec server failed: %w", err) + return + } + + w.logger.Infof("Appsec listening on Unix socket %s", socket) + startServer(listener, false) + }(w.config.ListenSocket) + + // Starting TCP listener + go func(url string) { + if url == "" { + return + } + + listener, err := net.Listen("tcp", url) + if err != nil { + serverError <- fmt.Errorf("listening on %s: %w", url, err) + } + + w.logger.Infof("Appsec listening on %s", url) + 
startServer(listener, true) + }(w.config.ListenAddr) + + select { + case err := <-serverError: + return err + case <-t.Dying(): + w.logger.Info("Shutting down Appsec server") + // xx let's clean up the appsec runners :) + appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) + + if err := w.server.Shutdown(ctx); err != nil { + w.logger.Errorf("Error shutting down Appsec server: %s", err.Error()) + } + + if w.config.ListenSocket != "" { + if err := os.Remove(w.config.ListenSocket); err != nil { + if !errors.Is(err, fs.ErrNotExist) { + w.logger.Errorf("can't remove socket %s: %s", w.config.ListenSocket, err) + } + } + } + } + + return nil +} + func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { w.outChan = out @@ -285,13 +420,12 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. if err != nil { return fmt.Errorf("failed to fetch allowlists: %w", err) } + w.appsecAllowlistClient.StartRefresh(ctx, t) t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live") - w.logger.Infof("%d appsec runner to start", len(w.AppsecRunners)) - for _, runner := range w.AppsecRunners { runner.outChan = out @@ -301,60 +435,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. 
}) } - t.Go(func() error { - if w.config.ListenSocket != "" { - w.logger.Infof("creating unix socket %s", w.config.ListenSocket) - _ = os.RemoveAll(w.config.ListenSocket) - - listener, err := net.Listen("unix", w.config.ListenSocket) - if err != nil { - return fmt.Errorf("appsec server failed: %w", err) - } - - defer listener.Close() - - if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { - err = w.server.ServeTLS(listener, w.config.CertFilePath, w.config.KeyFilePath) - } else { - err = w.server.Serve(listener) - } - - if err != nil && !errors.Is(err, http.ErrServerClosed) { - return fmt.Errorf("appsec server failed: %w", err) - } - } - - return nil - }) - t.Go(func() error { - var err error - - if w.config.ListenAddr != "" { - w.logger.Infof("creating TCP server on %s", w.config.ListenAddr) - - if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { - err = w.server.ListenAndServeTLS(w.config.CertFilePath, w.config.KeyFilePath) - } else { - err = w.server.ListenAndServe() - } - - if err != nil && err != http.ErrServerClosed { - return fmt.Errorf("appsec server failed: %w", err) - } - } - - return nil - }) - <-t.Dying() - w.logger.Info("Shutting down Appsec server") - // xx let's clean up the appsec runners :) - appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) - - if err := w.server.Shutdown(ctx); err != nil { - w.logger.Errorf("Error shutting down Appsec server: %s", err.Error()) - } - - return nil + return w.listenAndServe(ctx, t) }) return nil @@ -373,21 +454,29 @@ func (w *AppsecSource) Dump() interface{} { } func (w *AppsecSource) IsAuth(ctx context.Context, apiKey string) bool { - client := &http.Client{ - Timeout: 200 * time.Millisecond, - } - req, err := http.NewRequestWithContext(ctx, http.MethodHead, w.lapiURL, http.NoBody) if err != nil { - log.Errorf("Error creating request: %s", err) + w.logger.Errorf("Error creating request: %s", err) return false } req.Header.Add("X-Api-Key", apiKey) + client := &http.Client{ + Timeout: 
200 * time.Millisecond, + } + + if w.lapiCACertPool != nil { + client.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: w.lapiCACertPool, + }, + } + } + resp, err := client.Do(req) if err != nil { - log.Errorf("Error performing request: %s", err) + w.logger.Errorf("Error performing request: %s", err) return false } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index a14aeac098f..4c11b8435d2 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "io/fs" "net" "net/http" "os" @@ -407,15 +408,11 @@ func (s *APIServer) Run(apiReady chan bool) error { // it also updates the URL field with the actual address the server is listening on // it's meant to be run in a separate goroutine func (s *APIServer) listenAndServeLAPI(apiReady chan bool) error { - var ( - tcpListener net.Listener - unixListener net.Listener - err error - serverError = make(chan error, 2) - listenerClosed = make(chan struct{}) - ) + serverError := make(chan error, 2) startServer := func(listener net.Listener, canTLS bool) { + var err error + if canTLS && s.TLS != nil && (s.TLS.CertFilePath != "" || s.TLS.KeyFilePath != "") { if s.TLS.KeyFilePath == "" { serverError <- errors.New("missing TLS key file") @@ -441,38 +438,42 @@ func (s *APIServer) listenAndServeLAPI(apiReady chan bool) error { } // Starting TCP listener - go func() { - if s.URL == "" { + go func(url string) { + if url == "" { return } - tcpListener, err = net.Listen("tcp", s.URL) + listener, err := net.Listen("tcp", url) if err != nil { - serverError <- fmt.Errorf("listening on %s: %w", s.URL, err) + serverError <- fmt.Errorf("listening on %s: %w", url, err) return } - log.Infof("CrowdSec Local API listening on %s", s.URL) - startServer(tcpListener, true) - }() + log.Infof("CrowdSec Local API listening on %s", url) + startServer(listener, true) + }(s.URL) // Starting Unix socket listener - go func() { - if s.UnixSocket == "" { + go 
func(socket string) { + if socket == "" { return } - _ = os.RemoveAll(s.UnixSocket) + if err := os.Remove(socket); err != nil { + if !errors.Is(err, fs.ErrNotExist) { + log.Errorf("can't remove socket %s: %s", socket, err) + } + } - unixListener, err = net.Listen("unix", s.UnixSocket) + listener, err := net.Listen("unix", socket) if err != nil { serverError <- fmt.Errorf("while creating unix listener: %w", err) return } - log.Infof("CrowdSec Local API listening on Unix socket %s", s.UnixSocket) - startServer(unixListener, false) - }() + log.Infof("CrowdSec Local API listening on Unix socket %s", socket) + startServer(listener, false) + }(s.UnixSocket) apiReady <- true @@ -489,10 +490,12 @@ func (s *APIServer) listenAndServeLAPI(apiReady chan bool) error { log.Errorf("while shutting down http server: %v", err) } - close(listenerClosed) - case <-listenerClosed: if s.UnixSocket != "" { - _ = os.RemoveAll(s.UnixSocket) + if err := os.Remove(s.UnixSocket); err != nil { + if !errors.Is(err, fs.ErrNotExist) { + log.Errorf("can't remove socket %s: %s", s.UnixSocket, err) + } + } } } diff --git a/test/bats/appsec.bats b/test/bats/appsec.bats new file mode 100644 index 00000000000..000c4a87932 --- /dev/null +++ b/test/bats/appsec.bats @@ -0,0 +1,174 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + CONFIG_DIR=$(dirname "$CONFIG_YAML") + export CONFIG_DIR + + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + export ACQUIS_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + ./instance-data load + mkdir -p "$ACQUIS_DIR" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "invalid configuration" { + config_set '.common.log_media="stdout"' + + cat > "$ACQUIS_DIR"/appsec.yaml <<-EOT + source: appsec + EOT + + rune -1 wait-for "$CROWDSEC" + assert_stderr --partial "crowdsec init: while loading acquisition config: missing labels in $ACQUIS_DIR/appsec.yaml (position 
0)" + + cat > "$ACQUIS_DIR"/appsec.yaml <<-EOT + source: appsec + labels: + type: appsec + EOT + + rune -1 wait-for "$CROWDSEC" + assert_stderr --partial "crowdsec init: while loading acquisition config: while configuring datasource of type appsec from $ACQUIS_DIR/appsec.yaml (position 0): unable to parse appsec configuration: appsec_config or appsec_config_path must be set" +} + +@test "appsec allow and ban" { + config_set '.common.log_media="stdout"' + + rune -0 cscli collections install crowdsecurity/appsec-virtual-patching + rune -0 cscli collections install crowdsecurity/appsec-generic-rules + + socket="$BATS_TEST_TMPDIR"/sock + + cat > "$ACQUIS_DIR"/appsec.yaml <<-EOT + source: appsec + listen_socket: $socket + labels: + type: appsec + appsec_config: crowdsecurity/appsec-default + EOT + + rune -0 wait-for \ + --err "Appsec Runner ready to process event" \ + "$CROWDSEC" + + assert_stderr --partial "loading inband rule crowdsecurity/base-config" + assert_stderr --partial "loading inband rule crowdsecurity/vpatch-*" + assert_stderr --partial "loading inband rule crowdsecurity/generic-*" + assert_stderr --partial "Created 1 appsec runners" + assert_stderr --partial "Appsec Runner ready to process event" + + ./instance-crowdsec start + + rune -0 cscli bouncers add appsecbouncer --key appkey + + # appsec will perform a HEAD request to validate. 
+ # If it fails, check downstream with: + # + # lapisocket=$(config_get '.api.server.listen_socket') + # rune -0 curl -sS --fail-with-body --unix-socket "$lapisocket" -H "X-Api-Key: appkey" "http://fakehost/v1/decisions/stream" + # assert_json '{deleted:null,new:null}' + + rune -0 curl -sS --fail-with-body --unix-socket "$socket" \ + -H "x-crowdsec-appsec-api-key: appkey" \ + -H "x-crowdsec-appsec-ip: 1.2.3.4" \ + -H 'x-crowdsec-appsec-uri: /' \ + -H 'x-crowdsec-appsec-host: foo.com' \ + -H 'x-crowdsec-appsec-verb: GET' \ + 'http://fakehost' + + assert_json '{action:"allow",http_status:200}' + + rune -22 curl -sS --fail-with-body --unix-socket "$socket" \ + -H "x-crowdsec-appsec-api-key: appkey" \ + -H "x-crowdsec-appsec-ip: 1.2.3.4" \ + -H 'x-crowdsec-appsec-uri: /.env' \ + -H 'x-crowdsec-appsec-host: foo.com' \ + -H 'x-crowdsec-appsec-verb: GET' \ + 'http://fakehost' + + assert_json '{action:"ban",http_status:403}' +} + +@test "TLS connection to lapi, own CA" { + tmpdir="$BATS_FILE_TMPDIR" + + CFDIR="$BATS_TEST_DIRNAME/testdata/cfssl" + + # Root CA + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_root.json" \ + | cfssljson --bare "$tmpdir/root" + + # Intermediate CA + cfssl gencert -loglevel 2 \ + --initca "$CFDIR/ca_intermediate.json" \ + | cfssljson --bare "$tmpdir/inter" + + cfssl sign -loglevel 2 \ + -ca "$tmpdir/root.pem" -ca-key "$tmpdir/root-key.pem" \ + -config "$CFDIR/profiles.json" -profile intermediate_ca "$tmpdir/inter.csr" \ + | cfssljson --bare "$tmpdir/inter" + + # Server cert for crowdsec with the intermediate + cfssl gencert -loglevel 2 \ + -ca "$tmpdir/inter.pem" -ca-key "$tmpdir/inter-key.pem" \ + -config "$CFDIR/profiles.json" -profile=server "$CFDIR/server.json" \ + | cfssljson --bare "$tmpdir/server" + + cat "$tmpdir/root.pem" "$tmpdir/inter.pem" > "$tmpdir/bundle.pem" + + export tmpdir + config_set ' + .api.server.tls.cert_file=strenv(tmpdir) + "/server.pem" | + .api.server.tls.key_file=strenv(tmpdir) + "/server-key.pem" | + 
.api.server.tls.ca_cert_path=strenv(tmpdir) + "/inter.pem" + ' + + rune -0 cscli collections install crowdsecurity/appsec-virtual-patching + rune -0 cscli collections install crowdsecurity/appsec-generic-rules + + socket="$BATS_TEST_TMPDIR"/sock + + cat > "$ACQUIS_DIR"/appsec.yaml <<-EOT + source: appsec + listen_socket: $socket + labels: + type: appsec + appsec_config: crowdsecurity/appsec-default + EOT + + config_set "$CONFIG_DIR/local_api_credentials.yaml" ' + .url="https://127.0.0.1:8080" | + .ca_cert_path=strenv(tmpdir) + "/bundle.pem" + ' + + ./instance-crowdsec start + + rune -0 cscli bouncers add appsecbouncer --key appkey + + rune -0 curl -sS --fail-with-body --unix-socket "$socket" \ + -H "x-crowdsec-appsec-api-key: appkey" \ + -H "x-crowdsec-appsec-ip: 1.2.3.4" \ + -H 'x-crowdsec-appsec-uri: /' \ + -H 'x-crowdsec-appsec-host: foo.com' \ + -H 'x-crowdsec-appsec-verb: GET' \ + 'http://fakehost' + + assert_json '{action:"allow",http_status:200}' +} From 50a5ef5345a5a636ac7161ab75ebe5598ec30e76 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 12 Mar 2025 10:12:30 +0100 Subject: [PATCH 459/581] deprecate capi_whitelists_path (#3504) --- pkg/apiserver/apic.go | 4 ++++ pkg/csconfig/api.go | 2 ++ 2 files changed, 6 insertions(+) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 5460046bf7f..821f6538169 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -854,6 +854,10 @@ func (a *apic) ApplyApicWhitelists(ctx context.Context, decisions []*models.Deci log.Errorf("while getting allowlists content: %s", err) } + if a.whitelists != nil { + log.Warn("capi_whitelists_path is deprecated, please use centralized allowlists instead. 
See https://docs.crowdsec.net/docs/next/local_api/centralized_allowlists.") + } + if (a.whitelists == nil || len(a.whitelists.Cidrs) == 0 && len(a.whitelists.Ips) == 0) && len(allowlisted_ips) == 0 && len(allowlisted_cidrs) == 0 { return decisions } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 8aa99686c2a..8732523094a 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -473,6 +473,8 @@ func (c *LocalApiServerCfg) LoadCapiWhitelists() error { return nil } + log.Warn("capi_whitelists_path is deprecated, please use centralized allowlists instead. See https://docs.crowdsec.net/docs/next/local_api/centralized_allowlists.") + fd, err := os.Open(c.CapiWhitelistsPath) if err != nil { return fmt.Errorf("while opening capi whitelist file: %w", err) From c4f9adb7998621bf9fe7818a17eaad15d0abf17a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Mar 2025 10:36:30 +0100 Subject: [PATCH 460/581] appsec: use CA from client credentials when connecting to LAPI (#3505) --- pkg/acquisition/modules/appsec/appsec.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index fc4378cc7c2..9796843844c 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -283,8 +283,8 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLe caCertPath := "" - if csConfig.API.Server.TLS != nil { - caCertPath = csConfig.API.Server.TLS.CACertPath + if csConfig.API.Client != nil && csConfig.API.Client.Credentials != nil { + caCertPath = csConfig.API.Client.Credentials.CACertPath } w.lapiCACertPool, err = loadCertPool(caCertPath, w.logger) From 941b3d98b93d185e6a06cf03b466ce17f51ce8dd Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 12 Mar 2025 10:55:06 +0100 Subject: [PATCH 461/581] appsec: less verbose logging for allowlists and headers check (#3498) --- 
.../modules/appsec/appsec_hooks_test.go | 26 +++++++++++++++++++ pkg/appsec/allowlists/allowlists.go | 4 ++- pkg/appsec/request.go | 2 +- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index ae8498df7b4..1c446c49c2f 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -367,6 +367,32 @@ func TestAppsecOnMatchHooks(t *testing.T) { require.NotNil(t, events[1].Overflow.Alert) }, }, + { + name: "on_match: no alert with default config", + expected_load_ok: true, + outofband_rules: []appsec_rule.CustomRule{ + { + Name: "rule42", + Zones: []string{"ARGS"}, + Variables: []string{"foo"}, + Match: appsec_rule.Match{Type: "regex", Value: "^toto"}, + Transform: []string{"lowercase"}, + }, + }, + on_match: []appsec.Hook{}, + input_request: appsec.ParsedRequest{ + RemoteAddr: "1.2.3.4", + Method: "GET", + URI: "/urllll", + Args: url.Values{"foo": []string{"toto"}}, + }, + output_asserts: func(events []types.Event, responses []appsec.AppsecTempResponse, appsecResponse appsec.BodyResponse, statusCode int) { + require.Len(t, events, 1) + require.Equal(t, types.LOG, events[0].Type) + require.Len(t, responses, 1) + require.Equal(t, appsec.AllowRemediation, responses[0].Action) + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/appsec/allowlists/allowlists.go b/pkg/appsec/allowlists/allowlists.go index 61ea52b990e..6e3286ce2c9 100644 --- a/pkg/appsec/allowlists/allowlists.go +++ b/pkg/appsec/allowlists/allowlists.go @@ -62,6 +62,8 @@ func (a *AppsecAllowlist) FetchAllowlists(ctx context.Context) error { a.lock.Lock() defer a.lock.Unlock() + prevIPsLen := len(a.ips) + prevRangesLen := len(a.ranges) a.ranges = []rangeAllowlist{} a.ips = []ipAllowlist{} @@ -93,7 +95,7 @@ func (a *AppsecAllowlist) FetchAllowlists(ctx context.Context) error { } } - if 
len(a.ips) != 0 || len(a.ranges) != 0 { + if (len(a.ips) != 0 || len(a.ranges) != 0) && (prevIPsLen != len(a.ips) || prevRangesLen != len(a.ranges)) { a.logger.Infof("fetched %d IPs and %d ranges", len(a.ips), len(a.ranges)) } a.logger.Debugf("fetched %d IPs and %d ranges", len(a.ips), len(a.ranges)) diff --git a/pkg/appsec/request.go b/pkg/appsec/request.go index e0e17bf9907..23782298a47 100644 --- a/pkg/appsec/request.go +++ b/pkg/appsec/request.go @@ -333,7 +333,7 @@ func NewParsedRequestFromRequest(r *http.Request, logger *log.Entry) (ParsedRequ } else { r.Proto = "HTTP/" + string(major) + "." + string(minor) } - } else { + } else if httpVersion != "" { logger.Warnf("Invalid value %s for HTTP version header", httpVersion) } From 3bc8e4f35f9e5475d12c03a404e9e33a467551b0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 12 Mar 2025 11:01:24 +0100 Subject: [PATCH 462/581] empty back-merge from release branch (#3506) From cce41a1e6cce6fdbdf48e15d534d091154e3c3da Mon Sep 17 00:00:00 2001 From: blotus Date: Thu, 13 Mar 2025 17:38:07 +0100 Subject: [PATCH 463/581] do not attempt to set db log level if no db config (#3510) --- cmd/crowdsec-cli/main.go | 5 +++-- test/bats/cscli-hubtype-upgrade.bats | 10 ++++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index f968b6eacf9..190d302336e 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -168,8 +168,9 @@ func (cli *cliRoot) initialize() error { } } - csConfig.DbConfig.LogLevel = ptr.Of(cli.wantedLogLevel()) - + if csConfig.DbConfig != nil { + csConfig.DbConfig.LogLevel = ptr.Of(cli.wantedLogLevel()) + } return nil } diff --git a/test/bats/cscli-hubtype-upgrade.bats b/test/bats/cscli-hubtype-upgrade.bats index 8faec90a870..5dd789f649e 100644 --- a/test/bats/cscli-hubtype-upgrade.bats +++ b/test/bats/cscli-hubtype-upgrade.bats @@ -177,11 +177,13 @@ get_latest_version() { EOT refute_stderr + 
latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) + rune -0 cscli parsers upgrade crowdsecurity/whitelists --force assert_output - <<-EOT Action plan: 📥 download - parsers: crowdsecurity/whitelists (? -> 0.2) + parsers: crowdsecurity/whitelists (? -> $latest_whitelists) downloading parsers:crowdsecurity/whitelists @@ -245,11 +247,15 @@ get_latest_version() { install_v0 parsers crowdsecurity/sshd-logs install_v0 parsers crowdsecurity/windows-auth + latest_sshd=$(get_latest_version parsers crowdsecurity/sshd-logs) + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) + latest_windows=$(get_latest_version parsers crowdsecurity/windows-auth) + rune -0 cscli parsers upgrade --all assert_output - <<-EOT Action plan: 📥 download - parsers: crowdsecurity/sshd-logs (0.0 -> 2.9), crowdsecurity/whitelists (0.0 -> 0.2), crowdsecurity/windows-auth (0.0 -> 0.2) + parsers: crowdsecurity/sshd-logs (0.0 -> $latest_sshd), crowdsecurity/whitelists (0.0 -> $latest_whitelists), crowdsecurity/windows-auth (0.0 -> $latest_windows) downloading parsers:crowdsecurity/sshd-logs downloading parsers:crowdsecurity/whitelists From f5400482a6e8e736ef1ea089c7695cb0b3a7fc07 Mon Sep 17 00:00:00 2001 From: blotus Date: Sat, 15 Mar 2025 13:46:35 +0100 Subject: [PATCH 464/581] opensuse sets OSTYPE to linux (#3514) --- wizard.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wizard.sh b/wizard.sh index bb03af5a9ac..d9b8caf2f06 100755 --- a/wizard.sh +++ b/wizard.sh @@ -142,7 +142,7 @@ detect_services () { fi; done; done; - if [[ ${OSTYPE} == "linux-gnu" ]] || [[ ${OSTYPE} == "linux-gnueabihf" ]]; then + if [[ ${OSTYPE} == "linux-gnu" ]] || [[ ${OSTYPE} == "linux-gnueabihf" ]] || [[ ${OSTYPE} == "linux" ]]; then DETECTED_SERVICES+=("linux") HMENU+=("linux" "on") else From cab99643d1bc344e5ea9d22599bcfce98d90682b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 17 Mar 2025 11:27:09 +0100 Subject: [PATCH 
465/581] Parallel hubtest (#3509) Hubtests are now much faster and have a --max-jobs option which defaults to the number of cpu cores. --- .github/workflows/bats-hub.yml | 12 +-- cmd/crowdsec-cli/clihubtest/clean.go | 38 +++++-- cmd/crowdsec-cli/clihubtest/coverage.go | 12 +-- cmd/crowdsec-cli/clihubtest/explain.go | 21 ++-- cmd/crowdsec-cli/clihubtest/run.go | 80 +++++++------- cmd/crowdsec-cli/clihubtest/table.go | 22 ++-- pkg/dumps/parser_dump.go | 6 +- pkg/hubtest/coverage.go | 17 +-- pkg/hubtest/helpers.go | 10 ++ pkg/hubtest/hubtest.go | 18 ++-- pkg/hubtest/hubtest_item.go | 135 +++++++++++------------- pkg/hubtest/parser_assert.go | 41 +++++-- pkg/hubtest/scenario_assert.go | 37 ++++--- test/bats.mk | 1 - test/bin/collect-hub-coverage | 9 +- test/bin/generate-hub-tests | 19 ---- test/bin/generate-hub-tests.py | 63 ----------- 17 files changed, 270 insertions(+), 271 deletions(-) create mode 100644 pkg/hubtest/helpers.go delete mode 100755 test/bin/generate-hub-tests delete mode 100644 test/bin/generate-hub-tests.py diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index 42f1252c8b9..e2f47c414fe 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -10,9 +10,6 @@ on: jobs: build: - strategy: - matrix: - test-file: ["hub-1.bats", "hub-2.bats", "hub-3.bats"] name: "Functional tests" runs-on: ubuntu-latest @@ -46,11 +43,14 @@ jobs: - name: "Run hub tests" run: | - ./test/bin/generate-hub-tests - ./test/run-tests ./test/dyn-bats/${{ matrix.test-file }} --formatter $(pwd)/test/lib/color-formatter + PATH=$(pwd)/test/local/bin:$PATH + ./test/instance-data load + git clone --depth 1 https://github.com/crowdsecurity/hub.git ./hub + cd ./hub + cscli hubtest run --all --clean --max-jobs 8 - name: "Collect hub coverage" - run: ./test/bin/collect-hub-coverage >> $GITHUB_ENV + run: ./test/bin/collect-hub-coverage ./hub >> $GITHUB_ENV - name: "Create Parsers badge" uses: schneegans/dynamic-badges-action@v1.7.0 diff 
--git a/cmd/crowdsec-cli/clihubtest/clean.go b/cmd/crowdsec-cli/clihubtest/clean.go index 912b8838b5b..e18ae2fae5f 100644 --- a/cmd/crowdsec-cli/clihubtest/clean.go +++ b/cmd/crowdsec-cli/clihubtest/clean.go @@ -1,33 +1,55 @@ package clihubtest import ( + "errors" "fmt" "github.com/spf13/cobra" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" + "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) func (cli *cliHubTest) newCleanCmd() *cobra.Command { + var all bool + cmd := &cobra.Command{ Use: "clean", Short: "clean [test_name]", - Args: args.MinimumNArgs(1), DisableAutoGenTag: true, RunE: func(_ *cobra.Command, args []string) error { - for _, testName := range args { - test, err := hubPtr.LoadTestItem(testName) - if err != nil { - return fmt.Errorf("unable to load test '%s': %w", testName, err) + if !all && len(args) == 0 { + return errors.New("please provide test to run or --all flag") + } + + fmt.Println("Cleaning test data...") + + tests := []*hubtest.HubTestItem{} + + if all { + if err := hubPtr.LoadAllTests(); err != nil { + return fmt.Errorf("unable to load all tests: %w", err) } - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) + + tests = hubPtr.Tests + } else { + for _, testName := range args { + test, err := hubPtr.LoadTestItem(testName) + if err != nil { + return fmt.Errorf("unable to load test '%s': %w", testName, err) + } + tests = append(tests, test) } } + for _, test := range tests { + test.Clean() + } + return nil }, } + cmd.Flags().BoolVar(&all, "all", false, "Run all tests") + return cmd } diff --git a/cmd/crowdsec-cli/clihubtest/coverage.go b/cmd/crowdsec-cli/clihubtest/coverage.go index ee840f81e5b..d4f24407198 100644 --- a/cmd/crowdsec-cli/clihubtest/coverage.go +++ b/cmd/crowdsec-cli/clihubtest/coverage.go @@ -14,12 +14,12 @@ import ( ) // getCoverage returns the coverage and the percentage of tests that passed -func getCoverage(show bool, getCoverageFunc func() 
([]hubtest.Coverage, error)) ([]hubtest.Coverage, int, error) { +func getCoverage(show bool, getCoverageFunc func(string) ([]hubtest.Coverage, error), hubDir string) ([]hubtest.Coverage, int, error) { if !show { return nil, 0, nil } - coverage, err := getCoverageFunc() + coverage, err := getCoverageFunc(hubDir) if err != nil { return nil, 0, fmt.Errorf("while getting coverage: %w", err) } @@ -46,7 +46,7 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp // for this one we explicitly don't do for appsec if err := HubTest.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) + return fmt.Errorf("unable to load all tests: %w", err) } var err error @@ -58,17 +58,17 @@ func (cli *cliHubTest) coverage(showScenarioCov bool, showParserCov bool, showAp showAppsecCov = true } - parserCoverage, parserCoveragePercent, err := getCoverage(showParserCov, HubTest.GetParsersCoverage) + parserCoverage, parserCoveragePercent, err := getCoverage(showParserCov, HubTest.GetParsersCoverage, cfg.Hub.HubDir) if err != nil { return err } - scenarioCoverage, scenarioCoveragePercent, err := getCoverage(showScenarioCov, HubTest.GetScenariosCoverage) + scenarioCoverage, scenarioCoveragePercent, err := getCoverage(showScenarioCov, HubTest.GetScenariosCoverage, cfg.Hub.HubDir) if err != nil { return err } - appsecRuleCoverage, appsecRuleCoveragePercent, err := getCoverage(showAppsecCov, HubTest.GetAppsecCoverage) + appsecRuleCoverage, appsecRuleCoveragePercent, err := getCoverage(showAppsecCov, HubTest.GetAppsecCoverage, cfg.Hub.HubDir) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index 6217e44e2a3..c58122ccaf5 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -1,19 +1,19 @@ package clihubtest import ( + "context" "fmt" "github.com/spf13/cobra" - "github.com/crowdsecurity/crowdsec/pkg/dumps" - 
"github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" + "github.com/crowdsecurity/crowdsec/pkg/dumps" ) -func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error { +func (cli *cliHubTest) explain(ctx context.Context, testName string, details bool, skipOk bool) error { test, err := HubTest.LoadTestItem(testName) if err != nil { - return fmt.Errorf("can't load test: %+v", err) + return fmt.Errorf("can't load test: %w", err) } cfg := cli.cfg() @@ -21,8 +21,8 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error err = test.ParserAssert.LoadTest(test.ParserResultFile) if err != nil { - if err = test.Run(patternDir); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + if err = test.Run(ctx, patternDir); err != nil { + return fmt.Errorf("running test '%s' failed: %w", test.Name, err) } if err = test.ParserAssert.LoadTest(test.ParserResultFile); err != nil { @@ -32,8 +32,8 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) if err != nil { - if err = test.Run(patternDir); err != nil { - return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) + if err = test.Run(ctx, patternDir); err != nil { + return fmt.Errorf("running test '%s' failed: %w", test.Name, err) } if err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile); err != nil { @@ -62,9 +62,10 @@ func (cli *cliHubTest) newExplainCmd() *cobra.Command { Short: "explain [test_name]", Args: args.MinimumNArgs(1), DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() for _, testName := range args { - if err := cli.explain(testName, details, skipOk); err != nil { + if err := cli.explain(ctx, testName, details, skipOk); err != nil { return err } } diff --git 
a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 94a3b0c10f3..3d4094cf4f0 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -1,34 +1,36 @@ package clihubtest import ( + "context" "encoding/json" "errors" "fmt" - "os" + "runtime" "strings" "github.com/AlecAivazis/survey/v2" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" "github.com/crowdsecurity/crowdsec/pkg/emoji" "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost string, args []string) error { +func (cli *cliHubTest) run(ctx context.Context, all bool, nucleiTargetHost string, appSecHost string, args []string, maxJobs uint) error { cfg := cli.cfg() - if !runAll && len(args) == 0 { + if !all && len(args) == 0 { return errors.New("please provide test to run or --all flag") } hubPtr.NucleiTargetHost = nucleiTargetHost hubPtr.AppSecHost = appSecHost - if runAll { + if all { if err := hubPtr.LoadAllTests(); err != nil { - return fmt.Errorf("unable to load all tests: %+v", err) + return fmt.Errorf("unable to load all tests: %w", err) } } else { for _, testName := range args { @@ -39,23 +41,23 @@ func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost stri } } - // set timezone to avoid DST issues - os.Setenv("TZ", "UTC") - patternDir := cfg.ConfigPaths.PatternDir + var eg errgroup.Group + + eg.SetLimit(int(maxJobs)) + for _, test := range hubPtr.Tests { if cfg.Cscli.Output == "human" { - log.Infof("Running test '%s'", test.Name) + fmt.Printf("Running test '%s'\n", test.Name) } - err := test.Run(patternDir) - if err != nil { - log.Errorf("running test '%s' failed: %+v", test.Name, err) - } + eg.Go(func() error { + return test.Run(ctx, patternDir) + }) } - return nil + return eg.Wait() } func printParserFailures(test *hubtest.HubTestItem) { @@ -101,24 +103,31 @@ func 
printScenarioFailures(test *hubtest.HubTestItem) { func (cli *cliHubTest) newRunCmd() *cobra.Command { var ( noClean bool - runAll bool + all bool + reportSuccess bool forceClean bool nucleiTargetHost string appSecHost string ) + maxJobs := uint(runtime.NumCPU()) + cmd := &cobra.Command{ Use: "run", Short: "run [test_name]", DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - return cli.run(runAll, nucleiTargetHost, appSecHost, args) + RunE: func(cmd *cobra.Command, args []string) error { + if all { + fmt.Printf("Running all tests (max_jobs: %d)\n", maxJobs) + } + + return cli.run(cmd.Context(), all, nucleiTargetHost, appSecHost, args, maxJobs) }, PersistentPostRunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() success := true - testResult := make(map[string]bool) + testMap := make(map[string]*hubtest.HubTestItem) for _, test := range hubPtr.Tests { if test.AutoGen && !isAppsecTest { if test.ParserAssert.AutoGenAssert { @@ -132,22 +141,15 @@ func (cli *cliHubTest) newRunCmd() *cobra.Command { fmt.Println(test.ScenarioAssert.AutoGenAssertData) } if !noClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } + test.Clean() } return fmt.Errorf("please fill your assert file(s) for test '%s', exiting", test.Name) } - testResult[test.Name] = test.Success + testMap[test.Name] = test if test.Success { - if cfg.Cscli.Output == "human" { - log.Infof("Test '%s' passed successfully (%d assertions)\n", test.Name, test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert) - } if !noClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } + test.Clean() } } else { success = false @@ -157,7 +159,7 @@ func (cli *cliHubTest) newRunCmd() *cobra.Command { printScenarioFailures(test) if !forceClean && !noClean { prompt := &survey.Confirm{ - Message: fmt.Sprintf("\nDo you want to remove runtime folder for test 
'%s'? (default: Yes)", test.Name), + Message: fmt.Sprintf("Do you want to remove runtime and result folder for '%s'?", test.Name), Default: true, } if err := survey.AskOne(prompt, &cleanTestEnv); err != nil { @@ -167,22 +169,20 @@ func (cli *cliHubTest) newRunCmd() *cobra.Command { } if cleanTestEnv || forceClean { - if err := test.Clean(); err != nil { - return fmt.Errorf("unable to clean test '%s' env: %w", test.Name, err) - } + test.Clean() } } } switch cfg.Cscli.Output { case "human": - hubTestResultTable(color.Output, cfg.Cscli.Color, testResult) + hubTestResultTable(color.Output, cfg.Cscli.Color, testMap, reportSuccess) case "json": jsonResult := make(map[string][]string, 0) jsonResult["success"] = make([]string, 0) jsonResult["fail"] = make([]string, 0) - for testName, success := range testResult { - if success { + for testName, test := range testMap { + if test.Success { jsonResult["success"] = append(jsonResult["success"], testName) } else { jsonResult["fail"] = append(jsonResult["fail"], testName) @@ -198,7 +198,11 @@ func (cli *cliHubTest) newRunCmd() *cobra.Command { } if !success { - return errors.New("some tests failed") + if reportSuccess { + return errors.New("some tests failed") + } + + return errors.New("some tests failed, use --report-success to show them all") } return nil @@ -209,7 +213,9 @@ func (cli *cliHubTest) newRunCmd() *cobra.Command { cmd.Flags().BoolVar(&forceClean, "clean", false, "Clean runtime environment if test fail") cmd.Flags().StringVar(&nucleiTargetHost, "target", hubtest.DefaultNucleiTarget, "Target for AppSec Test") cmd.Flags().StringVar(&appSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") - cmd.Flags().BoolVar(&runAll, "all", false, "Run all tests") + cmd.Flags().BoolVar(&all, "all", false, "Run all tests") + cmd.Flags().BoolVar(&reportSuccess, "report-success", false, "Report successful tests too (implied with json output)") + cmd.Flags().UintVar(&maxJobs, "max-jobs", maxJobs, "Run 
batch") return cmd } diff --git a/cmd/crowdsec-cli/clihubtest/table.go b/cmd/crowdsec-cli/clihubtest/table.go index 2a105a1f5c1..b1311a624e0 100644 --- a/cmd/crowdsec-cli/clihubtest/table.go +++ b/cmd/crowdsec-cli/clihubtest/table.go @@ -3,6 +3,7 @@ package clihubtest import ( "fmt" "io" + "strconv" "github.com/jedib0t/go-pretty/v6/text" @@ -11,22 +12,31 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubtest" ) -func hubTestResultTable(out io.Writer, wantColor string, testResult map[string]bool) { +func hubTestResultTable(out io.Writer, wantColor string, testMap map[string]*hubtest.HubTestItem, reportSuccess bool) { t := cstable.NewLight(out, wantColor) - t.SetHeaders("Test", "Result") + t.SetHeaders("Test", "Result", "Assertions") t.SetHeaderAlignment(text.AlignLeft) t.SetAlignment(text.AlignLeft) - for testName, success := range testResult { + showTable := reportSuccess + + for testName, test := range testMap { status := emoji.CheckMarkButton - if !success { + if !test.Success { status = emoji.CrossMark + showTable = true } - t.AddRow(testName, status) + if !test.Success || reportSuccess { + t.AddRow(testName, status, strconv.Itoa(test.ParserAssert.NbAssert+test.ScenarioAssert.NbAssert)) + } } - t.Render() + if showTable { + t.Render() + } else { + fmt.Println("All tests passed, use --report-success for more details.") + } } func hubTestListTable(out io.Writer, wantColor string, tests []*hubtest.HubTestItem) { diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index bd385bec194..64bac2ed7a1 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -35,6 +35,8 @@ type DumpOpts struct { } func LoadParserDump(filepath string) (*ParserResults, error) { + logger := log.WithField("file", filepath) + dumpData, err := os.Open(filepath) if err != nil { return nil, err @@ -83,9 +85,9 @@ func LoadParserDump(filepath string) (*ParserResults, error) { for idx, result := range pdump[lastStage][lastParser] { if result.Evt.StrTime == "" { - 
log.Warningf("Line %d/%d is missing evt.StrTime. It is most likely a mistake as it will prevent your logs to be processed in time-machine/forensic mode.", idx, len(pdump[lastStage][lastParser])) + logger.Warningf("Line %d/%d is missing evt.StrTime. It is most likely a mistake as it will prevent your logs to be processed in time-machine/forensic mode.", idx, len(pdump[lastStage][lastParser])) } else { - log.Debugf("Line %d/%d has evt.StrTime set to '%s'", idx, len(pdump[lastStage][lastParser]), result.Evt.StrTime) + logger.Debugf("Line %d/%d has evt.StrTime set to '%s'", idx, len(pdump[lastStage][lastParser]), result.Evt.StrTime) } } diff --git a/pkg/hubtest/coverage.go b/pkg/hubtest/coverage.go index e42c1e23455..057fc7e705f 100644 --- a/pkg/hubtest/coverage.go +++ b/pkg/hubtest/coverage.go @@ -23,7 +23,7 @@ type Coverage struct { PresentIn map[string]bool // poorman's set } -func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { +func (h *HubTest) GetAppsecCoverage(hubDir string) ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.APPSEC_RULES)) == 0 { return nil, errors.New("no appsec rules in hub index") } @@ -41,7 +41,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { } // parser the expressions a-la-oneagain - appsecTestConfigs, err := filepath.Glob(".appsec-tests/*/config.yaml") + appsecTestConfigs, err := filepath.Glob(filepath.Join(hubDir, ".appsec-tests", "*", "config.yaml")) if err != nil { return nil, fmt.Errorf("while find appsec-tests config: %w", err) } @@ -57,7 +57,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { err = yaml.Unmarshal(yamlFile, configFileData) if err != nil { - return nil, fmt.Errorf("parsing: %v", err) + return nil, fmt.Errorf("parsing: %w", err) } for _, appsecRulesFile := range configFileData.AppsecRules { @@ -70,7 +70,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { err = yaml.Unmarshal(yamlFile, appsecRuleData) if err != nil { - return nil, fmt.Errorf("parsing: %v", err) + 
return nil, fmt.Errorf("parsing: %w", err) } appsecRuleName := appsecRuleData.Name @@ -87,7 +87,7 @@ func (h *HubTest) GetAppsecCoverage() ([]Coverage, error) { return coverage, nil } -func (h *HubTest) GetParsersCoverage() ([]Coverage, error) { +func (h *HubTest) GetParsersCoverage(hubDir string) ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.PARSERS)) == 0 { return nil, errors.New("no parsers in hub index") } @@ -105,7 +105,7 @@ func (h *HubTest) GetParsersCoverage() ([]Coverage, error) { } // parser the expressions a-la-oneagain - passerts, err := filepath.Glob(".tests/*/parser.assert") + passerts, err := filepath.Glob(filepath.Join(hubDir, ".tests", "*", "parser.assert")) if err != nil { return nil, fmt.Errorf("while find parser asserts: %w", err) } @@ -173,7 +173,7 @@ func (h *HubTest) GetParsersCoverage() ([]Coverage, error) { return coverage, nil } -func (h *HubTest) GetScenariosCoverage() ([]Coverage, error) { +func (h *HubTest) GetScenariosCoverage(hubDir string) ([]Coverage, error) { if len(h.HubIndex.GetItemMap(cwhub.SCENARIOS)) == 0 { return nil, errors.New("no scenarios in hub index") } @@ -191,7 +191,7 @@ func (h *HubTest) GetScenariosCoverage() ([]Coverage, error) { } // parser the expressions a-la-oneagain - passerts, err := filepath.Glob(".tests/*/scenario.assert") + passerts, err := filepath.Glob(filepath.Join(hubDir, ".tests", "*", "scenario.assert")) if err != nil { return nil, fmt.Errorf("while find scenario asserts: %w", err) } @@ -259,6 +259,7 @@ func (h *HubTest) GetScenariosCoverage() ([]Coverage, error) { } } } + file.Close() } diff --git a/pkg/hubtest/helpers.go b/pkg/hubtest/helpers.go new file mode 100644 index 00000000000..d4714d86f75 --- /dev/null +++ b/pkg/hubtest/helpers.go @@ -0,0 +1,10 @@ +package hubtest + +import ( + "path/filepath" +) + +func basename(params ...any) (any, error) { + s := params[0].(string) + return filepath.Base(s), nil +} diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index 
6e5a11fff10..a99d6cc4609 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -24,13 +24,13 @@ type HubTest struct { TemplateAppsecProfilePath string NucleiTargetHost string AppSecHost string - - HubIndex *cwhub.Hub - Tests []*HubTestItem + DataDir string // we share this one across tests, to avoid unnecessary downloads + HubIndex *cwhub.Hub + Tests []*HubTestItem } const ( - templateConfigFile = "template_config.yaml" + templateConfigFile = "template_config2.yaml" templateSimulationFile = "template_simulation.yaml" templateProfileFile = "template_profiles.yaml" templateAcquisFile = "template_acquis.yaml" @@ -61,7 +61,7 @@ http: func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecTest bool) (HubTest, error) { hubPath, err := filepath.Abs(hubPath) if err != nil { - return HubTest{}, fmt.Errorf("can't get absolute path of hub: %+v", err) + return HubTest{}, fmt.Errorf("can't get absolute path of hub: %w", err) } // we can't use hubtest without the hub @@ -139,9 +139,15 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT return HubTest{}, err } + dataDir := filepath.Join(hubPath, ".cache", "data") + if err = os.MkdirAll(dataDir, 0o700); err != nil { + return HubTest{}, fmt.Errorf("while creating data dir: %w", err) + } + return HubTest{ CrowdSecPath: crowdsecPath, CscliPath: cscliPath, + DataDir: dataDir, HubPath: hubPath, HubTestPath: HubTestPath, HubIndexFile: hubIndexFile, @@ -155,7 +161,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT func (h *HubTest) LoadTestItem(name string) (*HubTestItem, error) { HubTestItem := &HubTestItem{} - testItem, err := NewTest(name, h) + testItem, err := NewTest(name, h, h.DataDir) if err != nil { return HubTestItem, err } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 75895dc729b..be467be0222 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -4,11 +4,13 @@ import ( 
"context" "errors" "fmt" + "io/fs" "net/url" "os" "os/exec" "path/filepath" "strings" + "sync" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" @@ -19,6 +21,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/parser" ) +var downloadMutex sync.Mutex + type HubTestItemConfig struct { Parsers []string `yaml:"parsers,omitempty"` Scenarios []string `yaml:"scenarios,omitempty"` @@ -31,6 +35,7 @@ type HubTestItemConfig struct { Labels map[string]string `yaml:"labels,omitempty"` IgnoreParsers bool `yaml:"ignore_parsers,omitempty"` // if we test a scenario, we don't want to assert on Parser OverrideStatics []parser.ExtraField `yaml:"override_statics,omitempty"` // Allow to override statics. Executed before s00 + OwnDataDir bool `yaml:"own_data_dir,omitempty"` // Don't share dataDir with the other tests } type HubTestItem struct { @@ -41,6 +46,7 @@ type HubTestItem struct { CscliPath string RuntimePath string + RuntimeDBDir string RuntimeHubPath string RuntimeDataPath string RuntimePatternsPath string @@ -95,7 +101,7 @@ const ( DefaultAppsecHost = "127.0.0.1:4241" ) -func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { +func NewTest(name string, hubTest *HubTest, dataDir string) (*HubTestItem, error) { testPath := filepath.Join(hubTest.HubTestPath, name) runtimeFolder := filepath.Join(testPath, "runtime") runtimeHubFolder := filepath.Join(runtimeFolder, "hub") @@ -121,6 +127,15 @@ func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { scenarioAssertFilePath := filepath.Join(testPath, ScenarioAssertFileName) ScenarioAssert := NewScenarioAssert(scenarioAssertFilePath) + // force own_data_dir for backard compatibility + if name == "magento-ccs-by-as" || name == "magento-ccs-by-country" || name == "geoip-enrich" { + configFileData.OwnDataDir = true + } + + if configFileData.OwnDataDir { + dataDir = filepath.Join(runtimeFolder, "data") + } + return &HubTestItem{ Name: name, Path: testPath, @@ -128,8 +143,8 @@ func NewTest(name string, hubTest 
*HubTest) (*HubTestItem, error) { CscliPath: hubTest.CscliPath, RuntimePath: filepath.Join(testPath, "runtime"), RuntimeHubPath: runtimeHubFolder, - RuntimeDataPath: filepath.Join(runtimeFolder, "data"), RuntimePatternsPath: filepath.Join(runtimeFolder, "patterns"), + RuntimeDBDir: filepath.Join(runtimeFolder, "data"), RuntimeConfigFilePath: filepath.Join(runtimeFolder, "config.yaml"), RuntimeProfileFilePath: filepath.Join(runtimeFolder, "profiles.yaml"), RuntimeSimulationFilePath: filepath.Join(runtimeFolder, "simulation.yaml"), @@ -142,7 +157,7 @@ func NewTest(name string, hubTest *HubTest) (*HubTestItem, error) { HubDir: runtimeHubFolder, HubIndexFile: hubTest.HubIndexFile, InstallDir: runtimeFolder, - InstallDataDir: filepath.Join(runtimeFolder, "data"), + InstallDataDir: dataDir, }, Config: configFileData, HubPath: hubTest.HubPath, @@ -176,7 +191,7 @@ func (t *HubTestItem) installHubItems(names []string, installFunc func(string) e return nil } -func (t *HubTestItem) InstallHub() error { +func (t *HubTestItem) InstallHub(ctx context.Context) error { if err := t.installHubItems(t.Config.Parsers, t.installParser); err != nil { return err } @@ -221,12 +236,14 @@ func (t *HubTestItem) InstallHub() error { return err } - ctx := context.Background() + // prevent concurrent downloads of the same file + downloadMutex.Lock() + defer downloadMutex.Unlock() // install data for parsers if needed for _, item := range hub.GetInstalledByType(cwhub.PARSERS, true) { - if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, false); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %w", item.Name, err) } log.Debugf("parser '%s' installed successfully in runtime environment", item.Name) @@ -234,8 +251,8 @@ func (t *HubTestItem) InstallHub() error { // install data for scenarios if needed for _, 
item := range hub.GetInstalledByType(cwhub.SCENARIOS, true) { - if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, false); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %w", item.Name, err) } log.Debugf("scenario '%s' installed successfully in runtime environment", item.Name) @@ -243,8 +260,8 @@ func (t *HubTestItem) InstallHub() error { // install data for postoverflows if needed for _, item := range hub.GetInstalledByType(cwhub.POSTOVERFLOWS, true) { - if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { - return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, false); err != nil { + return fmt.Errorf("unable to download data for parser '%s': %w", item.Name, err) } log.Debugf("postoverflow '%s' installed successfully in runtime environment", item.Name) @@ -253,49 +270,61 @@ func (t *HubTestItem) InstallHub() error { return nil } -func (t *HubTestItem) Clean() error { - return os.RemoveAll(t.RuntimePath) +func (t *HubTestItem) Clean() { + if err := os.RemoveAll(t.ResultsPath); err != nil { + if !errors.Is(err, fs.ErrNotExist) { + log.Errorf("while cleaning %s: %s", t.Name, err.Error()) + } + } + + if err := os.RemoveAll(t.RuntimePath); err != nil { + if !errors.Is(err, fs.ErrNotExist) { + log.Errorf("while cleaning %s: %s", t.Name, err.Error()) + } + } } func (t *HubTestItem) RunWithNucleiTemplate() error { - crowdsecLogFile := fmt.Sprintf("%s/log/crowdsec.log", t.RuntimePath) - testPath := filepath.Join(t.HubTestPath, t.Name) if _, err := os.Stat(testPath); os.IsNotExist(err) { return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) } - if err := os.Chdir(testPath); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %w", testPath, 
err) - } + crowdsecLogFile := fmt.Sprintf("%s/log/crowdsec.log", t.RuntimePath) // machine add cmdArgs := []string{"-c", t.RuntimeConfigFilePath, "machines", "add", "testMachine", "--force", "--auto"} cscliRegisterCmd := exec.Command(t.CscliPath, cmdArgs...) + cscliRegisterCmd.Dir = testPath + cscliRegisterCmd.Env = []string{"TESTDIR="+testPath, "DATADIR="+t.RuntimeHubConfig.InstallDataDir, "TZ=UTC"} output, err := cscliRegisterCmd.CombinedOutput() if err != nil { if !strings.Contains(string(output), "unable to create machine: user 'testMachine': user already exist") { fmt.Println(string(output)) - return fmt.Errorf("fail to run '%s' for test '%s': %v", cscliRegisterCmd.String(), t.Name, err) + return fmt.Errorf("fail to run '%s' for test '%s': %w", cscliRegisterCmd.String(), t.Name, err) } } // hardcode bouncer key cmdArgs = []string{"-c", t.RuntimeConfigFilePath, "bouncers", "add", "appsectests", "-k", TestBouncerApiKey} cscliBouncerCmd := exec.Command(t.CscliPath, cmdArgs...) + cscliBouncerCmd.Dir = testPath + cscliBouncerCmd.Env = []string{"TESTDIR="+testPath, "DATADIR="+t.RuntimeHubConfig.InstallDataDir, "TZ=UTC"} output, err = cscliBouncerCmd.CombinedOutput() if err != nil { if !strings.Contains(string(output), "unable to create bouncer: bouncer appsectests already exists") { fmt.Println(string(output)) - return fmt.Errorf("fail to run '%s' for test '%s': %v", cscliRegisterCmd.String(), t.Name, err) + return fmt.Errorf("fail to run '%s' for test '%s': %w", cscliRegisterCmd.String(), t.Name, err) } } // start crowdsec service cmdArgs = []string{"-c", t.RuntimeConfigFilePath} crowdsecDaemon := exec.Command(t.CrowdSecPath, cmdArgs...) 
+ crowdsecDaemon.Dir = testPath + crowdsecDaemon.Env = []string{"TESTDIR="+testPath, "DATADIR="+t.RuntimeHubConfig.InstallDataDir, "TZ=UTC"} crowdsecDaemon.Start() @@ -382,59 +411,16 @@ func createDirs(dirs []string) error { return nil } -func (t *HubTestItem) RunWithLogFile(patternDir string) error { +func (t *HubTestItem) RunWithLogFile() error { testPath := filepath.Join(t.HubTestPath, t.Name) if _, err := os.Stat(testPath); os.IsNotExist(err) { return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) } - currentDir, err := os.Getwd() // xx - if err != nil { - return fmt.Errorf("can't get current directory: %+v", err) - } - - // create runtime, data, hub folders - if err = createDirs([]string{t.RuntimePath, t.RuntimeDataPath, t.RuntimeHubPath, t.ResultsPath}); err != nil { - return err - } - - if err = Copy(t.HubIndexFile, filepath.Join(t.RuntimeHubPath, ".index.json")); err != nil { - return fmt.Errorf("unable to copy .index.json file in '%s': %w", filepath.Join(t.RuntimeHubPath, ".index.json"), err) - } - - // copy template config file to runtime folder - if err = Copy(t.TemplateConfigPath, t.RuntimeConfigFilePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateConfigPath, t.RuntimeConfigFilePath, err) - } - - // copy template profile file to runtime folder - if err = Copy(t.TemplateProfilePath, t.RuntimeProfileFilePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateProfilePath, t.RuntimeProfileFilePath, err) - } - - // copy template simulation file to runtime folder - if err = Copy(t.TemplateSimulationPath, t.RuntimeSimulationFilePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) - } - - // copy template patterns folder to runtime folder - if err = CopyDir(patternDir, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", patternDir, 
t.RuntimePatternsPath, err) - } - - // install the hub in the runtime folder - if err = t.InstallHub(); err != nil { - return fmt.Errorf("unable to install hub in '%s': %w", t.RuntimeHubPath, err) - } - - logFile := t.Config.LogFile + logFile := filepath.Join(testPath, t.Config.LogFile) logType := t.Config.LogType dsn := fmt.Sprintf("file://%s", logFile) - if err = os.Chdir(testPath); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %w", testPath, err) - } - logFileStat, err := os.Stat(logFile) if err != nil { return fmt.Errorf("unable to stat log file '%s': %w", logFile, err) @@ -446,6 +432,9 @@ func (t *HubTestItem) RunWithLogFile(patternDir string) error { cmdArgs := []string{"-c", t.RuntimeConfigFilePath, "machines", "add", "testMachine", "--force", "--auto"} cscliRegisterCmd := exec.Command(t.CscliPath, cmdArgs...) + cscliRegisterCmd.Dir = testPath + cscliRegisterCmd.Env = []string{"TESTDIR="+testPath, "DATADIR="+t.RuntimeHubConfig.InstallDataDir, "TZ=UTC"} + log.Debugf("%s", cscliRegisterCmd.String()) output, err := cscliRegisterCmd.CombinedOutput() @@ -464,6 +453,9 @@ func (t *HubTestItem) RunWithLogFile(patternDir string) error { } crowdsecCmd := exec.Command(t.CrowdSecPath, cmdArgs...) 
+ crowdsecCmd.Dir = testPath + crowdsecCmd.Env = []string{"TESTDIR="+testPath, "DATADIR="+t.RuntimeHubConfig.InstallDataDir, "TZ=UTC"} + log.Debugf("%s", crowdsecCmd.String()) output, err = crowdsecCmd.CombinedOutput() @@ -475,10 +467,6 @@ func (t *HubTestItem) RunWithLogFile(patternDir string) error { return fmt.Errorf("fail to run '%s' for test '%s': %v", crowdsecCmd.String(), t.Name, err) } - if err := os.Chdir(currentDir); err != nil { - return fmt.Errorf("can't 'cd' to '%s': %w", currentDir, err) - } - // assert parsers if !t.Config.IgnoreParsers { _, err := os.Stat(t.ParserAssert.File) @@ -506,6 +494,7 @@ func (t *HubTestItem) RunWithLogFile(patternDir string) error { t.ParserAssert.AutoGenAssert = true } else { if err := t.ParserAssert.AssertFile(t.ParserResultFile); err != nil { + // TODO: no error - should not prevent running the other tests return fmt.Errorf("unable to run assertion on file '%s': %w", t.ParserResultFile, err) } } @@ -564,14 +553,14 @@ func (t *HubTestItem) RunWithLogFile(patternDir string) error { return nil } -func (t *HubTestItem) Run(patternDir string) error { +func (t *HubTestItem) Run(ctx context.Context, patternDir string) error { var err error t.Success = false t.ErrorsList = make([]string, 0) // create runtime, data, hub, result folders - if err = createDirs([]string{t.RuntimePath, t.RuntimeDataPath, t.RuntimeHubPath, t.ResultsPath}); err != nil { + if err = createDirs([]string{t.RuntimePath, t.RuntimeDBDir, t.RuntimeHubConfig.InstallDataDir, t.RuntimeHubPath, t.ResultsPath}); err != nil { return err } @@ -625,12 +614,12 @@ func (t *HubTestItem) Run(patternDir string) error { } // install the hub in the runtime folder - if err = t.InstallHub(); err != nil { + if err = t.InstallHub(ctx); err != nil { return fmt.Errorf("unable to install hub in '%s': %w", t.RuntimeHubPath, err) } if t.Config.LogFile != "" { - return t.RunWithLogFile(patternDir) + return t.RunWithLogFile() } if t.Config.NucleiTemplate != "" { diff --git 
a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index 90d952506d1..1e7c7b2b3f0 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -61,7 +61,7 @@ func (p *ParserAssert) AutoGenFromFile(filename string) (string, error) { func (p *ParserAssert) LoadTest(filename string) error { parserDump, err := dumps.LoadParserDump(filename) if err != nil { - return fmt.Errorf("loading parser dump file: %+v", err) + return fmt.Errorf("loading parser dump file: %w", err) } p.TestData = parserDump @@ -93,7 +93,7 @@ func (p *ParserAssert) AssertFile(testFile string) error { ok, err := p.Run(scanner.Text()) if err != nil { - return fmt.Errorf("unable to run assert '%s': %+v", scanner.Text(), err) + return fmt.Errorf("unable to run assert '%s': %w", scanner.Text(), err) } p.NbAssert++ @@ -151,26 +151,43 @@ func (p *ParserAssert) AssertFile(testFile string) error { return nil } +func basenameShim(expression string) string { + if strings.Contains(expression, "datasource_path") && !strings.Contains(expression, "basename(") { + // match everything before == and wrap it with basename() + match := strings.Split(expression, "==") + return fmt.Sprintf("basename(%s) == %s", match[0], match[1]) + } + + return expression +} + func (p *ParserAssert) RunExpression(expression string) (interface{}, error) { // debug doesn't make much sense with the ability to evaluate "on the fly" // var debugFilter *exprhelpers.ExprDebugger - var output interface{} + var output any - env := map[string]interface{}{"results": *p.TestData} + logger := log.WithField("file", p.File) - runtimeFilter, err := expr.Compile(expression, exprhelpers.GetExprOptions(env)...) 
+ env := map[string]any{"results": *p.TestData} + opts := exprhelpers.GetExprOptions(env) + opts = append(opts, expr.Function("basename", basename, new(func (string) string))) + + // wrap with basename() in case of datasource_path, for backward compatibility + expression = basenameShim(expression) + + runtimeFilter, err := expr.Compile(expression, opts...) if err != nil { - log.Errorf("failed to compile '%s' : %s", expression, err) + logger.Errorf("failed to compile '%s': %s", expression, err) return output, err } // dump opcode in trace level - log.Tracef("%s", runtimeFilter.Disassemble()) + logger.Tracef("%s", runtimeFilter.Disassemble()) output, err = expr.Run(runtimeFilter, env) if err != nil { - log.Warningf("running : %s", expression) - log.Warningf("runtime error : %s", err) + logger.Warningf("running : %s", expression) + logger.Warningf("runtime error: %s", err) return output, fmt.Errorf("while running expression %s: %w", expression, err) } @@ -252,7 +269,11 @@ func (p *ParserAssert) AutoGenParserAssert() string { continue } - ret += fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Meta["%s"] == "%s"`+"\n", stage, parser, pidx, mkey, Escape(mval)) + if mkey == "datasource_path" { + ret += fmt.Sprintf(`basename(results["%s"]["%s"][%d].Evt.Meta["%s"]) == "%s"`+"\n", stage, parser, pidx, mkey, Escape(mval)) + } else { + ret += fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Meta["%s"] == "%s"`+"\n", stage, parser, pidx, mkey, Escape(mval)) + } } for _, ekey := range maptools.SortedKeys(result.Evt.Enriched) { diff --git a/pkg/hubtest/scenario_assert.go b/pkg/hubtest/scenario_assert.go index f32abf9e110..906751f491e 100644 --- a/pkg/hubtest/scenario_assert.go +++ b/pkg/hubtest/scenario_assert.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "os" + "path/filepath" "sort" "strings" @@ -59,7 +60,7 @@ func (s *ScenarioAssert) AutoGenFromFile(filename string) (string, error) { func (s *ScenarioAssert) LoadTest(filename string, bucketpour string) error { bucketDump, err := 
LoadScenarioDump(filename) if err != nil { - return fmt.Errorf("loading scenario dump file '%s': %+v", filename, err) + return fmt.Errorf("loading scenario dump file '%s': %w", filename, err) } s.TestData = bucketDump @@ -67,7 +68,7 @@ func (s *ScenarioAssert) LoadTest(filename string, bucketpour string) error { if bucketpour != "" { pourDump, err := dumps.LoadBucketPourDump(bucketpour) if err != nil { - return fmt.Errorf("loading bucket pour dump file '%s': %+v", filename, err) + return fmt.Errorf("loading bucket pour dump file '%s': %w", filename, err) } s.PourData = pourDump @@ -100,7 +101,7 @@ func (s *ScenarioAssert) AssertFile(testFile string) error { ok, err := s.Run(scanner.Text()) if err != nil { - return fmt.Errorf("unable to run assert '%s': %+v", scanner.Text(), err) + return fmt.Errorf("unable to run assert '%s': %w", scanner.Text(), err) } s.NbAssert++ @@ -156,28 +157,34 @@ func (s *ScenarioAssert) AssertFile(testFile string) error { return nil } -func (s *ScenarioAssert) RunExpression(expression string) (interface{}, error) { +func (s *ScenarioAssert) RunExpression(expression string) (any, error) { // debug doesn't make much sense with the ability to evaluate "on the fly" // var debugFilter *exprhelpers.ExprDebugger - var output interface{} + var output any - env := map[string]interface{}{"results": *s.TestData} + logger := log.WithField("file", s.File) - runtimeFilter, err := expr.Compile(expression, exprhelpers.GetExprOptions(env)...) + env := map[string]any{"results": *s.TestData} + opts := exprhelpers.GetExprOptions(env) + opts = append(opts, expr.Function("basename", basename, new(func (string) string))) + + expression = basenameShim(expression) + + runtimeFilter, err := expr.Compile(expression, opts...) 
if err != nil { return nil, err } // if debugFilter, err = exprhelpers.NewDebugger(assert, expr.Env(env)); err != nil { - // log.Warningf("Failed building debugher for %s : %s", assert, err) + // logger.Warningf("Failed building debugher for %s : %s", assert, err) // } // dump opcode in trace level - log.Tracef("%s", runtimeFilter.Disassemble()) + logger.Tracef("%s", runtimeFilter.Disassemble()) - output, err = expr.Run(runtimeFilter, map[string]interface{}{"results": *s.TestData}) + output, err = expr.Run(runtimeFilter, map[string]any{"results": *s.TestData}) if err != nil { - log.Warningf("running : %s", expression) - log.Warningf("runtime error : %s", err) + logger.Warningf("running : %s", expression) + logger.Warningf("runtime error : %s", err) return nil, fmt.Errorf("while running expression %s: %w", expression, err) } @@ -228,7 +235,11 @@ func (s *ScenarioAssert) AutoGenScenarioAssert() string { for evtIndex, evt := range event.Overflow.Alert.Events { for _, meta := range evt.Meta { - ret += fmt.Sprintf(`results[%d].Overflow.Alert.Events[%d].GetMeta("%s") == "%s"`+"\n", eventIndex, evtIndex, meta.Key, Escape(meta.Value)) + if meta.Key == "datasource_path" { + ret += fmt.Sprintf(`basename(results[%d].Overflow.Alert.Events[%d].GetMeta("%s")) == "%s"`+"\n", eventIndex, evtIndex, meta.Key, Escape(filepath.Base(meta.Value))) + } else { + ret += fmt.Sprintf(`results[%d].Overflow.Alert.Events[%d].GetMeta("%s") == "%s"`+"\n", eventIndex, evtIndex, meta.Key, Escape(meta.Value)) + } } } diff --git a/test/bats.mk b/test/bats.mk index 72ac8863f72..7d05d245095 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -103,7 +103,6 @@ bats-test: bats-environment ## Run functional tests $(TEST_DIR)/run-tests $(TEST_DIR)/bats bats-test-hub: bats-environment bats-check-requirements ## Run all hub tests - @$(TEST_DIR)/bin/generate-hub-tests $(TEST_DIR)/run-tests $(TEST_DIR)/dyn-bats # Not failproof but they can catch bugs and improve learning of sh/bash diff --git 
a/test/bin/collect-hub-coverage b/test/bin/collect-hub-coverage index 05c4e06252f..1072ed8671f 100755 --- a/test/bin/collect-hub-coverage +++ b/test/bin/collect-hub-coverage @@ -12,13 +12,16 @@ THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) # shellcheck disable=SC1091 . "${THIS_DIR}/../.environment.sh" -hubdir="${LOCAL_DIR}/hub-tests" - coverage() { "${CSCLI}" --crowdsec "${CROWDSEC}" --cscli "${CSCLI}" hubtest coverage --"$1" --percent } -cd "${hubdir}" || die "Could not find hub test results" +hubdir="${LOCAL_DIR}/hub-tests" + +hubdir="${1:-${hubdir}}" + +[[ -d "${hubdir}" ]] || die "Could not find hub test results in $hubdir" +cd "${hubdir}" || die "Could not find hub test results in $hubdir" shopt -s inherit_errexit diff --git a/test/bin/generate-hub-tests b/test/bin/generate-hub-tests deleted file mode 100755 index 658cc33a79a..00000000000 --- a/test/bin/generate-hub-tests +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -set -eu - -# shellcheck disable=SC1007 -THIS_DIR=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) -# shellcheck disable=SC1091 -. "${THIS_DIR}/../.environment.sh" - -"${TEST_DIR}/instance-data" load - -hubdir="${LOCAL_DIR}/hub-tests" -git clone --depth 1 https://github.com/crowdsecurity/hub.git "${hubdir}" >/dev/null 2>&1 || (cd "${hubdir}"; git pull) - -echo "Generating hub tests..." 
- -python3 "$THIS_DIR/generate-hub-tests.py" \ - <("${CSCLI}" --crowdsec "${CROWDSEC}" --cscli "${CSCLI}" hubtest --hub "${hubdir}" list -o json) \ - "${TEST_DIR}/dyn-bats/" diff --git a/test/bin/generate-hub-tests.py b/test/bin/generate-hub-tests.py deleted file mode 100644 index 48f296776d7..00000000000 --- a/test/bin/generate-hub-tests.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 - -import json -import pathlib -import os -import sys -import textwrap - -test_header = """ -set -u - -setup_file() { - load "../lib/setup_file.sh" -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" -} -""" - - -def write_chunk(target_dir, n, chunk): - with open(target_dir / f"hub-{n}.bats", "w") as f: - f.write(test_header) - for test in chunk: - cscli = os.environ['CSCLI'] - crowdsec = os.environ['CROWDSEC'] - testname = test['Name'] - hubdir = os.environ['LOCAL_DIR'] + '/hub-tests' - f.write(textwrap.dedent(f""" - @test "{testname}" {{ - run "{cscli}" \\ - --crowdsec "{crowdsec}" \\ - --cscli "{cscli}" \\ - --hub "{hubdir}" \\ - hubtest run "{testname}" \\ - --clean - echo "$output" - assert_success - }} - """)) - - -def main(): - hubtests_json = sys.argv[1] - target_dir = sys.argv[2] - - with open(hubtests_json) as f: - j = json.load(f) - chunk_size = len(j) // 3 + 1 - n = 1 - for i in range(0, len(j), chunk_size): - chunk = j[i:i + chunk_size] - write_chunk(pathlib.Path(target_dir), n, chunk) - n += 1 - - -if __name__ == "__main__": - main() From 663dad048bc9c1b0e0c17a63e01834df87d70f18 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 17 Mar 2025 11:36:14 +0100 Subject: [PATCH 466/581] close appsec transactions after processing request (#3515) --- .../modules/appsec/appsec_rules_test.go | 9 ++++++++- pkg/acquisition/modules/appsec/appsec_runner.go | 17 +++++++++++++++++ pkg/appsec/tx.go | 4 ++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/pkg/acquisition/modules/appsec/appsec_rules_test.go 
b/pkg/acquisition/modules/appsec/appsec_rules_test.go index 00093c5a5ad..1cd066d6f23 100644 --- a/pkg/acquisition/modules/appsec/appsec_rules_test.go +++ b/pkg/acquisition/modules/appsec/appsec_rules_test.go @@ -3,6 +3,8 @@ package appsecacquisition import ( "net/http" "net/url" + "os" + "path/filepath" "testing" log "github.com/sirupsen/logrus" @@ -346,7 +348,7 @@ func TestAppsecRuleMatches(t *testing.T) { input_request: appsec.ParsedRequest{ ClientIP: "1.2.3.4", RemoteAddr: "127.0.0.1", - Method: "GET", + Method: "POST", URI: "/urllll", Headers: http.Header{"Content-Type": []string{"multipart/form-data; boundary=boundary"}}, Body: []byte(` @@ -368,6 +370,11 @@ toto require.Len(t, responses, 1) require.True(t, responses[0].InBandInterrupt) + + // Might fail if you have artifacts from previous tests, but good enough 99% of the time + tmpFiles, err := filepath.Glob(filepath.Join(os.TempDir(), "crzmp*")) + require.NoError(t, err) + require.Empty(t, tmpFiles) }, }, { diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index ad4bda6eae6..b339c4ad532 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -355,6 +355,10 @@ func (r *AppsecRunner) handleRequest(request *appsec.ParsedRequest) { err := r.ProcessInBandRules(request) if err != nil { logger.Errorf("unable to process InBand rules: %s", err) + err = request.Tx.Close() + if err != nil { + logger.Errorf("unable to close inband transaction: %s", err) + } return } @@ -366,6 +370,11 @@ func (r *AppsecRunner) handleRequest(request *appsec.ParsedRequest) { r.handleInBandInterrupt(request) } + err = request.Tx.Close() + if err != nil { + r.logger.Errorf("unable to close inband transaction: %s", err) + } + // send back the result to the HTTP handler for the InBand part request.ResponseChannel <- r.AppsecRuntime.Response @@ -385,6 +394,10 @@ func (r *AppsecRunner) handleRequest(request 
*appsec.ParsedRequest) { err = r.ProcessOutOfBandRules(request) if err != nil { logger.Errorf("unable to process OutOfBand rules: %s", err) + err = request.Tx.Close() + if err != nil { + logger.Errorf("unable to close outband transaction: %s", err) + } return } @@ -395,6 +408,10 @@ func (r *AppsecRunner) handleRequest(request *appsec.ParsedRequest) { r.handleOutBandInterrupt(request) } } + err = request.Tx.Close() + if err != nil { + r.logger.Errorf("unable to close outband transaction: %s", err) + } // time spent to process inband AND out of band rules globalParsingElapsed := time.Since(startGlobalParsing) AppsecGlobalParsingHistogram.With(prometheus.Labels{"source": request.RemoteAddrNormalized, "appsec_engine": request.AppsecEngine}).Observe(globalParsingElapsed.Seconds()) diff --git a/pkg/appsec/tx.go b/pkg/appsec/tx.go index 47da19d1556..afa21a53c37 100644 --- a/pkg/appsec/tx.go +++ b/pkg/appsec/tx.go @@ -91,3 +91,7 @@ func (t *ExtendedTransaction) MatchedRules() []types.MatchedRule { func (t *ExtendedTransaction) ID() string { return t.Tx.ID() } + +func (t *ExtendedTransaction) Close() error { + return t.Tx.Close() +} From 0459a9a880845e134ef84a41b190d5739693b7c8 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 18 Mar 2025 11:45:06 +0100 Subject: [PATCH 467/581] update appsec test runner (#3518) --- cmd/crowdsec-cli/clihubtest/run.go | 8 +++++++- pkg/hubtest/hubtest.go | 17 +++++++++-------- pkg/hubtest/hubtest_item.go | 26 ++++++++++++++------------ 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 3d4094cf4f0..91f05300a47 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -45,6 +45,12 @@ func (cli *cliHubTest) run(ctx context.Context, all bool, nucleiTargetHost strin var eg errgroup.Group + if isAppsecTest { + log.Info("Appsec tests can not run in parallel: setting max_jobs=1") + + 
maxJobs = 1 + } + eg.SetLimit(int(maxJobs)) for _, test := range hubPtr.Tests { @@ -215,7 +221,7 @@ func (cli *cliHubTest) newRunCmd() *cobra.Command { cmd.Flags().StringVar(&appSecHost, "host", hubtest.DefaultAppsecHost, "Address to expose AppSec for hubtest") cmd.Flags().BoolVar(&all, "all", false, "Run all tests") cmd.Flags().BoolVar(&reportSuccess, "report-success", false, "Report successful tests too (implied with json output)") - cmd.Flags().UintVar(&maxJobs, "max-jobs", maxJobs, "Run batch") + cmd.Flags().UintVar(&maxJobs, "max-jobs", maxJobs, "Max number of concurrent tests (does not apply to appsec)") return cmd } diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index a99d6cc4609..2868753eba9 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -64,6 +64,11 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT return HubTest{}, fmt.Errorf("can't get absolute path of hub: %w", err) } + sharedDataDir := filepath.Join(hubPath, ".cache", "data") + if err = os.MkdirAll(sharedDataDir, 0o700); err != nil { + return HubTest{}, fmt.Errorf("while creating data dir: %w", err) + } + // we can't use hubtest without the hub if _, err = os.Stat(hubPath); os.IsNotExist(err) { return HubTest{}, fmt.Errorf("path to hub '%s' doesn't exist, can't run", hubPath) @@ -90,7 +95,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT HubDir: hubPath, HubIndexFile: hubIndexFile, InstallDir: HubTestPath, - InstallDataDir: HubTestPath, + InstallDataDir: sharedDataDir, } hub, err := cwhub.NewHub(local, nil) @@ -105,6 +110,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT return HubTest{ CrowdSecPath: crowdsecPath, CscliPath: cscliPath, + DataDir: sharedDataDir, HubPath: hubPath, HubTestPath: HubTestPath, HubIndexFile: hubIndexFile, @@ -127,7 +133,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT HubDir: hubPath, HubIndexFile: 
hubIndexFile, InstallDir: HubTestPath, - InstallDataDir: HubTestPath, + InstallDataDir: sharedDataDir, } hub, err := cwhub.NewHub(local, nil) @@ -139,15 +145,10 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT return HubTest{}, err } - dataDir := filepath.Join(hubPath, ".cache", "data") - if err = os.MkdirAll(dataDir, 0o700); err != nil { - return HubTest{}, fmt.Errorf("while creating data dir: %w", err) - } - return HubTest{ CrowdSecPath: crowdsecPath, CscliPath: cscliPath, - DataDir: dataDir, + DataDir: sharedDataDir, HubPath: hubPath, HubTestPath: HubTestPath, HubIndexFile: hubIndexFile, diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index be467be0222..526876a10ed 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -290,7 +290,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) } - crowdsecLogFile := fmt.Sprintf("%s/log/crowdsec.log", t.RuntimePath) + crowdsecLogFile := filepath.Join(t.RuntimePath, "log", "crowdsec.log") // machine add cmdArgs := []string{"-c", t.RuntimeConfigFilePath, "machines", "add", "testMachine", "--force", "--auto"} @@ -363,7 +363,10 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { }, } - err = nucleiConfig.RunNucleiTemplate(t.Name, t.Config.NucleiTemplate, t.NucleiTargetHost) + // the value in config is relative + nucleiTemplate := filepath.Join(t.Path, t.Config.NucleiTemplate) + + err = nucleiConfig.RunNucleiTemplate(t.Name, nucleiTemplate, t.NucleiTargetHost) if t.Config.ExpectedNucleiFailure { if err != nil && errors.Is(err, ErrNucleiTemplateFail) { log.Infof("Appsec test %s failed as expected", t.Name) @@ -381,7 +384,6 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } } else { if err == nil { - log.Infof("Appsec test %s succeeded", t.Name) t.Success = true } else { log.Errorf("Appsec test %s failed: %s", t.Name, err) @@ -419,7 +421,7 @@ func 
(t *HubTestItem) RunWithLogFile() error { logFile := filepath.Join(testPath, t.Config.LogFile) logType := t.Config.LogType - dsn := fmt.Sprintf("file://%s", logFile) + dsn := "file://" + logFile logFileStat, err := os.Stat(logFile) if err != nil { @@ -441,7 +443,7 @@ func (t *HubTestItem) RunWithLogFile() error { if err != nil { if !strings.Contains(string(output), "unable to create machine: user 'testMachine': user already exist") { fmt.Println(string(output)) - return fmt.Errorf("fail to run '%s' for test '%s': %v", cscliRegisterCmd.String(), t.Name, err) + return fmt.Errorf("fail to run '%s' for test '%s': %w", cscliRegisterCmd.String(), t.Name, err) } } @@ -464,7 +466,7 @@ func (t *HubTestItem) RunWithLogFile() error { } if err != nil { - return fmt.Errorf("fail to run '%s' for test '%s': %v", crowdsecCmd.String(), t.Name, err) + return fmt.Errorf("fail to run '%s' for test '%s': %w", crowdsecCmd.String(), t.Name, err) } // assert parsers @@ -570,17 +572,17 @@ func (t *HubTestItem) Run(ctx context.Context, patternDir string) error { // copy template config file to runtime folder if err = Copy(t.TemplateConfigPath, t.RuntimeConfigFilePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateConfigPath, t.RuntimeConfigFilePath, err) + return fmt.Errorf("unable to copy '%s' to '%s': %w", t.TemplateConfigPath, t.RuntimeConfigFilePath, err) } // copy template profile file to runtime folder if err = Copy(t.TemplateProfilePath, t.RuntimeProfileFilePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateProfilePath, t.RuntimeProfileFilePath, err) + return fmt.Errorf("unable to copy '%s' to '%s': %w", t.TemplateProfilePath, t.RuntimeProfileFilePath, err) } // copy template simulation file to runtime folder if err = Copy(t.TemplateSimulationPath, t.RuntimeSimulationFilePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) + return 
fmt.Errorf("unable to copy '%s' to '%s': %w", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) } // copy template patterns folder to runtime folder @@ -590,7 +592,7 @@ func (t *HubTestItem) Run(ctx context.Context, patternDir string) error { // create the appsec-configs dir if err = os.MkdirAll(filepath.Join(t.RuntimePath, "appsec-configs"), os.ModePerm); err != nil { - return fmt.Errorf("unable to create folder '%s': %+v", t.RuntimePath, err) + return fmt.Errorf("unable to create folder '%s': %w", t.RuntimePath, err) } // if it's an appsec rule test, we need acquis and appsec profile @@ -599,13 +601,13 @@ func (t *HubTestItem) Run(ctx context.Context, patternDir string) error { log.Debugf("copying %s to %s", t.TemplateAcquisPath, t.RuntimeAcquisFilePath) if err = Copy(t.TemplateAcquisPath, t.RuntimeAcquisFilePath); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateAcquisPath, t.RuntimeAcquisFilePath, err) + return fmt.Errorf("unable to copy '%s' to '%s': %w", t.TemplateAcquisPath, t.RuntimeAcquisFilePath, err) } log.Debugf("copying %s to %s", t.TemplateAppsecProfilePath, filepath.Join(t.RuntimePath, "appsec-configs", "config.yaml")) // copy template appsec-config file to runtime folder if err = Copy(t.TemplateAppsecProfilePath, filepath.Join(t.RuntimePath, "appsec-configs", "config.yaml")); err != nil { - return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateAppsecProfilePath, filepath.Join(t.RuntimePath, "appsec-configs", "config.yaml"), err) + return fmt.Errorf("unable to copy '%s' to '%s': %w", t.TemplateAppsecProfilePath, filepath.Join(t.RuntimePath, "appsec-configs", "config.yaml"), err) } } else { // otherwise we drop a blank acquis file if err = os.WriteFile(t.RuntimeAcquisFilePath, []byte(""), os.ModePerm); err != nil { From ea1a1d733b585f1c62d4396c1eee2a4291d82cf3 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Mar 2025 11:25:57 +0100 Subject: [PATCH 468/581] 
empty back-merge from release branch (#3527) From 2dcc3ae0dfc9be6e799742c22ed3814067a22bd5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 20 Mar 2025 11:28:37 +0100 Subject: [PATCH 469/581] CI: pin hub branch during functional tests (#3526) --- cmd/crowdsec-cli/clihub/hub.go | 23 +++++++++++++++++++++++ test/lib/config/config-global | 13 +++++++++++++ test/lib/config/config-local | 13 +++++++++++++ 3 files changed, 49 insertions(+) diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index 87950810219..0a33d493469 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ b/cmd/crowdsec-cli/clihub/hub.go @@ -46,6 +46,7 @@ cscli hub upgrade`, DisableAutoGenTag: true, } + cmd.AddCommand(cli.newBranchCmd()) cmd.AddCommand(cli.newListCmd()) cmd.AddCommand(cli.newUpdateCmd()) cmd.AddCommand(cli.newUpgradeCmd()) @@ -84,6 +85,28 @@ func (cli *cliHub) List(out io.Writer, hub *cwhub.Hub, all bool) error { return nil } +func (cli *cliHub) newBranchCmd() *cobra.Command { + var all bool + + cmd := &cobra.Command{ + Use: "branch", + Short: "Show selected hub branch", + Long: "Display the hub branch to be used, depending on configuration and crowdsec version", + Args: args.NoArgs, + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + branch := require.HubBranch(cmd.Context(), cli.cfg()) + fmt.Println(branch) + return nil + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&all, "all", "a", false, "List all available items, including those not installed") + + return cmd +} + func (cli *cliHub) newListCmd() *cobra.Command { var all bool diff --git a/test/lib/config/config-global b/test/lib/config/config-global index 9b2b71c1dd1..83d95e68e29 100755 --- a/test/lib/config/config-global +++ b/test/lib/config/config-global @@ -61,6 +61,19 @@ config_prepare() { .api.server.listen_socket="/run/crowdsec.sock" | .config_paths.config_dir |= sub("/$", "") ' "${CONFIG_DIR}/config.yaml" + + # pin the branch to 
avoid having to query the last version repeatedly. + # this means the fixture could possibly go stale (i.e. use the wrong branch) if a new version is released, + # but that shouldn't impact the tests anyway. + + HUB_BRANCH=$("$CSCLI" hub branch 2>/dev/null) + export HUB_BRANCH + + echo "Setting up tests with hub branch $HUB_BRANCH" + + # need a working config, so we do it as a separate step. + + yq -i e '.cscli.hub_branch=strenv(HUB_BRANCH)' "${CONFIG_DIR}/config.yaml" } make_init_data() { diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 3e3c806b616..54ac8550c5f 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -98,6 +98,19 @@ config_generate() { .api.server.console_path=strenv(CONFIG_DIR)+"/console.yaml" | del(.api.server.online_client) ' ../config/config.yaml >"${CONFIG_DIR}/config.yaml" + + # pin the branch to avoid having to query the last version repeatedly. + # this means the fixture could possibly go stale (i.e. use the wrong branch) if a new version is released, + # but that shouldn't impact the tests anyway. + + HUB_BRANCH=$("$CSCLI" hub branch 2>/dev/null) + export HUB_BRANCH + + echo "Setting up tests with hub branch $HUB_BRANCH" + + # need a working config, so we do it as a separate step. 
+ + yq -i e '.cscli.hub_branch=strenv(HUB_BRANCH)' "${CONFIG_DIR}/config.yaml" } make_init_data() { From 3843213d5cf323c7ace1c86a5d7a9cfb6e95571a Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 21 Mar 2025 14:30:27 +0100 Subject: [PATCH 470/581] use replace for coraza instead of renaming the entire package (#3530) --- .golangci.yml | 1 + go.mod | 23 ++++++------ go.sum | 36 +++++++++---------- .../modules/appsec/appsec_runner.go | 4 +-- .../modules/appsec/bodyprocessors/raw.go | 4 +-- pkg/acquisition/modules/appsec/rx_operator.go | 4 +-- pkg/acquisition/modules/appsec/utils.go | 4 +-- pkg/appsec/coraza_logger.go | 2 +- pkg/appsec/tx.go | 8 ++--- 9 files changed, 44 insertions(+), 42 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index e8251d697f3..afc9f9f421c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -46,6 +46,7 @@ linters-settings: gomoddirectives: replace-allow-list: - golang.org/x/time/rate + - github.com/corazawaf/coraza/v3 govet: enable-all: true diff --git a/go.mod b/go.mod index c136c6485aa..e1b686cd995 100644 --- a/go.mod +++ b/go.mod @@ -23,12 +23,11 @@ require ( github.com/corazawaf/libinjection-go v0.2.2 github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/creack/pty v1.1.21 // indirect - github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 github.com/crowdsecurity/go-cs-lib v0.0.16 github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 - github.com/davecgh/go-spew v1.1.1 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/dghubble/sling v1.4.2 github.com/distribution/reference v0.6.0 // indirect github.com/docker/docker v27.3.1+incompatible @@ -99,12 +98,12 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect - golang.org/x/crypto v0.32.0 + 
golang.org/x/crypto v0.36.0 golang.org/x/mod v0.23.0 - golang.org/x/net v0.34.0 // indirect - golang.org/x/sync v0.11.0 // indirect - golang.org/x/sys v0.30.0 - golang.org/x/text v0.21.0 + golang.org/x/net v0.37.0 // indirect + golang.org/x/sync v0.12.0 + golang.org/x/sys v0.31.0 + golang.org/x/text v0.23.0 golang.org/x/time v0.6.0 // indirect google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.36.3 @@ -117,6 +116,8 @@ require ( ) +require github.com/corazawaf/coraza/v3 v3.3.2 + require ( ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 // indirect github.com/Masterminds/goutils v1.1.1 // indirect @@ -130,7 +131,6 @@ require ( github.com/bytedance/sonic/loader v0.2.1 // indirect github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect - github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc // indirect github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.7 // indirect @@ -164,7 +164,6 @@ require ( github.com/jackc/pgproto3/v2 v2.3.3 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgtype v1.14.0 // indirect - github.com/jcchavezs/mergefs v0.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -188,7 +187,7 @@ require ( github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect @@ -219,7 +218,7 @@ require ( 
go.opentelemetry.io/otel/metric v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.12.0 // indirect - golang.org/x/term v0.28.0 // indirect + golang.org/x/term v0.30.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect @@ -235,3 +234,5 @@ require ( ) replace golang.org/x/time/rate => github.com/crowdsecurity/crowdsec/pkg/time/rate v0.0.0 + +replace github.com/corazawaf/coraza/v3 => github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a diff --git a/go.sum b/go.sum index d618d71c8d2..c410ad70450 100644 --- a/go.sum +++ b/go.sum @@ -93,8 +93,6 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc h1:OlJhrgI3I+FLUCTI3JJW8MoqyM78WbqJjecqMnqG+wc= github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc/go.mod h1:7rsocqNDkTCira5T0M7buoKR2ehh7YZiPkzxRuAgvVU= -github.com/corazawaf/coraza/v3 v3.3.2 h1:eG1HPLySTR9lND6y6fPOajubwbuHRF6aXCsCtxyqKTY= -github.com/corazawaf/coraza/v3 v3.3.2/go.mod h1:4EqMZkRoil11FnResCT/2JIg61dH+6D7F48VG8SVzuA= github.com/corazawaf/libinjection-go v0.2.2 h1:Chzodvb6+NXh6wew5/yhD0Ggioif9ACrQGR4qjTCs1g= github.com/corazawaf/libinjection-go v0.2.2/go.mod h1:OP4TM7xdJ2skyXqNX1AN1wN5nNZEmJNuWbNPOItn7aw= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -109,8 +107,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= github.com/creack/pty v1.1.21/go.mod 
h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7 h1:nIwAjapWmiQD3W/uAWYE3z+DC5Coy/zTyPBCJ379fAw= -github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7/go.mod h1:A+uciRXu+yhZcHMtM052bSM6vyJsMMU37NJN+tVoGqo= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a h1:2Nyr+47Y/K68wohQWCrE7jKRIOpp6hJ29XCEQO3FhOw= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a/go.mod h1:xSaXWOhFMSbrV8qOOfBKAyw3aOqfwaSaOy5BgSF8XlA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= github.com/crowdsecurity/go-cs-lib v0.0.16 h1:2/htodjwc/sfsv4deX8F/2Fzg1bOI8w3O1/BPSvvsB0= @@ -121,8 +119,9 @@ github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5 github.com/crowdsecurity/machineid v1.0.2/go.mod h1:XWUSlnS0R0+u/JK5ulidwlbceNT3ZOCKteoVQEn6Luo= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dghubble/sling v1.4.2 h1:vs1HIGBbSl2SEALyU+irpYFLZMfc49Fp+jYryFebQjM= github.com/dghubble/sling v1.4.2/go.mod h1:o0arCOz0HwfqYQJLrRtqunaWOn4X6jxE/6ORKRpVTD4= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -588,8 +587,9 @@ github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -804,8 +804,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= 
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -837,8 +837,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -848,8 +848,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -885,8 +885,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -894,8 +894,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -908,8 +908,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index b339c4ad532..1d37781aee2 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -11,8 +11,8 @@ import ( log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" - "github.com/crowdsecurity/coraza/v3" - corazatypes "github.com/crowdsecurity/coraza/v3/types" + "github.com/corazawaf/coraza/v3" + corazatypes "github.com/corazawaf/coraza/v3/types" // load body processors via init() _ "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/appsec/bodyprocessors" diff --git a/pkg/acquisition/modules/appsec/bodyprocessors/raw.go b/pkg/acquisition/modules/appsec/bodyprocessors/raw.go index aa467ecf048..b175f09ad80 100644 --- a/pkg/acquisition/modules/appsec/bodyprocessors/raw.go +++ b/pkg/acquisition/modules/appsec/bodyprocessors/raw.go @@ -5,8 +5,8 @@ import ( "strconv" "strings" - "github.com/crowdsecurity/coraza/v3/experimental/plugins" - 
"github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" + "github.com/corazawaf/coraza/v3/experimental/plugins" + "github.com/corazawaf/coraza/v3/experimental/plugins/plugintypes" ) type rawBodyProcessor struct{} diff --git a/pkg/acquisition/modules/appsec/rx_operator.go b/pkg/acquisition/modules/appsec/rx_operator.go index 4b16296fd40..edeacaa6525 100644 --- a/pkg/acquisition/modules/appsec/rx_operator.go +++ b/pkg/acquisition/modules/appsec/rx_operator.go @@ -8,8 +8,8 @@ import ( "github.com/wasilibs/go-re2" "github.com/wasilibs/go-re2/experimental" - "github.com/crowdsecurity/coraza/v3/experimental/plugins" - "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" + "github.com/corazawaf/coraza/v3/experimental/plugins" + "github.com/corazawaf/coraza/v3/experimental/plugins/plugintypes" ) type rx struct { diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index fece953b0d6..0535a8f128a 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -11,8 +11,8 @@ import ( "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/coraza/v3/collection" - "github.com/crowdsecurity/coraza/v3/types/variables" + "github.com/corazawaf/coraza/v3/collection" + "github.com/corazawaf/coraza/v3/types/variables" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/crowdsec/pkg/alertcontext" diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index 93e31be5876..204b31a735d 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -6,7 +6,7 @@ import ( log "github.com/sirupsen/logrus" - dbg "github.com/crowdsecurity/coraza/v3/debuglog" + dbg "github.com/corazawaf/coraza/v3/debuglog" ) var DebugRules = map[int]bool{} diff --git a/pkg/appsec/tx.go b/pkg/appsec/tx.go index afa21a53c37..56557b5f385 100644 --- a/pkg/appsec/tx.go +++ b/pkg/appsec/tx.go @@ -1,10 +1,10 @@ 
package appsec import ( - "github.com/crowdsecurity/coraza/v3" - "github.com/crowdsecurity/coraza/v3/experimental" - "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" - "github.com/crowdsecurity/coraza/v3/types" + "github.com/corazawaf/coraza/v3" + "github.com/corazawaf/coraza/v3/experimental" + "github.com/corazawaf/coraza/v3/experimental/plugins/plugintypes" + "github.com/corazawaf/coraza/v3/types" ) type ExtendedTransaction struct { From 2641a6cc07b4a5e573c26140b98402b2db660831 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 24 Mar 2025 15:05:53 +0100 Subject: [PATCH 471/581] only warn about capi_whitelists_path being deprecated if actually in use (#3535) --- pkg/apiserver/apic.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 821f6538169..2565292c12b 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -854,7 +854,7 @@ func (a *apic) ApplyApicWhitelists(ctx context.Context, decisions []*models.Deci log.Errorf("while getting allowlists content: %s", err) } - if a.whitelists != nil { + if a.whitelists != nil && (len(a.whitelists.Cidrs) > 0 || len(a.whitelists.Ips) > 0) { log.Warn("capi_whitelists_path is deprecated, please use centralized allowlists instead. 
See https://docs.crowdsec.net/docs/next/local_api/centralized_allowlists.") } From 9a3f94dc8f318d3ca52e5bf9493f17492af52310 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 24 Mar 2025 15:10:10 +0100 Subject: [PATCH 472/581] fix #3532 "reload causes crashing process" (#3534) --- pkg/apiclient/client.go | 4 ---- test/bats/01_crowdsec.bats | 22 ++++++++++++++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 4828a1844c3..9ea683c41bf 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -72,10 +72,6 @@ type service struct { } func InitLAPIClient(ctx context.Context, apiUrl string, papiUrl string, login string, password string, scenarios []string) error { - if lapiClient != nil { - return errors.New("client already initialized") - } - apiURL, err := url.Parse(apiUrl) if err != nil { return fmt.Errorf("parsing api url ('%s'): %w", apiURL, err) diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index 2d2807b3980..fb7d3f16619 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -124,7 +124,29 @@ teardown() { assert_stderr --partial "api server init: unable to run local API: controller init: CS_LAPI_SECRET not strong enough" } +@test "crowdsec - reload" { + # we test that reload works as intended with the agent enabled + + logfile="$(config_get '.common.log_dir')/crowdsec.log" + + rune -0 truncate -s0 "$logfile" + + rune -0 ./instance-crowdsec start-pid + PID="$output" + + sleep .5 + rune -0 kill -HUP "$PID" + + sleep 5 + rune -0 ps "$PID" + + assert_file_contains "$logfile" "Reload is finished" +} + @test "crowdsec - reload (change of logfile, disabled agent)" { + # we test that reload works as intended with the agent disabled + # and that we can change the log configuration + logdir1=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) log_old="${logdir1}/crowdsec.log" config_set ".common.log_dir=\"${logdir1}\"" 
From f735457ca4bc262b93b0d4a9366379b769458cf1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 24 Mar 2025 15:13:24 +0100 Subject: [PATCH 473/581] revert ActionPlan info/warning to StandardLogger (#3536) --- pkg/hubops/disable.go | 4 ++- pkg/hubops/download.go | 8 ++--- pkg/hubops/plan.go | 9 ------ test/bats/20_hub.bats | 3 +- test/bats/20_hub_items.bats | 10 +++---- test/bats/cscli-hubtype-install.bats | 20 +++++-------- test/bats/cscli-hubtype-upgrade.bats | 6 ++-- test/lib/bats-assert | 2 +- test/lib/setup_file.sh | 44 ---------------------------- 9 files changed, 25 insertions(+), 81 deletions(-) diff --git a/pkg/hubops/disable.go b/pkg/hubops/disable.go index 5ac959319cd..2797cdfacaf 100644 --- a/pkg/hubops/disable.go +++ b/pkg/hubops/disable.go @@ -5,6 +5,8 @@ import ( "fmt" "os" + log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -53,7 +55,7 @@ func (c *DisableCommand) Prepare(plan *ActionPlan) (bool, error) { i := c.Item if i.State.IsLocal() { - plan.Warning(i.FQName() + " is a local item, please delete manually") + log.Warnf("%s is a local item, please delete manually", i.FQName()) return false, nil } diff --git a/pkg/hubops/download.go b/pkg/hubops/download.go index c4ee4e5b017..fab514fcdfd 100644 --- a/pkg/hubops/download.go +++ b/pkg/hubops/download.go @@ -10,7 +10,7 @@ import ( "time" "github.com/fatih/color" - "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" "gopkg.in/yaml.v3" "github.com/crowdsecurity/go-cs-lib/downloader" @@ -38,13 +38,13 @@ func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { i := c.Item if i.State.IsLocal() { - plan.Info(i.FQName() + " - not downloading local item") + log.Infof("%s - not downloading local item", i.FQName()) return false, nil } // XXX: if it's tainted do we upgrade the dependencies anyway? 
if i.State.Tainted && !c.Force { - plan.Warning(i.FQName() + " is tainted, use '--force' to overwrite") + log.Warnf("%s is tainted, use '--force' to overwrite", i.FQName()) return false, nil } @@ -132,7 +132,7 @@ func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader BeforeRequest(func(req *http.Request) { fmt.Printf("downloading %s\n", req.URL) }). - WithLogger(logrus.WithField("url", dataS.SourceURL)) + WithLogger(log.WithField("url", dataS.SourceURL)) if !force { d = d.WithLastModified(). diff --git a/pkg/hubops/plan.go b/pkg/hubops/plan.go index b1691e71183..7b2260f64c4 100644 --- a/pkg/hubops/plan.go +++ b/pkg/hubops/plan.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/AlecAivazis/survey/v2" - "github.com/fatih/color" "github.com/crowdsecurity/go-cs-lib/slicetools" @@ -96,14 +95,6 @@ func (p *ActionPlan) AddCommand(c Command) error { return nil } -func (p *ActionPlan) Info(msg string) { - fmt.Println(msg) -} - -func (p *ActionPlan) Warning(msg string) { - fmt.Printf("%s %s\n", color.YellowString("WARN"), msg) -} - // Description returns a string representation of the action plan. // If verbose is false, the operations are grouped by item type and operation type. // If verbose is true, they are listed as they appear in the command slice. 
diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index ae7da4dcd9f..52bed6c9f78 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -172,8 +172,9 @@ teardown() { mkdir -p "$CONFIG_DIR/collections" touch "$CONFIG_DIR/collections/foo.yaml" rune -0 cscli hub upgrade + assert_stderr --partial 'collections:foo.yaml - not downloading local item' + assert_output - <<-EOT - collections:foo.yaml - not downloading local item Action plan: 🔄 check & update data files EOT diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 62162577ae7..b3f929e1947 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -155,29 +155,29 @@ teardown() { rune -0 mkdir -p "$CONFIG_DIR/scenarios" rune -0 touch "$CONFIG_DIR/scenarios/foobar.yaml" rune -0 cscli scenarios remove foobar.yaml + assert_stderr --partial 'scenarios:foobar.yaml is a local item, please delete manually' assert_output - <<-EOT - WARN scenarios:foobar.yaml is a local item, please delete manually Nothing to do. EOT rune -0 cscli scenarios remove foobar.yaml --purge + assert_stderr --partial 'scenarios:foobar.yaml is a local item, please delete manually' assert_output - <<-EOT - WARN scenarios:foobar.yaml is a local item, please delete manually Nothing to do. EOT rune -0 cscli scenarios remove foobar.yaml --force + assert_stderr --partial 'scenarios:foobar.yaml is a local item, please delete manually' assert_output - <<-EOT - WARN scenarios:foobar.yaml is a local item, please delete manually Nothing to do. 
EOT rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli scenarios remove --all - assert_line "WARN scenarios:foobar.yaml is a local item, please delete manually" assert_line "disabling scenarios:crowdsecurity/ssh-bf" + assert_stderr --partial "scenarios:foobar.yaml is a local item, please delete manually" rune -0 cscli scenarios remove --all --purge - assert_line "WARN scenarios:foobar.yaml is a local item, please delete manually" + assert_stderr --partial "scenarios:foobar.yaml is a local item, please delete manually" assert_line "purging scenarios:crowdsecurity/ssh-bf" } diff --git a/test/bats/cscli-hubtype-install.bats b/test/bats/cscli-hubtype-install.bats index f4243f6fa6a..219ccd04e0a 100644 --- a/test/bats/cscli-hubtype-install.bats +++ b/test/bats/cscli-hubtype-install.bats @@ -166,18 +166,16 @@ get_latest_version() { rune -0 cscli parsers install crowdsecurity/whitelists --dry-run assert_output - --stderr <<-EOT - WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite Nothing to do. EOT - refute_stderr + assert_stderr --partial "parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite" # XXX should this fail with status 1 instead? rune -0 cscli parsers install crowdsecurity/whitelists assert_output - <<-EOT - WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite Nothing to do. EOT - refute_stderr + assert_stderr --partial "parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite" rune -0 cscli parsers install crowdsecurity/whitelists --force latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) @@ -230,17 +228,14 @@ get_latest_version() { # and maybe re-evaluate the --ignore flag rune -0 cscli parsers install crowdsecurity/whitelists --ignore assert_output - <<-EOT - WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite Nothing to do. 
EOT - refute_stderr + assert_stderr --partial "parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite" # error on one item, should still install the others rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/pgsql-logs --ignore - refute_stderr latest_pgsql=$(get_latest_version parsers crowdsecurity/pgsql-logs) assert_output - <<-EOT - WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite Action plan: 📥 download parsers: crowdsecurity/pgsql-logs ($latest_pgsql) @@ -252,6 +247,7 @@ get_latest_version() { $RELOAD_MESSAGE EOT + assert_stderr --partial "parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite" rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics -o json rune -0 jq -e '.installed==true' <(output) } @@ -268,14 +264,14 @@ get_latest_version() { # attempt to install from hub rune -0 cscli parsers install crowdsecurity/sshd-logs - assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + assert_stderr --partial 'parsers:crowdsecurity/sshd-logs - not downloading local item' rune -0 cscli parsers list -o json rune -0 jq -c '.parsers[] | [.name,.status]' <(output) assert_json '["crowdsecurity/sshd-logs","enabled,local"]' # attempt to install from a collection rune -0 cscli collections install crowdsecurity/sshd - assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + assert_stderr --partial 'parsers:crowdsecurity/sshd-logs - not downloading local item' # verify it installed the rest of the collection assert_line 'enabling contexts:crowdsecurity/bf_base' @@ -293,11 +289,11 @@ get_latest_version() { # attempt to install from hub rune -0 cscli parsers install crowdsecurity/sshd-logs - assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + assert_stderr --partial 'parsers:crowdsecurity/sshd-logs - not downloading local item' # attempt to install from a collection rune -0 cscli collections install crowdsecurity/sshd - 
assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + assert_stderr --partial 'parsers:crowdsecurity/sshd-logs - not downloading local item' # verify it installed the rest of the collection assert_line 'enabling contexts:crowdsecurity/bf_base' diff --git a/test/bats/cscli-hubtype-upgrade.bats b/test/bats/cscli-hubtype-upgrade.bats index 5dd789f649e..c0bbe55b01b 100644 --- a/test/bats/cscli-hubtype-upgrade.bats +++ b/test/bats/cscli-hubtype-upgrade.bats @@ -165,17 +165,15 @@ get_latest_version() { rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run assert_output - <<-EOT - WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite Nothing to do. EOT - refute_stderr + assert_stderr --partial "parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite" rune -0 cscli parsers upgrade crowdsecurity/whitelists assert_output - <<-EOT - WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite Nothing to do. EOT - refute_stderr + assert_stderr --partial "parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite" latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) diff --git a/test/lib/bats-assert b/test/lib/bats-assert index 44913ffe602..b93143a1bfb 160000 --- a/test/lib/bats-assert +++ b/test/lib/bats-assert @@ -1 +1 @@ -Subproject commit 44913ffe6020d1561c4c4d1e26cda8e07a1f374f +Subproject commit b93143a1bfbde41d9b7343aab0d36f3ef6549e6b diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 902edc5de82..ad6962076f3 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -200,50 +200,6 @@ is_stdin_empty() { } export -f is_stdin_empty -assert_stderr() { - # it is never useful to call this without arguments - if [[ "$#" -eq 0 ]]; then - # maybe the caller forgot to use '-' with an heredoc - if ! 
is_stdin_empty; then - fail "${FUNCNAME[0]}: called with stdin and no arguments (heredoc?)" - fi - fail "${FUNCNAME[0]}: called with no arguments" - fi - - local oldout="${output}" - run -0 echo "${stderr}" - assert_output "$@" - output="${oldout}" -} -export -f assert_stderr - -# like refute_output, but for stderr -refute_stderr() { - # calling this without arguments is ok, as long as stdin in empty - if ! is_stdin_empty; then - fail "${FUNCNAME[0]}: called with stdin (heredoc?)" - fi - - local oldout="${output}" - run -0 echo "${stderr}" - refute_output "$@" - output="${oldout}" -} -export -f refute_stderr - -# like assert_output, but for stderr -assert_stderr_line() { - if [[ "$#" -eq 0 ]]; then - fail "${FUNCNAME[0]}: called with no arguments" - fi - - local oldout="${output}" - run -0 echo "${stderr}" - assert_line "$@" - output="${oldout}" -} -export -f assert_stderr_line - # remove all installed items and data hub_purge_all() { local CONFIG_DIR From a7184754226f3dbe947d2469de7a2228c9d76484 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 24 Mar 2025 15:15:43 +0100 Subject: [PATCH 474/581] CI: enable linter "containedctx" (#3529) --- .golangci.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index afc9f9f421c..b106f4d1991 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -280,7 +280,6 @@ linters: # Recommended? (requires some work) # - - containedctx # containedctx is a linter that detects struct contained context.Context field - errname # Checks that sentinel errors are prefixed with the `Err` and error types are suffixed with the `Error`. - ireturn # Accept Interfaces, Return Concrete Types - mnd # An analyzer to detect magic numbers. 
@@ -503,3 +502,8 @@ issues: - usetesting path: "pkg/apiserver/(.+)_test.go" text: "os.CreateTemp.* could be replaced by os.CreateTemp.*" + + - linters: + - containedctx + path: "cmd/notification-file/main.go" + text: "found a struct that contains a context.Context field" From c245b1e6f8bd5dd8a9fae454cb767f027b6899c5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 24 Mar 2025 15:16:40 +0100 Subject: [PATCH 475/581] CI: enable linter "noctx" (#3528) * CI: enable linter "noctx" * rename NewRequestWithContext() -> PrepareRequest() --- .golangci.yml | 1 - pkg/acquisition/modules/http/http_test.go | 30 ++++++++++++++++++---- pkg/apiclient/alerts_service.go | 10 ++++---- pkg/apiclient/allowlists_service.go | 8 +++--- pkg/apiclient/auth_service.go | 8 +++--- pkg/apiclient/client_http.go | 2 +- pkg/apiclient/decisions_service.go | 14 +++++----- pkg/apiclient/decisions_sync_service.go | 2 +- pkg/apiclient/heartbeat.go | 2 +- pkg/apiclient/metrics.go | 2 +- pkg/apiclient/signal.go | 2 +- pkg/apiclient/usagemetrics.go | 2 +- pkg/apiserver/apic.go | 8 +++++- pkg/appsec/ja4h/ja4h_test.go | 31 ++++++++++++++--------- 14 files changed, 77 insertions(+), 45 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index b106f4d1991..675c3b24b78 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -284,7 +284,6 @@ linters: - ireturn # Accept Interfaces, Return Concrete Types - mnd # An analyzer to detect magic numbers. - nilnil # Checks that there is no simultaneous return of `nil` error and an invalid value. 
- - noctx # Finds sending http request without context.Context - unparam # Reports unused function parameters # diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go index 6422498b543..552fe90e387 100644 --- a/pkg/acquisition/modules/http/http_test.go +++ b/pkg/acquisition/modules/http/http_test.go @@ -254,7 +254,12 @@ basic_auth: time.Sleep(1 * time.Second) - res, err := http.Get(fmt.Sprintf("%s/test", testHTTPServerAddr)) + ctx := t.Context() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, testHTTPServerAddr + "/test", http.NoBody) + require.NoError(t, err) + + res, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusMethodNotAllowed, res.StatusCode) @@ -265,6 +270,8 @@ basic_auth: } func TestStreamingAcquisitionUnknownPath(t *testing.T) { + ctx := t.Context() + h := &HTTPSource{} _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -277,7 +284,10 @@ basic_auth: time.Sleep(1 * time.Second) - res, err := http.Get(fmt.Sprintf("%s/unknown", testHTTPServerAddr)) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, testHTTPServerAddr + "/unknown", http.NoBody) + require.NoError(t, err) + + res, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusNotFound, res.StatusCode) @@ -303,11 +313,15 @@ basic_auth: client := &http.Client{} - resp, err := http.Post(fmt.Sprintf("%s/test", testHTTPServerAddr), "application/json", strings.NewReader("test")) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr + "/test", strings.NewReader("test")) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader("test")) + req, err = 
http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr + "/test", strings.NewReader("test")) require.NoError(t, err) req.SetBasicAuth("test", "WrongPassword") @@ -553,6 +567,8 @@ timeout: 1s`), 0) } func TestStreamingAcquisitionTLSHTTPRequest(t *testing.T) { + ctx := t.Context() + h := &HTTPSource{} _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http @@ -566,7 +582,11 @@ tls: time.Sleep(1 * time.Second) - resp, err := http.Post(fmt.Sprintf("%s/test", testHTTPServerAddr), "application/json", strings.NewReader("test")) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr + "/test", strings.NewReader("test")) + require.NoError(t, err) + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) require.NoError(t, err) assert.Equal(t, http.StatusBadRequest, resp.StatusCode) diff --git a/pkg/apiclient/alerts_service.go b/pkg/apiclient/alerts_service.go index 5869ecc3753..1f84862a811 100644 --- a/pkg/apiclient/alerts_service.go +++ b/pkg/apiclient/alerts_service.go @@ -49,7 +49,7 @@ type AlertsDeleteOpts struct { func (s *AlertsService) Add(ctx context.Context, alerts models.AddAlertsRequest) (*models.AddAlertsResponse, *Response, error) { u := fmt.Sprintf("%s/alerts", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &alerts) + req, err := s.client.PrepareRequest(ctx, http.MethodPost, u, &alerts) if err != nil { return nil, nil, err } @@ -78,7 +78,7 @@ func (s *AlertsService) List(ctx context.Context, opts AlertsListOpts) (*models. 
URI = fmt.Sprintf("%s?%s", URI, params.Encode()) } - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, URI, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, URI, nil) if err != nil { return nil, nil, fmt.Errorf("building request: %w", err) } @@ -102,7 +102,7 @@ func (s *AlertsService) Delete(ctx context.Context, opts AlertsDeleteOpts) (*mod u := fmt.Sprintf("%s/alerts?%s", s.client.URLPrefix, params.Encode()) - req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodDelete, u, nil) if err != nil { return nil, nil, err } @@ -120,7 +120,7 @@ func (s *AlertsService) Delete(ctx context.Context, opts AlertsDeleteOpts) (*mod func (s *AlertsService) DeleteOne(ctx context.Context, alertID string) (*models.DeleteAlertsResponse, *Response, error) { u := fmt.Sprintf("%s/alerts/%s", s.client.URLPrefix, alertID) - req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodDelete, u, nil) if err != nil { return nil, nil, err } @@ -138,7 +138,7 @@ func (s *AlertsService) DeleteOne(ctx context.Context, alertID string) (*models. func (s *AlertsService) GetByID(ctx context.Context, alertID int) (*models.Alert, *Response, error) { u := fmt.Sprintf("%s/alerts/%d", s.client.URLPrefix, alertID) - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } diff --git a/pkg/apiclient/allowlists_service.go b/pkg/apiclient/allowlists_service.go index c81970d218d..ef1dc85da65 100644 --- a/pkg/apiclient/allowlists_service.go +++ b/pkg/apiclient/allowlists_service.go @@ -27,7 +27,7 @@ func (s *AllowlistsService) List(ctx context.Context, opts AllowlistListOpts) (* u += "?" 
+ params.Encode() - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } @@ -58,7 +58,7 @@ func (s *AllowlistsService) Get(ctx context.Context, name string, opts Allowlist log.Debugf("GET %s", u) - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } @@ -76,7 +76,7 @@ func (s *AllowlistsService) Get(ctx context.Context, name string, opts Allowlist func (s *AllowlistsService) CheckIfAllowlisted(ctx context.Context, value string) (bool, *Response, error) { u := s.client.URLPrefix + "/allowlists/check/" + value - req, err := s.client.NewRequestWithContext(ctx, http.MethodHead, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodHead, u, nil) if err != nil { return false, nil, err } @@ -94,7 +94,7 @@ func (s *AllowlistsService) CheckIfAllowlisted(ctx context.Context, value string func (s *AllowlistsService) CheckIfAllowlistedWithReason(ctx context.Context, value string) (*models.CheckAllowlistResponse, *Response, error) { u := s.client.URLPrefix + "/allowlists/check/" + value - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } diff --git a/pkg/apiclient/auth_service.go b/pkg/apiclient/auth_service.go index 47d3daaaaaa..1dfdeb23c5e 100644 --- a/pkg/apiclient/auth_service.go +++ b/pkg/apiclient/auth_service.go @@ -21,7 +21,7 @@ type enrollRequest struct { func (s *AuthService) UnregisterWatcher(ctx context.Context) (*Response, error) { u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodDelete, u, nil) if err != nil { return nil, err } @@ -37,7 +37,7 @@ func (s 
*AuthService) UnregisterWatcher(ctx context.Context) (*Response, error) func (s *AuthService) RegisterWatcher(ctx context.Context, registration models.WatcherRegistrationRequest) (*Response, error) { u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, ®istration) + req, err := s.client.PrepareRequest(ctx, http.MethodPost, u, ®istration) if err != nil { return nil, err } @@ -55,7 +55,7 @@ func (s *AuthService) AuthenticateWatcher(ctx context.Context, auth models.Watch u := fmt.Sprintf("%s/watchers/login", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &auth) + req, err := s.client.PrepareRequest(ctx, http.MethodPost, u, &auth) if err != nil { return authResp, nil, err } @@ -71,7 +71,7 @@ func (s *AuthService) AuthenticateWatcher(ctx context.Context, auth models.Watch func (s *AuthService) EnrollWatcher(ctx context.Context, enrollKey string, name string, tags []string, overwrite bool) (*Response, error) { u := fmt.Sprintf("%s/watchers/enroll", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &enrollRequest{EnrollKey: enrollKey, Name: name, Tags: tags, Overwrite: overwrite}) + req, err := s.client.PrepareRequest(ctx, http.MethodPost, u, &enrollRequest{EnrollKey: enrollKey, Name: name, Tags: tags, Overwrite: overwrite}) if err != nil { return nil, err } diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go index a2b1ff1490f..cd35c9bb795 100644 --- a/pkg/apiclient/client_http.go +++ b/pkg/apiclient/client_http.go @@ -15,7 +15,7 @@ import ( log "github.com/sirupsen/logrus" ) -func (c *ApiClient) NewRequestWithContext(ctx context.Context, method, url string, body interface{}) (*http.Request, error) { +func (c *ApiClient) PrepareRequest(ctx context.Context, method, url string, body interface{}) (*http.Request, error) { if !strings.HasSuffix(c.BaseURL.Path, "/") { return nil, fmt.Errorf("BaseURL must 
have a trailing slash, but %q does not", c.BaseURL) } diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go index 531b0cdac5b..a1810e831ec 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -81,7 +81,7 @@ func (s *DecisionsService) List(ctx context.Context, opts DecisionsListOpts) (*m u := fmt.Sprintf("%s/decisions?%s", s.client.URLPrefix, params.Encode()) - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } @@ -97,7 +97,7 @@ func (s *DecisionsService) List(ctx context.Context, opts DecisionsListOpts) (*m } func (s *DecisionsService) FetchV2Decisions(ctx context.Context, url string) (*models.DecisionsStreamResponse, *Response, error) { - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, url, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, url, nil) if err != nil { return nil, nil, err } @@ -138,7 +138,7 @@ func (s *DecisionsService) FetchV3Decisions(ctx context.Context, url string) (*m scenarioDeleted := "deleted" durationDeleted := "1h" - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, url, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, url, nil) if err != nil { return nil, nil, err } @@ -271,7 +271,7 @@ func (s *DecisionsService) GetStreamV3(ctx context.Context, opts DecisionsStream return nil, nil, err } - req, err := s.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodGet, u, nil) if err != nil { return nil, nil, err } @@ -289,7 +289,7 @@ func (s *DecisionsService) GetStreamV3(ctx context.Context, opts DecisionsStream func (s *DecisionsService) StopStream(ctx context.Context) (*Response, error) { u := fmt.Sprintf("%s/decisions", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) + req, err := 
s.client.PrepareRequest(ctx, http.MethodDelete, u, nil) if err != nil { return nil, err } @@ -310,7 +310,7 @@ func (s *DecisionsService) Delete(ctx context.Context, opts DecisionsDeleteOpts) u := fmt.Sprintf("%s/decisions?%s", s.client.URLPrefix, params.Encode()) - req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodDelete, u, nil) if err != nil { return nil, nil, err } @@ -328,7 +328,7 @@ func (s *DecisionsService) Delete(ctx context.Context, opts DecisionsDeleteOpts) func (s *DecisionsService) DeleteOne(ctx context.Context, decisionID string) (*models.DeleteDecisionResponse, *Response, error) { u := fmt.Sprintf("%s/decisions/%s", s.client.URLPrefix, decisionID) - req, err := s.client.NewRequestWithContext(ctx, http.MethodDelete, u, nil) + req, err := s.client.PrepareRequest(ctx, http.MethodDelete, u, nil) if err != nil { return nil, nil, err } diff --git a/pkg/apiclient/decisions_sync_service.go b/pkg/apiclient/decisions_sync_service.go index 1efe2d7c756..e578b6d42c6 100644 --- a/pkg/apiclient/decisions_sync_service.go +++ b/pkg/apiclient/decisions_sync_service.go @@ -16,7 +16,7 @@ type DecisionDeleteService service func (d *DecisionDeleteService) Add(ctx context.Context, deletedDecisions *models.DecisionsDeleteRequest) (interface{}, *Response, error) { u := fmt.Sprintf("%s/decisions/delete", d.client.URLPrefix) - req, err := d.client.NewRequestWithContext(ctx, http.MethodPost, u, &deletedDecisions) + req, err := d.client.PrepareRequest(ctx, http.MethodPost, u, &deletedDecisions) if err != nil { return nil, nil, fmt.Errorf("while building request: %w", err) } diff --git a/pkg/apiclient/heartbeat.go b/pkg/apiclient/heartbeat.go index 7a5fdfd6cc4..c2992706be2 100644 --- a/pkg/apiclient/heartbeat.go +++ b/pkg/apiclient/heartbeat.go @@ -17,7 +17,7 @@ type HeartBeatService service func (h *HeartBeatService) Ping(ctx context.Context) (bool, *Response, error) { u := fmt.Sprintf("%s/heartbeat", 
h.client.URLPrefix) - req, err := h.client.NewRequestWithContext(ctx, http.MethodGet, u, nil) + req, err := h.client.PrepareRequest(ctx, http.MethodGet, u, nil) if err != nil { return false, nil, err } diff --git a/pkg/apiclient/metrics.go b/pkg/apiclient/metrics.go index 1ae073e47a3..9599a3eff97 100644 --- a/pkg/apiclient/metrics.go +++ b/pkg/apiclient/metrics.go @@ -13,7 +13,7 @@ type MetricsService service func (s *MetricsService) Add(ctx context.Context, metrics *models.Metrics) (interface{}, *Response, error) { u := fmt.Sprintf("%s/metrics/", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &metrics) + req, err := s.client.PrepareRequest(ctx, http.MethodPost, u, &metrics) if err != nil { return nil, nil, err } diff --git a/pkg/apiclient/signal.go b/pkg/apiclient/signal.go index 128af4e2566..5dacbaaaf17 100644 --- a/pkg/apiclient/signal.go +++ b/pkg/apiclient/signal.go @@ -15,7 +15,7 @@ type SignalService service func (s *SignalService) Add(ctx context.Context, signals *models.AddSignalsRequest) (interface{}, *Response, error) { u := fmt.Sprintf("%s/signals", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &signals) + req, err := s.client.PrepareRequest(ctx, http.MethodPost, u, &signals) if err != nil { return nil, nil, fmt.Errorf("while building request: %w", err) } diff --git a/pkg/apiclient/usagemetrics.go b/pkg/apiclient/usagemetrics.go index 482987a7f7f..8c70d32d796 100644 --- a/pkg/apiclient/usagemetrics.go +++ b/pkg/apiclient/usagemetrics.go @@ -13,7 +13,7 @@ type UsageMetricsService service func (s *UsageMetricsService) Add(ctx context.Context, metrics *models.AllMetrics) (interface{}, *Response, error) { u := fmt.Sprintf("%s/usage-metrics", s.client.URLPrefix) - req, err := s.client.NewRequestWithContext(ctx, http.MethodPost, u, &metrics) + req, err := s.client.PrepareRequest(ctx, http.MethodPost, u, &metrics) if err != nil { return nil, nil, err } diff --git 
a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 2565292c12b..4d863d11164 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -754,7 +754,13 @@ func (a *apic) UpdateAllowlists(ctx context.Context, allowlistsLinks []*modelsca description = *link.Description } - resp, err := defaultClient.GetClient().Get(*link.URL) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, *link.URL, http.NoBody) + if err != nil { + log.Errorf("while pulling allowlist: %s", err) + continue + } + + resp, err := defaultClient.GetClient().Do(req) if err != nil { log.Errorf("while pulling allowlist: %s", err) continue diff --git a/pkg/appsec/ja4h/ja4h_test.go b/pkg/appsec/ja4h/ja4h_test.go index 434525b0c94..33e3c446038 100644 --- a/pkg/appsec/ja4h/ja4h_test.go +++ b/pkg/appsec/ja4h/ja4h_test.go @@ -21,6 +21,8 @@ For those hashes, the value used was the one returned by our code (because we de */ func TestJA4H_A(t *testing.T) { + ctx := t.Context() + tests := []struct { name string request func() *http.Request @@ -29,7 +31,7 @@ func TestJA4H_A(t *testing.T) { { name: "basic GET request - HTTP1.1 - no accept-language header", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) return req }, expectedResult: "ge11nn000000", @@ -37,7 +39,7 @@ func TestJA4H_A(t *testing.T) { { name: "basic GET request - HTTP1.1 - with accept-language header", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("Accept-Language", "en-US") return req }, @@ -46,7 +48,7 @@ func TestJA4H_A(t *testing.T) { { name: "basic POST request - HTTP1.1 - no accept-language header - cookies - referer", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodPost, 
"http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "foo", Value: "bar"}) req.Header.Set("Referer", "http://example.com") return req @@ -56,7 +58,7 @@ func TestJA4H_A(t *testing.T) { { name: "bad accept-language header", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("Accept-Language", "aksjdhaslkdhalkjsd") return req }, @@ -65,7 +67,7 @@ func TestJA4H_A(t *testing.T) { { name: "bad accept-language header 2", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("Accept-Language", ",") return req }, @@ -86,6 +88,9 @@ func TestJA4H_A(t *testing.T) { func TestJA4H_B(t *testing.T) { // This test is only for non-regression // Because go does not keep headers order, we just want to make sure our code always process the headers in the same order + + ctx := t.Context() + tests := []struct { name string request func() *http.Request @@ -94,7 +99,7 @@ func TestJA4H_B(t *testing.T) { { name: "no headers", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) return req }, expectedResult: "e3b0c44298fc", @@ -102,7 +107,7 @@ func TestJA4H_B(t *testing.T) { { name: "header with arbitrary content", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("X-Custom-Header", "some value") return req }, @@ -111,7 +116,7 @@ func 
TestJA4H_B(t *testing.T) { { name: "header with multiple headers", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) req.Header.Set("X-Custom-Header", "some value") req.Header.Set("Authorization", "Bearer token") return req @@ -121,7 +126,7 @@ func TestJA4H_B(t *testing.T) { { name: "curl-like request", request: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://localhost", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost", http.NoBody) req.Header.Set("Host", "localhost") req.Header.Set("User-Agent", "curl/8.12.1") req.Header.Set("Accept", "*/*") @@ -260,6 +265,8 @@ func TestJA4H_D(t *testing.T) { } func TestJA4H(t *testing.T) { + ctx := t.Context() + tests := []struct { name string req func() *http.Request @@ -268,7 +275,7 @@ func TestJA4H(t *testing.T) { { name: "Basic GET - No cookies", req: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) return req }, expectedHash: "ge11nn000000_e3b0c44298fc_000000000000_000000000000", @@ -276,7 +283,7 @@ func TestJA4H(t *testing.T) { { name: "Basic GET - With cookies", req: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: "session", Value: "12345"}) return req }, @@ -285,7 +292,7 @@ func TestJA4H(t *testing.T) { { name: "Basic GET - Multiple cookies", req: func() *http.Request { - req, _ := http.NewRequest(http.MethodGet, "http://example.com", http.NoBody) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://example.com", http.NoBody) req.AddCookie(&http.Cookie{Name: 
"foo", Value: "bar"}) req.AddCookie(&http.Cookie{Name: "baz", Value: "qux"}) return req From 0106c40a5ba3ab2befd12512795c2883541df0b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Mar 2025 15:34:53 +0100 Subject: [PATCH 476/581] build(deps): bump github.com/golang-jwt/jwt/v4 from 4.5.1 to 4.5.2 (#3531) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e1b686cd995..cd45b4da5fa 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( github.com/go-sql-driver/mysql v1.6.0 github.com/goccy/go-yaml v1.11.0 github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.1 + github.com/golang-jwt/jwt/v4 v4.5.2 github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 diff --git a/go.sum b/go.sum index c410ad70450..c3ff8dcf952 100644 --- a/go.sum +++ b/go.sum @@ -308,8 +308,8 @@ github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= -github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= From 0d2c2daca5c91dbed1ed9fd4314c34864517741d 
Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 25 Mar 2025 11:39:57 +0100 Subject: [PATCH 477/581] Migration script from debian/ubuntu package 1.4.6 (#3420) --- debian/migrate-hub.sh | 63 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100755 debian/migrate-hub.sh diff --git a/debian/migrate-hub.sh b/debian/migrate-hub.sh new file mode 100755 index 00000000000..877b6f00632 --- /dev/null +++ b/debian/migrate-hub.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env sh + +# This script is provided (only in the source distribution) as an ad-hoc solution +# to migrate an installation from the crowdsec package maintained in the debian repositories +# to the official crowdsec repository. + +set -eu + +if [ ! -d /var/lib/crowdsec/hub/ ]; then + echo "You don't have a hub directory to migrate." + echo + echo "Use this script only if you upgrade from the crowdsec package included in the debian repositories." + exit 1 +fi + +# Download everything on the new hub but don't install anything yet + +echo "Downloading Hub content..." + +for itemtype in $(cscli hub types -o raw); do + ALL_ITEMS=$(cscli "$itemtype" list -a -o raw | tail +2 | cut -d, -f1) + if [ -n "${ALL_ITEMS}" ]; then + # shellcheck disable=SC2086 + cscli "$itemtype" install \ + $ALL_ITEMS \ + --download-only -y + fi +done + +# Fix links + +BASEDIR=/etc/crowdsec/ +OLD_PATH=/var/lib/crowdsec/hub/ +NEW_PATH=/etc/crowdsec/hub/ + +find "$BASEDIR" -type l 2>/dev/null | while IFS= read -r link +do + target="$(readlink "$link")" || continue + + case "$target" in + "$OLD_PATH"*) + suffix="${target#"$OLD_PATH"}" + new_target="${NEW_PATH}${suffix}" + + if [ -e "$target" ]; then + continue + fi + + if [ ! 
-e "$new_target" ]; then + continue + fi + + echo "Update symlink: $link" + ln -sf "$new_target" "$link" + ;; + *) + ;; + esac +done + +# upgrade tainted collections + +cscli hub upgrade --force From 55aa1893d173ea40016cb189e0432b5f5bde7bfc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 25 Mar 2025 11:53:49 +0100 Subject: [PATCH 478/581] explicit message for malformed data URL in local items (#3537) --- debian/migrate-hub.sh | 4 ++-- pkg/hubops/colorize.go | 2 ++ pkg/hubops/download.go | 8 ++++++++ test/bats/20_hub_items.bats | 23 +++++++++++++++++++++++ 4 files changed, 35 insertions(+), 2 deletions(-) diff --git a/debian/migrate-hub.sh b/debian/migrate-hub.sh index 877b6f00632..fb78b34a192 100755 --- a/debian/migrate-hub.sh +++ b/debian/migrate-hub.sh @@ -1,7 +1,7 @@ #!/usr/bin/env sh # This script is provided (only in the source distribution) as an ad-hoc solution -# to migrate an installation from the crowdsec package maintained in the debian repositories +# to migrate an installation from the crowdsec package maintained in the debian or ubuntu repositories # to the official crowdsec repository. set -eu @@ -9,7 +9,7 @@ set -eu if [ ! -d /var/lib/crowdsec/hub/ ]; then echo "You don't have a hub directory to migrate." echo - echo "Use this script only if you upgrade from the crowdsec package included in the debian repositories." + echo "Use this script only if you upgrade from the crowdsec package included in the debian or ubuntu repositories." 
exit 1 fi diff --git a/pkg/hubops/colorize.go b/pkg/hubops/colorize.go index 3af2aecab93..b988d1e948e 100644 --- a/pkg/hubops/colorize.go +++ b/pkg/hubops/colorize.go @@ -15,8 +15,10 @@ func colorizeItemName(fullname string) string { bold := color.New(color.Bold) author := parts[0] name := parts[1] + return author + "/" + bold.Sprint(name) } + return fullname } diff --git a/pkg/hubops/download.go b/pkg/hubops/download.go index fab514fcdfd..68b3213c60e 100644 --- a/pkg/hubops/download.go +++ b/pkg/hubops/download.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "net/url" "os" "time" @@ -118,6 +119,13 @@ func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader continue } + // twopenny validation + if u, err := url.Parse(dataS.SourceURL); err != nil { + return false, err + } else if u.Scheme == "" { + return false, fmt.Errorf("a valid URL was expected (note: local items can download data too): %s", dataS.SourceURL) + } + // XXX: check context cancellation destPath, err := cwhub.SafePath(dataFolder, dataS.DestPath) if err != nil { diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index b3f929e1947..4bf7a2544d2 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -151,6 +151,29 @@ teardown() { assert_output --partial "Nothing to do." 
} +@test "when upgrading the hub, a local item's data will be downloaded" { + rune -0 mkdir -p "$CONFIG_DIR/collections" + cat >"$CONFIG_DIR"/collections/foobar.yaml <<-EOT + data: + - source_url: https://localhost:1234/database.mmdb + dest_file: database.mmdb + EOT + rune -1 cscli hub upgrade + assert_line "downloading https://localhost:1234/database.mmdb" + assert_stderr --partial 'Get "https://localhost:1234/database.mmdb":' + assert_stderr --partial 'connect: connection refused' + + # bad link, or local path + cat >"$CONFIG_DIR"/collections/foobar.yaml <<-EOT + data: + - source_url: /tmp/meh + dest_file: database.mmdb + EOT + rune -1 cscli hub upgrade + refute_line "downloading /tmp/meh" + assert_stderr --partial 'a valid URL was expected (note: local items can download data too): /tmp/meh' +} + @test "a local item cannot be removed by cscli" { rune -0 mkdir -p "$CONFIG_DIR/scenarios" rune -0 touch "$CONFIG_DIR/scenarios/foobar.yaml" From d64f196b3f3008cf39a538d6420012100044808a Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 25 Mar 2025 14:30:18 +0100 Subject: [PATCH 479/581] Allowlists: fix range check in LAPI endpoint (#3538) --- pkg/apiclient/allowlists_service.go | 7 +++++-- pkg/apiserver/allowlists_test.go | 22 ++++++++++++++++++++++ pkg/apiserver/controllers/controller.go | 2 ++ test/bats/cscli-allowlists.bats | 11 +++++++++++ 4 files changed, 40 insertions(+), 2 deletions(-) diff --git a/pkg/apiclient/allowlists_service.go b/pkg/apiclient/allowlists_service.go index ef1dc85da65..0498921577f 100644 --- a/pkg/apiclient/allowlists_service.go +++ b/pkg/apiclient/allowlists_service.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "net/url" qs "github.com/google/go-querystring/query" log "github.com/sirupsen/logrus" @@ -74,7 +75,8 @@ func (s *AllowlistsService) Get(ctx context.Context, name string, opts Allowlist } func (s *AllowlistsService) CheckIfAllowlisted(ctx context.Context, value string) (bool, *Response, error) { - u := s.client.URLPrefix + 
"/allowlists/check/" + value + escapedValue := url.PathEscape(value) + u := s.client.URLPrefix + "/allowlists/check/" + escapedValue req, err := s.client.PrepareRequest(ctx, http.MethodHead, u, nil) if err != nil { @@ -92,7 +94,8 @@ func (s *AllowlistsService) CheckIfAllowlisted(ctx context.Context, value string } func (s *AllowlistsService) CheckIfAllowlistedWithReason(ctx context.Context, value string) (*models.CheckAllowlistResponse, *Response, error) { - u := s.client.URLPrefix + "/allowlists/check/" + value + escapedValue := url.PathEscape(value) + u := s.client.URLPrefix + "/allowlists/check/" + escapedValue req, err := s.client.PrepareRequest(ctx, http.MethodGet, u, nil) if err != nil { diff --git a/pkg/apiserver/allowlists_test.go b/pkg/apiserver/allowlists_test.go index 158f4852164..6e319da967c 100644 --- a/pkg/apiserver/allowlists_test.go +++ b/pkg/apiserver/allowlists_test.go @@ -115,13 +115,35 @@ func TestCheckInAllowlist(t *testing.T) { require.NoError(t, err) require.False(t, resp.Allowlisted) + // GET request, should return 200 and status in body + w = lapi.RecordResponse(t, ctx, http.MethodGet, "/v1/allowlists/check/2.3.4.0%2F24", emptyBody, passwordAuthType) + + require.Equal(t, http.StatusOK, w.Code) + + resp = models.CheckAllowlistResponse{} + + err = json.Unmarshal(w.Body.Bytes(), &resp) + + require.NoError(t, err) + require.False(t, resp.Allowlisted) + // HEAD request, should return 200 w = lapi.RecordResponse(t, ctx, http.MethodHead, "/v1/allowlists/check/1.2.3.4", emptyBody, passwordAuthType) require.Equal(t, http.StatusOK, w.Code) + // HEAD request, should return 200 + w = lapi.RecordResponse(t, ctx, http.MethodHead, "/v1/allowlists/check/1.2.3.0%2F24", emptyBody, passwordAuthType) + + require.Equal(t, http.StatusOK, w.Code) + // HEAD request, should return 204 w = lapi.RecordResponse(t, ctx, http.MethodHead, "/v1/allowlists/check/2.3.4.5", emptyBody, passwordAuthType) require.Equal(t, http.StatusNoContent, w.Code) + + // HEAD request, 
should return 204 + w = lapi.RecordResponse(t, ctx, http.MethodHead, "/v1/allowlists/check/2.3.4.5%2F24", emptyBody, passwordAuthType) + + require.Equal(t, http.StatusNoContent, w.Code) } diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 03f1659ee4f..84aa2c06a81 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -98,6 +98,8 @@ func (c *Controller) NewV1() error { c.Router.GET("/health", gin.WrapF(serveHealth())) c.Router.Use(v1.PrometheusMiddleware()) c.Router.HandleMethodNotAllowed = true + c.Router.UnescapePathValues = true + c.Router.UseRawPath = true c.Router.NoRoute(func(ctx *gin.Context) { ctx.AbortWithStatus(http.StatusNotFound) }) diff --git a/test/bats/cscli-allowlists.bats b/test/bats/cscli-allowlists.bats index 73305dd89cb..6a91518d9c0 100644 --- a/test/bats/cscli-allowlists.bats +++ b/test/bats/cscli-allowlists.bats @@ -135,6 +135,17 @@ teardown() { refute_stderr } +@test "cscli allolists: range check" { + rune -0 cscli allowlist create foo -d 'a foo' + rune -0 cscli allowlist add foo 192.168.0.0/16 + rune -1 cscli decisions add -r 192.168.10.20/24 + assert_stderr 'Error: 192.168.10.20/24 is allowlisted by item 192.168.0.0/16 from foo, use --bypass-allowlist to add the decision anyway' + refute_output + rune -0 cscli decisions add -r 192.168.10.20/24 --bypass-allowlist + assert_stderr --partial 'Decision successfully added' + refute_output +} + @test "cscli allowlists delete" { rune -1 cscli allowlist delete assert_stderr 'Error: accepts 1 arg(s), received 0' From 78a6179566a7885c6131805a601419d3ed2c0260 Mon Sep 17 00:00:00 2001 From: AlteredCoder <64792091+AlteredCoder@users.noreply.github.com> Date: Mon, 31 Mar 2025 14:44:51 +0200 Subject: [PATCH 480/581] Support WithUserAgent in cti client (#3542) --- pkg/cticlient/client.go | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/pkg/cticlient/client.go 
b/pkg/cticlient/client.go index 56f35b8a583..3e168a39a64 100644 --- a/pkg/cticlient/client.go +++ b/pkg/cticlient/client.go @@ -9,9 +9,8 @@ import ( "net/http" "strings" - log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" + log "github.com/sirupsen/logrus" ) const ( @@ -21,17 +20,19 @@ const ( ) var ( - ErrUnauthorized = errors.New("unauthorized") - ErrLimit = errors.New("request quota exceeded, please reduce your request rate") - ErrNotFound = errors.New("ip not found") - ErrDisabled = errors.New("cti is disabled") - ErrUnknown = errors.New("unknown error") + ErrUnauthorized = errors.New("unauthorized") + ErrLimit = errors.New("request quota exceeded, please reduce your request rate") + ErrNotFound = errors.New("ip not found") + ErrDisabled = errors.New("cti is disabled") + ErrUnknown = errors.New("unknown error") + defaultUserAgent = useragent.Default() ) type CrowdsecCTIClient struct { httpClient *http.Client apiKey string Logger *log.Entry + UserAgent string } func (c *CrowdsecCTIClient) doRequest(ctx context.Context, method string, endpoint string, params map[string]string) ([]byte, error) { @@ -48,7 +49,7 @@ func (c *CrowdsecCTIClient) doRequest(ctx context.Context, method string, endpoi } req.Header.Set("X-Api-Key", c.apiKey) - req.Header.Set("User-Agent", useragent.Default()) + req.Header.Set("User-Agent", c.UserAgent) resp, err := c.httpClient.Do(req) if err != nil { @@ -169,6 +170,10 @@ func NewCrowdsecCTIClient(options ...func(*CrowdsecCTIClient)) *CrowdsecCTIClien client.Logger = log.NewEntry(log.New()) } + if client.UserAgent == "" { + client.UserAgent = defaultUserAgent + } + return client } @@ -189,3 +194,9 @@ func WithAPIKey(apiKey string) func(*CrowdsecCTIClient) { c.apiKey = apiKey } } + +func WithUserAgent(userAgent string) func(*CrowdsecCTIClient) { + return func(c *CrowdsecCTIClient) { + c.UserAgent = userAgent + } +} From 6c507604b1ffdb0191fdf0fd4439b45d2cbe03be Mon Sep 17 00:00:00 2001 From: mmetc 
<92726601+mmetc@users.noreply.github.com> Date: Thu, 3 Apr 2025 12:56:19 +0200 Subject: [PATCH 481/581] hubtests: correct basename check in parser tests (#3557) --- pkg/hubtest/parser_assert.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index 1e7c7b2b3f0..e18f4d5e3e6 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "path/filepath" "strings" "github.com/expr-lang/expr" @@ -270,7 +271,7 @@ func (p *ParserAssert) AutoGenParserAssert() string { } if mkey == "datasource_path" { - ret += fmt.Sprintf(`basename(results["%s"]["%s"][%d].Evt.Meta["%s"]) == "%s"`+"\n", stage, parser, pidx, mkey, Escape(mval)) + ret += fmt.Sprintf(`basename(results["%s"]["%s"][%d].Evt.Meta["%s"]) == "%s"`+"\n", stage, parser, pidx, mkey, Escape(filepath.Base(mval))) } else { ret += fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Meta["%s"] == "%s"`+"\n", stage, parser, pidx, mkey, Escape(mval)) } From 49927f79a127889d23994c56827bea707d60257b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 3 Apr 2025 16:57:56 +0200 Subject: [PATCH 482/581] CI: golangci-lint v2 (#3558) * golangci-lint v2 * CI: reduce parallel jobs for hubtests --- .github/workflows/bats-hub.yml | 4 +- .github/workflows/go-tests-windows.yml | 4 +- .github/workflows/go-tests.yml | 4 +- .github/workflows/update_docker_hub_doc.yml | 7 +- .golangci.yml | 865 ++++++++++---------- pkg/apiserver/apic_test.go | 10 +- pkg/apiserver/machines_test.go | 12 +- pkg/apiserver/middlewares/v1/api_key.go | 1 + pkg/cache/cache_test.go | 2 +- pkg/cticlient/client_test.go | 2 +- 10 files changed, 460 insertions(+), 451 deletions(-) diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index e2f47c414fe..b0aaab56dfe 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -47,7 +47,7 @@ jobs: 
./test/instance-data load git clone --depth 1 https://github.com/crowdsecurity/hub.git ./hub cd ./hub - cscli hubtest run --all --clean --max-jobs 8 + cscli hubtest run --all --clean --max-jobs 4 - name: "Collect hub coverage" run: ./test/bin/collect-hub-coverage ./hub >> $GITHUB_ENV @@ -61,7 +61,7 @@ jobs: filename: crowdsec_parsers_badge.json label: Hub Parsers message: ${{ env.PARSERS_COV }} - color: ${{ env.SCENARIO_BADGE_COLOR }} + color: ${{ env.PARSERS_BADGE_COLOR }} - name: "Create Scenarios badge" uses: schneegans/dynamic-badges-action@v1.7.0 diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index a99572fee88..b7c79496361 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -59,8 +59,8 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@v7 with: - version: v1.64 + version: v2.0 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 6a48a932b1c..0c5b0fec1a1 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -196,8 +196,8 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} - name: golangci-lint - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@v7 with: - version: v1.64 + version: v2.0 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/update_docker_hub_doc.yml b/.github/workflows/update_docker_hub_doc.yml index 5c5f76acca4..77d0b13e190 100644 --- a/.github/workflows/update_docker_hub_doc.yml +++ b/.github/workflows/update_docker_hub_doc.yml @@ -11,16 +11,19 @@ jobs: update-docker-hub-readme: runs-on: ubuntu-latest steps: + - name: Check out the repo uses: actions/checkout@v4 if: ${{ github.repository_owner == 'crowdsecurity' }} + - name: Update docker hub README - uses: 
ms-jpq/sync-dockerhub-readme@v1 + uses: peter-evans/dockerhub-description@v4 if: ${{ github.repository_owner == 'crowdsecurity' }} with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} repository: crowdsecurity/crowdsec - readme: "./docker/README.md" + short-description: ${{ github.event.repository.description }} + readme-filepath: "./docker/README.md" diff --git a/.golangci.yml b/.golangci.yml index 675c3b24b78..a169984d8f4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,256 +1,26 @@ -# https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +version: "2" run: build-tags: - expr_debug -linters-settings: - errcheck: - # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. - # Such cases aren't reported by default. - # Default: false - check-type-assertions: false - # List of functions to exclude from checking, where each entry is a single function to exclude. - # See https://github.com/kisielk/errcheck#excluding-functions for details. 
- exclude-functions: - - (*bytes.Buffer).ReadFrom # TODO: - - io.Copy # TODO: - - (net/http.ResponseWriter).Write # TODO: - - (*os/exec.Cmd).Start - - (*os/exec.Cmd).Wait - - (*os.Process).Kill - - (*text/template.Template).ExecuteTemplate - - syscall.FreeLibrary - - golang.org/x/sys/windows.CloseHandle - - golang.org/x/sys/windows.ResetEvent - - (*golang.org/x/sys/windows/svc/eventlog.Log).Info - - (*golang.org/x/sys/windows/svc/mgr.Mgr).Disconnect - - - (github.com/bluele/gcache.Cache).Set - - (github.com/gin-gonic/gin.ResponseWriter).WriteString - - (*github.com/segmentio/kafka-go.Reader).SetOffsetAt - - (*gopkg.in/tomb.v2.Tomb).Wait - - - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterArgs - - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterBody - - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterHeaders - - (*github.com/crowdsecurity/crowdsec/pkg/longpollclient.LongPollClient).Stop - - gci: - sections: - - standard - - default - - prefix(github.com/crowdsecurity) - - prefix(github.com/crowdsecurity/crowdsec) - - gomoddirectives: - replace-allow-list: - - golang.org/x/time/rate - - github.com/corazawaf/coraza/v3 - - govet: - enable-all: true - disable: - - reflectvaluecompare - - fieldalignment - - maintidx: - # raise this after refactoring - under: 15 - - misspell: - locale: US - - nestif: - # lower this after refactoring - min-complexity: 16 - - nlreturn: - block-size: 5 - - nolintlint: - allow-unused: false # report any unused nolint directives - require-explanation: false # don't require an explanation for nolint directives - require-specific: false # don't require nolint directives to be specific about which linter is being skipped - - interfacebloat: - max: 12 - - depguard: - rules: - wrap: - deny: - - pkg: "github.com/pkg/errors" - desc: "errors.Wrap() is deprecated in favor of fmt.Errorf()" - files: - - "!**/pkg/database/*.go" - yaml: - files: - - "!**/pkg/acquisition/acquisition.go" - - 
"!**/pkg/acquisition/acquisition_test.go" - - "!**/pkg/acquisition/modules/appsec/appsec.go" - - "!**/pkg/acquisition/modules/cloudwatch/cloudwatch.go" - - "!**/pkg/acquisition/modules/docker/docker.go" - - "!**/pkg/acquisition/modules/file/file.go" - - "!**/pkg/acquisition/modules/journalctl/journalctl.go" - - "!**/pkg/acquisition/modules/kafka/kafka.go" - - "!**/pkg/acquisition/modules/kinesis/kinesis.go" - - "!**/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go" - - "!**/pkg/acquisition/modules/loki/loki.go" - - "!**/pkg/acquisition/modules/loki/timestamp_test.go" - - "!**/pkg/acquisition/modules/victorialogs/victorialogs.go" - - "!**/pkg/acquisition/modules/s3/s3.go" - - "!**/pkg/acquisition/modules/syslog/syslog.go" - - "!**/pkg/acquisition/modules/wineventlog/wineventlog_windows.go" - - "!**/pkg/appsec/appsec.go" - - "!**/pkg/appsec/loader.go" - - "!**/pkg/csplugin/broker.go" - - "!**/pkg/leakybucket/buckets_test.go" - - "!**/pkg/leakybucket/manager_load.go" - - "!**/pkg/parser/node.go" - - "!**/pkg/parser/node_test.go" - - "!**/pkg/parser/parsing_test.go" - - "!**/pkg/parser/stage.go" - deny: - - pkg: "gopkg.in/yaml.v2" - desc: "yaml.v2 is deprecated for new code in favor of yaml.v3" - - stylecheck: - checks: - - all - - -ST1003 # should not use underscores in Go names; ... - - -ST1005 # error strings should not be capitalized - - -ST1012 # error var ... should have name of the form ErrFoo - - -ST1016 # methods on the same type should have the same receiver name - - -ST1022 # comment on exported var ... should be of the form ... 
- - revive: - ignore-generated-header: true - severity: error - enable-all-rules: true - rules: - - name: add-constant - disabled: true - - name: cognitive-complexity - # lower this after refactoring - arguments: [119] - - name: comment-spacings - disabled: true - - name: confusing-results - disabled: true - - name: cyclomatic - # lower this after refactoring - arguments: [39] - - name: defer - disabled: true - - name: empty-block - disabled: true - - name: empty-lines - disabled: true - - name: error-naming - disabled: true - - name: flag-parameter - disabled: true - - name: function-result-limit - arguments: [6] - - name: function-length - # lower this after refactoring - arguments: [111, 238] - - name: get-return - disabled: true - - name: increment-decrement - disabled: true - - name: import-alias-naming - disabled: true - - name: import-shadowing - disabled: true - - name: line-length-limit - # lower this after refactoring - arguments: [221] - - name: max-control-nesting - # lower this after refactoring - arguments: [7] - - name: max-public-structs - disabled: true - - name: nested-structs - disabled: true - - name: package-comments - disabled: true - - name: redundant-import-alias - disabled: true - - name: time-equal - disabled: true - - name: var-naming - disabled: true - - name: unchecked-type-assertion - disabled: true - - name: exported - disabled: true - - name: unexported-naming - disabled: true - - name: unexported-return - disabled: true - - name: unhandled-error - disabled: true - arguments: - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - - name: unnecessary-stmt - disabled: true - - name: unused-parameter - disabled: true - - name: unused-receiver - disabled: true - - name: use-any - disabled: true - - name: useless-break - disabled: true - - wsl: - # Allow blocks to end with comments - allow-trailing-comment: true - - gocritic: - enable-all: true - disabled-checks: - - paramTypeCombine - - ifElseChain - - importShadow - - hugeParam - - 
commentedOutCode - - commentedOutImport - - unnamedResult - - sloppyReassign - - appendCombine - - typeUnparen - - commentFormatting - - deferInLoop # - - whyNoLint - - equalFold # - - unnecessaryBlock # - - tooManyResultsChecker - - unnecessaryDefer - - docStub - - preferFprint - linters: - enable-all: true + default: all disable: # # DEPRECATED by golangi-lint - # - - tenv + + + # none right now # # Redundant # - - - gocyclo # revive - - cyclop # revive - - lll # revive - - funlen # revive - - gocognit # revive + - cyclop # revive + - funlen # revive + - gocognit # revive + - gocyclo # revive + - lll # revive # Disabled atm @@ -263,10 +33,7 @@ linters: - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) - errchkjson # Checks types passed to the json encoding functions. Reports unsupported types and reports occasions, where the check for the returned error can be omitted. - exhaustive # check exhaustiveness of enum switch statements - - gci # Gci control golang package import order and make it always deterministic. - godot # Check if comments end in a period - - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification - - goimports # Check import statements are formatted according to the 'goimport' command. Reformat imports in autofix mode. - gosec # (gas): Inspects source code for security problems - inamedparam # reports interfaces with unnamed method parameters - musttag # enforce field tags in (un)marshaled structs @@ -290,7 +57,6 @@ linters: # Formatting only, useful in IDE but should not be forced on CI? # - - gofumpt # Gofumpt checks whether code was gofumpt-ed. - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity - whitespace # Whitespace is a linter that checks for unnecessary newlines at the start and end of functions, if, for, etc. 
- wsl # add or remove empty lines @@ -314,195 +80,434 @@ linters: - goconst # Finds repeated strings that could be replaced by a constant - tagliatelle # Checks the struct tags. - varnamelen # checks that the length of a variable's name matches its scope + - prealloc + + settings: + + depguard: + rules: + wrap: + files: + - '!**/pkg/database/*.go' + deny: + - pkg: github.com/pkg/errors + desc: errors.Wrap() is deprecated in favor of fmt.Errorf() + yaml: + files: + - '!**/pkg/acquisition/acquisition.go' + - '!**/pkg/acquisition/acquisition_test.go' + - '!**/pkg/acquisition/modules/appsec/appsec.go' + - '!**/pkg/acquisition/modules/cloudwatch/cloudwatch.go' + - '!**/pkg/acquisition/modules/docker/docker.go' + - '!**/pkg/acquisition/modules/file/file.go' + - '!**/pkg/acquisition/modules/journalctl/journalctl.go' + - '!**/pkg/acquisition/modules/kafka/kafka.go' + - '!**/pkg/acquisition/modules/kinesis/kinesis.go' + - '!**/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go' + - '!**/pkg/acquisition/modules/loki/loki.go' + - '!**/pkg/acquisition/modules/loki/timestamp_test.go' + - '!**/pkg/acquisition/modules/victorialogs/victorialogs.go' + - '!**/pkg/acquisition/modules/s3/s3.go' + - '!**/pkg/acquisition/modules/syslog/syslog.go' + - '!**/pkg/acquisition/modules/wineventlog/wineventlog_windows.go' + - '!**/pkg/appsec/appsec.go' + - '!**/pkg/appsec/loader.go' + - '!**/pkg/csplugin/broker.go' + - '!**/pkg/leakybucket/buckets_test.go' + - '!**/pkg/leakybucket/manager_load.go' + - '!**/pkg/parser/node.go' + - '!**/pkg/parser/node_test.go' + - '!**/pkg/parser/parsing_test.go' + - '!**/pkg/parser/stage.go' + deny: + - pkg: gopkg.in/yaml.v2 + desc: yaml.v2 is deprecated for new code in favor of yaml.v3 + + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + check-type-assertions: false + # List of functions to exclude from checking, where each entry is a single function to exclude. 
+ # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - (*bytes.Buffer).ReadFrom # TODO + - io.Copy # TODO + - (net/http.ResponseWriter).Write # TODO + - (*os/exec.Cmd).Start + - (*os/exec.Cmd).Wait + - (*os.Process).Kill + - (*text/template.Template).ExecuteTemplate + - syscall.FreeLibrary + - golang.org/x/sys/windows.CloseHandle + - golang.org/x/sys/windows.ResetEvent + - (*golang.org/x/sys/windows/svc/eventlog.Log).Info + - (*golang.org/x/sys/windows/svc/mgr.Mgr).Disconnect + + - (github.com/bluele/gcache.Cache).Set + - (github.com/gin-gonic/gin.ResponseWriter).WriteString + - (*github.com/segmentio/kafka-go.Reader).SetOffsetAt + - (*gopkg.in/tomb.v2.Tomb).Wait + + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterArgs + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterBody + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterHeaders + - (*github.com/crowdsecurity/crowdsec/pkg/longpollclient.LongPollClient).Stop + + gocritic: + enable-all: true + disabled-checks: + - paramTypeCombine + - ifElseChain + - importShadow + - hugeParam + - commentedOutCode + - commentedOutImport + - unnamedResult + - sloppyReassign + - appendCombine + - typeUnparen + - commentFormatting + - deferInLoop # + - whyNoLint + - equalFold # + - unnecessaryBlock # + - tooManyResultsChecker + - unnecessaryDefer + - docStub + - preferFprint + + gomoddirectives: + replace-allow-list: + - golang.org/x/time/rate + - github.com/corazawaf/coraza/v3 + + govet: + disable: + - reflectvaluecompare + - fieldalignment + enable-all: true + + interfacebloat: + max: 12 + + maintidx: + # raise this after refactoring + under: 15 + + misspell: + locale: US + + nestif: + # lower this after refactoring + min-complexity: 16 + + nlreturn: + block-size: 5 + + nolintlint: + require-explanation: false # don't require an explanation for nolint directives + require-specific: false # don't require nolint directives 
to be specific about which linter is being skipped + allow-unused: false # report any unused nolint directives + + revive: + severity: error + enable-all-rules: true + rules: + - name: add-constant + disabled: true + - name: cognitive-complexity + arguments: + # lower this after refactoring + - 119 + - name: comment-spacings + disabled: true + - name: confusing-results + disabled: true + - name: cyclomatic + arguments: + # lower this after refactoring + - 39 + - name: defer + disabled: true + - name: empty-block + disabled: true + - name: empty-lines + disabled: true + - name: error-naming + disabled: true + - name: flag-parameter + disabled: true + - name: function-result-limit + arguments: + - 6 + - name: function-length + arguments: + # lower this after refactoring + - 111 + - 238 + - name: get-return + disabled: true + - name: increment-decrement + disabled: true + - name: import-alias-naming + disabled: true + - name: import-shadowing + disabled: true + - name: line-length-limit + arguments: + # lower this after refactoring + - 221 + - name: max-control-nesting + arguments: + # lower this after refactoring + - 7 + - name: max-public-structs + disabled: true + - name: nested-structs + disabled: true + - name: package-comments + disabled: true + - name: redundant-import-alias + disabled: true + - name: time-equal + disabled: true + - name: var-naming + disabled: true + - name: unchecked-type-assertion + disabled: true + - name: exported + disabled: true + - name: unexported-naming + disabled: true + - name: unexported-return + disabled: true + - name: unhandled-error + arguments: + - fmt.Print + - fmt.Printf + - fmt.Println + disabled: true + - name: unnecessary-stmt + disabled: true + - name: unused-parameter + disabled: true + - name: unused-receiver + disabled: true + - name: use-any + disabled: true + - name: useless-break + disabled: true + + staticcheck: + checks: + - all + - -ST1003 + - -ST1005 + - -ST1012 + - -ST1016 + - -ST1022 + - -QF1003 + - -QF1008 + 
- -QF1012 + + wsl: + # Allow blocks to end with comments + allow-trailing-comment: true + + exclusions: + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: - # - # Under evaluation - # - - - prealloc # Finds slice declarations that could potentially be preallocated - + # `err` is often shadowed, we may continue to do it + + - linters: + - govet + text: 'shadow: declaration of "(err|ctx)" shadows declaration' + + # Will apply, trivial - just beware of merge conflicts + + - linters: + - perfsprint + text: fmt.Sprintf can be replaced .* + + # Will fix, easy but some thinking required + + - linters: + - errorlint + text: non-wrapping format verb for fmt.Errorf. Use `%w` to format errors + - linters: + - nosprintfhostport + text: host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf + + # https://github.com/timakin/bodyclose + - linters: + - bodyclose + text: response body must be closed + + # named/naked returns are evil, with a single exception + # https://go.dev/wiki/CodeReviewComments#named-result-parameters + - linters: + - nonamedreturns + text: named return .* with type .* found + + - linters: + - revive + path: pkg/leakybucket/manager_load.go + text: 'confusing-naming: Field ''.*'' differs only by capitalization to other field in the struct type BucketFactory' + + - linters: + - revive + path: pkg/exprhelpers/helpers.go + text: 'confusing-naming: Method ''flatten'' differs only by capitalization to function ''Flatten'' in the same source file' + + - linters: + - revive + path: pkg/appsec/query_utils.go + text: 'confusing-naming: Method ''parseQuery'' differs only by capitalization to function ''ParseQuery'' in the same source file' + + - linters: + - revive + path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go + text: 'confusing-naming: Method ''QueryRange'' differs only by capitalization to method ''queryRange'' in the same source file' + + - linters: + - revive 
+ path: pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go + text: 'confusing-naming: Method ''QueryRange'' differs only by capitalization to method ''queryRange'' in the same source file' + + - linters: + - revive + path: pkg/hubtest/hubtest_item.go + text: 'cyclomatic: .*RunWithLogFile' + + # tolerate complex functions in tests for now + - linters: + - maintidx + path: (.+)_test.go + + # tolerate long functions in tests + - linters: + - revive + path: pkg/(.+)_test.go + text: 'function-length: .*' + + # tolerate long lines in tests + - linters: + - revive + path: pkg/(.+)_test.go + text: 'line-length-limit: .*' + + # we use t,ctx instead of ctx,t in tests + - linters: + - revive + path: pkg/(.+)_test.go + text: 'context-as-argument: context.Context should be the first parameter of a function' + + # tolerate deep exit in cobra's OnInitialize, for now + - linters: + - revive + path: cmd/crowdsec-cli/main.go + text: 'deep-exit: .*' + + - linters: + - revive + path: cmd/crowdsec/crowdsec.go + text: 'deep-exit: .*' + + - linters: + - revive + path: cmd/crowdsec/api.go + text: 'deep-exit: .*' + + - linters: + - revive + path: cmd/crowdsec/win_service.go + text: 'deep-exit: .*' + + - linters: + - recvcheck + path: pkg/csplugin/hclog_adapter.go + text: the methods of "HCLogAdapter" use pointer receiver and non-pointer receiver. + + # encoding to json/yaml requires value receivers + - linters: + - recvcheck + path: pkg/cwhub/item.go + text: the methods of "Item" use pointer receiver and non-pointer receiver. 
+ + - linters: + - gocritic + path: cmd/crowdsec-cli + text: 'rangeValCopy: .*' + + - linters: + - gocritic + path: pkg/(cticlient|hubtest) + text: 'rangeValCopy: .*' + + - linters: + - gocritic + path: (.+)_test.go + text: 'rangeValCopy: .*' + + - linters: + - gocritic + path: pkg/(appsec|acquisition|dumps|alertcontext|leakybucket|exprhelpers) + text: 'rangeValCopy: .*' + + - linters: + - revive + path: pkg/types/utils.go + text: 'argument-limit: .*' + + # need some cleanup first: to create db in memory and share the client, not the config + - linters: + - usetesting + path: (.+)_test.go + text: context.Background.* + + - linters: + - usetesting + path: pkg/apiserver/(.+)_test.go + text: os.MkdirTemp.* could be replaced by t.TempDir.* + + - linters: + - usetesting + path: pkg/apiserver/(.+)_test.go + text: os.CreateTemp.* could be replaced by os.CreateTemp.* + + - linters: + - containedctx + path: cmd/notification-file/main.go + text: found a struct that contains a context.Context field + paths: + - pkg/yamlpatch/merge.go + - pkg/yamlpatch/merge_test.go + - pkg/time/rate + - pkg/metabase + - third_party$ + - builtin$ + - examples$ issues: - # “Look, that’s why there’s rules, understand? So that you think before you - # break ‘em.” ― Terry Pratchett - - exclude-dirs: - - pkg/time/rate - - pkg/metabase - - exclude-files: - - pkg/yamlpatch/merge.go - - pkg/yamlpatch/merge_test.go - - exclude-generated: strict - max-issues-per-linter: 0 max-same-issues: 0 - exclude-rules: - - # Won't fix: - - # `err` is often shadowed, we may continue to do it - - linters: - - govet - text: "shadow: declaration of \"(err|ctx)\" shadows declaration" - - # Will fix, trivial - just beware of merge conflicts - - - linters: - - perfsprint - text: "fmt.Sprintf can be replaced .*" - - # - # Will fix, easy but some neurons required - # - - linters: - - errorlint - text: "non-wrapping format verb for fmt.Errorf. 
Use `%w` to format errors" - - - linters: - - nosprintfhostport - text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf" - - # https://github.com/timakin/bodyclose - - linters: - - bodyclose - text: "response body must be closed" - - # named/naked returns are evil, with a single exception - # https://go.dev/wiki/CodeReviewComments#named-result-parameters - - linters: - - nonamedreturns - text: "named return .* with type .* found" - - - linters: - - revive - path: pkg/leakybucket/manager_load.go - text: "confusing-naming: Field '.*' differs only by capitalization to other field in the struct type BucketFactory" - - - linters: - - revive - path: pkg/exprhelpers/helpers.go - text: "confusing-naming: Method 'flatten' differs only by capitalization to function 'Flatten' in the same source file" - - - linters: - - revive - path: pkg/appsec/query_utils.go - text: "confusing-naming: Method 'parseQuery' differs only by capitalization to function 'ParseQuery' in the same source file" - - - linters: - - revive - path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go - text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" - - - linters: - - revive - path: pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go - text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" - - - linters: - - revive - path: pkg/hubtest/hubtest_item.go - text: "cyclomatic: .*RunWithLogFile" - - # tolerate complex functions in tests for now - - linters: - - maintidx - path: "(.+)_test.go" - - # tolerate long functions in tests - - linters: - - revive - path: "pkg/(.+)_test.go" - text: "function-length: .*" - - # tolerate long lines in tests - - linters: - - revive - path: "pkg/(.+)_test.go" - text: "line-length-limit: .*" - - # we use t,ctx instead of ctx,t in tests - - linters: - - revive - 
path: "pkg/(.+)_test.go" - text: "context-as-argument: context.Context should be the first parameter of a function" - - # tolerate deep exit in cobra's OnInitialize, for now - - linters: - - revive - path: "cmd/crowdsec-cli/main.go" - text: "deep-exit: .*" - - - linters: - - revive - path: "cmd/crowdsec/crowdsec.go" - text: "deep-exit: .*" - - - linters: - - revive - path: "cmd/crowdsec/api.go" - text: "deep-exit: .*" - - - linters: - - revive - path: "cmd/crowdsec/win_service.go" - text: "deep-exit: .*" - - - linters: - - recvcheck - path: "pkg/csplugin/hclog_adapter.go" - text: 'the methods of "HCLogAdapter" use pointer receiver and non-pointer receiver.' - - # encoding to json/yaml requires value receivers - - linters: - - recvcheck - path: "pkg/cwhub/item.go" - text: 'the methods of "Item" use pointer receiver and non-pointer receiver.' - - - linters: - - gocritic - path: "cmd/crowdsec-cli" - text: "rangeValCopy: .*" - - - linters: - - gocritic - path: "pkg/(cticlient|hubtest)" - text: "rangeValCopy: .*" - - - linters: - - gocritic - path: "(.+)_test.go" - text: "rangeValCopy: .*" - - - linters: - - gocritic - path: "pkg/(appsec|acquisition|dumps|alertcontext|leakybucket|exprhelpers)" - text: "rangeValCopy: .*" - - - linters: - - revive - path: "pkg/types/utils.go" - text: "argument-limit: .*" - - # need some cleanup first: to create db in memory and share the client, not the config - - linters: - - usetesting - path: "(.+)_test.go" - text: "context.Background.*" - - - linters: - - usetesting - path: "pkg/apiserver/(.+)_test.go" - text: "os.MkdirTemp.* could be replaced by t.TempDir.*" - - - linters: - - usetesting - path: "pkg/apiserver/(.+)_test.go" - text: "os.CreateTemp.* could be replaced by os.CreateTemp.*" - - - linters: - - containedctx - path: "cmd/notification-file/main.go" - text: "found a struct that contains a context.Context field" +formatters: + settings: + gci: + sections: + - standard + - default + - prefix(github.com/crowdsecurity) + - 
prefix(github.com/crowdsecurity/crowdsec) + + exclusions: + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index 053439f6d18..fc4e290877d 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -910,7 +910,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { )) httpmock.RegisterResponder("GET", "http://api.crowdsec.net/blocklist1", func(req *http.Request) (*http.Response, error) { - assert.Equal(t, "", req.Header.Get("If-Modified-Since")) + assert.Empty(t, req.Header.Get("If-Modified-Since")) return httpmock.NewStringResponse(200, "1.2.3.4"), nil }) @@ -932,11 +932,11 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { blocklistConfigItemName := "blocklist:blocklist1:last_pull" lastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) - assert.NotEqual(t, "", *lastPullTimestamp) + assert.NotEmpty(t, *lastPullTimestamp) // new call should return 304 and should not change lastPullTimestamp httpmock.RegisterResponder("GET", "http://api.crowdsec.net/blocklist1", func(req *http.Request) (*http.Response, error) { - assert.NotEqual(t, "", req.Header.Get("If-Modified-Since")) + assert.NotEmpty(t, req.Header.Get("If-Modified-Since")) return httpmock.NewStringResponse(304, ""), nil }) @@ -1003,7 +1003,7 @@ func TestAPICPullTopBLCacheForceCall(t *testing.T) { )) httpmock.RegisterResponder("GET", "http://api.crowdsec.net/blocklist1", func(req *http.Request) (*http.Response, error) { - assert.Equal(t, "", req.Header.Get("If-Modified-Since")) + assert.Empty(t, req.Header.Get("If-Modified-Since")) return httpmock.NewStringResponse(304, ""), nil }) @@ -1031,7 +1031,7 @@ func TestAPICPullBlocklistCall(t *testing.T) { defer httpmock.DeactivateAndReset() httpmock.RegisterResponder("GET", "http://api.crowdsec.net/blocklist1", func(req *http.Request) (*http.Response, error) { - assert.Equal(t, "", req.Header.Get("If-Modified-Since")) + 
assert.Empty(t, req.Header.Get("If-Modified-Since")) return httpmock.NewStringResponse(200, "1.2.3.4"), nil }) diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 3eecc39962f..96ad5e8e834 100644 --- a/pkg/apiserver/machines_test.go +++ b/pkg/apiserver/machines_test.go @@ -47,7 +47,7 @@ func TestCreateMachine(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusCreated, w.Code) - assert.Equal(t, "", w.Body.String()) + assert.Empty(t, w.Body.String()) } func TestCreateMachineWithForwardedFor(t *testing.T) { @@ -68,7 +68,7 @@ func TestCreateMachineWithForwardedFor(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusCreated, w.Code) - assert.Equal(t, "", w.Body.String()) + assert.Empty(t, w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) @@ -92,13 +92,13 @@ func TestCreateMachineWithForwardedForNoConfig(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusCreated, w.Code) - assert.Equal(t, "", w.Body.String()) + assert.Empty(t, w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) // For some reason, the IP is empty when running tests // if no forwarded-for headers are present - assert.Equal(t, "", ip) + assert.Empty(t, ip) } func TestCreateMachineWithoutForwardedFor(t *testing.T) { @@ -117,13 +117,13 @@ func TestCreateMachineWithoutForwardedFor(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusCreated, w.Code) - assert.Equal(t, "", w.Body.String()) + assert.Empty(t, w.Body.String()) ip := GetMachineIP(t, *MachineTest.MachineID, config.API.Server.DbConfig) // For some reason, the IP is empty when running tests // if no forwarded-for headers are present - assert.Equal(t, "", ip) + assert.Empty(t, ip) } func TestCreateMachineAlreadyExist(t *testing.T) { diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index df2f68930d6..750237c83e6 100644 --- 
a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -29,6 +29,7 @@ type APIKey struct { TlsAuth *TLSAuth } +// func GenerateAPIKey(n int) (string, error) { bytes := make([]byte, n) if _, err := rand.Read(bytes); err != nil { diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index 4da9fd5bf7b..3eadb0e9358 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -26,6 +26,6 @@ func TestCreateSetGet(t *testing.T) { //expire time.Sleep(1500 * time.Millisecond) ret, err = GetKey("test", "testkey0") - assert.Equal(t, "", ret) + assert.Empty(t, ret) require.NoError(t, err) } diff --git a/pkg/cticlient/client_test.go b/pkg/cticlient/client_test.go index cdbbd0c9732..a7d4c5aaf73 100644 --- a/pkg/cticlient/client_test.go +++ b/pkg/cticlient/client_test.go @@ -280,7 +280,7 @@ func TestSmokeUnknownIP(t *testing.T) { t.Fatalf("failed to get ip info: %s", err) } - assert.Equal(t, "", resp.Ip) + assert.Empty(t, resp.Ip) } func TestRateLimit(t *testing.T) { From 48e3c8c5d47b746ffd7e686e420d8179b0418b34 Mon Sep 17 00:00:00 2001 From: blotus Date: Fri, 4 Apr 2025 14:44:18 +0200 Subject: [PATCH 483/581] do not return an error if we cannot fetch allowlists when starting the appsec (#3550) --- pkg/acquisition/modules/appsec/appsec.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 9796843844c..628ea58ab33 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -418,11 +418,11 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. 
err = w.appsecAllowlistClient.Start(ctx, apiClient) if err != nil { - return fmt.Errorf("failed to fetch allowlists: %w", err) + w.logger.Errorf("failed to fetch allowlists for appsec, disabling them: %s", err) + } else { + w.appsecAllowlistClient.StartRefresh(ctx, t) } - w.appsecAllowlistClient.StartRefresh(ctx, t) - t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live") From faa5b482f655eca37910b15f8ddc8da5eb522d10 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 4 Apr 2025 14:52:45 +0200 Subject: [PATCH 484/581] lint/refactor: defer, reflectvaluecompare, stylecheck (#3544) * lint: enable reflectvaluecompare * lint: remove exception * lint: stylecheck ST016 - methods on the same type should have the same receiver name * lint: enable revive[time-equal] --- .golangci.yml | 9 --------- cmd/crowdsec/serve.go | 2 +- pkg/apiclient/client.go | 8 ++++---- pkg/parser/runtime.go | 5 ++--- 4 files changed, 7 insertions(+), 17 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index a169984d8f4..cd5f4aef7c7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -183,7 +183,6 @@ linters: govet: disable: - - reflectvaluecompare - fieldalignment enable-all: true @@ -269,8 +268,6 @@ linters: disabled: true - name: redundant-import-alias disabled: true - - name: time-equal - disabled: true - name: var-naming disabled: true - name: unchecked-type-assertion @@ -304,7 +301,6 @@ linters: - -ST1003 - -ST1005 - -ST1012 - - -ST1016 - -ST1022 - -QF1003 - -QF1008 @@ -379,11 +375,6 @@ linters: path: pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go text: 'confusing-naming: Method ''QueryRange'' differs only by capitalization to method ''queryRange'' in the same source file' - - linters: - - revive - path: pkg/hubtest/hubtest_item.go - text: 'cyclomatic: .*RunWithLogFile' - # tolerate complex functions in tests for now - linters: - maintidx diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 
a9e496fe36e..642ac46b8fa 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -23,7 +23,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -//nolint:deadcode,unused // debugHandler is kept as a dev convenience: it shuts down and serialize internal state +//nolint:unused // debugHandler is kept as a dev convenience: it shuts down and serialize internal state func debugHandler(sig os.Signal, cConfig *csconfig.Config) error { var ( tmpFile string diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 9ea683c41bf..9192bf095a1 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -48,12 +48,12 @@ type ApiClient struct { UsageMetrics *UsageMetricsService } -func (a *ApiClient) GetClient() *http.Client { - return a.client +func (c *ApiClient) GetClient() *http.Client { + return c.client } -func (a *ApiClient) IsEnrolled() bool { - jwtTransport := a.client.Transport.(*JWTTransport) +func (c *ApiClient) IsEnrolled() bool { + jwtTransport := c.client.Transport.(*JWTTransport) tokenStr := jwtTransport.Token token, _ := jwt.Parse(tokenStr, nil) diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 7af82a71535..831e478af0c 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -42,9 +42,8 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { }() iter := reflect.ValueOf(evt).Elem() - if (iter == reflect.Value{}) || iter.IsZero() { + if !iter.IsValid() || iter.IsZero() { log.Tracef("event is nil") - //event is nil return false } @@ -56,7 +55,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { case reflect.Map: tmp := iter.MapIndex(reflect.ValueOf(f)) /*if we're in a map and the field doesn't exist, the user wants to add it :) */ - if (tmp == reflect.Value{}) || tmp.IsZero() { + if !tmp.IsValid() || tmp.IsZero() { log.Debugf("map entry is zero in '%s'", target) } From 89761938c7a2758b527e349775b1aafa51a29fff Mon Sep 17 00:00:00 2001 From: mmetc 
<92726601+mmetc@users.noreply.github.com> Date: Fri, 4 Apr 2025 14:54:22 +0200 Subject: [PATCH 485/581] pkg/hubtest: use os.CopyFS() (#3539) --- pkg/hubtest/utils.go | 43 ++++--------------------------------------- 1 file changed, 4 insertions(+), 39 deletions(-) diff --git a/pkg/hubtest/utils.go b/pkg/hubtest/utils.go index b42a73461f3..6bfba13807d 100644 --- a/pkg/hubtest/utils.go +++ b/pkg/hubtest/utils.go @@ -71,47 +71,12 @@ func checkPathNotContained(path string, subpath string) error { return nil } +// CopyDir copies the content of a directory to another directory. +// It delegates the operation to os.CopyFS with an additional check to prevent infinite loops. func CopyDir(src string, dest string) error { - err := checkPathNotContained(src, dest) - if err != nil { - return err - } - - f, err := os.Open(src) - if err != nil { - return err - } - - file, err := f.Stat() - if err != nil { + if err := checkPathNotContained(src, dest); err != nil { return err } - if !file.IsDir() { - return errors.New("Source " + file.Name() + " is not a directory!") - } - - err = os.MkdirAll(dest, 0o755) - if err != nil { - return err - } - - files, err := os.ReadDir(src) - if err != nil { - return err - } - - for _, f := range files { - if f.IsDir() { - if err = CopyDir(filepath.Join(src, f.Name()), filepath.Join(dest, f.Name())); err != nil { - return err - } - } else { - if err = Copy(filepath.Join(src, f.Name()), filepath.Join(dest, f.Name())); err != nil { - return err - } - } - } - - return nil + return os.CopyFS(dest, os.DirFS(src)) } From c17d42278f1c1683b7e345ac56f2ec6c0d5b3276 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 14 Apr 2025 16:21:32 +0200 Subject: [PATCH 486/581] replace go-acc, richgo with gotestsum (#3567) --- .github/workflows/bats-sqlite-coverage.yml | 11 +--- .github/workflows/go-tests-windows.yml | 9 +-- .github/workflows/go-tests.yml | 22 ++++---- .github/workflows/publish-tarball-release.yml | 2 +- .gitignore | 
2 +- Makefile | 55 ++++++++++++++----- go.mod | 2 +- go.sum | 4 +- .../modules/cloudwatch/cloudwatch_test.go | 20 +++---- .../modules/journalctl/journalctl_test.go | 21 ++----- pkg/acquisition/modules/kafka/kafka_test.go | 15 ++--- .../modules/kinesis/kinesis_test.go | 26 ++++----- pkg/acquisition/modules/loki/loki_test.go | 22 +++----- .../modules/victorialogs/victorialogs_test.go | 22 +++----- pkg/csplugin/watcher_test.go | 13 ++--- pkg/setup/detect_test.go | 13 +---- test/README.md | 10 +--- test/bats.mk | 6 +- test/dyn-bats/README.md | 2 - 19 files changed, 118 insertions(+), 159 deletions(-) delete mode 100644 test/dyn-bats/README.md diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index a5b2758b6b0..d93c5f0eca1 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -52,16 +52,7 @@ jobs: - name: "Collect coverage data" run: | - go tool covdata textfmt -i test/coverage -o coverage-bats-raw.out - # filter out unwanted packages, should match the argument to "go-acc --ignore" - grep -v \ - -e '/pkg/database' \ - -e '/plugins/notifications' \ - -e '/pkg/protobufs' \ - -e '/pkg/cwversions' \ - -e '/pkg/models' \ - < coverage-bats-raw.out \ - > coverage-bats.out + go tool covdata textfmt -i test/coverage -o coverage-bats.out # # In case you need to inspect the database status after the failure of a given test diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index b7c79496361..e08bbecd65f 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -15,7 +15,6 @@ on: - 'README.md' env: - RICHGO_FORCE_COLOR: 1 CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF: true jobs: @@ -44,12 +43,10 @@ jobs: run: | .github/generate-codecov-yml.sh >> .github/codecov.yml - - name: Run tests + - name: Unit tests run: | - go install github.com/kyoh86/richgo@v0.3.10 - go test -tags expr_debug -coverprofile 
coverage.out -covermode=atomic ./... > out.txt - if(!$?) { cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter; Exit 1 } - cat out.txt | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + go install gotest.tools/gotestsum@v1.12.1 + make testcover - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 0c5b0fec1a1..abe923d054b 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -21,7 +21,6 @@ on: # these env variables are for localstack, so we can emulate aws services env: - RICHGO_FORCE_COLOR: 1 AWS_HOST: localstack # these are to mimic aws config AWS_ACCESS_KEY_ID: test @@ -170,24 +169,25 @@ jobs: run: | .github/generate-codecov-yml.sh >> .github/codecov.yml - - name: Build and run tests, static + - name: Ensure we can do a dynamic build run: | sudo apt -qq -y -o=Dpkg::Use-Pty=0 install build-essential libre2-dev - go install github.com/ory/go-acc@v0.2.8 - go install github.com/kyoh86/richgo@v0.3.10 - set -o pipefail - make build BUILD_STATIC=1 - make go-acc | sed 's/ *coverage:.*of statements in.*//' | richgo testfilter + make build + + - name: Ensure we can do a static build + run: | + make clean build BUILD_STATIC=1 + + - name: Unit tests + run: | + go install gotest.tools/gotestsum@v1.12.1 + make testcover # check if some component stubs are missing - name: "Build profile: minimal" run: | make build BUILD_PROFILE=minimal - - name: Ensure we can do a dynamic build, without tests - run: | - make clean build - - name: Upload unit coverage to Codecov uses: codecov/codecov-action@v4 with: diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 7a2ac84076e..189aeca4256 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -6,7 +6,7 @@ on: - prereleased permissions: - # Use write for: 
hub release edit + # Use write for: gh release upload contents: write jobs: diff --git a/.gitignore b/.gitignore index cba570fdb84..e1068145e01 100644 --- a/.gitignore +++ b/.gitignore @@ -31,7 +31,7 @@ test/bats/.bats/run-logs # Test binaries, built from *_test.go pkg/csplugin/tests/cs_plugin_test* -# Output of go-acc, go -cover +# Output of test coverage *.out test/coverage/* diff --git a/Makefile b/Makefile index 93387488001..60014f393cd 100644 --- a/Makefile +++ b/Makefile @@ -260,7 +260,7 @@ clean-rpm: @$(RM) -r rpm/SRPMS .PHONY: clean -clean: clean-debian clean-rpm testclean ## Remove build artifacts +clean: clean-debian clean-rpm bats-clean ## Remove build artifacts @$(MAKE) -C $(CROWDSEC_FOLDER) clean $(MAKE_FLAGS) @$(MAKE) -C $(CSCLI_FOLDER) clean $(MAKE_FLAGS) @$(RM) $(CROWDSEC_BIN) $(WIN_IGNORE_ERR) @@ -279,28 +279,55 @@ cscli: ## Build cscli crowdsec: ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) -.PHONY: testclean -testclean: bats-clean ## Remove test artifacts - @$(RM) pkg/apiserver/ent $(WIN_IGNORE_ERR) - @$(RM) pkg/cwhub/hubdir $(WIN_IGNORE_ERR) - @$(RM) pkg/cwhub/install $(WIN_IGNORE_ERR) - @$(RM) pkg/types/example.txt $(WIN_IGNORE_ERR) - # for the tests with localstack export AWS_ENDPOINT_FORCE=http://localhost:4566 export AWS_ACCESS_KEY_ID=test export AWS_SECRET_ACCESS_KEY=test testenv: - @echo 'NOTE: You need to run "make localstack" in a separate shell, "make localstack-stop" to terminate it' +ifeq ($(TEST_LOCAL_ONLY),) + @echo 'NOTE: You need to run "make localstack" in a separate shell, "make localstack-stop" to terminate it; or define the envvar TEST_LOCAL_ONLY to some value.' +else + @echo 'TEST_LOCAL_ONLY: skipping tests that require mock containers (localstack, kafka...)' +endif + +.PHONY: check_gotestsum +check_gotestsum: +ifeq ($(OS),Windows_NT) + @where gotestsum >nul || (echo "Error: gotestsum is not installed. 
Install it with 'go install gotest.tools/gotestsum@latest'" && exit 1) +else + @command -v gotestsum > /dev/null 2>&1 || (echo "Error: gotestsum is not installed. Install it with 'go install gotest.tools/gotestsum@latest'" && exit 1) +endif + +# Default format +GOTESTSUM_FORMAT := pkgname + +# If running in GitHub Actions, change format +ifdef GITHUB_ACTIONS + GOTESTSUM_FORMAT := github-actions +endif .PHONY: test -test: testenv ## Run unit tests with localstack - $(GOTEST) --tags=$(GO_TAGS) $(LD_OPTS) ./... +test: check_gotestsum testenv ## Run unit tests +# The quotes in the next command are required for PowerShell + gotestsum --format $(GOTESTSUM_FORMAT) --format-hide-empty-pkg -- "-tags=$(GO_TAGS)" ./... + +.PHONY: testcover +testcover: check_gotestsum testenv ## Run unit tests with coverage report +# The quotes in the next command are required for PowerShell + gotestsum --format $(GOTESTSUM_FORMAT) --format-hide-empty-pkg -- -covermode=atomic "-coverprofile=coverage.out" -coverpkg=./... "-tags=$(GO_TAGS)" ./... + +.PHONY: check_golangci-lint +check_golangci-lint: +ifeq ($(OS),Windows_NT) + @where golangci-lint >nul || (echo "Error: golangci-lint is not installed. Install it from https://github.com/golangci/golangci-lint" && exit 1) +else + @command -v golangci-lint > /dev/null 2>&1 || (echo "Error: golangci-lint is not installed. Install it from https://github.com/golangci/golangci-lint" && exit 1) +endif -.PHONY: go-acc -go-acc: testenv ## Run unit tests with localstack + coverage - go-acc ./... -o coverage.out --ignore database,notifications,protobufs,cwversion,cstest,models --tags $(GO_TAGS) -- $(LD_OPTS) +.PHONY: lint +lint: check_golangci-lint ## Run go linters + @golangci-lint run check_docker: @if ! 
docker info > /dev/null 2>&1; then \ diff --git a/go.mod b/go.mod index cd45b4da5fa..28540d62e48 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/creack/pty v1.1.21 // indirect github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.16 + github.com/crowdsecurity/go-cs-lib v0.0.17 github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc diff --git a/go.sum b/go.sum index c3ff8dcf952..ca1093431d9 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a h1:2Nyr+47 github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a/go.mod h1:xSaXWOhFMSbrV8qOOfBKAyw3aOqfwaSaOy5BgSF8XlA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.16 h1:2/htodjwc/sfsv4deX8F/2Fzg1bOI8w3O1/BPSvvsB0= -github.com/crowdsecurity/go-cs-lib v0.0.16/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= +github.com/crowdsecurity/go-cs-lib v0.0.17 h1:VM++7EDa34kVCXsCRwOjaua3XHru8FVfKUAbqEoQPas= +github.com/crowdsecurity/go-cs-lib v0.0.17/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 2bc4f5d4a14..49d30663162 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ 
b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -64,6 +64,10 @@ func TestMain(m *testing.M) { os.Exit(0) } + if os.Getenv("TEST_LOCAL_ONLY") != "" { + os.Exit(0) + } + if err := checkForLocalStackAvailability(); err != nil { log.Fatalf("local stack error : %s", err) } @@ -80,9 +84,7 @@ func TestMain(m *testing.M) { func TestWatchLogGroupForStreams(t *testing.T) { ctx := t.Context() - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + cstest.SkipOnWindows(t) log.SetLevel(log.DebugLevel) @@ -533,9 +535,7 @@ stream_name: test_stream`), func TestConfiguration(t *testing.T) { ctx := t.Context() - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + cstest.SkipOnWindows(t) log.SetLevel(log.DebugLevel) @@ -611,9 +611,7 @@ stream_name: test_stream`), } func TestConfigureByDSN(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + cstest.SkipOnWindows(t) log.SetLevel(log.DebugLevel) @@ -660,9 +658,7 @@ func TestConfigureByDSN(t *testing.T) { func TestOneShotAcquisition(t *testing.T) { ctx := t.Context() - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + cstest.SkipOnWindows(t) log.SetLevel(log.DebugLevel) diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 843230e9286..424612d8bfc 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -4,7 +4,6 @@ import ( "os" "os/exec" "path/filepath" - "runtime" "testing" "time" @@ -21,9 +20,7 @@ import ( ) func TestBadConfiguration(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + cstest.SkipOnWindows(t) tests := []struct { config string @@ -59,9 +56,7 @@ journalctl_filter: } func TestConfigureDSN(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + cstest.SkipOnWindows(t) tests := []struct { 
dsn string @@ -107,11 +102,9 @@ func TestConfigureDSN(t *testing.T) { } func TestOneShot(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() tests := []struct { config string @@ -190,11 +183,9 @@ journalctl_filter: } func TestStreaming(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() tests := []struct { config string diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index 1d07159d0e7..186cd19bc10 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -3,7 +3,6 @@ package kafkaacquisition import ( "context" "net" - "runtime" "strconv" "testing" "time" @@ -128,11 +127,10 @@ func createTopic(topic string, broker string) { } func TestStreamingAcquisition(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() tests := []struct { name string @@ -202,11 +200,10 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } func TestStreamingAcquisitionWithSSL(t *testing.T) { - ctx := t.Context() + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + cstest.SkipOnWindows(t) - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() tests := []struct { name string diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 4eb3563fdc6..7b0a266748a 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -7,7 +7,6 @@ import ( "fmt" "net" "os" - "runtime" "strconv" "strings" "testing" @@ -112,9 +111,7 @@ func TestMain(m *testing.M) { } func 
TestBadConfiguration(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + cstest.SkipOnWindows(t) tests := []struct { config string @@ -156,11 +153,10 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } func TestReadFromStream(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() tests := []struct { config string @@ -204,11 +200,10 @@ stream_name: stream-1-shard`, } func TestReadFromMultipleShards(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() tests := []struct { config string @@ -255,11 +250,10 @@ stream_name: stream-2-shards`, } func TestFromSubscription(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() tests := []struct { config string diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 2a3789b4364..5bfd6ff8981 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -9,7 +9,6 @@ import ( "net/http" "net/url" "os" - "runtime" "strings" "testing" "time" @@ -332,11 +331,10 @@ func feedLoki(ctx context.Context, logger *log.Entry, n int, title string) error } func TestOneShotAcquisition(t *testing.T) { - ctx := t.Context() + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + cstest.SkipOnWindows(t) - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) @@ -394,11 +392,10 @@ since: 1h } func TestStreamingAcquisition(t *testing.T) { - ctx := t.Context() + 
cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) @@ -510,11 +507,10 @@ query: > } func TestStopStreaming(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() config := ` mode: tail diff --git a/pkg/acquisition/modules/victorialogs/victorialogs_test.go b/pkg/acquisition/modules/victorialogs/victorialogs_test.go index 972523cd5c4..e8e43cdba3c 100644 --- a/pkg/acquisition/modules/victorialogs/victorialogs_test.go +++ b/pkg/acquisition/modules/victorialogs/victorialogs_test.go @@ -9,7 +9,6 @@ import ( "net/http" "net/url" "os" - "runtime" "strconv" "strings" "testing" @@ -254,11 +253,10 @@ func feedVLogs(ctx context.Context, logger *log.Entry, n int, title string) erro } func TestOneShotAcquisition(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) @@ -319,11 +317,10 @@ since: 1h } func TestStreamingAcquisition(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) @@ -431,11 +428,10 @@ query: > } func TestStopStreaming(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindows(t) + cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows") - } + ctx := t.Context() config := ` mode: tail diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index 2414612ab41..7321852a34a 100644 --- 
a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -3,7 +3,6 @@ package csplugin import ( "context" "log" - "runtime" "testing" "time" @@ -49,11 +48,9 @@ func listenChannelWithTimeout(ctx context.Context, channel chan string) error { } func TestPluginWatcherInterval(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindowsBecause(t, "timing is not reliable") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows because timing is not reliable") - } + ctx := t.Context() pw := PluginWatcher{} alertsByPluginName := make(map[string][]*models.Alert) @@ -85,11 +82,9 @@ func TestPluginWatcherInterval(t *testing.T) { } func TestPluginAlertCountWatcher(t *testing.T) { - ctx := t.Context() + cstest.SkipOnWindowsBecause(t, "timing is not reliable") - if runtime.GOOS == "windows" { - t.Skip("Skipping test on windows because timing is not reliable") - } + ctx := t.Context() pw := PluginWatcher{} alertsByPluginName := make(map[string][]*models.Alert) diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 00b6e356bd7..5fa652a6eae 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -392,9 +392,7 @@ func TestDetectSimpleRule(t *testing.T) { } func TestDetectUnitError(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipping on windows") - } + cstest.SkipOnWindows(t) require := require.New(t) setup.ExecCommand = fakeExecCommandNotFound @@ -563,10 +561,7 @@ func TestDetectForcedUnit(t *testing.T) { } func TestDetectForcedProcess(t *testing.T) { - if runtime.GOOS == "windows" { - // while looking for service wizard: rule 'ProcessRunning("foobar")': while looking up running processes: could not get Name: A device attached to the system is not functioning. 
- t.Skip("skipping on windows") - } + cstest.SkipOnWindows(t) require := require.New(t) setup.ExecCommand = fakeExecCommand @@ -593,9 +588,7 @@ func TestDetectForcedProcess(t *testing.T) { } func TestDetectSkipService(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipping on windows") - } + cstest.SkipOnWindows(t) require := require.New(t) setup.ExecCommand = fakeExecCommand diff --git a/test/README.md b/test/README.md index f7b036e7905..b1489f1b7eb 100644 --- a/test/README.md +++ b/test/README.md @@ -356,15 +356,7 @@ If you are not using Docker, you may need to adjust the `PGHOST`/`PGPORT`/`PGPAS An additional user and database both named `crowdsec_test` will be created. -Now you can build and run the tests (we skip bats-test-hub here, they really -should not be affected by a change in DB). - -``` -$ export DB_BACKEND=postgres -$ make clean bats-build bats-fixture bats-test -``` - -or with the pgx driver: +Now you can build and run the tests: ``` $ export DB_BACKEND=pgx diff --git a/test/bats.mk b/test/bats.mk index 7d05d245095..e25156ed929 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -55,7 +55,7 @@ export GOCOVERDIR="$(TEST_DIR)/coverage" export PATH="$(TEST_DIR)/tools:$(PATH)" endef -bats-all: bats-clean bats-build bats-fixture bats-test bats-test-hub +bats-all: bats-clean bats-build bats-fixture bats-test # Source this to run the scripts outside of the Makefile # Old versions of make don't have $(file) directive @@ -102,9 +102,6 @@ bats-clean: ## Remove functional test environment bats-test: bats-environment ## Run functional tests $(TEST_DIR)/run-tests $(TEST_DIR)/bats -bats-test-hub: bats-environment bats-check-requirements ## Run all hub tests - $(TEST_DIR)/run-tests $(TEST_DIR)/dyn-bats - # Not failproof but they can catch bugs and improve learning of sh/bash bats-lint: ## Static checks for the test scripts. 
@shellcheck --version >/dev/null 2>&1 || (echo "ERROR: shellcheck is required."; exit 1) @@ -113,4 +110,3 @@ bats-lint: ## Static checks for the test scripts. bats-test-package: bats-environment ## CI only - test a binary package (deb, rpm, ...) $(TEST_DIR)/instance-data make $(TEST_DIR)/run-tests $(TEST_DIR)/bats - $(TEST_DIR)/run-tests $(TEST_DIR)/dyn-bats diff --git a/test/dyn-bats/README.md b/test/dyn-bats/README.md deleted file mode 100644 index 1e4dec1d6fa..00000000000 --- a/test/dyn-bats/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This directory is for dynamically generated tests. Do not commit them. -Any `*.bats` file here will be removed by the Makefile. From 620bd0117a923fc39180b37c2c02351c654c95ca Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 16 Apr 2025 11:53:52 +0200 Subject: [PATCH 487/581] Refact pkg/database/decisions.go (#3541) --- pkg/database/alertfilter.go | 16 +- pkg/database/alerts.go | 2 +- pkg/database/decisions.go | 359 ++++++++++++------------------------ 3 files changed, 132 insertions(+), 245 deletions(-) diff --git a/pkg/database/alertfilter.go b/pkg/database/alertfilter.go index 9e8cf53a450..e3f4e24cb4f 100644 --- a/pkg/database/alertfilter.go +++ b/pkg/database/alertfilter.go @@ -62,7 +62,7 @@ func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error return nil } -func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { +func handleAlertIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { if contains { // decision contains {start_ip,end_ip} *predicates = append(*predicates, alert.And( alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), @@ -78,7 +78,7 @@ func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, } } -func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, 
predicates *[]predicate.Alert) { +func handleAlertIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { if contains { // decision contains {start_ip,end_ip} *predicates = append(*predicates, alert.And( // matching addr size @@ -132,11 +132,11 @@ func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, } } -func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { +func handleAlertIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { if ip_sz == 4 { - handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + handleAlertIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) } else if ip_sz == 16 { - handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + handleAlertIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) } else if ip_sz != 0 { return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } @@ -170,7 +170,7 @@ func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error return nil } -func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { +func alertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { predicates := make([]predicate.Alert, 0) var ( @@ -241,7 +241,7 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e } } - if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { + if err := handleAlertIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { return nil, err } @@ -249,7 +249,7 @@ func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, e } func 
BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { - preds, err := AlertPredicatesFromFilter(filter) + preds, err := alertPredicatesFromFilter(filter) if err != nil { return nil, err } diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 107abcbb1d0..00121ac8c38 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -913,7 +913,7 @@ func (c *Client) DeleteAlertByID(ctx context.Context, id int) error { } func (c *Client) DeleteAlertWithFilter(ctx context.Context, filter map[string][]string) (int, error) { - preds, err := AlertPredicatesFromFilter(filter) + preds, err := alertPredicatesFromFilter(filter) if err != nil { return 0, err } diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 7522a272799..049560a4883 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -28,9 +28,12 @@ type DecisionsByScenario struct { } func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string][]string) (*ent.DecisionQuery, error) { - var err error - var start_ip, start_sfx, end_ip, end_sfx int64 - var ip_sz int + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + ip_sz int + ) + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ @@ -113,7 +116,7 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] } } - query, err = applyStartIpEndIpFilter(query, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + query, err = decisionIPFilter(query, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return nil, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) } @@ -197,8 +200,10 @@ func (c *Client) QueryDecisionCountByScenario(ctx context.Context) ([]*Decisions } func (c *Client) QueryDecisionWithFilter(ctx context.Context, filter map[string][]string) 
([]*ent.Decision, error) { - var data []*ent.Decision - var err error + var ( + err error + data []*ent.Decision + ) decisions := c.Ent.Decision.Query(). Where(decision.UntilGTE(time.Now().UTC())) @@ -318,9 +323,12 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(ctx context.Context, since *t } func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { - var err error - var start_ip, start_sfx, end_ip, end_sfx int64 - var ip_sz int + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + ip_sz int + ) + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer) */ @@ -352,72 +360,9 @@ func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[strin } } - if ip_sz == 4 { - if contains { /*decision contains {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - decision.StartIPLTE(start_ip), - decision.EndIPGTE(end_ip), - decision.IPSizeEQ(int64(ip_sz)), - )) - } else { /*decision is contained within {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - decision.StartIPGTE(start_ip), - decision.EndIPLTE(end_ip), - decision.IPSizeEQ(int64(ip_sz)), - )) - } - } else if ip_sz == 16 { - if contains { /*decision contains {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - // matching addr size - decision.IPSizeEQ(int64(ip_sz)), - decision.Or( - // decision.start_ip < query.start_ip - decision.StartIPLT(start_ip), - decision.And( - // decision.start_ip == query.start_ip - decision.StartIPEQ(start_ip), - // decision.start_suffix <= query.start_suffix - decision.StartSuffixLTE(start_sfx), - )), - decision.Or( - // decision.end_ip > query.end_ip - decision.EndIPGT(end_ip), - decision.And( - // decision.end_ip == query.end_ip - decision.EndIPEQ(end_ip), - // decision.end_suffix >= query.end_suffix - 
decision.EndSuffixGTE(end_sfx), - ), - ), - )) - } else { - decisions = decisions.Where(decision.And( - // matching addr size - decision.IPSizeEQ(int64(ip_sz)), - decision.Or( - // decision.start_ip > query.start_ip - decision.StartIPGT(start_ip), - decision.And( - // decision.start_ip == query.start_ip - decision.StartIPEQ(start_ip), - // decision.start_suffix >= query.start_suffix - decision.StartSuffixGTE(start_sfx), - )), - decision.Or( - // decision.end_ip < query.end_ip - decision.EndIPLT(end_ip), - decision.And( - // decision.end_ip == query.end_ip - decision.EndIPEQ(end_ip), - // decision.end_suffix <= query.end_suffix - decision.EndSuffixLTE(end_sfx), - ), - ), - )) - } - } else if ip_sz != 0 { - return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + if err != nil { + return "0", nil, err } toDelete, err := decisions.All(ctx) @@ -437,9 +382,12 @@ func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[strin // ExpireDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { - var err error - var start_ip, start_sfx, end_ip, end_sfx int64 - var ip_sz int + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + ip_sz int + ) + contains := true /*if contains is true, return bans that *contains* the given value (value is the inner) else, return bans that are *contained* by the given value (value is the outer)*/ @@ -473,76 +421,10 @@ func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[strin return "0", nil, errors.Wrapf(InvalidFilter, "'%s' doesn't exist", param) } } - if ip_sz == 4 { - if contains { - /*Decision contains {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - 
decision.StartIPLTE(start_ip), - decision.EndIPGTE(end_ip), - decision.IPSizeEQ(int64(ip_sz)), - )) - } else { - /*Decision is contained within {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - decision.StartIPGTE(start_ip), - decision.EndIPLTE(end_ip), - decision.IPSizeEQ(int64(ip_sz)), - )) - } - } else if ip_sz == 16 { - /*decision contains {start_ip,end_ip}*/ - if contains { - decisions = decisions.Where(decision.And( - // matching addr size - decision.IPSizeEQ(int64(ip_sz)), - decision.Or( - // decision.start_ip < query.start_ip - decision.StartIPLT(start_ip), - decision.And( - // decision.start_ip == query.start_ip - decision.StartIPEQ(start_ip), - // decision.start_suffix <= query.start_suffix - decision.StartSuffixLTE(start_sfx), - )), - decision.Or( - // decision.end_ip > query.end_ip - decision.EndIPGT(end_ip), - decision.And( - // decision.end_ip == query.end_ip - decision.EndIPEQ(end_ip), - // decision.end_suffix >= query.end_suffix - decision.EndSuffixGTE(end_sfx), - ), - ), - )) - } else { - /*decision is contained within {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - // matching addr size - decision.IPSizeEQ(int64(ip_sz)), - decision.Or( - // decision.start_ip > query.start_ip - decision.StartIPGT(start_ip), - decision.And( - // decision.start_ip == query.start_ip - decision.StartIPEQ(start_ip), - // decision.start_suffix >= query.start_suffix - decision.StartSuffixGTE(start_sfx), - )), - decision.Or( - // decision.end_ip < query.end_ip - decision.EndIPLT(end_ip), - decision.And( - // decision.end_ip == query.end_ip - decision.EndIPEQ(end_ip), - // decision.end_suffix <= query.end_suffix - decision.EndSuffixLTE(end_sfx), - ), - ), - )) - } - } else if ip_sz != 0 { - return "0", nil, errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + + decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + if err != nil { + return "0", nil, err } DecisionsToDelete, err := 
decisions.All(ctx) @@ -652,9 +534,11 @@ func (c *Client) ExpireDecisionByID(ctx context.Context, decisionID int) (int, [ } func (c *Client) CountDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { - var err error - var start_ip, start_sfx, end_ip, end_sfx int64 - var ip_sz, count int + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + ip_sz, count int + ) ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { @@ -664,7 +548,7 @@ func (c *Client) CountDecisionsByValue(ctx context.Context, decisionValue string contains := true decisions := c.Ent.Decision.Query() - decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") } @@ -678,9 +562,11 @@ func (c *Client) CountDecisionsByValue(ctx context.Context, decisionValue string } func (c *Client) CountActiveDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { - var err error - var start_ip, start_sfx, end_ip, end_sfx int64 - var ip_sz, count int + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + ip_sz, count int + ) ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { @@ -690,7 +576,7 @@ func (c *Client) CountActiveDecisionsByValue(ctx context.Context, decisionValue contains := true decisions := c.Ent.Decision.Query() - decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return 0, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) } @@ -706,9 +592,11 @@ func (c *Client) CountActiveDecisionsByValue(ctx context.Context, decisionValue } func (c *Client) 
GetActiveDecisionsTimeLeftByValue(ctx context.Context, decisionValue string) (time.Duration, error) { - var err error - var start_ip, start_sfx, end_ip, end_sfx int64 - var ip_sz int + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + ip_sz int + ) ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) if err != nil { @@ -720,7 +608,7 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(ctx context.Context, decision decision.UntilGT(time.Now().UTC()), ) - decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return 0, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) } @@ -750,7 +638,7 @@ func (c *Client) CountDecisionsSinceByValue(ctx context.Context, decisionValue s decision.CreatedAtGT(since), ) - decisions, err = applyStartIpEndIpFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") } @@ -763,88 +651,87 @@ func (c *Client) CountDecisionsSinceByValue(ctx context.Context, decisionValue s return count, nil } -func applyStartIpEndIpFilter(decisions *ent.DecisionQuery, contains bool, ip_sz int, start_ip int64, start_sfx int64, end_ip int64, end_sfx int64) (*ent.DecisionQuery, error) { - if ip_sz == 4 { - if contains { - /*Decision contains {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - decision.StartIPLTE(start_ip), - decision.EndIPGTE(end_ip), - decision.IPSizeEQ(int64(ip_sz)), - )) - } else { - /*Decision is contained within {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - decision.StartIPGTE(start_ip), - decision.EndIPLTE(end_ip), - decision.IPSizeEQ(int64(ip_sz)), - )) - } - - return decisions, nil +func 
decisionIPv4Filter(decisions *ent.DecisionQuery, contains bool, ip_sz int, start_ip int64, start_sfx int64, end_ip int64, end_sfx int64) (*ent.DecisionQuery, error) { + if contains { + /*Decision contains {start_ip,end_ip}*/ + return decisions.Where(decision.And( + decision.StartIPLTE(start_ip), + decision.EndIPGTE(end_ip), + decision.IPSizeEQ(int64(ip_sz)))), nil } - if ip_sz == 16 { - /*decision contains {start_ip,end_ip}*/ - if contains { - decisions = decisions.Where(decision.And( - // matching addr size - decision.IPSizeEQ(int64(ip_sz)), - decision.Or( - // decision.start_ip < query.start_ip - decision.StartIPLT(start_ip), - decision.And( - // decision.start_ip == query.start_ip - decision.StartIPEQ(start_ip), - // decision.start_suffix <= query.start_suffix - decision.StartSuffixLTE(start_sfx), - )), - decision.Or( - // decision.end_ip > query.end_ip - decision.EndIPGT(end_ip), - decision.And( - // decision.end_ip == query.end_ip - decision.EndIPEQ(end_ip), - // decision.end_suffix >= query.end_suffix - decision.EndSuffixGTE(end_sfx), - ), - ), - )) - } else { - /*decision is contained within {start_ip,end_ip}*/ - decisions = decisions.Where(decision.And( - // matching addr size - decision.IPSizeEQ(int64(ip_sz)), - decision.Or( - // decision.start_ip > query.start_ip - decision.StartIPGT(start_ip), - decision.And( - // decision.start_ip == query.start_ip - decision.StartIPEQ(start_ip), - // decision.start_suffix >= query.start_suffix - decision.StartSuffixGTE(start_sfx), - )), - decision.Or( - // decision.end_ip < query.end_ip - decision.EndIPLT(end_ip), - decision.And( - // decision.end_ip == query.end_ip - decision.EndIPEQ(end_ip), - // decision.end_suffix <= query.end_suffix - decision.EndSuffixLTE(end_sfx), - ), + /*Decision is contained within {start_ip,end_ip}*/ + return decisions.Where(decision.And( + decision.StartIPGTE(start_ip), + decision.EndIPLTE(end_ip), + decision.IPSizeEQ(int64(ip_sz)))), nil +} + +func decisionIPv6Filter(decisions 
*ent.DecisionQuery, contains bool, ip_sz int, start_ip int64, start_sfx int64, end_ip int64, end_sfx int64) (*ent.DecisionQuery, error) { + /*decision contains {start_ip,end_ip}*/ + if contains { + return decisions.Where(decision.And( + // matching addr size + decision.IPSizeEQ(int64(ip_sz)), + decision.Or( + // decision.start_ip < query.start_ip + decision.StartIPLT(start_ip), + decision.And( + // decision.start_ip == query.start_ip + decision.StartIPEQ(start_ip), + // decision.start_suffix <= query.start_suffix + decision.StartSuffixLTE(start_sfx), + )), + decision.Or( + // decision.end_ip > query.end_ip + decision.EndIPGT(end_ip), + decision.And( + // decision.end_ip == query.end_ip + decision.EndIPEQ(end_ip), + // decision.end_suffix >= query.end_suffix + decision.EndSuffixGTE(end_sfx), ), - )) - } + ), + )), nil + } + + /*decision is contained within {start_ip,end_ip}*/ + return decisions.Where(decision.And( + // matching addr size + decision.IPSizeEQ(int64(ip_sz)), + decision.Or( + // decision.start_ip > query.start_ip + decision.StartIPGT(start_ip), + decision.And( + // decision.start_ip == query.start_ip + decision.StartIPEQ(start_ip), + // decision.start_suffix >= query.start_suffix + decision.StartSuffixGTE(start_sfx), + )), + decision.Or( + // decision.end_ip < query.end_ip + decision.EndIPLT(end_ip), + decision.And( + // decision.end_ip == query.end_ip + decision.EndIPEQ(end_ip), + // decision.end_suffix <= query.end_suffix + decision.EndSuffixLTE(end_sfx), + ), + ), + )), nil +} +func decisionIPFilter(decisions *ent.DecisionQuery, contains bool, ip_sz int, start_ip int64, start_sfx int64, end_ip int64, end_sfx int64) (*ent.DecisionQuery, error) { + switch ip_sz { + case 4: + return decisionIPv4Filter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + case 16: + return decisionIPv6Filter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) + case 0: return decisions, nil - } - - if ip_sz != 0 { + default: return nil, 
errors.Wrapf(InvalidFilter, "unknown ip size %d", ip_sz) } - - return decisions, nil } func decisionPredicatesFromStr(s string, predicateFunc func(string) predicate.Decision) []predicate.Decision { From 922c29983ff5c9cd726fff440312812db3bac313 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 16 Apr 2025 12:37:50 +0200 Subject: [PATCH 488/581] test: add cold log event assert (#3577) --- test/bats/40_cold-logs.bats | 1 + 1 file changed, 1 insertion(+) diff --git a/test/bats/40_cold-logs.bats b/test/bats/40_cold-logs.bats index 4b03e35e58f..52605054242 100644 --- a/test/bats/40_cold-logs.bats +++ b/test/bats/40_cold-logs.bats @@ -43,6 +43,7 @@ setup() { assert_stderr --partial "single file mode : log_media=stdout daemonize=false" assert_stderr --regexp "Adding file .* to filelist" assert_stderr --regexp "reading .* at once" + assert_stderr --partial "Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 0s)" assert_stderr --regexp "Acquisition is finished, shutting down" assert_stderr --regexp "Killing parser routines" assert_stderr --regexp "Bucket routine exiting" From 7e280b23af3bee4505e8ba65da87d6491be3bd41 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 16 Apr 2025 13:15:12 +0200 Subject: [PATCH 489/581] fix: error check on postoverflow config (#3576) * fix error check on postoverflow config * lint --- pkg/parser/unix_parser.go | 49 +++++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/pkg/parser/unix_parser.go b/pkg/parser/unix_parser.go index f0f26a06645..a05d5ee11c7 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -33,25 +33,31 @@ type Parsers struct { EnricherCtx EnricherCtx } -func Init(c map[string]interface{}) (*UnixParserCtx, error) { +func Init(c map[string]any) (*UnixParserCtx, error) { r := UnixParserCtx{} r.Grok = grokky.NewBase() r.Grok.UseRe2 = fflag.Re2GrokSupport.IsEnabled() + files, 
err := os.ReadDir(c["patterns"].(string)) if err != nil { return nil, err } + r.DataFolder = c["data"].(string) - for _, f := range files { - if strings.Contains(f.Name(), ".") || f.IsDir() { + + for _, file := range files { + if strings.Contains(file.Name(), ".") || file.IsDir() { continue } - if err := r.Grok.AddFromFile(filepath.Join(c["patterns"].(string), f.Name())); err != nil { - log.Errorf("failed to load pattern %s : %v", f.Name(), err) + + if err := r.Grok.AddFromFile(filepath.Join(c["patterns"].(string), file.Name())); err != nil { + log.Errorf("failed to load pattern %s: %v", file.Name(), err) return nil, err } } + log.Debugf("Loaded %d pattern files", len(files)) + return &r, nil } @@ -71,9 +77,11 @@ func NewParsers(hub *cwhub.Hub) *Parsers { Filename: hubParserItem.State.LocalPath, Stage: hubParserItem.Stage, } + if itemType == cwhub.PARSERS { parsers.StageFiles = append(parsers.StageFiles, stagefile) } + if itemType == cwhub.POSTOVERFLOWS { parsers.PovfwStageFiles = append(parsers.PovfwStageFiles, stagefile) } @@ -85,6 +93,7 @@ func NewParsers(hub *cwhub.Hub) *Parsers { return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename }) } + if parsers.PovfwStageFiles != nil { sort.Slice(parsers.PovfwStageFiles, func(i, j int) bool { return parsers.PovfwStageFiles[i].Filename < parsers.PovfwStageFiles[j].Filename @@ -99,30 +108,32 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { patternsDir := cConfig.ConfigPaths.PatternDir log.Infof("Loading grok library %s", patternsDir) + /* load base regexps for two grok parsers */ - parsers.Ctx, err = Init(map[string]interface{}{ + parsers.Ctx, err = Init(map[string]any{ "patterns": patternsDir, "data": cConfig.ConfigPaths.DataDir, }) if err != nil { - return parsers, fmt.Errorf("failed to load parser patterns : %v", err) + return parsers, fmt.Errorf("failed to load parser patterns: %w", err) } - parsers.Povfwctx, err = Init(map[string]interface{}{ + + parsers.Povfwctx, 
err = Init(map[string]any{ "patterns": patternsDir, "data": cConfig.ConfigPaths.DataDir, }) if err != nil { - return parsers, fmt.Errorf("failed to load postovflw parser patterns : %v", err) + return parsers, fmt.Errorf("failed to load postovflw parser patterns: %w", err) } /* Load enrichers */ - log.Infof("Loading enrich plugins") + log.Info("Loading enrich plugins") parsers.EnricherCtx, err = Loadplugin() if err != nil { - return parsers, fmt.Errorf("failed to load enrich plugin : %v", err) + return parsers, fmt.Errorf("failed to load enrich plugin: %w", err) } /* @@ -133,19 +144,20 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { parsers.Nodes, err = LoadStages(parsers.StageFiles, parsers.Ctx, parsers.EnricherCtx) if err != nil { - return parsers, fmt.Errorf("failed to load parser config : %v", err) + return parsers, fmt.Errorf("failed to load parser config: %w", err) } if len(parsers.PovfwStageFiles) > 0 { - log.Infof("Loading postoverflow parsers") + log.Info("Loading postoverflow parsers") + parsers.Povfwnodes, err = LoadStages(parsers.PovfwStageFiles, parsers.Povfwctx, parsers.EnricherCtx) + if err != nil { + return parsers, fmt.Errorf("failed to load postoverflow config: %w", err) + } } else { - log.Infof("No postoverflow parsers to load") - parsers.Povfwnodes = []Node{} - } + log.Info("No postoverflow parsers to load") - if err != nil { - return parsers, fmt.Errorf("failed to load postoverflow config : %v", err) + parsers.Povfwnodes = []Node{} } if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { @@ -159,5 +171,6 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { parsers.Povfwctx.Grok = grokky.Host{} parsers.StageFiles = []Stagefile{} parsers.PovfwStageFiles = []Stagefile{} + return parsers, nil } From 4004868245070ae45918ff06ce79928ca120f15c Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 16 Apr 2025 14:39:26 +0200 Subject: [PATCH 490/581] fix mysql client certificate support 
(#3575) --- pkg/csconfig/database.go | 55 +++++++++++++++++++++++++++++++++++----- pkg/database/database.go | 6 ++++- 2 files changed, 53 insertions(+), 8 deletions(-) diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 29e8e4c3312..26150eb2ea4 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -1,12 +1,17 @@ package csconfig import ( + "crypto/tls" + "crypto/x509" "errors" "fmt" + "net/url" + "os" "path/filepath" "time" "entgo.io/ent/dialect" + "github.com/go-sql-driver/mysql" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/go-cs-lib/ptr" @@ -119,7 +124,7 @@ func (c *Config) LoadDBConfig(inCli bool) error { return nil } -func (d *DatabaseCfg) ConnectionString() string { +func (d *DatabaseCfg) ConnectionString() (string, error) { connString := "" switch d.Type { @@ -133,23 +138,59 @@ func (d *DatabaseCfg) ConnectionString() string { connString = fmt.Sprintf("file:%s?%s", d.DbPath, sqliteConnectionStringParameters) case "mysql": + params := url.Values{} + params.Add("parseTime", "True") + + tlsConfig := &tls.Config{} + + // This is just to get an initial value, don't care about the error + systemRootCAs, _ := x509.SystemCertPool() + if systemRootCAs != nil { + tlsConfig.RootCAs = systemRootCAs + } + if d.isSocketConfig() { - connString = fmt.Sprintf("%s:%s@unix(%s)/%s?parseTime=True", d.User, d.Password, d.DbPath, d.DbName) + connString = fmt.Sprintf("%s:%s@unix(%s)/%s", d.User, d.Password, d.DbPath, d.DbName) } else { - connString = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?parseTime=True", d.User, d.Password, d.Host, d.Port, d.DbName) + connString = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", d.User, d.Password, d.Host, d.Port, d.DbName) } if d.SSLMode != "" { - connString = fmt.Sprintf("%s&tls=%s", connString, d.SSLMode) + // This will be overridden if a CA or client cert is provided + params.Set("tls", d.SSLMode) } if d.SSLCACert != "" { - connString = fmt.Sprintf("%s&tls-ca=%s", connString, d.SSLCACert) + caCert, err := 
os.ReadFile(d.SSLCACert) + if err != nil { + return "", fmt.Errorf("failed to read CA cert file %s: %w", d.SSLCACert, err) + } + if tlsConfig.RootCAs == nil { + tlsConfig.RootCAs = x509.NewCertPool() + } + if !tlsConfig.RootCAs.AppendCertsFromPEM(caCert) { + return "", fmt.Errorf("failed to append CA cert file %s: %w", d.SSLCACert, err) + } + params.Set("tls", "custom") } if d.SSLClientCert != "" && d.SSLClientKey != "" { - connString = fmt.Sprintf("%s&tls-cert=%s&tls-key=%s", connString, d.SSLClientCert, d.SSLClientKey) + cert, err := tls.LoadX509KeyPair(d.SSLClientCert, d.SSLClientKey) + if err != nil { + return "", fmt.Errorf("failed to load client cert/key pair: %w", err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + params.Set("tls", "custom") + } + + if params.Get("tls") == "custom" { + // Register the custom TLS config + err := mysql.RegisterTLSConfig("custom", tlsConfig) + if err != nil { + return "", fmt.Errorf("failed to register custom TLS config: %w", err) + } } + connString = fmt.Sprintf("%s?%s", connString, params.Encode()) case "postgres", "postgresql", "pgx": if d.isSocketConfig() { connString = fmt.Sprintf("host=%s user=%s dbname=%s password=%s", d.DbPath, d.User, d.DbName, d.Password) @@ -170,7 +211,7 @@ func (d *DatabaseCfg) ConnectionString() string { } } - return connString + return connString, nil } func (d *DatabaseCfg) ConnectionDialect() (string, string, error) { diff --git a/pkg/database/database.go b/pkg/database/database.go index d5186a76d25..3d7a4e1b094 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -82,7 +82,11 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro } } - drv, err := getEntDriver(typ, dia, config.ConnectionString(), config) + dbConnectionString, err := config.ConnectionString() + if err != nil { + return nil, fmt.Errorf("failed to generate DB connection string: %w", err) + } + drv, err := getEntDriver(typ, dia, dbConnectionString, config) if err != nil { 
return nil, fmt.Errorf("failed opening connection to %s: %w", config.Type, err) } From a0fab0ac5a3adbb458b990edd41a40729c12e15d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 17 Apr 2025 17:34:40 +0200 Subject: [PATCH 491/581] fix: avoid possible race condition while compiling expressions (#3582) --- pkg/exprhelpers/helpers.go | 107 +++++++++++++++++++++++++++---------- 1 file changed, 80 insertions(+), 27 deletions(-) diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 3525bb6c762..1e5426fdb61 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -57,6 +57,13 @@ var dbClient *database.Client var exprFunctionOptions []expr.Option +func init() { //nolint:gochecknoinits + exprFunctionOptions = make([]expr.Option, len(exprFuncs)) + for i, fn := range exprFuncs { + exprFunctionOptions[i] = expr.Function(fn.name, fn.function, fn.signature...) + } +} + var keyValuePattern = regexp.MustCompile(`(?P[^=\s]+)=(?:"(?P[^"\\]*(?:\\.[^"\\]*)*)"|(?P[^=\s]+)|\s*)`) var ( @@ -65,23 +72,13 @@ var ( geoIPRangeReader *maxminddb.Reader ) -func GetExprOptions(ctx map[string]interface{}) []expr.Option { - if len(exprFunctionOptions) == 0 { - exprFunctionOptions = []expr.Option{} - for _, function := range exprFuncs { - exprFunctionOptions = append(exprFunctionOptions, - expr.Function(function.name, - function.function, - function.signature..., - )) - } - } - - ret := []expr.Option{} - ret = append(ret, exprFunctionOptions...) - ret = append(ret, expr.Env(ctx)) +func GetExprOptions(ctx map[string]any) []expr.Option { + // copy the pre‑built options + one Env(...) 
for this call + opts := make([]expr.Option, len(exprFunctionOptions)+1) + copy(opts, exprFunctionOptions) + opts[len(opts)-1] = expr.Env(ctx) - return ret + return opts } func GeoIPInit(datadir string) error { @@ -199,6 +196,7 @@ func FileInit(fileFolder string, filename string, fileType string) error { log.Debugf("ignored file %s%s because already loaded", fileFolder, filename) return nil } + if err != nil { return err } @@ -244,13 +242,13 @@ func Distinct(params ...any) (any, error) { return nil, nil } - array := params[0].([]interface{}) + array := params[0].([]any) if array == nil { - return []interface{}{}, nil + return []any{}, nil } exists := make(map[any]bool) - ret := make([]interface{}, 0) + ret := make([]any, 0) for _, val := range array { if _, ok := exists[val]; !ok { @@ -270,7 +268,7 @@ func Flatten(params ...any) (any, error) { return flatten(nil, reflect.ValueOf(params)), nil } -func flatten(args []interface{}, v reflect.Value) []interface{} { +func flatten(args []any, v reflect.Value) []any { if v.Kind() == reflect.Interface { v = v.Elem() } @@ -501,9 +499,11 @@ func RegexpInFile(params ...any) (any, error) { // func IpInRange(ip string, ipRange string) bool { func IpInRange(params ...any) (any, error) { - var err error - var ipParsed net.IP - var ipRangeParsed *net.IPNet + var ( + err error + ipParsed net.IP + ipRangeParsed *net.IPNet + ) ip := params[0].(string) ipRange := params[1].(string) @@ -513,13 +513,16 @@ func IpInRange(params ...any) (any, error) { log.Debugf("'%s' is not a valid IP", ip) return false, nil } + if _, ipRangeParsed, err = net.ParseCIDR(ipRange); err != nil { log.Debugf("'%s' is not a valid IP Range", ipRange) return false, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility } + if ipRangeParsed.Contains(ipParsed) { return true, nil } + return false, nil } @@ -527,6 +530,7 @@ func IpInRange(params ...any) (any, error) { func 
IsIPV6(params ...any) (any, error) { ip := params[0].(string) ipParsed := net.ParseIP(ip) + if ipParsed == nil { log.Debugf("'%s' is not a valid IP", ip) return false, nil @@ -540,10 +544,12 @@ func IsIPV6(params ...any) (any, error) { func IsIPV4(params ...any) (any, error) { ip := params[0].(string) ipParsed := net.ParseIP(ip) + if ipParsed == nil { log.Debugf("'%s' is not a valid IP", ip) return false, nil } + return ipParsed.To4() != nil, nil } @@ -551,10 +557,12 @@ func IsIPV4(params ...any) (any, error) { func IsIP(params ...any) (any, error) { ip := params[0].(string) ipParsed := net.ParseIP(ip) + if ipParsed == nil { log.Debugf("'%s' is not a valid IP", ip) return false, nil } + return true, nil } @@ -563,6 +571,7 @@ func IpToRange(params ...any) (any, error) { ip := params[0].(string) cidr := params[1].(string) cidr = strings.TrimPrefix(cidr, "/") + mask, err := strconv.Atoi(cidr) if err != nil { log.Errorf("bad cidr '%s': %s", cidr, err) @@ -574,11 +583,13 @@ func IpToRange(params ...any) (any, error) { log.Errorf("can't parse IP address '%s'", ip) return "", nil } + ipRange := iplib.NewNet(ipAddr, mask) if ipRange.IP() == nil { log.Errorf("can't get cidr '%s' of '%s'", cidr, ip) return "", nil } + return ipRange.String(), nil } @@ -591,37 +602,42 @@ func TimeNow(params ...any) (any, error) { func ParseUri(params ...any) (any, error) { uri := params[0].(string) ret := make(map[string][]string) + u, err := url.Parse(uri) if err != nil { log.Errorf("Could not parse URI: %s", err) return ret, nil } + parsed, err := url.ParseQuery(u.RawQuery) if err != nil { log.Errorf("Could not parse query uri : %s", err) return ret, nil } + for k, v := range parsed { ret[k] = v } + return ret, nil } // func KeyExists(key string, dict map[string]interface{}) bool { func KeyExists(params ...any) (any, error) { key := params[0].(string) - dict := params[1].(map[string]interface{}) + dict := params[1].(map[string]any) _, ok := dict[key] + return ok, nil } // func 
GetDecisionsCount(value string) int { func GetDecisionsCount(params ...any) (any, error) { value := params[0].(string) + if dbClient == nil { log.Error("No database config to call GetDecisionsCount()") return 0, nil - } ctx := context.TODO() @@ -631,6 +647,7 @@ func GetDecisionsCount(params ...any) (any, error) { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility } + return count, nil } @@ -638,10 +655,12 @@ func GetDecisionsCount(params ...any) (any, error) { func GetDecisionsSinceCount(params ...any) (any, error) { value := params[0].(string) since := params[1].(string) + if dbClient == nil { log.Error("No database config to call GetDecisionsSinceCount()") return 0, nil } + sinceDuration, err := time.ParseDuration(since) if err != nil { log.Errorf("Failed to parse since parameter '%s' : %s", since, err) @@ -656,79 +675,95 @@ func GetDecisionsSinceCount(params ...any) (any, error) { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility } + return count, nil } func GetActiveDecisionsCount(params ...any) (any, error) { value := params[0].(string) + if dbClient == nil { log.Error("No database config to call GetActiveDecisionsCount()") return 0, nil } + ctx := context.TODO() + count, err := dbClient.CountActiveDecisionsByValue(ctx, value) if err != nil { log.Errorf("Failed to get active decisions count from value '%s'", value) return 0, err } + return count, nil } func GetActiveDecisionsTimeLeft(params ...any) (any, error) { value := params[0].(string) + if dbClient == nil { log.Error("No database config to call GetActiveDecisionsTimeLeft()") return 0, nil } + ctx := context.TODO() + timeLeft, err := 
dbClient.GetActiveDecisionsTimeLeftByValue(ctx, value) if err != nil { log.Errorf("Failed to get active decisions time left from value '%s'", value) return 0, err } + return timeLeft, nil } // func LookupHost(value string) []string { func LookupHost(params ...any) (any, error) { value := params[0].(string) + addresses, err := net.LookupHost(value) if err != nil { log.Errorf("Failed to lookup host '%s' : %s", value, err) return []string{}, nil } + return addresses, nil } // func ParseUnixTime(value string) (time.Time, error) { func ParseUnixTime(params ...any) (any, error) { value := params[0].(string) - //Splitting string here as some unix timestamp may have milliseconds and break ParseInt + // Splitting string here as some unix timestamp may have milliseconds and break ParseInt i, err := strconv.ParseInt(strings.Split(value, ".")[0], 10, 64) if err != nil || i <= 0 { return time.Time{}, fmt.Errorf("unable to parse %s as unix timestamp", value) } + return time.Unix(i, 0), nil } // func ParseUnix(value string) string { func ParseUnix(params ...any) (any, error) { value := params[0].(string) + t, err := ParseUnixTime(value) if err != nil { log.Error(err) return "", nil } + return t.(time.Time).Format(time.RFC3339), nil } // func ToString(value interface{}) string { func ToString(params ...any) (any, error) { value := params[0] + s, ok := value.(string) if !ok { return "", nil } + return s, nil } @@ -736,6 +771,7 @@ func ToString(params ...any) (any, error) { func GetFromStash(params ...any) (any, error) { cacheName := params[0].(string) key := params[1].(string) + return cache.GetKey(cacheName, key) } @@ -745,6 +781,7 @@ func SetInStash(params ...any) (any, error) { key := params[1].(string) value := params[2].(string) expiration := params[3].(*time.Duration) + return cache.SetKey(cacheName, key, value, expiration), nil } @@ -763,12 +800,15 @@ func Match(params ...any) (any, error) { if pattern == "" { return name == "", nil } + if name == "" { if pattern == "*" || 
pattern == "" { return true, nil } + return false, nil } + if pattern[0] == '*' { for i := 0; i <= len(name); i++ { matched, _ := Match(pattern[1:], name[i:]) @@ -776,11 +816,14 @@ func Match(params ...any) (any, error) { return matched, nil } } + return matched, nil } + if pattern[0] == '?' || pattern[0] == name[0] { return Match(pattern[1:], name[1:]) } + return matched, nil } @@ -791,21 +834,24 @@ func FloatApproxEqual(params ...any) (any, error) { if math.Abs(float1-float2) < 1e-6 { return true, nil } + return false, nil } func B64Decode(params ...any) (any, error) { encoded := params[0].(string) + decoded, err := base64.StdEncoding.DecodeString(encoded) if err != nil { return "", err } + return string(decoded), nil } func ParseKV(params ...any) (any, error) { blob := params[0].(string) - target := params[1].(map[string]interface{}) + target := params[1].(map[string]any) prefix := params[2].(string) matches := keyValuePattern.FindAllStringSubmatch(blob, -1) @@ -813,6 +859,7 @@ func ParseKV(params ...any) (any, error) { log.Errorf("could not find any key/value pair in line") return nil, errors.New("invalid input format") } + if _, ok := target[prefix]; !ok { target[prefix] = make(map[string]string) } else { @@ -822,9 +869,11 @@ func ParseKV(params ...any) (any, error) { return nil, errors.New("target is not a map[string]string") } } + for _, match := range matches { key := "" value := "" + for i, name := range keyValuePattern.SubexpNames() { if name == "key" { key = match[i] @@ -834,9 +883,12 @@ func ParseKV(params ...any) (any, error) { value = match[i] } } + target[prefix].(map[string]string)[key] = value } + log.Tracef("unmarshaled KV: %+v", target[prefix]) + return nil, nil } @@ -845,5 +897,6 @@ func Hostname(params ...any) (any, error) { if err != nil { return "", err } + return hostname, nil } From 7396a103d01ab43929eb4d67cba8462f0e2c4fee Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 17 Apr 2025 17:35:09 +0200 
Subject: [PATCH 492/581] refact: logrus.GetLevel() -> logrus.IsLevelEnabled() (#3579) --- pkg/apiclient/auth_jwt.go | 6 +++--- pkg/apiclient/auth_key.go | 4 ++-- pkg/apiclient/client_http.go | 6 +++--- pkg/apiserver/apic.go | 3 ++- pkg/apiserver/apiserver.go | 2 +- pkg/hubtest/hubtest_item.go | 4 ++-- pkg/models/helpers.go | 3 +-- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 1ea64e2881b..54dafb615ba 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -92,7 +92,7 @@ func (t *JWTTransport) refreshJwtToken() error { req.Header.Add("User-Agent", t.UserAgent) } - if log.GetLevel() >= log.TraceLevel { + if log.IsLevelEnabled(log.TraceLevel) { dump, _ := httputil.DumpRequest(req, true) log.Tracef("auth-jwt request: %s", string(dump)) } @@ -106,7 +106,7 @@ func (t *JWTTransport) refreshJwtToken() error { log.Debugf("auth-jwt : http %d", resp.StatusCode) - if log.GetLevel() >= log.TraceLevel { + if log.IsLevelEnabled(log.TraceLevel) { dump, _ := httputil.DumpResponse(resp, true) log.Tracef("auth-jwt response: %s", string(dump)) } @@ -174,7 +174,7 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { attemptsCount := make(map[int]int) for { - if log.GetLevel() >= log.TraceLevel { + if log.IsLevelEnabled(log.TraceLevel) { // requestToDump := cloneRequest(req) dump, _ := httputil.DumpRequest(req, true) log.Tracef("req-jwt: %s", string(dump)) diff --git a/pkg/apiclient/auth_key.go b/pkg/apiclient/auth_key.go index e2213aca227..fff89d3e009 100644 --- a/pkg/apiclient/auth_key.go +++ b/pkg/apiclient/auth_key.go @@ -37,7 +37,7 @@ func (t *APIKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) { log.Debugf("req-api: %s %s", req.Method, req.URL.String()) - if log.GetLevel() >= log.TraceLevel { + if log.IsLevelEnabled(log.TraceLevel) { dump, _ := httputil.DumpRequest(req, true) log.Tracef("auth-api request: %s", string(dump)) } @@ -50,7 +50,7 
@@ func (t *APIKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) { return resp, err } - if log.GetLevel() >= log.TraceLevel { + if log.IsLevelEnabled(log.TraceLevel) { dump, _ := httputil.DumpResponse(resp, true) log.Tracef("auth-api response: %s", string(dump)) } diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go index cd35c9bb795..d3aaa6bc422 100644 --- a/pkg/apiclient/client_http.go +++ b/pkg/apiclient/client_http.go @@ -15,7 +15,7 @@ import ( log "github.com/sirupsen/logrus" ) -func (c *ApiClient) PrepareRequest(ctx context.Context, method, url string, body interface{}) (*http.Request, error) { +func (c *ApiClient) PrepareRequest(ctx context.Context, method, url string, body any) (*http.Request, error) { if !strings.HasSuffix(c.BaseURL.Path, "/") { return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL) } @@ -48,7 +48,7 @@ func (c *ApiClient) PrepareRequest(ctx context.Context, method, url string, body return req, nil } -func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) { +func (c *ApiClient) Do(ctx context.Context, req *http.Request, v any) (*Response, error) { if ctx == nil { return nil, errors.New("context must be non-nil") } @@ -91,7 +91,7 @@ func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (* return newResponse(resp), err } - if log.GetLevel() >= log.DebugLevel { + if log.IsLevelEnabled(log.DebugLevel) { for k, v := range resp.Header { log.Debugf("[headers] %s: %s", k, v) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 4d863d11164..ba2ffa18e39 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -730,7 +730,7 @@ func (a *apic) UpdateAllowlists(ctx context.Context, allowlistsLinks []*modelsca } for _, link := range allowlistsLinks { - if log.GetLevel() >= log.TraceLevel { + if log.IsLevelEnabled(log.TraceLevel) { log.Tracef("allowlist body: %+v", spew.Sdump(link)) } @@ -773,6 
+773,7 @@ func (a *apic) UpdateAllowlists(ctx context.Context, allowlistsLinks []*modelsca for scanner.Scan() { item := scanner.Text() j := &models.AllowlistItem{} + if err := json.Unmarshal([]byte(item), j); err != nil { log.Errorf("while unmarshalling allowlist item: %s", err) continue diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 4c11b8435d2..3c85afe550d 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -178,7 +178,7 @@ func NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APISer } } - if log.GetLevel() < log.DebugLevel { + if !log.IsLevelEnabled(log.DebugLevel) { gin.SetMode(gin.ReleaseMode) } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index 526876a10ed..2f22c3315c4 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -459,9 +459,9 @@ func (t *HubTestItem) RunWithLogFile() error { crowdsecCmd.Env = []string{"TESTDIR="+testPath, "DATADIR="+t.RuntimeHubConfig.InstallDataDir, "TZ=UTC"} log.Debugf("%s", crowdsecCmd.String()) - output, err = crowdsecCmd.CombinedOutput() - if log.GetLevel() >= log.DebugLevel || err != nil { + output, err = crowdsecCmd.CombinedOutput() + if err != nil || log.IsLevelEnabled(log.DebugLevel) { fmt.Println(string(output)) } diff --git a/pkg/models/helpers.go b/pkg/models/helpers.go index 5bc3f2a28b3..a5fecd6c5fc 100644 --- a/pkg/models/helpers.go +++ b/pkg/models/helpers.go @@ -143,8 +143,7 @@ func (a *Alert) FormatAsStrings(machineID string, logger *log.Logger) []string { decision = "(simulated decision)" } - if logger.GetLevel() >= log.DebugLevel { - /*spew is expensive*/ + if logger.IsLevelEnabled(log.DebugLevel) { logger.Debug(spew.Sdump(decisionItem)) } From 7c4e91d3047fcd75673d8b0323af3c6cf1925ecd Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 17 Apr 2025 17:35:28 +0200 Subject: [PATCH 493/581] tests: refact localtest helper, use testify.suite (#3574) --- Makefile 
| 5 - go.mod | 2 +- go.sum | 4 +- .../modules/cloudwatch/cloudwatch_test.go | 455 +++++------------- pkg/acquisition/modules/kafka/kafka_test.go | 6 +- .../modules/kinesis/kinesis_test.go | 43 +- pkg/acquisition/modules/loki/loki_test.go | 9 +- .../modules/victorialogs/victorialogs_test.go | 9 +- 8 files changed, 147 insertions(+), 386 deletions(-) diff --git a/Makefile b/Makefile index 60014f393cd..3a04f174cc9 100644 --- a/Makefile +++ b/Makefile @@ -279,11 +279,6 @@ cscli: ## Build cscli crowdsec: ## Build crowdsec @$(MAKE) -C $(CROWDSEC_FOLDER) build $(MAKE_FLAGS) -# for the tests with localstack -export AWS_ENDPOINT_FORCE=http://localhost:4566 -export AWS_ACCESS_KEY_ID=test -export AWS_SECRET_ACCESS_KEY=test - testenv: ifeq ($(TEST_LOCAL_ONLY),) @echo 'NOTE: You need to run "make localstack" in a separate shell, "make localstack-stop" to terminate it; or define the envvar TEST_LOCAL_ONLY to some value.' diff --git a/go.mod b/go.mod index 28540d62e48..1eafe27861b 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/creack/pty v1.1.21 // indirect github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.17 + github.com/crowdsecurity/go-cs-lib v0.0.18 github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc diff --git a/go.sum b/go.sum index ca1093431d9..fe4a7d74dde 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a h1:2Nyr+47 github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a/go.mod h1:xSaXWOhFMSbrV8qOOfBKAyw3aOqfwaSaOy5BgSF8XlA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= 
-github.com/crowdsecurity/go-cs-lib v0.0.17 h1:VM++7EDa34kVCXsCRwOjaua3XHru8FVfKUAbqEoQPas= -github.com/crowdsecurity/go-cs-lib v0.0.17/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= +github.com/crowdsecurity/go-cs-lib v0.0.18 h1:GNyvaag5MXfuapIy4E30pIOvIE5AyHoanJBNSMA1cmE= +github.com/crowdsecurity/go-cs-lib v0.0.18/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 49d30663162..1a2702208ed 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -1,19 +1,14 @@ package cloudwatchacquisition import ( - "errors" - "fmt" - "net" - "os" - "runtime" - "strings" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -30,6 +25,21 @@ import ( - check shutdown/restart */ +func createLogGroup(t *testing.T, cw *CloudwatchSource, group string) { + _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ + LogGroupName: aws.String(group), + }) + require.NoError(t, err) +} + +func createLogStream(t *testing.T, cw *CloudwatchSource, group string, stream string) { + _, err := cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ + LogGroupName: aws.String(group), + LogStreamName: aws.String(stream), + }) + require.NoError(t, err) +} + func deleteAllLogGroups(t *testing.T, cw *CloudwatchSource) { input := 
&cloudwatchlogs.DescribeLogGroupsInput{} result, err := cw.cwClient.DescribeLogGroups(input) @@ -43,112 +53,69 @@ func deleteAllLogGroups(t *testing.T, cw *CloudwatchSource) { } } -func checkForLocalStackAvailability() error { - v := os.Getenv("AWS_ENDPOINT_FORCE") - if v == "" { - return errors.New("missing aws endpoint for tests : AWS_ENDPOINT_FORCE") - } - - v = strings.TrimPrefix(v, "http://") - - _, err := net.Dial("tcp", v) - if err != nil { - return fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) - } - - return nil +type CloudwatchSuite struct { + suite.Suite } -func TestMain(m *testing.M) { - if runtime.GOOS == "windows" { - os.Exit(0) - } - - if os.Getenv("TEST_LOCAL_ONLY") != "" { - os.Exit(0) - } - - if err := checkForLocalStackAvailability(); err != nil { - log.Fatalf("local stack error : %s", err) - } - +func (s *CloudwatchSuite) SetupSuite() { def_PollNewStreamInterval = 1 * time.Second def_PollStreamInterval = 1 * time.Second def_StreamReadTimeout = 10 * time.Second def_MaxStreamAge = 5 * time.Second def_PollDeadStreamInterval = 5 * time.Second - - os.Exit(m.Run()) } -func TestWatchLogGroupForStreams(t *testing.T) { - ctx := t.Context() - - cstest.SkipOnWindows(t) +func TestCloudwatchSuite(t *testing.T) { + cstest.SetAWSTestEnv(t) + suite.Run(t, new(CloudwatchSuite)) +} - log.SetLevel(log.DebugLevel) +func (s *CloudwatchSuite) TestWatchLogGroupForStreams() { + logrus.SetLevel(logrus.DebugLevel) tests := []struct { - config []byte + config string expectedCfgErr string expectedStartErr string name string setup func(*testing.T, *CloudwatchSource) run func(*testing.T, *CloudwatchSource) teardown func(*testing.T, *CloudwatchSource) - expectedResLen int expectedResMessages []string }{ // require a group name that doesn't exist { - name: "group_does_not_exists", - config: []byte(` + name: "group_does_not_exist", + config: ` source: cloudwatch aws_region: us-east-1 labels: type: test_source group_name: b -stream_name: test_stream`), 
+stream_name: test_stream`, expectedStartErr: "The specified log group does not exist", setup: func(t *testing.T, cw *CloudwatchSource) { deleteAllLogGroups(t, cw) - _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String("test_group_not_used_1"), - }) - require.NoError(t, err) - }, - teardown: func(t *testing.T, cw *CloudwatchSource) { - _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String("test_group_not_used_1"), - }) - require.NoError(t, err) + createLogGroup(t, cw, "test_group_not_used_1") }, }, // test stream mismatch { name: "group_exists_bad_stream_name", - config: []byte(` + config: ` source: cloudwatch aws_region: us-east-1 labels: type: test_source group_name: test_group1 -stream_name: test_stream_bad`), +stream_name: test_stream_bad`, setup: func(t *testing.T, cw *CloudwatchSource) { deleteAllLogGroups(t, cw) - _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String("test_group1"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String("test_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) + createLogGroup(t, cw, "test_group1") + createLogStream(t, cw, "test_group1", "test_stream") // have a message before we start - won't be popped, but will trigger stream monitoring - _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ LogGroupName: aws.String("test_group1"), LogStreamName: aws.String("test_stream"), LogEvents: []*cloudwatchlogs.InputLogEvent{ @@ -160,39 +127,25 @@ stream_name: test_stream_bad`), }) require.NoError(t, err) }, - teardown: func(t *testing.T, cw *CloudwatchSource) { - _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String("test_group1"), - }) - 
require.NoError(t, err) - }, - expectedResLen: 0, + expectedResMessages: []string{}, }, // test stream mismatch { name: "group_exists_bad_stream_regexp", - config: []byte(` + config: ` source: cloudwatch aws_region: us-east-1 labels: type: test_source group_name: test_group1 -stream_regexp: test_bad[0-9]+`), +stream_regexp: test_bad[0-9]+`, setup: func(t *testing.T, cw *CloudwatchSource) { deleteAllLogGroups(t, cw) - _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String("test_group1"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String("test_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) + createLogGroup(t, cw, "test_group1") + createLogStream(t, cw, "test_group1", "test_stream") // have a message before we start - won't be popped, but will trigger stream monitoring - _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ LogGroupName: aws.String("test_group1"), LogStreamName: aws.String("test_stream"), LogEvents: []*cloudwatchlogs.InputLogEvent{ @@ -204,41 +157,27 @@ stream_regexp: test_bad[0-9]+`), }) require.NoError(t, err) }, - teardown: func(t *testing.T, cw *CloudwatchSource) { - _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String("test_group1"), - }) - require.NoError(t, err) - }, - expectedResLen: 0, + expectedResMessages: []string{}, }, // require a group name that does exist and contains a stream in which we are going to put events { name: "group_exists_stream_exists_has_events", - config: []byte(` + config: ` source: cloudwatch aws_region: us-east-1 labels: type: test_source group_name: test_log_group1 log_level: trace -stream_name: test_stream`), +stream_name: test_stream`, // expectedStartErr: "The specified log group does not exist", setup: func(t 
*testing.T, cw *CloudwatchSource) { deleteAllLogGroups(t, cw) - _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String("test_log_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) + createLogGroup(t, cw, "test_log_group1") + createLogStream(t, cw, "test_log_group1", "test_stream") // have a message before we start - won't be popped, but will trigger stream monitoring - _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ LogGroupName: aws.String("test_log_group1"), LogStreamName: aws.String("test_stream"), LogEvents: []*cloudwatchlogs.InputLogEvent{ @@ -271,48 +210,27 @@ stream_name: test_stream`), }) require.NoError(t, err) }, - teardown: func(t *testing.T, cw *CloudwatchSource) { - _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{ - LogGroupName: aws.String("test_log_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - }, - expectedResLen: 3, expectedResMessages: []string{"test_message_1", "test_message_4", "test_message_5"}, }, // have a stream generate events, reach time-out and gets polled again { name: "group_exists_stream_exists_has_events+timeout", - config: []byte(` + config: ` source: cloudwatch aws_region: us-east-1 labels: type: test_source group_name: test_log_group1 log_level: trace -stream_name: test_stream`), +stream_name: test_stream`, // expectedStartErr: "The specified log group does not exist", setup: func(t *testing.T, cw *CloudwatchSource) { deleteAllLogGroups(t, cw) - _, err := 
cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String("test_log_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) + createLogGroup(t, cw, "test_log_group1") + createLogStream(t, cw, "test_log_group1", "test_stream") // have a message before we start - won't be popped, but will trigger stream monitoring - _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ LogGroupName: aws.String("test_log_group1"), LogStreamName: aws.String("test_stream"), LogEvents: []*cloudwatchlogs.InputLogEvent{ @@ -358,48 +276,27 @@ stream_name: test_stream`), time.Sleep(def_PollNewStreamInterval + (1 * time.Second)) time.Sleep(def_PollStreamInterval + (1 * time.Second)) }, - teardown: func(t *testing.T, cw *CloudwatchSource) { - _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{ - LogGroupName: aws.String("test_log_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - }, - expectedResLen: 3, expectedResMessages: []string{"test_message_1", "test_message_41", "test_message_51"}, }, // have a stream generate events, reach time-out and dead body collection { name: "group_exists_stream_exists_has_events+timeout+GC", - config: []byte(` + config: ` source: cloudwatch aws_region: us-east-1 labels: type: test_source group_name: test_log_group1 log_level: trace -stream_name: test_stream`), +stream_name: test_stream`, // expectedStartErr: "The specified log group does not exist", setup: func(t *testing.T, cw *CloudwatchSource) { deleteAllLogGroups(t, cw) - _, err := 
cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String("test_log_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) + createLogGroup(t, cw, "test_log_group1") + createLogStream(t, cw, "test_log_group1", "test_stream") // have a message before we start - won't be popped, but will trigger stream monitoring - _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ LogGroupName: aws.String("test_log_group1"), LogStreamName: aws.String("test_stream"), LogEvents: []*cloudwatchlogs.InputLogEvent{ @@ -417,31 +314,19 @@ stream_name: test_stream`), time.Sleep(def_PollStreamInterval + (1 * time.Second)) time.Sleep(def_PollDeadStreamInterval + (1 * time.Second)) }, - teardown: func(t *testing.T, cw *CloudwatchSource) { - _, err := cw.cwClient.DeleteLogStream(&cloudwatchlogs.DeleteLogStreamInput{ - LogGroupName: aws.String("test_log_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - }, - expectedResLen: 1, + expectedResMessages: []string{"test_message_1"}, }, } for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - dbgLogger := log.New().WithField("test", tc.name) - dbgLogger.Logger.SetLevel(log.DebugLevel) + s.Run(tc.name, func() { + dbgLogger := logrus.New().WithField("test", tc.name) + dbgLogger.Logger.SetLevel(logrus.DebugLevel) dbgLogger.Infof("starting test") cw := CloudwatchSource{} - err := cw.Configure(tc.config, dbgLogger, configuration.METRICS_NONE) - cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + err := cw.Configure(([]byte)(tc.config), dbgLogger, 
configuration.METRICS_NONE) + cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { return @@ -449,41 +334,42 @@ stream_name: test_stream`), // run pre-routine : tests use it to set group & streams etc. if tc.setup != nil { - tc.setup(t, &cw) + tc.setup(s.T(), &cw) } out := make(chan types.Event) tmb := tomb.Tomb{} - rcvdEvts := []types.Event{} dbgLogger.Infof("running StreamingAcquisition") actmb := tomb.Tomb{} actmb.Go(func() error { - err := cw.StreamingAcquisition(ctx, out, &actmb) + err := cw.StreamingAcquisition(s.T().Context(), out, &actmb) dbgLogger.Infof("acquis done") - cstest.RequireErrorContains(t, err, tc.expectedStartErr) + cstest.RequireErrorContains(s.T(), err, tc.expectedStartErr) return nil }) + got := []string{} + // let's empty output chan tmb.Go(func() error { for { select { case in := <-out: - log.Debugf("received event %+v", in) - rcvdEvts = append(rcvdEvts, in) + dbgLogger.Debugf("received event %+v", in) + got = append(got, in.Line.Raw) case <-tmb.Dying(): - log.Debugf("pumper died") + dbgLogger.Debug("pumper died") return nil } } }) if tc.run != nil { - tc.run(t, &cw) + tc.run(s.T(), &cw) } else { dbgLogger.Warning("no code to run") } @@ -495,96 +381,69 @@ stream_name: test_stream`), dbgLogger.Infof("killing datasource") actmb.Kill(nil) <-actmb.Dead() - // dbgLogger.Infof("collected events : %d -> %+v", len(rcvd_evts), rcvd_evts) - // check results - if tc.expectedResLen != -1 { - if tc.expectedResLen != len(rcvdEvts) { - t.Fatalf("%s : expected %d results got %d -> %v", tc.name, tc.expectedResLen, len(rcvdEvts), rcvdEvts) - } - dbgLogger.Debugf("got %d expected messages", len(rcvdEvts)) - } - - if len(tc.expectedResMessages) != 0 { - res := tc.expectedResMessages - for idx, v := range rcvdEvts { - if len(res) == 0 { - t.Fatalf("result %d/%d : received '%s', didn't expect anything (recvd:%d, expected:%d)", idx, len(rcvdEvts), v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) - } - - if res[0] != 
v.Line.Raw { - t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvdEvts), res[0], v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) - } - - dbgLogger.Debugf("got message '%s'", res[0]) - res = res[1:] - } - - if len(res) != 0 { - t.Fatalf("leftover unmatched results : %v", res) - } + if len(tc.expectedResMessages) == 0 { + s.Empty(got, "unexpected events") + } else { + s.Equal(tc.expectedResMessages, got, "mismatched events") } if tc.teardown != nil { - tc.teardown(t, &cw) + tc.teardown(s.T(), &cw) } }) } } -func TestConfiguration(t *testing.T) { - ctx := t.Context() - - cstest.SkipOnWindows(t) - - log.SetLevel(log.DebugLevel) +func (s *CloudwatchSuite) TestConfiguration() { + logrus.SetLevel(logrus.DebugLevel) tests := []struct { - config []byte + config string expectedCfgErr string expectedStartErr string name string }{ { - name: "group_does_not_exists", - config: []byte(` + name: "group_does_not_exist", + config: ` source: cloudwatch aws_region: us-east-1 labels: type: test_source group_name: test_group -stream_name: test_stream`), +stream_name: test_stream`, expectedStartErr: "The specified log group does not exist", }, { - config: []byte(` + config: ` xxx: cloudwatch labels: type: test_source group_name: test_group -stream_name: test_stream`), +stream_name: test_stream`, expectedCfgErr: "field xxx not found in type", }, { name: "missing_group_name", - config: []byte(` + config: ` source: cloudwatch aws_region: us-east-1 labels: type: test_source -stream_name: test_stream`), +stream_name: test_stream`, expectedCfgErr: "group_name is mandatory for CloudwatchSource", }, } for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - dbgLogger := log.New().WithField("test", tc.name) - dbgLogger.Logger.SetLevel(log.DebugLevel) + s.Run(tc.name, func() { + dbgLogger := logrus.New().WithField("test", tc.name) + dbgLogger.Logger.SetLevel(logrus.DebugLevel) cw := CloudwatchSource{} - err := cw.Configure(tc.config, 
dbgLogger, configuration.METRICS_NONE) - cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + err := cw.Configure(([]byte)(tc.config), dbgLogger, configuration.METRICS_NONE) + cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { return @@ -595,25 +454,23 @@ stream_name: test_stream`), switch cw.GetMode() { case "tail": - err = cw.StreamingAcquisition(ctx, out, &tmb) + err = cw.StreamingAcquisition(s.T().Context(), out, &tmb) case "cat": - err = cw.OneShotAcquisition(ctx, out, &tmb) + err = cw.OneShotAcquisition(s.T().Context(), out, &tmb) } - cstest.RequireErrorContains(t, err, tc.expectedStartErr) + cstest.RequireErrorContains(s.T(), err, tc.expectedStartErr) - log.Debugf("killing ...") + dbgLogger.Debugf("killing ...") tmb.Kill(nil) <-tmb.Dead() - log.Debugf("dead :)") + dbgLogger.Debugf("dead :)") }) } } -func TestConfigureByDSN(t *testing.T) { - cstest.SkipOnWindows(t) - - log.SetLevel(log.DebugLevel) +func (s *CloudwatchSuite) TestConfigureByDSN() { + logrus.SetLevel(logrus.DebugLevel) tests := []struct { dsn string @@ -644,23 +501,19 @@ func TestConfigureByDSN(t *testing.T) { } for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - dbgLogger := log.New().WithField("test", tc.name) - dbgLogger.Logger.SetLevel(log.DebugLevel) + s.Run(tc.name, func() { + dbgLogger := logrus.New().WithField("test", tc.name) + dbgLogger.Logger.SetLevel(logrus.DebugLevel) cw := CloudwatchSource{} err := cw.ConfigureByDSN(tc.dsn, tc.labels, dbgLogger, "") - cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr) }) } } -func TestOneShotAcquisition(t *testing.T) { - ctx := t.Context() - - cstest.SkipOnWindows(t) - - log.SetLevel(log.DebugLevel) +func (s *CloudwatchSuite) TestOneShotAcquisition() { + logrus.SetLevel(logrus.DebugLevel) tests := []struct { dsn string @@ -670,7 +523,6 @@ func TestOneShotAcquisition(t *testing.T) { setup func(*testing.T, *CloudwatchSource) run 
func(*testing.T, *CloudwatchSource) teardown func(*testing.T, *CloudwatchSource) - expectedResLen int expectedResMessages []string }{ // stream with no data @@ -680,24 +532,10 @@ func TestOneShotAcquisition(t *testing.T) { // expectedStartErr: "The specified log group does not exist", setup: func(t *testing.T, cw *CloudwatchSource) { deleteAllLogGroups(t, cw) - _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String("test_log_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) + createLogGroup(t, cw, "test_log_group1") + createLogStream(t, cw, "test_log_group1", "test_stream") }, - teardown: func(t *testing.T, cw *CloudwatchSource) { - _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - }, - expectedResLen: 0, + expectedResMessages: []string{}, }, // stream with one event { @@ -706,19 +544,11 @@ func TestOneShotAcquisition(t *testing.T) { // expectedStartErr: "The specified log group does not exist", setup: func(t *testing.T, cw *CloudwatchSource) { deleteAllLogGroups(t, cw) - _, err := cw.cwClient.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - - _, err = cw.cwClient.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String("test_log_group1"), - LogStreamName: aws.String("test_stream"), - }) - require.NoError(t, err) + createLogGroup(t, cw, "test_log_group1") + createLogStream(t, cw, "test_log_group1", "test_stream") // this one is too much in the back - _, err = cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ + _, err := cw.cwClient.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{ LogGroupName: 
aws.String("test_log_group1"), LogStreamName: aws.String("test_stream"), LogEvents: []*cloudwatchlogs.InputLogEvent{ @@ -756,26 +586,19 @@ func TestOneShotAcquisition(t *testing.T) { }) require.NoError(t, err) }, - teardown: func(t *testing.T, cw *CloudwatchSource) { - _, err := cw.cwClient.DeleteLogGroup(&cloudwatchlogs.DeleteLogGroupInput{ - LogGroupName: aws.String("test_log_group1"), - }) - require.NoError(t, err) - }, - expectedResLen: 1, expectedResMessages: []string{"test_message_2"}, }, } for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - dbgLogger := log.New().WithField("test", tc.name) - dbgLogger.Logger.SetLevel(log.DebugLevel) + s.Run(tc.name, func() { + dbgLogger := logrus.New().WithField("test", tc.name) + dbgLogger.Logger.SetLevel(logrus.DebugLevel) dbgLogger.Infof("starting test") cw := CloudwatchSource{} err := cw.ConfigureByDSN(tc.dsn, map[string]string{"type": "test"}, dbgLogger, "") - cstest.RequireErrorContains(t, err, tc.expectedCfgErr) + cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { return @@ -784,61 +607,39 @@ func TestOneShotAcquisition(t *testing.T) { dbgLogger.Infof("config done test") // run pre-routine : tests use it to set group & streams etc. 
if tc.setup != nil { - tc.setup(t, &cw) + tc.setup(s.T(), &cw) } out := make(chan types.Event, 100) tmb := tomb.Tomb{} - rcvdEvts := []types.Event{} - dbgLogger.Infof("running StreamingAcquisition") + dbgLogger.Infof("running OneShotAcquisition") - err = cw.OneShotAcquisition(ctx, out, &tmb) - cstest.RequireErrorContains(t, err, tc.expectedStartErr) + err = cw.OneShotAcquisition(s.T().Context(), out, &tmb) + cstest.RequireErrorContains(s.T(), err, tc.expectedStartErr) dbgLogger.Infof("acquis done") close(out) // let's empty output chan + got := []string{} for evt := range out { - rcvdEvts = append(rcvdEvts, evt) + got = append(got, evt.Line.Raw) } if tc.run != nil { - tc.run(t, &cw) + tc.run(s.T(), &cw) } else { dbgLogger.Warning("no code to run") } - if tc.expectedResLen != -1 { - if tc.expectedResLen != len(rcvdEvts) { - t.Fatalf("%s : expected %d results got %d -> %v", tc.name, tc.expectedResLen, len(rcvdEvts), rcvdEvts) - } else { - dbgLogger.Debugf("got %d expected messages", len(rcvdEvts)) - } - } - - if len(tc.expectedResMessages) != 0 { - res := tc.expectedResMessages - for idx, v := range rcvdEvts { - if len(res) == 0 { - t.Fatalf("result %d/%d : received '%s', didn't expect anything (recvd:%d, expected:%d)", idx, len(rcvdEvts), v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) - } - - if res[0] != v.Line.Raw { - t.Fatalf("result %d/%d : expected '%s', received '%s' (recvd:%d, expected:%d)", idx, len(rcvdEvts), res[0], v.Line.Raw, len(rcvdEvts), len(tc.expectedResMessages)) - } - - dbgLogger.Debugf("got message '%s'", res[0]) - res = res[1:] - } - - if len(res) != 0 { - t.Fatalf("leftover unmatched results : %v", res) - } + if len(tc.expectedResMessages) == 0 { + s.Empty(got, "unexpected events") + } else { + s.Equal(tc.expectedResMessages, got, "mismatched events") } if tc.teardown != nil { - tc.teardown(t, &cw) + tc.teardown(s.T(), &cw) } }) } diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go 
index 186cd19bc10..8206d1833df 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -127,8 +127,7 @@ func createTopic(topic string, broker string) { } func TestStreamingAcquisition(t *testing.T) { - cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + cstest.SetAWSTestEnv(t) ctx := t.Context() @@ -200,8 +199,7 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } func TestStreamingAcquisitionWithSSL(t *testing.T) { - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - cstest.SkipOnWindows(t) + cstest.SetAWSTestEnv(t) ctx := t.Context() diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 7b0a266748a..faeb14656f4 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -5,10 +5,8 @@ import ( "compress/gzip" "encoding/json" "fmt" - "net" "os" "strconv" - "strings" "testing" "time" @@ -26,21 +24,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -func getLocalStackEndpoint() (string, error) { - endpoint := "http://localhost:4566" - - if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { - v = strings.TrimPrefix(v, "http://") - - _, err := net.Dial("tcp", v) - if err != nil { - return "", fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) - } - } - - return endpoint, nil -} - func GenSubObject(t *testing.T, i int) []byte { r := CloudWatchSubscriptionRecord{ MessageType: "subscription", @@ -69,10 +52,7 @@ func GenSubObject(t *testing.T, i int) []byte { return b.Bytes() } -func WriteToStream(t *testing.T, streamName string, count int, shards int, sub bool) { - endpoint, err := getLocalStackEndpoint() - require.NoError(t, err) - +func WriteToStream(t *testing.T, endpoint string, streamName string, count int, shards int, sub bool) { sess := session.Must(session.NewSession()) kinesisClient := kinesis.New(sess, 
aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1")) @@ -90,7 +70,7 @@ func WriteToStream(t *testing.T, streamName string, count int, shards int, sub b data = []byte(strconv.Itoa(i)) } - _, err = kinesisClient.PutRecord(&kinesis.PutRecordInput{ + _, err := kinesisClient.PutRecord(&kinesis.PutRecordInput{ Data: data, PartitionKey: aws.String(partition), StreamName: aws.String(streamName), @@ -153,8 +133,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } func TestReadFromStream(t *testing.T) { - cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + endpoint := cstest.SetAWSTestEnv(t) ctx := t.Context() @@ -172,8 +151,6 @@ stream_name: stream-1-shard`, shards: 1, }, } - endpoint, _ := getLocalStackEndpoint() - for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) @@ -186,7 +163,7 @@ stream_name: stream-1-shard`, require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(t, f.Config.StreamName, test.count, test.shards, false) + WriteToStream(t, endpoint, f.Config.StreamName, test.count, test.shards, false) for i := range test.count { e := <-out @@ -200,8 +177,7 @@ stream_name: stream-1-shard`, } func TestReadFromMultipleShards(t *testing.T) { - cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + endpoint := cstest.SetAWSTestEnv(t) ctx := t.Context() @@ -219,7 +195,6 @@ stream_name: stream-2-shards`, shards: 2, }, } - endpoint, _ := getLocalStackEndpoint() for _, test := range tests { f := KinesisSource{} @@ -233,7 +208,7 @@ stream_name: stream-2-shards`, require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(t, f.Config.StreamName, test.count, test.shards, false) + WriteToStream(t, endpoint, f.Config.StreamName, test.count, test.shards, false) c := 0 @@ -250,8 +225,7 @@ stream_name: stream-2-shards`, } func 
TestFromSubscription(t *testing.T) { - cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + endpoint := cstest.SetAWSTestEnv(t) ctx := t.Context() @@ -270,7 +244,6 @@ from_subscription: true`, shards: 1, }, } - endpoint, _ := getLocalStackEndpoint() for _, test := range tests { f := KinesisSource{} @@ -284,7 +257,7 @@ from_subscription: true`, require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(t, f.Config.StreamName, test.count, test.shards, true) + WriteToStream(t, endpoint, f.Config.StreamName, test.count, test.shards, true) for i := range test.count { e := <-out diff --git a/pkg/acquisition/modules/loki/loki_test.go b/pkg/acquisition/modules/loki/loki_test.go index 5bfd6ff8981..bb7b6e0ebd2 100644 --- a/pkg/acquisition/modules/loki/loki_test.go +++ b/pkg/acquisition/modules/loki/loki_test.go @@ -331,8 +331,7 @@ func feedLoki(ctx context.Context, logger *log.Entry, n int, title string) error } func TestOneShotAcquisition(t *testing.T) { - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") - cstest.SkipOnWindows(t) + cstest.SetAWSTestEnv(t) ctx := t.Context() @@ -392,8 +391,7 @@ since: 1h } func TestStreamingAcquisition(t *testing.T) { - cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + cstest.SetAWSTestEnv(t) ctx := t.Context() @@ -507,8 +505,7 @@ query: > } func TestStopStreaming(t *testing.T) { - cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + cstest.SetAWSTestEnv(t) ctx := t.Context() diff --git a/pkg/acquisition/modules/victorialogs/victorialogs_test.go b/pkg/acquisition/modules/victorialogs/victorialogs_test.go index e8e43cdba3c..018f19a71b6 100644 --- a/pkg/acquisition/modules/victorialogs/victorialogs_test.go +++ b/pkg/acquisition/modules/victorialogs/victorialogs_test.go @@ -253,8 +253,7 @@ func feedVLogs(ctx context.Context, logger *log.Entry, n int, title string) erro } func TestOneShotAcquisition(t *testing.T) { - 
cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + cstest.SetAWSTestEnv(t) ctx := t.Context() @@ -317,8 +316,7 @@ since: 1h } func TestStreamingAcquisition(t *testing.T) { - cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + cstest.SetAWSTestEnv(t) ctx := t.Context() @@ -428,8 +426,7 @@ query: > } func TestStopStreaming(t *testing.T) { - cstest.SkipOnWindows(t) - cstest.SkipIfDefined(t, "TEST_LOCAL_ONLY") + cstest.SetAWSTestEnv(t) ctx := t.Context() From d46cef1bcb9534c842400ce9b8065d1c9579d1af Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 23 Apr 2025 21:30:03 +0200 Subject: [PATCH 494/581] refact parser Init: argument types (#3578) * refact parser Init: argument types * lint * tests * rename struct field; drop redundant nil check --- cmd/crowdsec/crowdsec.go | 2 +- pkg/parser/node_test.go | 5 +++- pkg/parser/parsing_test.go | 5 ++-- pkg/parser/unix_parser.go | 48 +++++++++++++++----------------------- 4 files changed, 27 insertions(+), 33 deletions(-) diff --git a/cmd/crowdsec/crowdsec.go b/cmd/crowdsec/crowdsec.go index 29be3ff779f..40f70f63d6d 100644 --- a/cmd/crowdsec/crowdsec.go +++ b/cmd/crowdsec/crowdsec.go @@ -144,7 +144,7 @@ func runCrowdsec(cConfig *csconfig.Config, parsers *parser.Parsers, hub *cwhub.H outputsTomb.Go(func() error { defer trace.CatchPanic("crowdsec/runOutput") - return runOutput(inputEventChan, outputEventChan, buckets, *parsers.Povfwctx, parsers.Povfwnodes, apiClient) + return runOutput(inputEventChan, outputEventChan, buckets, *parsers.PovfwCtx, parsers.Povfwnodes, apiClient) }) } diff --git a/pkg/parser/node_test.go b/pkg/parser/node_test.go index 76d35a9ffb0..880f01a6b7b 100644 --- a/pkg/parser/node_test.go +++ b/pkg/parser/node_test.go @@ -7,7 +7,7 @@ import ( ) func TestParserConfigs(t *testing.T) { - pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) + pctx, err := 
NewUnixParserCtx("../../config/patterns/", "./tests/") if err != nil { t.Fatalf("unable to load patterns : %s", err) } @@ -47,11 +47,13 @@ func TestParserConfigs(t *testing.T) { {Key: string("MYGROKBIS"), Value: string("[a-z]")}, }, Grok: GrokPattern{RegexpValue: "^x%{MYGROKBIS:extr}$", TargetField: "t"}}, false, true}, } + for idx := range CfgTests { err := CfgTests[idx].NodeCfg.compile(pctx, EnricherCtx{}) if CfgTests[idx].Compiles && err != nil { t.Fatalf("Compile: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } + if !CfgTests[idx].Compiles && err == nil { t.Fatalf("Compile: (%d/%d) expected error", idx+1, len(CfgTests)) } @@ -60,6 +62,7 @@ func TestParserConfigs(t *testing.T) { if CfgTests[idx].Valid && err != nil { t.Fatalf("Valid: (%d/%d) expected valid, got : %s", idx+1, len(CfgTests), err) } + if !CfgTests[idx].Valid && err == nil { t.Fatalf("Valid: (%d/%d) expected error", idx+1, len(CfgTests)) } diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 84d5f4db743..dc96c988204 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -7,6 +7,7 @@ import ( "html/template" "io" "os" + "path/filepath" "sort" "strings" "testing" @@ -179,7 +180,7 @@ func prepTests(t require.TestingT) (*UnixParserCtx, EnricherCtx) { /* this should be refactored to 2 lines :p */ // Init the parser - pctx, err = Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) + pctx, err = NewUnixParserCtx(filepath.Join(cfgdir, "patterns"), "./tests/") require.NoError(t, err, "parser init failed") return pctx, ectx @@ -403,7 +404,7 @@ func TestGeneratePatternsDoc(t *testing.T) { return } - pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) + pctx, err := NewUnixParserCtx("../../config/patterns/", "./tests/") require.NoError(t, err, "unable to load patterns") log.Infof("-> %s", spew.Sdump(pctx)) diff --git a/pkg/parser/unix_parser.go 
b/pkg/parser/unix_parser.go index a05d5ee11c7..e334d51c20b 100644 --- a/pkg/parser/unix_parser.go +++ b/pkg/parser/unix_parser.go @@ -25,7 +25,7 @@ type UnixParserCtx struct { type Parsers struct { Ctx *UnixParserCtx - Povfwctx *UnixParserCtx + PovfwCtx *UnixParserCtx StageFiles []Stagefile PovfwStageFiles []Stagefile Nodes []Node @@ -33,24 +33,24 @@ type Parsers struct { EnricherCtx EnricherCtx } -func Init(c map[string]any) (*UnixParserCtx, error) { +func NewUnixParserCtx(patternDir string, dataDir string) (*UnixParserCtx, error) { r := UnixParserCtx{} r.Grok = grokky.NewBase() r.Grok.UseRe2 = fflag.Re2GrokSupport.IsEnabled() - files, err := os.ReadDir(c["patterns"].(string)) + files, err := os.ReadDir(patternDir) if err != nil { return nil, err } - r.DataFolder = c["data"].(string) + r.DataFolder = dataDir for _, file := range files { if strings.Contains(file.Name(), ".") || file.IsDir() { continue } - if err := r.Grok.AddFromFile(filepath.Join(c["patterns"].(string), file.Name())); err != nil { + if err := r.Grok.AddFromFile(filepath.Join(patternDir, file.Name())); err != nil { log.Errorf("failed to load pattern %s: %v", file.Name(), err) return nil, err } @@ -66,7 +66,7 @@ func Init(c map[string]any) (*UnixParserCtx, error) { func NewParsers(hub *cwhub.Hub) *Parsers { parsers := &Parsers{ Ctx: &UnixParserCtx{}, - Povfwctx: &UnixParserCtx{}, + PovfwCtx: &UnixParserCtx{}, StageFiles: make([]Stagefile, 0), PovfwStageFiles: make([]Stagefile, 0), } @@ -88,17 +88,13 @@ func NewParsers(hub *cwhub.Hub) *Parsers { } } - if parsers.StageFiles != nil { - sort.Slice(parsers.StageFiles, func(i, j int) bool { - return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename - }) - } + sort.Slice(parsers.StageFiles, func(i, j int) bool { + return parsers.StageFiles[i].Filename < parsers.StageFiles[j].Filename + }) - if parsers.PovfwStageFiles != nil { - sort.Slice(parsers.PovfwStageFiles, func(i, j int) bool { - return parsers.PovfwStageFiles[i].Filename < 
parsers.PovfwStageFiles[j].Filename - }) - } + sort.Slice(parsers.PovfwStageFiles, func(i, j int) bool { + return parsers.PovfwStageFiles[i].Filename < parsers.PovfwStageFiles[j].Filename + }) return parsers } @@ -106,22 +102,16 @@ func NewParsers(hub *cwhub.Hub) *Parsers { func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { var err error - patternsDir := cConfig.ConfigPaths.PatternDir - log.Infof("Loading grok library %s", patternsDir) + patternDir := cConfig.ConfigPaths.PatternDir + log.Infof("Loading grok library %s", patternDir) /* load base regexps for two grok parsers */ - parsers.Ctx, err = Init(map[string]any{ - "patterns": patternsDir, - "data": cConfig.ConfigPaths.DataDir, - }) + parsers.Ctx, err = NewUnixParserCtx(patternDir, cConfig.ConfigPaths.DataDir) if err != nil { return parsers, fmt.Errorf("failed to load parser patterns: %w", err) } - parsers.Povfwctx, err = Init(map[string]any{ - "patterns": patternsDir, - "data": cConfig.ConfigPaths.DataDir, - }) + parsers.PovfwCtx, err = NewUnixParserCtx(patternDir, cConfig.ConfigPaths.DataDir) if err != nil { return parsers, fmt.Errorf("failed to load postovflw parser patterns: %w", err) } @@ -150,7 +140,7 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { if len(parsers.PovfwStageFiles) > 0 { log.Info("Loading postoverflow parsers") - parsers.Povfwnodes, err = LoadStages(parsers.PovfwStageFiles, parsers.Povfwctx, parsers.EnricherCtx) + parsers.Povfwnodes, err = LoadStages(parsers.PovfwStageFiles, parsers.PovfwCtx, parsers.EnricherCtx) if err != nil { return parsers, fmt.Errorf("failed to load postoverflow config: %w", err) } @@ -162,13 +152,13 @@ func LoadParsers(cConfig *csconfig.Config, parsers *Parsers) (*Parsers, error) { if cConfig.Prometheus != nil && cConfig.Prometheus.Enabled { parsers.Ctx.Profiling = true - parsers.Povfwctx.Profiling = true + parsers.PovfwCtx.Profiling = true } /* Reset CTX grok to reduce memory footprint after we compile 
all the patterns */ parsers.Ctx.Grok = grokky.Host{} - parsers.Povfwctx.Grok = grokky.Host{} + parsers.PovfwCtx.Grok = grokky.Host{} parsers.StageFiles = []Stagefile{} parsers.PovfwStageFiles = []Stagefile{} From 34e306505c9ae08b996e28f592d35b8f2a0a6bb5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 24 Apr 2025 10:25:48 +0200 Subject: [PATCH 495/581] refact pkg/database: dry decision count (#3586) --- pkg/database/decisions.go | 70 +++++--------------------------------- pkg/exprhelpers/helpers.go | 6 ++-- 2 files changed, 12 insertions(+), 64 deletions(-) diff --git a/pkg/database/decisions.go b/pkg/database/decisions.go index 049560a4883..94b8a54b792 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -533,16 +533,10 @@ func (c *Client) ExpireDecisionByID(ctx context.Context, decisionID int) (int, [ return count, toUpdate, err } -func (c *Client) CountDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { - var ( - err error - start_ip, start_sfx, end_ip, end_sfx int64 - ip_sz, count int - ) - - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) +func (c *Client) CountDecisionsByValue(ctx context.Context, value string, since *time.Time, onlyActive bool) (int, error) { + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(value) if err != nil { - return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) + return 0, fmt.Errorf("unable to convert '%s' to int: %w", value, err) } contains := true @@ -550,40 +544,18 @@ func (c *Client) CountDecisionsByValue(ctx context.Context, decisionValue string decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { - return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") - } - - count, err = decisions.Count(ctx) - if err != nil { - return 0, errors.Wrapf(err, "fail to count decisions") + return 
0, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) } - return count, nil -} - -func (c *Client) CountActiveDecisionsByValue(ctx context.Context, decisionValue string) (int, error) { - var ( - err error - start_ip, start_sfx, end_ip, end_sfx int64 - ip_sz, count int - ) - - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) - if err != nil { - return 0, fmt.Errorf("unable to convert '%s' to int: %w", decisionValue, err) + if since != nil { + decisions = decisions.Where(decision.CreatedAtGT(*since)) } - contains := true - decisions := c.Ent.Decision.Query() - - decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) - if err != nil { - return 0, fmt.Errorf("fail to apply StartIpEndIpFilter: %w", err) + if onlyActive { + decisions = decisions.Where(decision.UntilGT(time.Now().UTC())) } - decisions = decisions.Where(decision.UntilGT(time.Now().UTC())) - - count, err = decisions.Count(ctx) + count, err := decisions.Count(ctx) if err != nil { return 0, fmt.Errorf("fail to count decisions: %w", err) } @@ -627,30 +599,6 @@ func (c *Client) GetActiveDecisionsTimeLeftByValue(ctx context.Context, decision return decision.Until.Sub(time.Now().UTC()), nil } -func (c *Client) CountDecisionsSinceByValue(ctx context.Context, decisionValue string, since time.Time) (int, error) { - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) - if err != nil { - return 0, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", decisionValue, err) - } - - contains := true - decisions := c.Ent.Decision.Query().Where( - decision.CreatedAtGT(since), - ) - - decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) - if err != nil { - return 0, errors.Wrapf(err, "fail to apply StartIpEndIpFilter") - } - - count, err := decisions.Count(ctx) - if err != nil { - return 0, errors.Wrapf(err, "fail to count decisions") - } - - return 
count, nil -} - func decisionIPv4Filter(decisions *ent.DecisionQuery, contains bool, ip_sz int, start_ip int64, start_sfx int64, end_ip int64, end_sfx int64) (*ent.DecisionQuery, error) { if contains { /*Decision contains {start_ip,end_ip}*/ diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 1e5426fdb61..8d6e0cd655e 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -642,7 +642,7 @@ func GetDecisionsCount(params ...any) (any, error) { ctx := context.TODO() - count, err := dbClient.CountDecisionsByValue(ctx, value) + count, err := dbClient.CountDecisionsByValue(ctx, value, nil, false) if err != nil { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility @@ -670,7 +670,7 @@ func GetDecisionsSinceCount(params ...any) (any, error) { ctx := context.TODO() sinceTime := time.Now().UTC().Add(-sinceDuration) - count, err := dbClient.CountDecisionsSinceByValue(ctx, value, sinceTime) + count, err := dbClient.CountDecisionsByValue(ctx, value, &sinceTime, false) if err != nil { log.Errorf("Failed to get decisions count from value '%s'", value) return 0, nil //nolint:nilerr // This helper did not return an error before the move to expr.Function, we keep this behavior for backward compatibility @@ -689,7 +689,7 @@ func GetActiveDecisionsCount(params ...any) (any, error) { ctx := context.TODO() - count, err := dbClient.CountActiveDecisionsByValue(ctx, value) + count, err := dbClient.CountDecisionsByValue(ctx, value, nil, true) if err != nil { log.Errorf("Failed to get active decisions count from value '%s'", value) return 0, err From 418a27596ea09c14a62a524e9c935896d244643e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 24 Apr 2025 11:12:38 +0200 Subject: [PATCH 496/581] lint/gocritic: enable importShadow, typeUnparen, 
unnecessaryDefer (#3583) --- .golangci.yml | 3 --- cmd/crowdsec-cli/cliconsole/console.go | 2 +- pkg/acquisition/modules/appsec/utils.go | 10 +++++----- .../modules/cloudwatch/cloudwatch_test.go | 8 ++++---- pkg/acquisition/modules/kafka/kafka.go | 14 +++++++------- pkg/apiclient/decisions_service_test.go | 4 ++-- pkg/apiserver/apic.go | 6 +----- pkg/apiserver/apic_metrics.go | 2 +- pkg/apiserver/middlewares/v1/jwt.go | 8 ++++---- pkg/csplugin/watcher.go | 9 +++++---- pkg/database/alertfilter.go | 7 ++++--- pkg/exprhelpers/debugger.go | 6 +++--- pkg/exprhelpers/helpers.go | 7 ++++--- pkg/leakybucket/manager_load.go | 14 +++++++------- pkg/leakybucket/overflows.go | 6 +++--- pkg/parser/enrich_geoip.go | 9 +++++---- pkg/parser/runtime.go | 12 ++++++------ 17 files changed, 62 insertions(+), 65 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index cd5f4aef7c7..f0b102ca7ce 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -158,21 +158,18 @@ linters: disabled-checks: - paramTypeCombine - ifElseChain - - importShadow - hugeParam - commentedOutCode - commentedOutImport - unnamedResult - sloppyReassign - appendCombine - - typeUnparen - commentFormatting - deferInLoop # - whyNoLint - equalFold # - unnecessaryBlock # - tooManyResultsChecker - - unnecessaryDefer - docStub - preferFprint diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 5dc83fe5554..2360e854e6a 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -312,7 +312,7 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command { case "human": cmdConsoleStatusTable(color.Output, cfg.Cscli.Color, *consoleCfg) case "json": - out := map[string](*bool){ + out := map[string]*bool{ csconfig.SEND_MANUAL_SCENARIOS: consoleCfg.ShareManualDecisions, csconfig.SEND_CUSTOM_SCENARIOS: consoleCfg.ShareCustomScenarios, csconfig.SEND_TAINTED_SCENARIOS: consoleCfg.ShareTaintedScenarios, diff --git a/pkg/acquisition/modules/appsec/utils.go 
b/pkg/acquisition/modules/appsec/utils.go index 0535a8f128a..bde837e6919 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -99,12 +99,12 @@ func AppsecEventGeneration(inEvt types.Event, request *http.Request) (*types.Eve alert.EventsCount = ptr.Of(int32(len(alert.Events))) alert.Leakspeed = ptr.Of("") - alert.Scenario = ptr.Of(inEvt.Appsec.MatchedRules.GetName()) - alert.ScenarioHash = ptr.Of(inEvt.Appsec.MatchedRules.GetHash()) - alert.ScenarioVersion = ptr.Of(inEvt.Appsec.MatchedRules.GetVersion()) + alert.Scenario = ptr.Of(inEvt.Appsec.GetName()) + alert.ScenarioHash = ptr.Of(inEvt.Appsec.GetHash()) + alert.ScenarioVersion = ptr.Of(inEvt.Appsec.GetVersion()) alert.Simulated = ptr.Of(false) alert.Source = &source - msg := fmt.Sprintf("AppSec block: %s from %s (%s)", inEvt.Appsec.MatchedRules.GetName(), + msg := fmt.Sprintf("AppSec block: %s from %s (%s)", inEvt.Appsec.GetName(), alert.Source.IP, inEvt.Parsed["remediation_cmpt_ip"]) alert.Message = &msg alert.StartAt = ptr.Of(time.Now().UTC().Format(time.RFC3339)) @@ -278,7 +278,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR matchedZones = append(matchedZones, zone) } - corazaRule := map[string]interface{}{ + corazaRule := map[string]any{ "id": rule.Rule().ID(), "uri": evt.Parsed["target_uri"], "rule_type": kind, diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go index 1a2702208ed..88ac88996a6 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch_test.go @@ -57,7 +57,7 @@ type CloudwatchSuite struct { suite.Suite } -func (s *CloudwatchSuite) SetupSuite() { +func (*CloudwatchSuite) SetupSuite() { def_PollNewStreamInterval = 1 * time.Second def_PollStreamInterval = 1 * time.Second def_StreamReadTimeout = 10 * time.Second @@ -308,7 +308,7 @@ stream_name: test_stream`, }) require.NoError(t, 
err) }, - run: func(t *testing.T, cw *CloudwatchSource) { + run: func(_ *testing.T, _ *CloudwatchSource) { // wait for new stream pickup + stream poll interval time.Sleep(def_PollNewStreamInterval + (1 * time.Second)) time.Sleep(def_PollStreamInterval + (1 * time.Second)) @@ -325,7 +325,7 @@ stream_name: test_stream`, dbgLogger.Infof("starting test") cw := CloudwatchSource{} - err := cw.Configure(([]byte)(tc.config), dbgLogger, configuration.METRICS_NONE) + err := cw.Configure([]byte(tc.config), dbgLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { @@ -442,7 +442,7 @@ stream_name: test_stream`, dbgLogger.Logger.SetLevel(logrus.DebugLevel) cw := CloudwatchSource{} - err := cw.Configure(([]byte)(tc.config), dbgLogger, configuration.METRICS_NONE) + err := cw.Configure([]byte(tc.config), dbgLogger, configuration.METRICS_NONE) cstest.RequireErrorContains(s.T(), err, tc.expectedCfgErr) if tc.expectedCfgErr != "" { diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index 94806f5d66b..2fa01d1c2b4 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -115,7 +115,7 @@ func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLev return nil } -func (k *KafkaSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { +func (*KafkaSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { return fmt.Errorf("%s datasource does not support command-line acquisition", dataSourceName) } @@ -123,27 +123,27 @@ func (k *KafkaSource) GetMode() string { return k.Config.Mode } -func (k *KafkaSource) GetName() string { +func (*KafkaSource) GetName() string { return dataSourceName } -func (k *KafkaSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) error { +func (*KafkaSource) OneShotAcquisition(_ context.Context, _ chan types.Event, _ *tomb.Tomb) 
error { return fmt.Errorf("%s datasource does not support one-shot acquisition", dataSourceName) } -func (k *KafkaSource) CanRun() error { +func (*KafkaSource) CanRun() error { return nil } -func (k *KafkaSource) GetMetrics() []prometheus.Collector { +func (*KafkaSource) GetMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead} } -func (k *KafkaSource) GetAggregMetrics() []prometheus.Collector { +func (*KafkaSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{linesRead} } -func (k *KafkaSource) Dump() interface{} { +func (k *KafkaSource) Dump() any { return k } diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index 8bab7e7b74f..c16abed6448 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -399,6 +399,8 @@ func TestDeleteDecisions(t *testing.T) { ctx := t.Context() mux, urlx, teardown := setup() + defer teardown() + mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) @@ -433,8 +435,6 @@ func TestDeleteDecisions(t *testing.T) { deleted, _, err := client.Decisions.Delete(ctx, filters) require.NoError(t, err) assert.Equal(t, "1", deleted.NbDeleted) - - defer teardown() } func TestDecisionsStreamOpts_addQueryParamsToURL(t *testing.T) { diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index ba2ffa18e39..0de773ab4d4 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -396,11 +396,7 @@ func (a *apic) Send(ctx context.Context, cacheOrig *models.AddSignalsRequest) { batchSize := 50 for start := 0; start < len(cache); start += batchSize { - end := start + batchSize - - if end > len(cache) { - end = len(cache) - } + end := min(start+batchSize, len(cache)) if err := a.sendBatch(ctx, cache[start:end]); err != nil { log.Errorf("sending signal to central API: 
%s", err) diff --git a/pkg/apiserver/apic_metrics.go b/pkg/apiserver/apic_metrics.go index fe0dfd55821..df3148925c8 100644 --- a/pkg/apiserver/apic_metrics.go +++ b/pkg/apiserver/apic_metrics.go @@ -251,7 +251,7 @@ func (a *apic) fetchMachineIDs(ctx context.Context) ([]string, error) { // Metrics are sent at start, then at the randomized metricsIntervalFirst, // then at regular metricsInterval. If a change is detected in the list // of machines, the next metrics are sent immediately. -func (a *apic) SendMetrics(ctx context.Context, stop chan (bool)) { +func (a *apic) SendMetrics(ctx context.Context, stop chan bool) { defer trace.CatchPanic("lapi/metricsToAPIC") // verify the list of machines every interval diff --git a/pkg/apiserver/middlewares/v1/jwt.go b/pkg/apiserver/middlewares/v1/jwt.go index 9171e9fce06..4f1452500b2 100644 --- a/pkg/apiserver/middlewares/v1/jwt.go +++ b/pkg/apiserver/middlewares/v1/jwt.go @@ -29,7 +29,7 @@ type JWT struct { TlsAuth *TLSAuth } -func PayloadFunc(data interface{}) jwt.MapClaims { +func PayloadFunc(data any) jwt.MapClaims { if value, ok := data.(*models.WatcherAuthRequest); ok { return jwt.MapClaims{ MachineIDKey: &value.MachineID, @@ -39,7 +39,7 @@ func PayloadFunc(data interface{}) jwt.MapClaims { return jwt.MapClaims{} } -func IdentityHandler(c *gin.Context) interface{} { +func IdentityHandler(c *gin.Context) any { claims := jwt.ExtractClaims(c) machineID := claims[MachineIDKey].(string) @@ -172,7 +172,7 @@ func (j *JWT) authPlain(c *gin.Context) (*authInput, error) { return &ret, nil } -func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { +func (j *JWT) Authenticator(c *gin.Context) (any, error) { var ( err error auth *authInput @@ -248,7 +248,7 @@ func (j *JWT) Authenticator(c *gin.Context) (interface{}, error) { }, nil } -func Authorizator(data interface{}, c *gin.Context) bool { +func Authorizator(data any, c *gin.Context) bool { return true } diff --git a/pkg/csplugin/watcher.go b/pkg/csplugin/watcher.go 
index bec0302e462..7d3d517750f 100644 --- a/pkg/csplugin/watcher.go +++ b/pkg/csplugin/watcher.go @@ -92,17 +92,18 @@ func (pw *PluginWatcher) watchPluginTicker(pluginName string) { threshold := pw.PluginConfigByName[pluginName].GroupThreshold //only size is set - if threshold > 0 && interval == 0 { + switch { + case threshold > 0 && interval == 0: watchCount = threshold watchTime = DefaultEmptyTicker - } else if interval != 0 && threshold == 0 { + case interval != 0 && threshold == 0: //only time is set watchTime = interval - } else if interval != 0 && threshold != 0 { + case interval != 0 && threshold != 0: //both are set watchTime = DefaultEmptyTicker watchCount = threshold - } else { + default: //none are set, we sent every event we receive watchTime = DefaultEmptyTicker watchCount = 1 diff --git a/pkg/database/alertfilter.go b/pkg/database/alertfilter.go index e3f4e24cb4f..6ff2ab99a7f 100644 --- a/pkg/database/alertfilter.go +++ b/pkg/database/alertfilter.go @@ -133,11 +133,12 @@ func handleAlertIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, en } func handleAlertIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { - if ip_sz == 4 { + switch { + case ip_sz == 4: handleAlertIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) - } else if ip_sz == 16 { + case ip_sz == 16: handleAlertIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) - } else if ip_sz != 0 { + case ip_sz != 0: return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) } diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 65aa29b6a1d..34f7a6d3a62 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -117,7 +117,7 @@ func (o *OpOutput) String() string { return ret + "" } -func (erp ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string { +func (ExprRuntimeDebug) extractCode(ip int, program 
*vm.Program) string { locations := program.Locations() src := string(program.Source()) @@ -356,7 +356,7 @@ func (erp ExprRuntimeDebug) ipSeek(ip int) []string { return nil } -func Run(program *vm.Program, env interface{}, logger *log.Entry, debug bool) (any, error) { +func Run(program *vm.Program, env any, logger *log.Entry, debug bool) (any, error) { if debug { dbgInfo, ret, err := RunWithDebug(program, env, logger) DisplayExprDebug(program, dbgInfo, logger, ret) @@ -383,7 +383,7 @@ func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry } // TBD: Based on the level of the logger (ie. trace vs debug) we could decide to add more low level instructions (pop, push, etc.) -func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]OpOutput, any, error) { +func RunWithDebug(program *vm.Program, env any, logger *log.Entry) ([]OpOutput, any, error) { outputs := []OpOutput{} erp := ExprRuntimeDebug{ Logger: logger, diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 8d6e0cd655e..22bca7d0600 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -875,11 +875,12 @@ func ParseKV(params ...any) (any, error) { value := "" for i, name := range keyValuePattern.SubexpNames() { - if name == "key" { + switch { + case name == "key": key = match[i] - } else if name == "quoted_value" && match[i] != "" { + case name == "quoted_value" && match[i] != "": value = match[i] - } else if name == "value" && match[i] != "" { + case name == "value" && match[i] != "": value = match[i] } } diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index 474f0fe5ef9..e43e0d4f240 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -43,7 +43,7 @@ type BucketFactory struct { GroupBy string `yaml:"groupby,omitempty"` // groupy is an expr that allows to determine the partitions of the bucket. 
A common example is the source_ip Distinct string `yaml:"distinct"` // Distinct, when present, adds a `Pour()` processor that will only pour uniq items (based on distinct expr result) Debug bool `yaml:"debug"` // Debug, when set to true, will enable debugging for _this_ scenario specifically - Labels map[string]interface{} `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow + Labels map[string]any `yaml:"labels"` // Labels is K:V list aiming at providing context the overflow Blackhole string `yaml:"blackhole,omitempty"` // Blackhole is a duration that, if present, will prevent same bucket partition to overflow more often than $duration logger *log.Entry // logger is bucket-specific logger (used by Debug as well) Reprocess bool `yaml:"reprocess"` // Reprocess, if true, will for the bucket to be re-injected into processing chain @@ -225,7 +225,7 @@ func compileScopeFilter(bucketFactory *BucketFactory) error { return errors.New("filter is mandatory for non-IP, non-Range scope") } - runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...) if err != nil { return fmt.Errorf("error compiling the scope filter: %w", err) } @@ -381,13 +381,13 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { return errors.New("bucket without filter directive") } - bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + bucketFactory.RunTimeFilter, err = expr.Compile(bucketFactory.Filter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...) 
if err != nil { return fmt.Errorf("invalid filter '%s' in %s: %w", bucketFactory.Filter, bucketFactory.Filename, err) } if bucketFactory.GroupBy != "" { - bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + bucketFactory.RunTimeGroupBy, err = expr.Compile(bucketFactory.GroupBy, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...) if err != nil { return fmt.Errorf("invalid groupby '%s' in %s: %w", bucketFactory.GroupBy, bucketFactory.Filename, err) } @@ -415,7 +415,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.logger.Tracef("Adding a non duplicate filter") bucketFactory.processors = append(bucketFactory.processors, &Uniq{}) // we're compiling and discarding the expression to be able to detect it during loading - _, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + _, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...) if err != nil { return fmt.Errorf("invalid distinct '%s' in %s: %w", bucketFactory.Distinct, bucketFactory.Filename, err) } @@ -425,7 +425,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.logger.Tracef("Adding a cancel_on filter") bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{}) // we're compiling and discarding the expression to be able to detect it during loading - _, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + _, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]any{"evt": &types.Event{}})...) 
if err != nil { return fmt.Errorf("invalid cancel_on '%s' in %s: %w", bucketFactory.CancelOnFilter, bucketFactory.Filename, err) } @@ -459,7 +459,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { bucketFactory.logger.Tracef("Adding conditional overflow") bucketFactory.processors = append(bucketFactory.processors, &ConditionalOverflow{}) // we're compiling and discarding the expression to be able to detect it during loading - _, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]interface{}{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...) + _, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]any{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...) if err != nil { return fmt.Errorf("invalid condition '%s' in %s: %w", bucketFactory.ConditionalOverflow, bucketFactory.Filename, err) } diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 9357caefaff..a4b0c2f2f5d 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -59,7 +59,7 @@ func overflowEventSources(evt types.Event, leaky *Leaky) (map[string]models.Sour } if leaky.scopeType.RunTimeFilter != nil { - retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) + retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { return srcs, fmt.Errorf("while running scope filter: %w", err) } @@ -156,7 +156,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro src.Value = &src.Range if leaky.scopeType.RunTimeFilter != nil { - retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) + retValue, err := 
exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { return srcs, fmt.Errorf("while running scope filter: %w", err) } @@ -176,7 +176,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro return srcs, errors.New("empty scope information") } - retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]interface{}{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) + retValue, err := exprhelpers.Run(leaky.scopeType.RunTimeFilter, map[string]any{"evt": &evt}, leaky.logger, leaky.BucketConfig.Debug) if err != nil { return srcs, fmt.Errorf("while running scope filter: %w", err) } diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 79a70077283..df0755794e7 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -97,16 +97,17 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string ret := make(map[string]string) - if record.Country.IsoCode != "" { + switch { + case record.Country.IsoCode != "": ret["IsoCode"] = record.Country.IsoCode ret["IsInEU"] = strconv.FormatBool(record.Country.IsInEuropeanUnion) - } else if record.RegisteredCountry.IsoCode != "" { + case record.RegisteredCountry.IsoCode != "": ret["IsoCode"] = record.RegisteredCountry.IsoCode ret["IsInEU"] = strconv.FormatBool(record.RegisteredCountry.IsInEuropeanUnion) - } else if record.RepresentedCountry.IsoCode != "" { + case record.RepresentedCountry.IsoCode != "": ret["IsoCode"] = record.RepresentedCountry.IsoCode ret["IsInEU"] = strconv.FormatBool(record.RepresentedCountry.IsInEuropeanUnion) - } else { + default: ret["IsoCode"] = "" ret["IsInEU"] = "false" } diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 831e478af0c..cd0b32d750c 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -47,7 +47,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { return false } 
- for _, f := range strings.Split(target, ".") { + for f := range strings.SplitSeq(target, ".") { /* ** According to current Event layout we only have to handle struct and map */ @@ -126,7 +126,7 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error { if static.Value != "" { value = static.Value } else if static.RunTimeValue != nil { - output, err := exprhelpers.Run(static.RunTimeValue, map[string]interface{}{"evt": event}, clog, n.Debug) + output, err := exprhelpers.Run(static.RunTimeValue, map[string]any{"evt": event}, clog, n.Debug) if err != nil { clog.Warningf("failed to run RunTimeValue : %v", err) continue @@ -138,9 +138,9 @@ func (n *Node) ProcessStatics(statics []ExtraField, event *types.Event) error { value = strconv.Itoa(out) case float64, float32: value = fmt.Sprintf("%f", out) - case map[string]interface{}: + case map[string]any: clog.Warnf("Expression '%s' returned a map, please use ToJsonString() to convert it to string if you want to keep it as is, or refine your expression to extract a string", static.ExpValue) - case []interface{}: + case []any: clog.Warnf("Expression '%s' returned an array, please use ToJsonString() to convert it to string if you want to keep it as is, or refine your expression to extract a string", static.ExpValue) case nil: clog.Debugf("Expression '%s' returned nil, skipping", static.ExpValue) @@ -289,7 +289,7 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) event.Meta = make(map[string]string) } if event.Unmarshaled == nil { - event.Unmarshaled = make(map[string]interface{}) + event.Unmarshaled = make(map[string]any) } if event.Type == types.LOG { log.Tracef("INPUT '%s'", event.Line.Raw) @@ -342,7 +342,7 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) if ctx.Profiling { nodes[idx].Profiling = true } - ret, err := nodes[idx].process(&event, ctx, map[string]interface{}{"evt": &event}) + ret, err := nodes[idx].process(&event, ctx, 
map[string]any{"evt": &event}) if err != nil { clog.Errorf("Error while processing node : %v", err) return event, err From 8689783ade9d57abf941bf27da79f8e1ea788bfb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 24 Apr 2025 11:27:45 +0200 Subject: [PATCH 497/581] refact: pkg/exprhelpers/debugger, convert switch to function dispatch (#3587) --- pkg/exprhelpers/debugger.go | 358 ++++++++++++++++++++++-------------- 1 file changed, 217 insertions(+), 141 deletions(-) diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 34f7a6d3a62..32802021c45 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -157,6 +157,210 @@ func autoQuote(v any) string { } } + +type opHandler func(out OpOutput, prevOut *OpOutput, ip int, parts []string, vm *vm.VM, program *vm.Program) *OpOutput + +var opHandlers = map[string]opHandler{ + "OpBegin": opBegin, + "OpEnd": opEnd, + "OpNot": opNot, + "OpTrue": opTrue, + "OpFalse": opFalse, + "OpJumpIfTrue": opJumpIfTrue, + "OpJumpIfFalse": opJumpIfFalse, + "OpCall1": opCall1, + "OpCall2": opCall2, + "OpCall3": opCall3, + "OpCallFast": opCallFast, + "OpCallTyped": opCallTyped, + "OpCallN": opCallN, + "OpEqualString": opEqual, + "OpEqual": opEqual, + "OpEqualInt": opEqual, + "OpIn": opIn, + "OpContains": opContains, +} + +func opBegin(out OpOutput, _ *OpOutput, _ int, _ []string, _ *vm.VM, _ *vm.Program) *OpOutput { + out.CodeDepth += IndentStep + out.BlockStart = true + return &out +} + +func opEnd(out OpOutput, _ *OpOutput, _ int, _ []string, vm *vm.VM, _ *vm.Program) *OpOutput { + out.CodeDepth -= IndentStep + out.BlockEnd = true + // OpEnd can carry value, if it's any/all/count etc. 
+ if len(vm.Stack) > 0 { + out.StrConditionResult = fmt.Sprintf("%v", vm.Stack) + } + return &out +} + +func opNot(_ OpOutput, prevOut *OpOutput, _ int, _ []string, _ *vm.VM, _ *vm.Program) *OpOutput { + // negate the previous condition + prevOut.Negated = true + return nil +} + +func opTrue(out OpOutput, _ *OpOutput, _ int, _ []string, _ *vm.VM, _ *vm.Program) *OpOutput { + // generated when possible ? (1 == 1) + out.Condition = true + out.ConditionResult = new(bool) + *out.ConditionResult = true + out.StrConditionResult = "true" + return &out +} + +func opFalse(out OpOutput, _ *OpOutput, _ int, _ []string, _ *vm.VM, _ *vm.Program) *OpOutput { + // generated when possible ? (1 != 1) + out.Condition = true + out.ConditionResult = new(bool) + *out.ConditionResult = false + out.StrConditionResult = "false" + return &out +} + +func opJumpIfTrue(out OpOutput, _ *OpOutput, _ int, _ []string, vm *vm.VM, _ *vm.Program) *OpOutput { + stack := vm.Stack + out.JumpIf = true + out.IfTrue = true + out.StrConditionResult = fmt.Sprintf("%v", stack[0]) + + if val, ok := stack[0].(bool); ok { + out.ConditionResult = new(bool) + *out.ConditionResult = val + } + + return &out +} + +func opJumpIfFalse(out OpOutput, _ *OpOutput, _ int, _ []string, vm *vm.VM, _ *vm.Program) *OpOutput { + stack := vm.Stack + out.JumpIf = true + out.IfFalse = true + out.StrConditionResult = fmt.Sprintf("%v", stack[0]) + + if val, ok := stack[0].(bool); ok { + out.ConditionResult = new(bool) + *out.ConditionResult = val + } + + return &out +} + +func opCall1(out OpOutput, _ *OpOutput, _ int, parts []string, vm *vm.VM, _ *vm.Program) *OpOutput { + out.Func = true + out.FuncName = parts[3] + stack := vm.Stack + + num_items := 1 + for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { + out.Args = append(out.Args, autoQuote(stack[i])) + num_items-- + } + + return &out +} + +func opCall2(out OpOutput, _ *OpOutput, _ int, parts []string, vm *vm.VM, _ *vm.Program) *OpOutput { + out.Func = true + out.FuncName = 
parts[3] + stack := vm.Stack + + num_items := 2 + for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { + out.Args = append(out.Args, autoQuote(stack[i])) + num_items-- + } + + return &out +} + +func opCall3(out OpOutput, _ *OpOutput, _ int, parts []string, vm *vm.VM, _ *vm.Program) *OpOutput { + out.Func = true + out.FuncName = parts[3] + stack := vm.Stack + + num_items := 3 + for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { + out.Args = append(out.Args, autoQuote(stack[i])) + num_items-- + } + + return &out +} + +func opCallFast(_ OpOutput, _ *OpOutput, _ int, _ []string, _ *vm.VM, _ *vm.Program) *OpOutput { + // double check OpCallFast and OpCallTyped + return nil +} + +func opCallTyped(_ OpOutput, _ *OpOutput, _ int, _ []string, _ *vm.VM, _ *vm.Program) *OpOutput { + // double check OpCallFast and OpCallTyped + return nil +} + +func opCallN(out OpOutput, _ *OpOutput, ip int, parts []string, vm *vm.VM, program *vm.Program) *OpOutput { + // Op for function calls with more than 3 args + out.Func = true + out.FuncName = parts[1] + stack := vm.Stack + + // for OpCallN, we get the number of args + if len(program.Arguments) >= ip { + nb_args := program.Arguments[ip] + if nb_args > 0 { + // we need to skip the top item on stack + for i := len(stack) - 2; i >= 0 && nb_args > 0; i-- { + out.Args = append(out.Args, autoQuote(stack[i])) + nb_args-- + } + } + } else { // let's blindly take the items on stack + for _, val := range vm.Stack { + out.Args = append(out.Args, autoQuote(val)) + } + } + + return &out +} + +func opEqual(out OpOutput, _ *OpOutput, _ int, _ []string, vm *vm.VM, _ *vm.Program) *OpOutput { + stack := vm.Stack + out.Comparison = true + out.Left = autoQuote(stack[0]) + out.Right = autoQuote(stack[1]) + return &out +} + +func opIn(out OpOutput, _ *OpOutput, _ int, _ []string, vm *vm.VM, _ *vm.Program) *OpOutput { + // in operator + stack := vm.Stack + out.Condition = true + out.ConditionIn = true + //seems that we tend to receive stack[1] as a 
map. + //it is tempting to use reflect to extract keys, but we end up with an array that doesn't match the initial order + //(because of the random order of the map) + out.Args = append(out.Args, autoQuote(stack[0])) + out.Args = append(out.Args, autoQuote(stack[1])) + return &out +} + +func opContains(out OpOutput, _ *OpOutput, _ int, _ []string, vm *vm.VM, _ *vm.Program) *OpOutput { + // kind OpIn , but reverse + stack := vm.Stack + out.Condition = true + out.ConditionContains = true + //seems that we tend to receive stack[1] as a map. + //it is tempting to use reflect to extract keys, but we end up with an array that doesn't match the initial order + //(because of the random order of the map) + out.Args = append(out.Args, autoQuote(stack[0])) + out.Args = append(out.Args, autoQuote(stack[1])) + return &out +} + + func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, parts []string, outputs []OpOutput) ([]OpOutput, error) { IdxOut := len(outputs) prevIdxOut := 0 @@ -192,150 +396,22 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part erp.Logger.Tracef("[STEP %d:%s] (stack:%+v) (parts:%+v) {depth:%d}", ip, parts[1], vm.Stack, parts, currentDepth) - out := OpOutput{} - out.CodeDepth = currentDepth - out.Code = erp.extractCode(ip, program) - - switch parts[1] { - case "OpBegin": - out.CodeDepth += IndentStep - out.BlockStart = true - outputs = append(outputs, out) - case "OpEnd": - out.CodeDepth -= IndentStep - out.BlockEnd = true - // OpEnd can carry value, if it's any/all/count etc. - if len(vm.Stack) > 0 { - out.StrConditionResult = fmt.Sprintf("%v", vm.Stack) - } + var prevOut *OpOutput - outputs = append(outputs, out) - case "OpNot": - // negate the previous condition - outputs[prevIdxOut].Negated = true - case "OpTrue": // generated when possible ? 
(1 == 1) - out.Condition = true - out.ConditionResult = new(bool) - *out.ConditionResult = true - out.StrConditionResult = "true" - outputs = append(outputs, out) - case "OpFalse": // generated when possible ? (1 != 1) - out.Condition = true - out.ConditionResult = new(bool) - *out.ConditionResult = false - out.StrConditionResult = "false" - outputs = append(outputs, out) - case "OpJumpIfTrue": // OR - stack := vm.Stack - out.JumpIf = true - out.IfTrue = true - out.StrConditionResult = fmt.Sprintf("%v", stack[0]) - - if val, ok := stack[0].(bool); ok { - out.ConditionResult = new(bool) - *out.ConditionResult = val + if handler, ok := opHandlers[parts[1]]; ok { + if len(outputs) > 0 { + prevOut = &outputs[prevIdxOut] } - - outputs = append(outputs, out) - case "OpJumpIfFalse": // AND - stack := vm.Stack - out.JumpIf = true - out.IfFalse = true - out.StrConditionResult = fmt.Sprintf("%v", stack[0]) - - if val, ok := stack[0].(bool); ok { - out.ConditionResult = new(bool) - *out.ConditionResult = val + out := handler( + OpOutput{ + CodeDepth: currentDepth, + Code: erp.extractCode(ip, program), + }, + prevOut, + ip, parts, vm, program) + if out != nil { + outputs = append(outputs, *out) } - - outputs = append(outputs, out) - case "OpCall1": // Op for function calls - out.Func = true - out.FuncName = parts[3] - stack := vm.Stack - - num_items := 1 - for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { - out.Args = append(out.Args, autoQuote(stack[i])) - num_items-- - } - - outputs = append(outputs, out) - case "OpCall2": // Op for function calls - out.Func = true - out.FuncName = parts[3] - stack := vm.Stack - - num_items := 2 - for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { - out.Args = append(out.Args, autoQuote(stack[i])) - num_items-- - } - - outputs = append(outputs, out) - case "OpCall3": // Op for function calls - out.Func = true - out.FuncName = parts[3] - stack := vm.Stack - - num_items := 3 - for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { 
- out.Args = append(out.Args, autoQuote(stack[i])) - num_items-- - } - - outputs = append(outputs, out) - // double check OpCallFast and OpCallTyped - case "OpCallFast", "OpCallTyped": - // - case "OpCallN": // Op for function calls with more than 3 args - out.Func = true - out.FuncName = parts[1] - stack := vm.Stack - - // for OpCallN, we get the number of args - if len(program.Arguments) >= ip { - nb_args := program.Arguments[ip] - if nb_args > 0 { - // we need to skip the top item on stack - for i := len(stack) - 2; i >= 0 && nb_args > 0; i-- { - out.Args = append(out.Args, autoQuote(stack[i])) - nb_args-- - } - } - } else { // let's blindly take the items on stack - for _, val := range vm.Stack { - out.Args = append(out.Args, autoQuote(val)) - } - } - - outputs = append(outputs, out) - case "OpEqualString", "OpEqual", "OpEqualInt": // comparisons - stack := vm.Stack - out.Comparison = true - out.Left = autoQuote(stack[0]) - out.Right = autoQuote(stack[1]) - outputs = append(outputs, out) - case "OpIn": // in operator - stack := vm.Stack - out.Condition = true - out.ConditionIn = true - //seems that we tend to receive stack[1] as a map. - //it is tempting to use reflect to extract keys, but we end up with an array that doesn't match the initial order - //(because of the random order of the map) - out.Args = append(out.Args, autoQuote(stack[0])) - out.Args = append(out.Args, autoQuote(stack[1])) - outputs = append(outputs, out) - case "OpContains": // kind OpIn , but reverse - stack := vm.Stack - out.Condition = true - out.ConditionContains = true - //seems that we tend to receive stack[1] as a map. 
- //it is tempting to use reflect to extract keys, but we end up with an array that doesn't match the initial order - //(because of the random order of the map) - out.Args = append(out.Args, autoQuote(stack[0])) - out.Args = append(out.Args, autoQuote(stack[1])) - outputs = append(outputs, out) } return outputs, nil From 5bc2b493872a36ec0b2570b1d01cabf8f6241498 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 28 Apr 2025 17:11:17 +0200 Subject: [PATCH 498/581] allowlists: check during bulk decision import (#3588) --- cmd/crowdsec-cli/clidecision/import.go | 72 ++++++++++-- pkg/apiclient/allowlists_service.go | 24 +++- pkg/apiserver/allowlists_test.go | 55 +++++++++ pkg/apiserver/controllers/controller.go | 1 + pkg/apiserver/controllers/v1/allowlist.go | 37 ++++++ pkg/database/allowlists.go | 41 +++++-- pkg/database/allowlists_test.go | 60 ++++++++++ pkg/models/bulk_check_allowlist_request.go | 71 +++++++++++ pkg/models/bulk_check_allowlist_response.go | 124 ++++++++++++++++++++ pkg/models/bulk_check_allowlist_result.go | 88 ++++++++++++++ pkg/models/localapi_swagger.yaml | 61 ++++++++++ test/bats/90_decisions.bats | 51 ++++---- test/bats/cscli-allowlists.bats | 29 ++++- 13 files changed, 664 insertions(+), 50 deletions(-) create mode 100644 pkg/models/bulk_check_allowlist_request.go create mode 100644 pkg/models/bulk_check_allowlist_response.go create mode 100644 pkg/models/bulk_check_allowlist_result.go diff --git a/cmd/crowdsec-cli/clidecision/import.go b/cmd/crowdsec-cli/clidecision/import.go index 317fa5d62cd..ee6ac7c1a88 100644 --- a/cmd/crowdsec-cli/clidecision/import.go +++ b/cmd/crowdsec-cli/clidecision/import.go @@ -8,7 +8,9 @@ import ( "errors" "fmt" "io" + "iter" "os" + "slices" "strings" "time" @@ -17,7 +19,6 @@ import ( "github.com/spf13/cobra" "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/go-cs-lib/slicetools" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" 
"github.com/crowdsecurity/crowdsec/pkg/models" @@ -38,7 +39,7 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { switch format { case "values": - log.Infof("Parsing values") + fmt.Fprintln(os.Stdout, "Parsing values") scanner := bufio.NewScanner(bytes.NewReader(content)) for scanner.Scan() { @@ -50,13 +51,13 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { return nil, fmt.Errorf("unable to parse values: '%w'", err) } case "json": - log.Infof("Parsing json") + fmt.Fprintln(os.Stdout, "Parsing json") if err := json.Unmarshal(content, &ret); err != nil { return nil, err } case "csv": - log.Infof("Parsing csv") + fmt.Fprintln(os.Stdout, "Parsing csv") if err := csvutil.Unmarshal(content, &ret); err != nil { return nil, fmt.Errorf("unable to parse csv: '%w'", err) @@ -68,6 +69,20 @@ func parseDecisionList(content []byte, format string) ([]decisionRaw, error) { return ret, nil } +func excludeAllowlistedDecisions(decisions []*models.Decision, allowlistedValues []string) iter.Seq[*models.Decision] { + return func(yield func(*models.Decision) bool) { + for _, d := range decisions { + if slices.Contains(allowlistedValues, *d.Value) { + continue + } + + if !yield(d) { + return + } + } + } +} + func (cli *cliDecisions) import_(ctx context.Context, input string, duration string, scope string, reason string, type_ string, batch int, format string) error { var ( content []byte @@ -124,6 +139,10 @@ func (cli *cliDecisions) import_(ctx context.Context, input string, duration str return err } + if len(decisionsListRaw) == 0 { + return errors.New("no decisions found") + } + decisions := make([]*models.Decision, len(decisionsListRaw)) for i, d := range decisionsListRaw { @@ -163,11 +182,50 @@ func (cli *cliDecisions) import_(ctx context.Context, input string, duration str } if len(decisions) > 1000 { - log.Infof("You are about to add %d decisions, this may take a while", len(decisions)) + fmt.Fprintf(os.Stdout, "You are 
about to add %d decisions, this may take a while\n", len(decisions)) } - for _, chunk := range slicetools.Chunks(decisions, batch) { + if batch == 0 { + batch = len(decisions) + } else { + fmt.Fprintf(os.Stdout, "batch size: %d\n", batch) + } + + allowlistedValues := make([]string, 0) + + for chunk := range slices.Chunk(decisions, batch) { log.Debugf("Processing chunk of %d decisions", len(chunk)) + + decisionsStr := make([]string, 0, len(chunk)) + + for _, d := range chunk { + if *d.Scope != types.Ip && *d.Scope != types.Range { + continue + } + + decisionsStr = append(decisionsStr, *d.Value) + } + + // Skip if no IPs or ranges + if len(decisionsStr) == 0 { + continue + } + + allowlistResp, _, err := cli.client.Allowlists.CheckIfAllowlistedBulk(ctx, decisionsStr) + + if err != nil { + return err + } + + for _, r := range allowlistResp.Results { + fmt.Fprintf(os.Stdout, "Value %s is allowlisted by %s\n", *r.Target, r.Allowlists) + allowlistedValues = append(allowlistedValues, *r.Target) + } + } + + actualDecisions := slices.Collect(excludeAllowlistedDecisions(decisions, allowlistedValues)) + + for chunk := range slices.Chunk(actualDecisions, batch) { importAlert := models.Alert{ CreatedAt: time.Now().UTC().Format(time.RFC3339), Scenario: ptr.Of(fmt.Sprintf("import %s: %d IPs", input, len(chunk))), @@ -195,7 +253,7 @@ func (cli *cliDecisions) import_(ctx context.Context, input string, duration str } } - log.Infof("Imported %d decisions", len(decisions)) + fmt.Fprintf(os.Stdout, "Imported %d decisions", len(actualDecisions)) return nil } diff --git a/pkg/apiclient/allowlists_service.go b/pkg/apiclient/allowlists_service.go index 0498921577f..427a198b0b3 100644 --- a/pkg/apiclient/allowlists_service.go +++ b/pkg/apiclient/allowlists_service.go @@ -83,7 +83,7 @@ func (s *AllowlistsService) CheckIfAllowlisted(ctx context.Context, value string return false, nil, err } - var discardBody interface{} + var discardBody any resp, err := s.client.Do(ctx, req, discardBody) if 
err != nil { @@ -111,3 +111,25 @@ func (s *AllowlistsService) CheckIfAllowlistedWithReason(ctx context.Context, va return body, resp, nil } + +func (s *AllowlistsService) CheckIfAllowlistedBulk(ctx context.Context, values []string) (*models.BulkCheckAllowlistResponse, *Response, error) { + u := s.client.URLPrefix + "/allowlists/check" + + body := &models.BulkCheckAllowlistRequest{ + Targets: values, + } + + req, err := s.client.PrepareRequest(ctx, http.MethodPost, u, body) + if err != nil { + return nil, nil, err + } + + responseBody := &models.BulkCheckAllowlistResponse{} + + resp, err := s.client.Do(ctx, req, responseBody) + if err != nil { + return nil, resp, err + } + + return responseBody, resp, nil +} diff --git a/pkg/apiserver/allowlists_test.go b/pkg/apiserver/allowlists_test.go index 6e319da967c..d2fa3d9bdc1 100644 --- a/pkg/apiserver/allowlists_test.go +++ b/pkg/apiserver/allowlists_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "net/http" + "strings" "testing" "time" @@ -147,3 +148,57 @@ func TestCheckInAllowlist(t *testing.T) { require.Equal(t, http.StatusNoContent, w.Code) } + +func TestBulkCheckAllowlist(t *testing.T) { + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + + // create an allowlist and add one live entry + l, err := lapi.DBClient.CreateAllowList(ctx, "test", "test", "", false) + require.NoError(t, err) + + added, err := lapi.DBClient.AddToAllowlist(ctx, l, []*models.AllowlistItem{ + {Value: "1.2.3.4"}, + }) + require.NoError(t, err) + assert.Equal(t, 1, added) + + // craft a bulk check payload with one matching and one non-matching target + reqBody := `{"targets":["1.2.3.4","2.3.4.5"]}` + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/allowlists/check", strings.NewReader(reqBody), passwordAuthType) + require.Equal(t, http.StatusOK, w.Code) + + // unmarshal and verify + resp := models.BulkCheckAllowlistResponse{} + require.NoError(t, json.Unmarshal(w.Body.Bytes(), &resp)) + require.Len(t, resp.Results, 1) + 
+ // expect only "1.2.3.4" in the "test" allowlist, while "2.3.4.5" should not be in the response + var match bool + + for _, r := range resp.Results { + switch *r.Target { + case "1.2.3.4": + match = true + + assert.Equal(t, []string{"1.2.3.4 from test"}, r.Allowlists) + default: + t.Errorf("unexpected target %v", r.Target) + } + } + + require.True(t, match, "did not see result for 1.2.3.4") +} + +func TestBulkCheckAllowlist_BadRequest(t *testing.T) { + ctx := context.Background() + lapi := SetupLAPITest(t, ctx) + + // missing or empty body should yield 400 + w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/allowlists/check", emptyBody, passwordAuthType) + require.Equal(t, http.StatusBadRequest, w.Code) + + // malformed JSON should also yield 400 + w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/allowlists/check", strings.NewReader("{invalid-json"), passwordAuthType) + require.Equal(t, http.StatusBadRequest, w.Code) +} diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index 84aa2c06a81..c7503c9ef5e 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -129,6 +129,7 @@ func (c *Controller) NewV1() error { jwtAuth.GET("/allowlists/:allowlist_name", c.HandlerV1.GetAllowlist) jwtAuth.GET("/allowlists/check/:ip_or_range", c.HandlerV1.CheckInAllowlist) jwtAuth.HEAD("/allowlists/check/:ip_or_range", c.HandlerV1.CheckInAllowlist) + jwtAuth.POST("/allowlists/check", c.HandlerV1.CheckInAllowlistBulk) } apiKeyAuth := groupV1.Group("") diff --git a/pkg/apiserver/controllers/v1/allowlist.go b/pkg/apiserver/controllers/v1/allowlist.go index e77344eb2bb..e35354ff330 100644 --- a/pkg/apiserver/controllers/v1/allowlist.go +++ b/pkg/apiserver/controllers/v1/allowlist.go @@ -10,6 +10,43 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) +func (c *Controller) CheckInAllowlistBulk(gctx *gin.Context) { + var req models.BulkCheckAllowlistRequest + + if err := 
gctx.ShouldBindJSON(&req); err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return + } + + if len(req.Targets) == 0 { + gctx.JSON(http.StatusBadRequest, gin.H{"message": "targets list cannot be empty"}) + return + } + + resp := models.BulkCheckAllowlistResponse{ + Results: make([]*models.BulkCheckAllowlistResult, 0), + } + + for _, target := range req.Targets { + lists, err := c.DBClient.IsAllowlistedBy(gctx.Request.Context(), target) + if err != nil { + c.HandleDBErrors(gctx, err) + return + } + + if len(lists) == 0 { + continue + } + + resp.Results = append(resp.Results, &models.BulkCheckAllowlistResult{ + Target: &target, + Allowlists: lists, + }) + } + + gctx.JSON(http.StatusOK, resp) +} + func (c *Controller) CheckInAllowlist(gctx *gin.Context) { value := gctx.Param("ip_or_range") diff --git a/pkg/database/allowlists.go b/pkg/database/allowlists.go index c9b1c76ad67..d14958f612d 100644 --- a/pkg/database/allowlists.go +++ b/pkg/database/allowlists.go @@ -156,7 +156,7 @@ func (c *Client) AddToAllowlist(ctx context.Context, list *ent.AllowList, items SetComment(item.Description) if !time.Time(item.Expiration).IsZero() { - query = query.SetExpiresAt(time.Time(item.Expiration)) + query = query.SetExpiresAt(time.Time(item.Expiration).UTC()) } content, err := query.Save(ctx) @@ -236,7 +236,7 @@ func (c *Client) ReplaceAllowlist(ctx context.Context, list *ent.AllowList, item return added, nil } -func (c *Client) IsAllowlisted(ctx context.Context, value string) (bool, string, error) { +func (c *Client) IsAllowlistedBy(ctx context.Context, value string) ([]string, error) { /* Few cases: - value is an IP/range directly is in allowlist @@ -245,7 +245,7 @@ func (c *Client) IsAllowlisted(ctx context.Context, value string) (bool, string, */ sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(value) if err != nil { - return false, "", err + return nil, err } c.Log.Debugf("checking if %s is allowlisted", value) @@ -314,22 +314,41 
@@ func (c *Client) IsAllowlisted(ctx context.Context, value string) (bool, string, ) } - allowed, err := query.WithAllowlist().First(ctx) + items, err := query.WithAllowlist().All(ctx) if err != nil { - if ent.IsNotFound(err) { - return false, "", nil + return nil, fmt.Errorf("unable to check if value is allowlisted: %w", err) + } + + reasons := make([]string, 0) + + for _, item := range items { + if len(item.Edges.Allowlist) == 0 { + continue } - return false, "", fmt.Errorf("unable to check if value is allowlisted: %w", err) + reason := item.Value + " from " + item.Edges.Allowlist[0].Name + if item.Comment != "" { + reason += " (" + item.Comment + ")" + } + + reasons = append(reasons, reason) } - allowlistName := allowed.Edges.Allowlist[0].Name - reason := allowed.Value + " from " + allowlistName + return reasons, nil +} - if allowed.Comment != "" { - reason += " (" + allowed.Comment + ")" +func (c *Client) IsAllowlisted(ctx context.Context, value string) (bool, string, error) { + reasons, err := c.IsAllowlistedBy(ctx, value) + if err != nil { + return false, "", err + } + + if len(reasons) == 0 { + return false, "", nil } + reason := strings.Join(reasons, ", ") + return true, reason, nil } diff --git a/pkg/database/allowlists_test.go b/pkg/database/allowlists_test.go index 9a4eb8e1fb8..5cd799acba6 100644 --- a/pkg/database/allowlists_test.go +++ b/pkg/database/allowlists_test.go @@ -104,3 +104,63 @@ func TestCheckAllowlist(t *testing.T) { require.True(t, allowlisted) require.Equal(t, "8a95:c186:9f96:4c75:0dad:49c6:ff62:94b8 from test", reason) } + +func TestIsAllowListedBy_SingleAndMultiple(t *testing.T) { + ctx := context.Background() + dbClient := getDBClient(t, ctx) + + list1, err := dbClient.CreateAllowList(ctx, "list1", "first list", "", false) + require.NoError(t, err) + list2, err := dbClient.CreateAllowList(ctx, "list2", "second list", "", false) + require.NoError(t, err) + + // Add overlapping and distinct entries + _, err = 
dbClient.AddToAllowlist(ctx, list1, []*models.AllowlistItem{ + {Value: "1.1.1.1"}, + {Value: "10.0.0.0/8"}, + }) + require.NoError(t, err) + _, err = dbClient.AddToAllowlist(ctx, list2, []*models.AllowlistItem{ + {Value: "1.1.1.1"}, // overlaps with list1 + {Value: "192.168.0.0/16"}, // only in list2 + {Value: "2.2.2.2", Expiration: strfmt.DateTime(time.Now().Add(-time.Hour))}, // expired + }) + require.NoError(t, err) + + // Exact IP that lives in both + names, err := dbClient.IsAllowlistedBy(ctx, "1.1.1.1") + require.NoError(t, err) + assert.ElementsMatch(t, []string{"1.1.1.1 from list1", "1.1.1.1 from list2"}, names) + + // IP matching only list1's CIDR + names, err = dbClient.IsAllowlistedBy(ctx, "10.5.6.7") + require.NoError(t, err) + assert.Equal(t, []string{"10.0.0.0/8 from list1"}, names) + + // IP matching only list2's CIDR + names, err = dbClient.IsAllowlistedBy(ctx, "192.168.1.42") + require.NoError(t, err) + assert.Equal(t, []string{"192.168.0.0/16 from list2"}, names) + + // Expired entry in list2 should not appear + names, err = dbClient.IsAllowlistedBy(ctx, "2.2.2.2") + require.NoError(t, err) + assert.Empty(t, names) +} + +func TestIsAllowListedBy_NoMatch(t *testing.T) { + ctx := context.Background() + dbClient := getDBClient(t, ctx) + + list, err := dbClient.CreateAllowList(ctx, "solo", "single", "", false) + require.NoError(t, err) + _, err = dbClient.AddToAllowlist(ctx, list, []*models.AllowlistItem{ + {Value: "5.5.5.5"}, + }) + require.NoError(t, err) + + // completely unrelated IP + names, err := dbClient.IsAllowlistedBy(ctx, "8.8.4.4") + require.NoError(t, err) + assert.Empty(t, names) +} diff --git a/pkg/models/bulk_check_allowlist_request.go b/pkg/models/bulk_check_allowlist_request.go new file mode 100644 index 00000000000..ab69c53cd50 --- /dev/null +++ b/pkg/models/bulk_check_allowlist_request.go @@ -0,0 +1,71 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BulkCheckAllowlistRequest bulk check allowlist request +// +// swagger:model BulkCheckAllowlistRequest +type BulkCheckAllowlistRequest struct { + + // Array of IP addresses or CIDR ranges to check + // Required: true + Targets []string `json:"targets"` +} + +// Validate validates this bulk check allowlist request +func (m *BulkCheckAllowlistRequest) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateTargets(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BulkCheckAllowlistRequest) validateTargets(formats strfmt.Registry) error { + + if err := validate.Required("targets", "body", m.Targets); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this bulk check allowlist request based on context it is used +func (m *BulkCheckAllowlistRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BulkCheckAllowlistRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BulkCheckAllowlistRequest) UnmarshalBinary(b []byte) error { + var res BulkCheckAllowlistRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/bulk_check_allowlist_response.go b/pkg/models/bulk_check_allowlist_response.go new file mode 100644 index 00000000000..0e6a467de41 --- /dev/null +++ b/pkg/models/bulk_check_allowlist_response.go @@ -0,0 +1,124 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BulkCheckAllowlistResponse bulk check allowlist response +// +// swagger:model BulkCheckAllowlistResponse +type BulkCheckAllowlistResponse struct { + + // Per-target allowlist membership results + // Required: true + Results []*BulkCheckAllowlistResult `json:"results"` +} + +// Validate validates this bulk check allowlist response +func (m *BulkCheckAllowlistResponse) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateResults(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BulkCheckAllowlistResponse) validateResults(formats strfmt.Registry) error { + + if err := validate.Required("results", "body", m.Results); err != nil { + return err + } + + for i := 0; i < len(m.Results); i++ { + if swag.IsZero(m.Results[i]) { // not required + continue + } + + if m.Results[i] != nil { + if err := m.Results[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("results" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("results" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this bulk check allowlist response based on the context it is used +func (m *BulkCheckAllowlistResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateResults(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BulkCheckAllowlistResponse) contextValidateResults(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Results); i++ { + + if m.Results[i] != nil { + + if swag.IsZero(m.Results[i]) { // not required + return nil + } + + if err := m.Results[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("results" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("results" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BulkCheckAllowlistResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BulkCheckAllowlistResponse) UnmarshalBinary(b []byte) error { + var res BulkCheckAllowlistResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/bulk_check_allowlist_result.go b/pkg/models/bulk_check_allowlist_result.go new file mode 100644 index 00000000000..31be0078f93 --- /dev/null +++ b/pkg/models/bulk_check_allowlist_result.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BulkCheckAllowlistResult bulk check allowlist result +// +// swagger:model BulkCheckAllowlistResult +type BulkCheckAllowlistResult struct { + + // Matching ip or range, name of the allowlist and comment related to the target + // Required: true + Allowlists []string `json:"allowlists"` + + // The IP or range that is allowlisted + // Required: true + Target *string `json:"target"` +} + +// Validate validates this bulk check allowlist result +func (m *BulkCheckAllowlistResult) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateAllowlists(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTarget(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BulkCheckAllowlistResult) validateAllowlists(formats strfmt.Registry) error { + + if err := validate.Required("allowlists", "body", m.Allowlists); err != nil { + return err + } + + return nil +} + +func (m *BulkCheckAllowlistResult) validateTarget(formats strfmt.Registry) error { + + if err := validate.Required("target", "body", m.Target); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this bulk check allowlist result based on context it is used +func (m *BulkCheckAllowlistResult) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BulkCheckAllowlistResult) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BulkCheckAllowlistResult) UnmarshalBinary(b []byte) error { + var res BulkCheckAllowlistResult + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index 3de9b7351c8..adbb2ef8227 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -833,6 +833,33 @@ paths: description: "missing ip_or_range" schema: $ref: "#/definitions/ErrorResponse" + /allowlists/check: + post: + description: Check multiple IPs or ranges against allowlists + summary: postCheckAllowlist + tags: + - watchers + operationId: postCheckAllowlist + consumes: + - application/json + produces: + - application/json + parameters: + - name: body + in: body + required: true + description: IP addresses or CIDR ranges to check + schema: + $ref: '#/definitions/BulkCheckAllowlistRequest' + responses: + '200': + description: Allowlists check results for each target + schema: + $ref: '#/definitions/BulkCheckAllowlistResponse' + '400': + description: "400 response" + schema: + $ref: 
"#/definitions/ErrorResponse" definitions: WatcherRegistrationRequest: title: WatcherRegistrationRequest @@ -1396,6 +1423,40 @@ definitions: reason: type: string description: 'item that matched the provided value' + BulkCheckAllowlistRequest: + type: object + properties: + targets: + type: array + items: + type: string + description: Array of IP addresses or CIDR ranges to check + required: + - targets + BulkCheckAllowlistResult: + type: object + properties: + target: + type: string + description: The IP or range that is allowlisted + allowlists: + type: array + items: + type: string + description: Matching ip or range, name of the allowlist and comment related to the target + required: + - target + - allowlists + BulkCheckAllowlistResponse: + type: object + properties: + results: + type: array + items: + $ref: '#/definitions/BulkCheckAllowlistResult' + description: Per-target allowlist membership results + required: + - results ErrorResponse: type: "object" required: diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index 64edea8f997..54904896573 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -87,24 +87,24 @@ teardown() { assert_stderr --partial "unable to guess format from file extension, please provide a format with --format flag" rune -0 cscli decisions import -i "${TESTDATA}/decisions.json" - assert_stderr --partial "Parsing json" - assert_stderr --partial "Imported 5 decisions" + assert_output --partial "Parsing json" + assert_output --partial "Imported 5 decisions" # import from stdin rune -1 cscli decisions import -i /dev/stdin < <(cat "${TESTDATA}/decisions.json") assert_stderr --partial "unable to guess format from file extension, please provide a format with --format flag" rune -0 cscli decisions import -i /dev/stdin < <(cat "${TESTDATA}/decisions.json") --format json - assert_stderr --partial "Parsing json" - assert_stderr --partial "Imported 5 decisions" + assert_output --partial "Parsing json" + 
assert_output --partial "Imported 5 decisions" # invalid json rune -1 cscli decisions import -i - <<<'{"blah":"blah"}' --format json - assert_stderr --partial 'Parsing json' + assert_output --partial 'Parsing json' assert_stderr --partial 'json: cannot unmarshal object into Go value of type []clidecision.decisionRaw' # json with extra data rune -1 cscli decisions import -i - <<<'{"values":"1.2.3.4","blah":"blah"}' --format json - assert_stderr --partial 'Parsing json' + assert_output --partial 'Parsing json' assert_stderr --partial 'json: cannot unmarshal object into Go value of type []clidecision.decisionRaw' #---------- @@ -116,21 +116,21 @@ teardown() { assert_stderr --partial "unable to guess format from file extension, please provide a format with --format flag" rune -0 cscli decisions import -i "${TESTDATA}/decisions.csv" - assert_stderr --partial 'Parsing csv' - assert_stderr --partial 'Imported 5 decisions' + assert_output --partial 'Parsing csv' + assert_output --partial 'Imported 5 decisions' # import from stdin rune -1 cscli decisions import -i /dev/stdin < <(cat "${TESTDATA}/decisions.csv") assert_stderr --partial "unable to guess format from file extension, please provide a format with --format flag" rune -0 cscli decisions import -i /dev/stdin < <(cat "${TESTDATA}/decisions.csv") --format csv - assert_stderr --partial "Parsing csv" - assert_stderr --partial "Imported 5 decisions" + assert_output --partial "Parsing csv" + assert_output --partial "Imported 5 decisions" # invalid csv # XXX: improve validation - rune -0 cscli decisions import -i - <<<'value\n1.2.3.4,5.6.7.8' --format csv - assert_stderr --partial 'Parsing csv' - assert_stderr --partial "Imported 0 decisions" + rune -1 cscli decisions import -i - <<<'value\n1.2.3.4,5.6.7.8' --format csv + assert_output "Parsing csv" + assert_stderr "Error: no decisions found" #---------- # VALUES @@ -142,8 +142,8 @@ teardown() { 1.2.3.5 1.2.3.6 EOT - assert_stderr --partial 'Parsing values' - assert_stderr 
--partial 'Imported 3 decisions' + assert_output --partial 'Parsing values' + assert_output --partial 'Imported 3 decisions' # leading or trailing spaces are ignored rune -0 cscli decisions import -i - --format values <<-EOT @@ -151,20 +151,18 @@ teardown() { 10.2.3.5 10.2.3.6 EOT - assert_stderr --partial 'Parsing values' - assert_stderr --partial 'Imported 3 decisions' + assert_output --partial 'Parsing values' + assert_output --partial 'Imported 3 decisions' # silently discarding (but logging) invalid decisions rune -0 cscli alerts delete --all truncate -s 0 "$LOGFILE" - rune -0 cscli decisions import -i - --format values <<-EOT + rune -1 cscli decisions import -i - --format values <<-EOT whatever EOT - assert_stderr --partial 'Parsing values' - assert_stderr --partial 'Imported 1 decisions' - assert_file_contains "$LOGFILE" "invalid addr/range 'whatever': invalid ip address 'whatever'" + assert_stderr --partial "invalid ip address 'whatever'" rune -0 cscli decisions list -a -o json assert_json '[]' @@ -174,18 +172,17 @@ teardown() { rune -0 cscli alerts delete --all truncate -s 0 "$LOGFILE" - rune -0 cscli decisions import -i - --format values <<-EOT + rune -1 cscli decisions import -i - --format values <<-EOT 1.2.3.4 bad-apple 1.2.3.5 EOT - assert_stderr --partial 'Parsing values' - assert_stderr --partial 'Imported 3 decisions' - assert_file_contains "$LOGFILE" "invalid addr/range 'bad-apple': invalid ip address 'bad-apple'" + assert_output "Parsing values" + assert_stderr "Error: API error: invalid ip address 'bad-apple'" rune -0 cscli decisions list -a -o json rune -0 jq -r '.[0].decisions | length' <(output) - assert_output 2 + assert_output 0 #---------- # Batch @@ -198,5 +195,5 @@ teardown() { EOT assert_stderr --partial 'Processing chunk of 2 decisions' assert_stderr --partial 'Processing chunk of 1 decisions' - assert_stderr --partial 'Imported 3 decisions' + assert_output --partial 'Imported 3 decisions' } diff --git a/test/bats/cscli-allowlists.bats 
b/test/bats/cscli-allowlists.bats index 6a91518d9c0..24810110dc0 100644 --- a/test/bats/cscli-allowlists.bats +++ b/test/bats/cscli-allowlists.bats @@ -135,17 +135,38 @@ teardown() { refute_stderr } -@test "cscli allolists: range check" { +@test "cscli allowlists: check during decisions add" { rune -0 cscli allowlist create foo -d 'a foo' rune -0 cscli allowlist add foo 192.168.0.0/16 - rune -1 cscli decisions add -r 192.168.10.20/24 - assert_stderr 'Error: 192.168.10.20/24 is allowlisted by item 192.168.0.0/16 from foo, use --bypass-allowlist to add the decision anyway' + rune -1 cscli decisions add -i 192.168.1.1 + assert_stderr 'Error: 192.168.1.1 is allowlisted by item 192.168.0.0/16 from foo, use --bypass-allowlist to add the decision anyway' refute_output - rune -0 cscli decisions add -r 192.168.10.20/24 --bypass-allowlist + rune -0 cscli decisions add -i 192.168.1.1 --bypass-allowlist assert_stderr --partial 'Decision successfully added' refute_output } +@test "cscli allowlists: check during decisions import" { + rune -0 cscli allowlist create foo -d 'a foo' + rune -0 cscli allowlist add foo 192.168.0.0/16 + rune -0 cscli decisions import -i - <<<'192.168.1.1' --format values + assert_output - <<-EOT + Parsing values + Value 192.168.1.1 is allowlisted by [192.168.0.0/16 from foo] + Imported 0 decisions + EOT + refute_stderr +} + +@test "cscli allowlists: range check" { + rune -0 cscli allowlist create foo -d 'a foo' + rune -0 cscli allowlist add foo 192.168.0.0/16 + rune -1 cscli decisions add -r 192.168.10.20/24 + assert_stderr --partial '192.168.10.20/24 is allowlisted by item 192.168.0.0/16 from foo, use --bypass-allowlist to add the decision anyway' + rune -0 cscli decisions add -r 192.168.10.20/24 --bypass-allowlist + assert_stderr --partial 'Decision successfully added' +} + @test "cscli allowlists delete" { rune -1 cscli allowlist delete assert_stderr 'Error: accepts 1 arg(s), received 0' From a79b92f2806fe6b318879820e06bce79fdba3c8d Mon Sep 17 
00:00:00 2001 From: blotus Date: Mon, 28 Apr 2025 18:13:53 +0200 Subject: [PATCH 499/581] make CTI client available in cscli notifications (#3591) --- .../clinotifications/notifications.go | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 9259f7594e1..7856c89ff9e 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -30,6 +30,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/csprofiles" + "github.com/crowdsecurity/crowdsec/pkg/database" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -286,6 +288,14 @@ func (cli *cliNotifications) newTestCmd() *cobra.Command { if !ok { return fmt.Errorf("plugin name: '%s' does not exist", args[0]) } + + if cfg.API.CTI != nil && cfg.API.CTI.Enabled != nil && *cfg.API.CTI.Enabled { + log.Infof("Crowdsec CTI helper enabled") + if err := exprhelpers.InitCrowdsecCTI(cfg.API.CTI.Key, cfg.API.CTI.CacheTimeout, cfg.API.CTI.CacheSize, cfg.API.CTI.LogLevel); err != nil { + log.Errorf("failed to init crowdsec cti: %s", err) + } + } + // Create a single profile with plugin name as notification name return pluginBroker.Init(ctx, cfg.PluginConfig, []*csconfig.ProfileCfg{ { @@ -393,6 +403,27 @@ cscli notifications reinject -a '{"remediation": true,"scenario":"not } } + if cfg.API.Server != nil && cfg.API.Server.DbConfig != nil { + dbClient, err := database.NewClient(ctx, cfg.API.Server.DbConfig) + if err != nil { + log.Errorf("failed to get database client: %s", err) + } + + err = exprhelpers.Init(dbClient) + if err != nil { + log.Errorf("failed to init expr helpers: %s", err) + } + } else { + log.Warnf("no database client available, expr helpers 
will not be available") + } + + if cfg.API.CTI != nil && cfg.API.CTI.Enabled != nil && *cfg.API.CTI.Enabled { + log.Infof("Crowdsec CTI helper enabled") + if err := exprhelpers.InitCrowdsecCTI(cfg.API.CTI.Key, cfg.API.CTI.CacheTimeout, cfg.API.CTI.CacheSize, cfg.API.CTI.LogLevel); err != nil { + log.Errorf("failed to init crowdsec cti: %s", err) + } + } + err := pluginBroker.Init(ctx, cfg.PluginConfig, cfg.API.Server.Profiles, cfg.ConfigPaths) if err != nil { return fmt.Errorf("can't initialize plugins: %w", err) From 2002b8da8531b408b278ad8e9cf56317062a59a4 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 29 Apr 2025 11:24:01 +0200 Subject: [PATCH 500/581] update golangci-lint (#3590) --- .github/workflows/go-tests-windows.yml | 2 +- .github/workflows/go-tests.yml | 2 +- .golangci.yml | 8 ++------ 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index e08bbecd65f..beacae48330 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -58,6 +58,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v7 with: - version: v2.0 + version: v2.1 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index abe923d054b..0963445d134 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -198,6 +198,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v7 with: - version: v2.0 + version: v2.1 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.golangci.yml b/.golangci.yml index f0b102ca7ce..93ed9b196f2 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -7,12 +7,6 @@ run: linters: default: all disable: - # - # DEPRECATED by golangi-lint - - - # none right now - # # Redundant # @@ -25,6 +19,7 @@ linters: # Disabled atm - intrange # intrange 
is a linter to find places where for loops could make use of an integer range. + - funcorder # # Recommended? (easy) @@ -172,6 +167,7 @@ linters: - tooManyResultsChecker - docStub - preferFprint + - importShadow gomoddirectives: replace-allow-list: From f9417971d0b4b95e04796238beba6d52f5f1d2a0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 29 Apr 2025 11:24:30 +0200 Subject: [PATCH 501/581] enable codeql for python (#3545) --- .github/workflows/codeql-analysis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index cd37c7afaa9..5242be78b38 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -37,7 +37,7 @@ jobs: strategy: fail-fast: false matrix: - language: [ 'go' ] + language: [ 'go', 'python' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] # Learn more: # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed From 789126848620274326c1f75f5280a3dc212a63d0 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 29 Apr 2025 12:42:49 +0200 Subject: [PATCH 502/581] lapi: return specific error if a unix socket path is too long for the OS (#3593) --- pkg/acquisition/modules/appsec/appsec.go | 3 ++- pkg/apiclient/client_test.go | 15 +++++++++------ pkg/apiserver/apiserver.go | 3 ++- pkg/csnet/socket.go | 23 +++++++++++++++++++++++ test/bats/01_crowdsec_lapi.bats | 8 ++++++++ 5 files changed, 44 insertions(+), 8 deletions(-) create mode 100644 pkg/csnet/socket.go diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 628ea58ab33..26fc9e0a09f 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -27,6 +27,7 
@@ import ( "github.com/crowdsecurity/crowdsec/pkg/appsec" "github.com/crowdsecurity/crowdsec/pkg/appsec/allowlists" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/csnet" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -361,7 +362,7 @@ func (w *AppsecSource) listenAndServe(ctx context.Context, t *tomb.Tomb) error { listener, err := net.Listen("unix", socket) if err != nil { - serverError <- fmt.Errorf("appsec server failed: %w", err) + serverError <- csnet.WrapSockErr(err, socket) return } diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index 4dc4c00a73a..2bd5f1ee8d4 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -14,6 +14,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/net/nettest" "github.com/crowdsecurity/go-cs-lib/cstest" ) @@ -69,8 +70,10 @@ func setupUnixSocketWithPrefix(t *testing.T, socket string, urlPrefix string) (m apiHandler.Handle(baseURLPath+"/", http.StripPrefix(baseURLPath, mux)) server := httptest.NewUnstartedServer(apiHandler) - l, _ := net.Listen("unix", socket) - _ = server.Listener.Close() + l, err := net.Listen("unix", socket) + require.NoError(t, err) + err = server.Listener.Close() + require.NoError(t, err) server.Listener = l server.Start() @@ -117,8 +120,8 @@ func TestNewClientOk(t *testing.T) { func TestNewClientOk_UnixSocket(t *testing.T) { ctx := t.Context() - tmpDir := t.TempDir() - socket := path.Join(tmpDir, "socket") + socket, err := nettest.LocalPath() + require.NoError(t, err) mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() @@ -221,8 +224,8 @@ func TestNewDefaultClient(t *testing.T) { func TestNewDefaultClient_UnixSocket(t *testing.T) { ctx := t.Context() - tmpDir := t.TempDir() - socket := path.Join(tmpDir, "socket") + socket, err := nettest.LocalPath() + require.NoError(t, err) mux, urlx, teardown := 
setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 3c85afe550d..c4046a0e945 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -24,6 +24,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiserver/controllers" v1 "github.com/crowdsecurity/crowdsec/pkg/apiserver/middlewares/v1" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + "github.com/crowdsecurity/crowdsec/pkg/csnet" "github.com/crowdsecurity/crowdsec/pkg/csplugin" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -467,7 +468,7 @@ func (s *APIServer) listenAndServeLAPI(apiReady chan bool) error { listener, err := net.Listen("unix", socket) if err != nil { - serverError <- fmt.Errorf("while creating unix listener: %w", err) + serverError <- csnet.WrapSockErr(err, socket) return } diff --git a/pkg/csnet/socket.go b/pkg/csnet/socket.go new file mode 100644 index 00000000000..e91d6a19fbd --- /dev/null +++ b/pkg/csnet/socket.go @@ -0,0 +1,23 @@ +package csnet + +import ( + "fmt" + "runtime" +) + +// WrapSockErr wraps the provided error with a possible cause if the unix socket path exceeds +// a system-specific maximum length. It returns the original error otherwise. 
+func WrapSockErr(err error, socket string) error { + limit := 0 + switch runtime.GOOS { + case "linux": + // the actual numbers are not exported in Go, so we hardcode them + limit = 108 + case "freebsd", "darwin", "openbsd": + limit = 104 + } + if limit > 0 && len(socket) > limit { + return fmt.Errorf("%w (path length exceeds system limit: %d > %d)", err, len(socket), limit) + } + return err +} diff --git a/test/bats/01_crowdsec_lapi.bats b/test/bats/01_crowdsec_lapi.bats index c1222d7d2c9..31e848c83f0 100644 --- a/test/bats/01_crowdsec_lapi.bats +++ b/test/bats/01_crowdsec_lapi.bats @@ -42,6 +42,14 @@ teardown() { assert_stderr --partial "local API server stopped with error: listening on 127.0.0.1:-80: listen tcp: address -80: invalid port" } +@test "lapi (socket path too long)" { + LONG_NAME="12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" + export LONG_NAME + rune -0 config_set '.api.server.listen_socket = strenv(BATS_FILE_TMPDIR) + "/" + strenv(LONG_NAME)' + rune -1 "$CROWDSEC" -no-cs + assert_stderr --partial "local API server stopped with error: listen unix $BATS_FILE_TMPDIR/$LONG_NAME: bind: invalid argument (path length exceeds system limit" +} + @test "lapi (listen on random port)" { config_set '.common.log_media="stdout"' rune -0 config_set 'del(.api.server.listen_socket) | .api.server.listen_uri="127.0.0.1:0"' From 46e63988685cc0986e0a33c9b2bd0fe50682f30b Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 29 Apr 2025 12:18:06 +0100 Subject: [PATCH 503/581] enhance: Allow the use of 'd' suffix in profiles (#3594) --- pkg/csprofiles/csprofiles.go | 6 +++--- pkg/database/alerts.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index c9a7cda7961..0b2ae02805d 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -2,13 +2,13 @@ package csprofiles import ( "fmt" - "time" 
"github.com/expr-lang/expr" "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/csconfig" + utils "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -84,7 +84,7 @@ func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { duration = defaultDuration } - if _, err := time.ParseDuration(duration); err != nil { + if _, err := utils.ParseDuration(duration); err != nil { return nil, fmt.Errorf("error parsing duration '%s' of %s: %w", duration, profile.Name, err) } } @@ -136,7 +136,7 @@ func (profile *Runtime) GenerateDecisionFromProfile(alert *models.Alert) ([]*mod profile.Logger.Warningf("Failed to run duration_expr : %v", err) } else { durationStr := fmt.Sprint(duration) - if _, err := time.ParseDuration(durationStr); err != nil { + if _, err := utils.ParseDuration(durationStr); err != nil { profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration) } else { *decision.Duration = durationStr diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 00121ac8c38..025cf063083 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -292,7 +292,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models duration, err := time.ParseDuration(*decisionItem.Duration) if err != nil { - return 0,0,0, rollbackOnError(txClient, err, "parsing decision duration") + return 0, 0, 0, rollbackOnError(txClient, err, "parsing decision duration") } if decisionItem.Scope == nil { @@ -382,7 +382,7 @@ func (c *Client) createDecisionChunk(ctx context.Context, simulated bool, stopAt sz int ) - duration, err := time.ParseDuration(*decisionItem.Duration) + duration, err := ParseDuration(*decisionItem.Duration) if err != nil { return nil, errors.Wrapf(ParseDurationFail, "decision duration '%+v' : %s", *decisionItem.Duration, 
err) } From 764deee1c0f3370eb724f4b521c577fd43e26931 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Tue, 29 Apr 2025 17:19:10 +0100 Subject: [PATCH 504/581] enhance: add listen_socket to http acquisition (#3499) * enhance: add listen_socket to http acquisition * wrap error for long socket path * enhance: Cancel early go routines if config is emtpy and add a socket test * enhance: use temp dir for socket tests * enhance: use mktemp instead of hardcoding * enhance: mr linter pls be happy with me --------- Co-authored-by: marco --- pkg/acquisition/modules/http/http.go | 44 ++++++++++++++-- pkg/acquisition/modules/http/http_test.go | 61 ++++++++++++++++++++--- 2 files changed, 96 insertions(+), 9 deletions(-) diff --git a/pkg/acquisition/modules/http/http.go b/pkg/acquisition/modules/http/http.go index 76d7d06d240..4cf5d6bbfe5 100644 --- a/pkg/acquisition/modules/http/http.go +++ b/pkg/acquisition/modules/http/http.go @@ -22,6 +22,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/trace" "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/csnet" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -38,6 +39,7 @@ type HttpConfiguration struct { // IPFilter []string `yaml:"ip_filter"` // ChunkSize *int64 `yaml:"chunk_size"` ListenAddr string `yaml:"listen_addr"` + ListenSocket string `yaml:"listen_socket"` Path string `yaml:"path"` AuthType string `yaml:"auth_type"` BasicAuth *BasicAuthConfig `yaml:"basic_auth"` @@ -89,8 +91,8 @@ func (h *HTTPSource) UnmarshalConfig(yamlConfig []byte) error { } func (hc *HttpConfiguration) Validate() error { - if hc.ListenAddr == "" { - return errors.New("listen_addr is required") + if hc.ListenAddr == "" && hc.ListenSocket == "" { + return errors.New("listen_addr or listen_socket is required") } if hc.Path == "" { @@ -350,6 +352,11 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { return } + if r.RemoteAddr == "@" { + //We check if request came from 
unix socket and if so we set to loopback + r.RemoteAddr = "127.0.0.1:65535" + } + err := h.processRequest(w, r, &h.Config, out) if err != nil { h.logger.Errorf("failed to process request from '%s': %s", r.RemoteAddr, err) @@ -396,7 +403,38 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { } t.Go(func() error { - defer trace.CatchPanic("crowdsec/acquis/http/server") + if h.Config.ListenSocket == "" { + return nil + } + + defer trace.CatchPanic("crowdsec/acquis/http/server/unix") + h.logger.Infof("creating unix socket on %s", h.Config.ListenSocket) + _ = os.Remove(h.Config.ListenSocket) + listener, err := net.Listen("unix", h.Config.ListenSocket) + if err != nil { + return csnet.WrapSockErr(err, h.Config.ListenSocket) + } + if h.Config.TLS != nil { + err := h.Server.ServeTLS(listener, h.Config.TLS.ServerCert, h.Config.TLS.ServerKey) + if err != nil && err != http.ErrServerClosed { + return fmt.Errorf("https server failed: %w", err) + } + } else { + err := h.Server.Serve(listener) + if err != nil && err != http.ErrServerClosed { + return fmt.Errorf("http server failed: %w", err) + } + } + + return nil + }) + + t.Go(func() error { + if h.Config.ListenAddr == "" { + return nil + } + + defer trace.CatchPanic("crowdsec/acquis/http/server/tcp") if h.Config.TLS != nil { h.logger.Infof("start https server on %s", h.Config.ListenAddr) diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go index 552fe90e387..ab55e956c11 100644 --- a/pkg/acquisition/modules/http/http_test.go +++ b/pkg/acquisition/modules/http/http_test.go @@ -2,13 +2,16 @@ package httpacquisition import ( "compress/gzip" + "context" "crypto/tls" "crypto/x509" "errors" "fmt" "io" + "net" "net/http" "os" + "path/filepath" "strings" "testing" "time" @@ -37,7 +40,7 @@ func TestConfigure(t *testing.T) { { config: ` foobar: bla`, - expectedErr: "invalid configuration: listen_addr is required", + expectedErr: "invalid configuration: listen_addr or 
listen_socket is required", }, { config: ` @@ -256,7 +259,7 @@ basic_auth: ctx := t.Context() - req, err := http.NewRequestWithContext(ctx, http.MethodGet, testHTTPServerAddr + "/test", http.NoBody) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, testHTTPServerAddr+"/test", http.NoBody) require.NoError(t, err) res, err := http.DefaultClient.Do(req) @@ -284,7 +287,7 @@ basic_auth: time.Sleep(1 * time.Second) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, testHTTPServerAddr + "/unknown", http.NoBody) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, testHTTPServerAddr+"/unknown", http.NoBody) require.NoError(t, err) res, err := http.DefaultClient.Do(req) @@ -313,7 +316,7 @@ basic_auth: client := &http.Client{} - req, err := http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr + "/test", strings.NewReader("test")) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr+"/test", strings.NewReader("test")) require.NoError(t, err) req.Header.Set("Content-Type", "application/json") @@ -321,7 +324,7 @@ basic_auth: require.NoError(t, err) assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - req, err = http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr + "/test", strings.NewReader("test")) + req, err = http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr+"/test", strings.NewReader("test")) require.NoError(t, err) req.SetBasicAuth("test", "WrongPassword") @@ -474,6 +477,52 @@ custom_headers: require.NoError(t, err) } +func TestAcquistionSocket(t *testing.T) { + tempDir := t.TempDir() + socketFile := filepath.Join(tempDir, "test.sock") + + ctx := t.Context() + h := &HTTPSource{} + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` +source: http +listen_socket: `+socketFile+` +path: /test +auth_type: headers +headers: + key: test`), 2) + + time.Sleep(1 * time.Second) + rawEvt := `{"test": "test"}` + errChan := make(chan error) + go assertEvents(out, 
[]string{rawEvt}, errChan) + + client := &http.Client{ + Transport: &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + return net.Dial("unix", socketFile) + }, + }, + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/test", testHTTPServerAddr), strings.NewReader(rawEvt)) + require.NoError(t, err) + + req.Header.Add("Key", "test") + resp, err := client.Do(req) + require.NoError(t, err) + + assert.Equal(t, http.StatusOK, resp.StatusCode) + + err = <-errChan + require.NoError(t, err) + + assertMetrics(t, reg, h.GetMetrics(), 1) + + h.Server.Close() + tomb.Kill(nil) + err = tomb.Wait() + require.NoError(t, err) +} + type slowReader struct { delay time.Duration body []byte @@ -582,7 +631,7 @@ tls: time.Sleep(1 * time.Second) - req, err := http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr + "/test", strings.NewReader("test")) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, testHTTPServerAddr+"/test", strings.NewReader("test")) require.NoError(t, err) req.Header.Set("Content-Type", "application/json") From 582a192c1e08c0b7af23c80891b9140874db011e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Apr 2025 14:45:57 +0200 Subject: [PATCH 505/581] build(deps): bump golang.org/x/net from 0.37.0 to 0.38.0 (#3581) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.37.0 to 0.38.0. - [Commits](https://github.com/golang/net/compare/v0.37.0...v0.38.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-version: 0.38.0 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: blotus --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1eafe27861b..81c41acc9ed 100644 --- a/go.mod +++ b/go.mod @@ -100,7 +100,7 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/crypto v0.36.0 golang.org/x/mod v0.23.0 - golang.org/x/net v0.37.0 // indirect + golang.org/x/net v0.38.0 // indirect golang.org/x/sync v0.12.0 golang.org/x/sys v0.31.0 golang.org/x/text v0.23.0 diff --git a/go.sum b/go.sum index fe4a7d74dde..133e1e9e065 100644 --- a/go.sum +++ b/go.sum @@ -837,8 +837,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 8949309223d9c0e2890f8725e855fe937ae1cf56 Mon Sep 17 00:00:00 2001 From: David Date: Wed, 30 Apr 2025 15:05:17 +0200 Subject: [PATCH 506/581] Fix monitorNewFiles for NFS + Remove dead tails from tail map (#3508) * xx * xx * Tests * fix tests * XX * Fix race condition in TestLiveAcquisition implementation * Better comments for 
IsTailing and RemoveTail * lint * linter * unmarshal DiscoveryPollInterval to time.Duration * []byte -> string * prefer void assignment to nolint * extract method, add test * excludedByRE() -> isExcluded() * fix windows test * fix regression - tail files from the end if they are detected when the application starts --------- Co-authored-by: marco --- pkg/acquisition/modules/file/file.go | 398 +++++++++++----------- pkg/acquisition/modules/file/file_test.go | 200 ++++++++++- 2 files changed, 384 insertions(+), 214 deletions(-) diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 4e673f87571..7c81f7cd2fc 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -2,6 +2,7 @@ package fileacquisition import ( "bufio" + "cmp" "compress/gzip" "context" "errors" @@ -29,6 +30,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) +const defaultPollInterval = 30 * time.Second + var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "cs_filesource_hits_total", @@ -38,12 +41,14 @@ var linesRead = prometheus.NewCounterVec( type FileConfiguration struct { Filenames []string - ExcludeRegexps []string `yaml:"exclude_regexps"` + ExcludeRegexps []string `yaml:"exclude_regexps"` Filename string - ForceInotify bool `yaml:"force_inotify"` - MaxBufferSize int `yaml:"max_buffer_size"` - PollWithoutInotify *bool `yaml:"poll_without_inotify"` - configuration.DataSourceCommonCfg `yaml:",inline"` + ForceInotify bool `yaml:"force_inotify"` + MaxBufferSize int `yaml:"max_buffer_size"` + PollWithoutInotify *bool `yaml:"poll_without_inotify"` + DiscoveryPollEnable bool `yaml:"discovery_poll_enable"` + DiscoveryPollInterval time.Duration `yaml:"discovery_poll_interval"` + configuration.DataSourceCommonCfg `yaml:",inline"` } type FileSource struct { @@ -149,20 +154,7 @@ func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLeve } for _, file := range files { - // check if 
file is excluded - excluded := false - - for _, pattern := range f.exclude_regexps { - if pattern.MatchString(file) { - excluded = true - - f.logger.Infof("Skipping file %s as it matches exclude pattern %s", file, pattern) - - break - } - } - - if excluded { + if f.isExcluded(file) { continue } @@ -328,237 +320,215 @@ func (f *FileSource) StreamingAcquisition(ctx context.Context, out chan types.Ev }) for _, file := range f.files { - // before opening the file, check if we need to specifically avoid it. (XXX) - skip := false - - for _, pattern := range f.exclude_regexps { - if pattern.MatchString(file) { - f.logger.Infof("file %s matches exclusion pattern %s, skipping", file, pattern.String()) - - skip = true - - break - } - } - - if skip { - continue - } - - // cf. https://github.com/crowdsecurity/crowdsec/issues/1168 - // do not rely on stat, reclose file immediately as it's opened by Tail - fd, err := os.Open(file) - if err != nil { - f.logger.Errorf("unable to read %s : %s", file, err) - continue - } - - if err = fd.Close(); err != nil { - f.logger.Errorf("unable to close %s : %s", file, err) - continue - } - - fi, err := os.Stat(file) - if err != nil { - return fmt.Errorf("could not stat file %s : %w", file, err) + if err := f.setupTailForFile(file, out, true, t); err != nil { + f.logger.Errorf("Error setting up tail for %s: %s", file, err) } + } - if fi.IsDir() { - f.logger.Warnf("%s is a directory, ignoring it.", file) - continue - } + return nil +} - pollFile := false - if f.config.PollWithoutInotify != nil { - pollFile = *f.config.PollWithoutInotify - } else { - networkFS, fsType, err := types.IsNetworkFS(file) - if err != nil { - f.logger.Warningf("Could not get fs type for %s : %s", file, err) - } +func (f *FileSource) Dump() any { + return f +} - f.logger.Debugf("fs for %s is network: %t (%s)", file, networkFS, fsType) +// checkAndTailFile validates and sets up tailing for a given file. It performs the following checks: +// 1. 
Verifies if the file exists and is not a directory +// 2. Checks if the filename matches any of the configured patterns +// 3. Sets up file tailing if the file is valid and matches patterns +// +// Parameters: +// - filename: The path to the file to check and potentially tail +// - logger: A log.Entry for contextual logging +// - out: Channel to send file events to +// - t: A tomb.Tomb for graceful shutdown handling +// +// Returns an error if any validation fails or if tailing setup fails +func (f *FileSource) checkAndTailFile(filename string, logger *log.Entry, out chan types.Event, t *tomb.Tomb) error { + // Check if it's a directory + fi, err := os.Stat(filename) + if err != nil { + logger.Errorf("Could not stat() file %s, ignoring it : %s", filename, err) + return err + } - if networkFS { - f.logger.Warnf("Disabling inotify polling on %s as it is on a network share. You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + if fi.IsDir() { + return nil + } - pollFile = true - } - } + logger.Debugf("Processing file %s", filename) - filink, err := os.Lstat(file) + // Check if file matches any of our patterns + matched := false + for _, pattern := range f.config.Filenames { + logger.Debugf("Matching %s with %s", pattern, filename) + matched, err = filepath.Match(pattern, filename) if err != nil { - f.logger.Errorf("Could not lstat() new file %s, ignoring it : %s", file, err) + logger.Errorf("Could not match pattern : %s", err) continue } - - if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { - f.logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. 
Consider setting poll_without_inotify to true in your configuration", file) + if matched { + logger.Debugf("Matched %s with %s", pattern, filename) + break } + } - tail, err := tail.TailFile(file, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd}, Logger: log.NewEntry(log.StandardLogger())}) - if err != nil { - f.logger.Errorf("Could not start tailing file %s : %s", file, err) - continue - } + if !matched { + return nil + } - f.tailMapMutex.Lock() - f.tails[file] = true - f.tailMapMutex.Unlock() - t.Go(func() error { - defer trace.CatchPanic("crowdsec/acquis/file/live/fsnotify") - return f.tailFile(out, t, tail) - }) + // Setup the tail if needed + if err := f.setupTailForFile(filename, out, false, t); err != nil { + logger.Errorf("Error setting up tail for file %s: %s", filename, err) + return err } return nil } -func (f *FileSource) Dump() interface{} { - return f -} - func (f *FileSource) monitorNewFiles(out chan types.Event, t *tomb.Tomb) error { logger := f.logger.WithField("goroutine", "inotify") + // Setup polling if enabled + var tickerChan <-chan time.Time + var ticker *time.Ticker + if f.config.DiscoveryPollEnable { + interval := cmp.Or(f.config.DiscoveryPollInterval, defaultPollInterval) + ticker = time.NewTicker(interval) + tickerChan = ticker.C + defer ticker.Stop() + } + for { select { case event, ok := <-f.watcher.Events: if !ok { return nil } - if event.Op&fsnotify.Create != fsnotify.Create { continue } + _ = f.checkAndTailFile(event.Name, logger, out, t) - fi, err := os.Stat(event.Name) - if err != nil { - logger.Errorf("Could not stat() new file %s, ignoring it : %s", event.Name, err) - continue - } - - if fi.IsDir() { - continue - } - - logger.Debugf("Detected new file %s", event.Name) - - matched := false - + case <-tickerChan: // Will never trigger if tickerChan is nil + // Poll for all configured patterns for _, pattern := range f.config.Filenames { - logger.Debugf("Matching %s with 
%s", pattern, event.Name) - - matched, err = filepath.Match(pattern, event.Name) + files, err := filepath.Glob(pattern) if err != nil { - logger.Errorf("Could not match pattern : %s", err) + logger.Errorf("Error globbing pattern %s during poll: %s", pattern, err) continue } - - if matched { - logger.Debugf("Matched %s with %s", pattern, event.Name) - break + for _, file := range files { + _ = f.checkAndTailFile(file, logger, out, t) } } - if !matched { - continue + case err, ok := <-f.watcher.Errors: + if !ok { + return nil } + logger.Errorf("Error while monitoring folder: %s", err) - // before opening the file, check if we need to specifically avoid it. (XXX) - skip := false - - for _, pattern := range f.exclude_regexps { - if pattern.MatchString(event.Name) { - f.logger.Infof("file %s matches exclusion pattern %s, skipping", event.Name, pattern.String()) - - skip = true - - break - } + case <-t.Dying(): + err := f.watcher.Close() + if err != nil { + return fmt.Errorf("could not remove all inotify watches: %w", err) } + return nil + } + } +} - if skip { - continue - } +func (f *FileSource) setupTailForFile(file string, out chan types.Event, seekEnd bool, t *tomb.Tomb) error { + logger := f.logger.WithField("file", file) - f.tailMapMutex.RLock() - if f.tails[event.Name] { - f.tailMapMutex.RUnlock() - // we already have a tail on it, do not start a new one - logger.Debugf("Already tailing file %s, not creating a new tail", event.Name) + if f.isExcluded(file) { + return nil + } - break - } - f.tailMapMutex.RUnlock() - // cf. 
https://github.com/crowdsecurity/crowdsec/issues/1168 - // do not rely on stat, reclose file immediately as it's opened by Tail - fd, err := os.Open(event.Name) - if err != nil { - f.logger.Errorf("unable to read %s : %s", event.Name, err) - continue - } + // Check if we're already tailing + f.tailMapMutex.RLock() + if f.tails[file] { + f.tailMapMutex.RUnlock() + logger.Debugf("Already tailing file %s, not creating a new tail", file) + return nil + } + f.tailMapMutex.RUnlock() - if err = fd.Close(); err != nil { - f.logger.Errorf("unable to close %s : %s", event.Name, err) - continue - } + // Validate file + fd, err := os.Open(file) + if err != nil { + return fmt.Errorf("unable to read %s : %s", file, err) + } + if err = fd.Close(); err != nil { + return fmt.Errorf("unable to close %s : %s", file, err) + } - pollFile := false - if f.config.PollWithoutInotify != nil { - pollFile = *f.config.PollWithoutInotify - } else { - networkFS, fsType, err := types.IsNetworkFS(event.Name) - if err != nil { - f.logger.Warningf("Could not get fs type for %s : %s", event.Name, err) - } + fi, err := os.Stat(file) + if err != nil { + return fmt.Errorf("could not stat file %s : %w", file, err) + } + if fi.IsDir() { + logger.Warnf("%s is a directory, ignoring it.", file) + return nil + } - f.logger.Debugf("fs for %s is network: %t (%s)", event.Name, networkFS, fsType) + // Determine polling mode + pollFile := false + if f.config.PollWithoutInotify != nil { + pollFile = *f.config.PollWithoutInotify + } else { + networkFS, fsType, err := types.IsNetworkFS(file) + if err != nil { + logger.Warningf("Could not get fs type for %s : %s", file, err) + } + logger.Debugf("fs for %s is network: %t (%s)", file, networkFS, fsType) + if networkFS { + logger.Warnf("Disabling inotify polling on %s as it is on a network share. 
You can manually set poll_without_inotify to true to make this message disappear, or to false to enforce inotify poll", file) + pollFile = true + } + } - if networkFS { - pollFile = true - } - } + // Check symlink status + filink, err := os.Lstat(file) + if err != nil { + return fmt.Errorf("could not lstat() file %s: %w", file, err) + } + if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { + logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. Consider setting poll_without_inotify to true in your configuration", file) + } - filink, err := os.Lstat(event.Name) - if err != nil { - logger.Errorf("Could not lstat() new file %s, ignoring it : %s", event.Name, err) - continue - } + // Create the tailer with appropriate configuration + seekInfo := &tail.SeekInfo{Offset: 0, Whence: io.SeekEnd} + if f.config.Mode == configuration.CAT_MODE { + seekInfo.Whence = io.SeekStart + } - if filink.Mode()&os.ModeSymlink == os.ModeSymlink && !pollFile { - logger.Warnf("File %s is a symlink, but inotify polling is enabled. Crowdsec will not be able to detect rotation. 
Consider setting poll_without_inotify to true in your configuration", event.Name) - } + if seekEnd { + seekInfo.Whence = io.SeekEnd + } - // Slightly different parameters for Location, as we want to read the first lines of the newly created file - tail, err := tail.TailFile(event.Name, tail.Config{ReOpen: true, Follow: true, Poll: pollFile, Location: &tail.SeekInfo{Offset: 0, Whence: io.SeekStart}}) - if err != nil { - logger.Errorf("Could not start tailing file %s : %s", event.Name, err) - break - } + tail, err := tail.TailFile(file, tail.Config{ + ReOpen: true, + Follow: true, + Poll: pollFile, + Location: seekInfo, + Logger: log.NewEntry(log.StandardLogger()), + }) + if err != nil { + return fmt.Errorf("could not start tailing file %s : %w", file, err) + } - f.tailMapMutex.Lock() - f.tails[event.Name] = true - f.tailMapMutex.Unlock() - t.Go(func() error { - defer trace.CatchPanic("crowdsec/acquis/tailfile") - return f.tailFile(out, t, tail) - }) - case err, ok := <-f.watcher.Errors: - if !ok { - return nil - } + f.tailMapMutex.Lock() + f.tails[file] = true + f.tailMapMutex.Unlock() - logger.Errorf("Error while monitoring folder: %s", err) - case <-t.Dying(): - err := f.watcher.Close() - if err != nil { - return fmt.Errorf("could not remove all inotify watches: %w", err) - } + t.Go(func() error { + defer trace.CatchPanic("crowdsec/acquis/tailfile") + return f.tailFile(out, t, tail) + }) - return nil - } - } + return nil } func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tail) error { @@ -586,6 +556,12 @@ func (f *FileSource) tailFile(out chan types.Event, t *tomb.Tomb, tail *tail.Tai logger.Warning(errMsg) + // Just remove the dead tailer from our map and return + // monitorNewFiles will pick up the file again if it's recreated + f.tailMapMutex.Lock() + delete(f.tails, tail.Filename) + f.tailMapMutex.Unlock() + return nil case line := <-tail.Lines: if line == nil { @@ -683,7 +659,7 @@ func (f *FileSource) readFile(filename string, out 
chan types.Event, t *tomb.Tom linesRead.With(prometheus.Labels{"source": filename}).Inc() // we're reading logs at once, it must be time-machine buckets - out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE, Unmarshaled: make(map[string]interface{})} + out <- types.Event{Line: l, Process: true, Type: types.LOG, ExpectMode: types.TIMEMACHINE, Unmarshaled: make(map[string]any)} } } @@ -698,3 +674,31 @@ func (f *FileSource) readFile(filename string, out chan types.Event, t *tomb.Tom return nil } + +// IsTailing returns whether a given file is currently being tailed. For testing purposes. +// It is case sensitive and path delimiter sensitive (filename must match exactly what the filename would look being OS specific) +func (f *FileSource) IsTailing(filename string) bool { + f.tailMapMutex.RLock() + defer f.tailMapMutex.RUnlock() + return f.tails[filename] +} + +// RemoveTail is used for testing to simulate a dead tailer. For testing purposes. +// It is case sensitive and path delimiter sensitive (filename must match exactly what the filename would look being OS specific) +func (f *FileSource) RemoveTail(filename string) { + f.tailMapMutex.Lock() + defer f.tailMapMutex.Unlock() + delete(f.tails, filename) +} + +// isExcluded returns the first matching regexp from the list of excluding patterns, +// or nil if the file is not excluded. 
+func (f *FileSource) isExcluded(path string) bool { + for _, re := range f.exclude_regexps { + if re.MatchString(path) { + f.logger.WithField("file", path).Infof("Skipping file: matches exclude regex %q", re) + return true + } + } + return false +} diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 9cfbdbc385a..4264ab0ca93 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -1,8 +1,10 @@ package fileacquisition_test import ( + "context" "fmt" "os" + "path/filepath" "runtime" "testing" "time" @@ -394,13 +396,19 @@ force_inotify: true`, testPattern), actualLines := 0 if tc.expectedLines != 0 { + var stopReading bool + defer func() { stopReading = true }() go func() { for { select { case <-out: actualLines++ - case <-time.After(2 * time.Second): - return + default: + if stopReading { + return + } + // Small sleep to prevent tight loop + time.Sleep(100 * time.Millisecond) } } }() @@ -410,21 +418,41 @@ force_inotify: true`, testPattern), cstest.RequireErrorContains(t, err, tc.expectedErr) if tc.expectedLines != 0 { - fd, err := os.Create("test_files/stream.log") + // f.IsTailing is path delimiter sensitive + streamLogFile := filepath.Join("test_files", "stream.log") + + fd, err := os.Create(streamLogFile) require.NoError(t, err, "could not create test file") + // wait for the file to be tailed + waitingForTail := true + for waitingForTail { + select { + case <-time.After(2 * time.Second): + t.Fatal("Timeout waiting for file to be tailed") + default: + if !f.IsTailing(streamLogFile) { + time.Sleep(50 * time.Millisecond) + continue + } + waitingForTail = false + } + } + for i := range 5 { _, err = fmt.Fprintf(fd, "%d\n", i) if err != nil { - os.Remove("test_files/stream.log") + os.Remove(streamLogFile) t.Fatalf("could not write test file : %s", err) } } fd.Close() - // we sleep to make sure we detect the new file - time.Sleep(3 * time.Second) - 
os.Remove("test_files/stream.log") + + // sleep to ensure the tail events are processed + time.Sleep(2 * time.Second) + + os.Remove(streamLogFile) assert.Equal(t, tc.expectedLines, actualLines) } @@ -454,20 +482,158 @@ exclude_regexps: ["\\.gz$"]` subLogger := logger.WithField("type", "file") f := fileacquisition.FileSource{} - if err := f.Configure([]byte(config), subLogger, configuration.METRICS_NONE); err != nil { - subLogger.Fatalf("unexpected error: %s", err) - } + err := f.Configure([]byte(config), subLogger, configuration.METRICS_NONE) + require.NoError(t, err) - expectedLogOutput := "Skipping file test_files/test.log.gz as it matches exclude pattern" + require.NotNil(t, hook.LastEntry()) + assert.Contains(t, hook.LastEntry().Message, `Skipping file: matches exclude regex "\\.gz`) + assert.Equal(t, filepath.Join("test_files", "test.log.gz"), hook.LastEntry().Data["file"]) + hook.Reset() +} - if runtime.GOOS == "windows" { - expectedLogOutput = `Skipping file test_files\test.log.gz as it matches exclude pattern \.gz` +func TestDiscoveryPollConfiguration(t *testing.T) { + tests := []struct { + name string + config string + wantErr string + }{ + { + name: "valid discovery poll config", + config: ` +filenames: + - "tests/test.log" +discovery_poll_enable: true +discovery_poll_interval: "30s" +mode: tail +`, + wantErr: "", + }, + { + name: "invalid poll interval", + config: ` +filenames: + - "tests/test.log" +discovery_poll_enable: true +discovery_poll_interval: "invalid" +mode: tail +`, + wantErr: "cannot unmarshal !!str `invalid` into time.Duration", + }, + { + name: "polling disabled", + config: ` +filenames: + - "tests/test.log" +discovery_poll_enable: false +mode: tail +`, + wantErr: "", + }, } - if hook.LastEntry() == nil { - t.Fatalf("expected output %s, but got nothing", expectedLogOutput) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + f := &fileacquisition.FileSource{} + err := f.Configure([]byte(tc.config), log.NewEntry(log.New()), 
configuration.METRICS_NONE) + cstest.RequireErrorContains(t, err, tc.wantErr) + }) } +} - assert.Contains(t, hook.LastEntry().Message, expectedLogOutput) - hook.Reset() +func TestDiscoveryPolling(t *testing.T) { + dir := t.TempDir() + + pattern := filepath.Join(dir, "*.log") + yamlConfig := fmt.Sprintf(` +filenames: + - '%s' +discovery_poll_enable: true +discovery_poll_interval: "1s" +exclude_regexps: ["\\.ignore$"] +mode: tail +`, pattern) + + fmt.Printf("Config: %s\n", yamlConfig) + config := []byte(yamlConfig) + + f := &fileacquisition.FileSource{} + err := f.Configure(config, log.NewEntry(log.New()), configuration.METRICS_NONE) + require.NoError(t, err) + + // Create channel for events + eventChan := make(chan types.Event) + tomb := tomb.Tomb{} + + // Start acquisition + err = f.StreamingAcquisition(context.Background(), eventChan, &tomb) + require.NoError(t, err) + + // Create a test file + testFile := filepath.Join(dir, "test.log") + err = os.WriteFile(testFile, []byte("test line\n"), 0o644) + require.NoError(t, err) + + ignoredFile := filepath.Join(dir, ".ignored") + err = os.WriteFile(ignoredFile, []byte("test line\n"), 0o644) + require.NoError(t, err) + + // Wait for polling to detect the file + time.Sleep(4 * time.Second) + + require.True(t, f.IsTailing(testFile), "File should be tailed after polling") + require.False(t, f.IsTailing(ignoredFile), "File should be ignored after polling") + + // Cleanup + tomb.Kill(nil) + tomb.Wait() +} + +func TestFileResurrectionViaPolling(t *testing.T) { + dir := t.TempDir() + ctx := t.Context() + + testFile := filepath.Join(dir, "test.log") + err := os.WriteFile(testFile, []byte("test line\n"), 0o644) + require.NoError(t, err) + + pattern := filepath.Join(dir, "*.log") + yamlConfig := fmt.Sprintf(` +filenames: + - '%s' +discovery_poll_enable: true +discovery_poll_interval: "1s" +mode: tail +`, pattern) + + fmt.Printf("Config: %s\n", yamlConfig) + config := []byte(yamlConfig) + + f := &fileacquisition.FileSource{} + err = 
f.Configure(config, log.NewEntry(log.New()), configuration.METRICS_NONE) + require.NoError(t, err) + + eventChan := make(chan types.Event) + tomb := tomb.Tomb{} + + err = f.StreamingAcquisition(ctx, eventChan, &tomb) + require.NoError(t, err) + + // Wait for initial tail setup + time.Sleep(100 * time.Millisecond) + + // Simulate tailer death by removing it from the map + f.RemoveTail(testFile) + isTailed := f.IsTailing(testFile) + require.False(t, isTailed, "File should be removed from the map") + + // Wait for polling to resurrect the file + time.Sleep(2 * time.Second) + + // Verify file is being tailed again + isTailed = f.IsTailing(testFile) + require.True(t, isTailed, "File should be resurrected via polling") + + // Cleanup + tomb.Kill(nil) + tomb.Wait() } From 0a9e6ddd4f42bb18564c1ad640cda04d2f38bb34 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 30 Apr 2025 23:23:49 +0200 Subject: [PATCH 507/581] CI: remove obsolete reference to directory dyn-bats (#3600) --- test/.gitignore | 1 - test/bats.mk | 1 - test/run-tests | 4 ++-- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/test/.gitignore b/test/.gitignore index 522c09bcc45..169461eaff5 100644 --- a/test/.gitignore +++ b/test/.gitignore @@ -1,4 +1,3 @@ /local/ /local-init/ /.environment.sh -/dyn-bats/*.bats diff --git a/test/bats.mk b/test/bats.mk index e25156ed929..2e2621f3e71 100644 --- a/test/bats.mk +++ b/test/bats.mk @@ -95,7 +95,6 @@ bats-fixture: bats-check-requirements bats-update-tools ## Build fixture for fu bats-clean: ## Remove functional test environment @$(RM) $(TEST_DIR)/local $(WIN_IGNORE_ERR) @$(RM) $(LOCAL_INIT_DIR) $(WIN_IGNORE_ERR) - @$(RM) $(TEST_DIR)/dyn-bats/*.bats $(WIN_IGNORE_ERR) @$(RM) test/.environment.sh $(WIN_IGNORE_ERR) @$(RM) test/coverage/* $(WIN_IGNORE_ERR) diff --git a/test/run-tests b/test/run-tests index 957eb663b9c..e7609188c37 100755 --- a/test/run-tests +++ b/test/run-tests @@ -37,10 +37,10 @@ if [[ $# -ge 1 ]]; then 
--print-output-on-failure \ "$@" else - echo "test files: $TEST_DIR/bats $TEST_DIR/dyn-bats" + echo "test files: $TEST_DIR/bats" "$TEST_DIR/lib/bats-core/bin/bats" \ --jobs 1 \ --timing \ --print-output-on-failure \ - "$TEST_DIR/bats" "$TEST_DIR/dyn-bats" + "$TEST_DIR/bats" fi From dafc9c30763d86d84670eda895bf2cd90cbcb042 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 30 Apr 2025 23:24:33 +0200 Subject: [PATCH 508/581] CI: correct uv.lock path (#3596) --- .github/workflows/docker-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 796dd916f02..70cca84c5d9 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -57,7 +57,7 @@ jobs: with: version: 0.5.24 enable-cache: true - cache-dependency-glob: "uv.lock" + cache-dependency-glob: "./docker/test/uv.lock" - name: "Set up Python" uses: actions/setup-python@v5 From 54571d1688ec820e2acc8db966367a7994d7dd85 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 1 May 2025 23:04:42 +0200 Subject: [PATCH 509/581] refact cscli: hub item - pointer receiver for consistency (#3595) --- cmd/crowdsec-cli/cliitem/cmdinspect.go | 18 ++++++++---------- cmd/crowdsec-cli/cliitem/cmdinstall.go | 7 ++++--- cmd/crowdsec-cli/cliitem/cmdremove.go | 9 +++++---- cmd/crowdsec-cli/cliitem/cmdupgrade.go | 9 +++++---- cmd/crowdsec-cli/cliitem/item.go | 8 ++++---- 5 files changed, 26 insertions(+), 25 deletions(-) diff --git a/cmd/crowdsec-cli/cliitem/cmdinspect.go b/cmd/crowdsec-cli/cliitem/cmdinspect.go index 7b5bd73530e..bb676fd22a4 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinspect.go +++ b/cmd/crowdsec-cli/cliitem/cmdinspect.go @@ -22,7 +22,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { +func (cli 
*cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { cfg := cli.cfg() if rev && !diff { @@ -51,7 +51,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff } if diff { - fmt.Println(cli.whyTainted(ctx, hub, contentProvider, item, rev)) + fmt.Fprintln(os.Stdout, cli.whyTainted(ctx, hub, contentProvider, item, rev)) continue } @@ -71,7 +71,7 @@ func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff } // return the diff between the installed version and the latest version -func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { +func (cli *cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { if !item.State.IsInstalled() { return "", fmt.Errorf("'%s' is not installed", item.FQName()) } @@ -113,7 +113,7 @@ func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvid return fmt.Sprintf("%s", diff), nil } -func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, item *cwhub.Item, reverse bool) string { +func (cli *cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, item *cwhub.Item, reverse bool) string { if !item.State.IsInstalled() { return fmt.Sprintf("# %s is not installed", item.FQName()) } @@ -159,7 +159,7 @@ func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvid return strings.Join(ret, "\n") } -func (cli cliItem) newInspectCmd() *cobra.Command { +func (cli *cliItem) newInspectCmd() *cobra.Command { var ( url string diff bool @@ -212,7 +212,7 @@ func inspectItem(hub *cwhub.Hub, item *cwhub.Item, wantMetrics bool, output stri return fmt.Errorf("unable to serialize item: %w", err) } - fmt.Print(string(b)) + fmt.Fprintln(os.Stdout, string(b)) } if output != "human" { 
@@ -220,13 +220,11 @@ func inspectItem(hub *cwhub.Hub, item *cwhub.Item, wantMetrics bool, output stri } if item.State.Tainted { - fmt.Println() - fmt.Printf(`This item is tainted. Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) - fmt.Println() + fmt.Fprintf(os.Stdout, "\nThis item is tainted. Use '%s %s inspect --diff %s' to see why.\n", filepath.Base(os.Args[0]), item.Type, item.Name) } if wantMetrics { - fmt.Printf("\nCurrent metrics: \n") + fmt.Fprintf(os.Stdout, "\nCurrent metrics: \n") if err := showMetrics(prometheusURL, hub, item, wantColor); err != nil { return err diff --git a/cmd/crowdsec-cli/cliitem/cmdinstall.go b/cmd/crowdsec-cli/cliitem/cmdinstall.go index 3bd208f597c..e61365382f7 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinstall.go +++ b/cmd/crowdsec-cli/cliitem/cmdinstall.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "os" "slices" "strings" @@ -43,7 +44,7 @@ func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) str return msg } -func (cli cliItem) install(ctx context.Context, args []string, interactive bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { +func (cli *cliItem) install(ctx context.Context, args []string, interactive bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { cfg := cli.cfg() hub, err := require.Hub(cfg, log.StandardLogger()) @@ -91,7 +92,7 @@ func (cli cliItem) install(ctx context.Context, args []string, interactive bool, } if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { - fmt.Println("\n" + msg) + fmt.Fprintln(os.Stdout, "\n"+msg) } return nil @@ -116,7 +117,7 @@ func compAllItems(itemType string, args []string, toComplete string, cfg configG return comp, cobra.ShellCompDirectiveNoFileComp } -func (cli cliItem) newInstallCmd() *cobra.Command { +func (cli *cliItem) newInstallCmd() *cobra.Command { var ( interactive bool dryRun bool diff --git a/cmd/crowdsec-cli/cliitem/cmdremove.go 
b/cmd/crowdsec-cli/cliitem/cmdremove.go index 506599f3efd..bca39ecb4ab 100644 --- a/cmd/crowdsec-cli/cliitem/cmdremove.go +++ b/cmd/crowdsec-cli/cliitem/cmdremove.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "os" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -15,7 +16,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubops" ) -func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force bool, all bool) (*hubops.ActionPlan, error) { +func (cli *cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force bool, all bool) (*hubops.ActionPlan, error) { plan := hubops.NewActionPlan(hub) if all { @@ -85,7 +86,7 @@ func installedParentNames(item *cwhub.Item) []string { return ret } -func (cli cliItem) remove(ctx context.Context, args []string, interactive bool, dryRun bool, purge bool, force bool, all bool) error { +func (cli *cliItem) remove(ctx context.Context, args []string, interactive bool, dryRun bool, purge bool, force bool, all bool) error { cfg := cli.cfg() hub, err := require.Hub(cli.cfg(), log.StandardLogger()) @@ -106,13 +107,13 @@ func (cli cliItem) remove(ctx context.Context, args []string, interactive bool, } if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { - fmt.Println("\n" + msg) + fmt.Fprintln(os.Stdout, "\n"+msg) } return nil } -func (cli cliItem) newRemoveCmd() *cobra.Command { +func (cli *cliItem) newRemoveCmd() *cobra.Command { var ( interactive bool dryRun bool diff --git a/cmd/crowdsec-cli/cliitem/cmdupgrade.go b/cmd/crowdsec-cli/cliitem/cmdupgrade.go index 8dcbe3531d4..b585aa76041 100644 --- a/cmd/crowdsec-cli/cliitem/cmdupgrade.go +++ b/cmd/crowdsec-cli/cliitem/cmdupgrade.go @@ -4,6 +4,7 @@ import ( "cmp" "context" "fmt" + "os" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -14,7 +15,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/hubops" ) -func (cli cliItem) upgradePlan(hub *cwhub.Hub, contentProvider cwhub.ContentProvider, args []string, force bool, all 
bool) (*hubops.ActionPlan, error) { +func (cli *cliItem) upgradePlan(hub *cwhub.Hub, contentProvider cwhub.ContentProvider, args []string, force bool, all bool) (*hubops.ActionPlan, error) { plan := hubops.NewActionPlan(hub) if all { @@ -45,7 +46,7 @@ func (cli cliItem) upgradePlan(hub *cwhub.Hub, contentProvider cwhub.ContentProv return plan, nil } -func (cli cliItem) upgrade(ctx context.Context, args []string, interactive bool, dryRun bool, force bool, all bool) error { +func (cli *cliItem) upgrade(ctx context.Context, args []string, interactive bool, dryRun bool, force bool, all bool) error { cfg := cli.cfg() hub, err := require.Hub(cfg, log.StandardLogger()) @@ -68,13 +69,13 @@ func (cli cliItem) upgrade(ctx context.Context, args []string, interactive bool, } if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { - fmt.Println("\n" + msg) + fmt.Fprintln(os.Stdout, "\n"+msg) } return nil } -func (cli cliItem) newUpgradeCmd() *cobra.Command { +func (cli *cliItem) newUpgradeCmd() *cobra.Command { var ( interactive bool dryRun bool diff --git a/cmd/crowdsec-cli/cliitem/item.go b/cmd/crowdsec-cli/cliitem/item.go index 4737d7057e9..fa14baf1846 100644 --- a/cmd/crowdsec-cli/cliitem/item.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -40,7 +40,7 @@ type cliItem struct { listHelp cliHelp } -func (cli cliItem) NewCommand() *cobra.Command { +func (cli *cliItem) NewCommand() *cobra.Command { cmd := &cobra.Command{ Use: cmp.Or(cli.help.use, cli.name+" [item]..."), Short: cmp.Or(cli.help.short, "Manage hub "+cli.name), @@ -59,7 +59,7 @@ func (cli cliItem) NewCommand() *cobra.Command { return cmd } -func (cli cliItem) list(args []string, all bool) error { +func (cli *cliItem) list(args []string, all bool) error { cfg := cli.cfg() hub, err := require.Hub(cli.cfg(), log.StandardLogger()) @@ -77,7 +77,7 @@ func (cli cliItem) list(args []string, all bool) error { return clihub.ListItems(color.Output, cfg.Cscli.Color, []string{cli.name}, items, false, cfg.Cscli.Output) } 
-func (cli cliItem) newListCmd() *cobra.Command { +func (cli *cliItem) newListCmd() *cobra.Command { var all bool cmd := &cobra.Command{ @@ -97,7 +97,7 @@ func (cli cliItem) newListCmd() *cobra.Command { return cmd } -func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { +func compInstalledItems(itemType string, _ []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { hub, err := require.Hub(cfg(), nil) if err != nil { return nil, cobra.ShellCompDirectiveDefault From 201aebaac29b9c95d12f93a673c8351243fd92a7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 2 May 2025 00:12:55 +0200 Subject: [PATCH 510/581] cscli inspect: don't show metrics or converted rules if an item is not installed (#3602) --- cmd/crowdsec-cli/cliitem/cmdinspect.go | 8 +++++--- cmd/crowdsec-cli/cliitem/hubappsec.go | 20 +++++++++++++------- cmd/crowdsec-cli/cliitem/hubcollection.go | 5 +++-- cmd/crowdsec-cli/cliitem/hubparser.go | 5 +++-- cmd/crowdsec-cli/cliitem/hubscenario.go | 5 +++-- test/bats/cscli-hubtype-inspect.bats | 21 ++++++++++++++------- 6 files changed, 41 insertions(+), 23 deletions(-) diff --git a/cmd/crowdsec-cli/cliitem/cmdinspect.go b/cmd/crowdsec-cli/cliitem/cmdinspect.go index bb676fd22a4..349347b0d88 100644 --- a/cmd/crowdsec-cli/cliitem/cmdinspect.go +++ b/cmd/crowdsec-cli/cliitem/cmdinspect.go @@ -56,12 +56,14 @@ func (cli *cliItem) inspect(ctx context.Context, args []string, url string, diff continue } - if err = inspectItem(hub, item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { + wantMetrics := !noMetrics && item.State.IsInstalled() + + if err := inspectItem(hub, item, wantMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { return err } if cli.inspectDetail != nil { - if err = cli.inspectDetail(item); err != nil { + if err := cli.inspectDetail(item); 
err != nil { return err } } @@ -71,7 +73,7 @@ func (cli *cliItem) inspect(ctx context.Context, args []string, url string, diff } // return the diff between the installed version and the latest version -func (cli *cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { +func (*cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { if !item.State.IsInstalled() { return "", fmt.Errorf("'%s' is not installed", item.FQName()) } diff --git a/cmd/crowdsec-cli/cliitem/hubappsec.go b/cmd/crowdsec-cli/cliitem/hubappsec.go index 1c1875d2f2c..ee5e83df995 100644 --- a/cmd/crowdsec-cli/cliitem/hubappsec.go +++ b/cmd/crowdsec-cli/cliitem/hubappsec.go @@ -85,10 +85,11 @@ cscli appsec-configs upgrade crowdsecurity/virtual-patching -i cscli appsec-configs upgrade crowdsecurity/virtual-patching --interactive`, }, inspectHelp: cliHelp{ - example: `# Display metadata, state, metrics and ancestor collections of appsec-configs (installed or not). + example: `# Display metadata, state, ancestor collections of appsec-configs (installed or not). cscli appsec-configs inspect crowdsecurity/virtual-patching -# Don't collect metrics (avoid error if crowdsec is not running). +# If the config is installed, its metrics are collected and shown as well (with an error if crowdsec is not running). +# To avoid this, use --no-metrics. cscli appsec-configs inspect crowdsecurity/virtual-patching --no-metrics # Display difference between a tainted item and the latest one. 
@@ -119,6 +120,10 @@ func NewAppsecRule(cfg configGetter) *cliItem { appsecRule := appsec.AppsecCollectionConfig{} + if item.State.LocalPath == "" { + return nil + } + yamlContent, err := os.ReadFile(item.State.LocalPath) if err != nil { return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err) @@ -129,7 +134,7 @@ func NewAppsecRule(cfg configGetter) *cliItem { } for _, ruleType := range appsec_rule.SupportedTypes() { - fmt.Printf("\n%s format:\n", cases.Title(language.Und, cases.NoLower).String(ruleType)) + fmt.Fprintf(os.Stdout, "\n%s format:\n", cases.Title(language.Und, cases.NoLower).String(ruleType)) for _, rule := range appsecRule.Rules { convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) @@ -137,13 +142,13 @@ func NewAppsecRule(cfg configGetter) *cliItem { return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) } - fmt.Println(convertedRule) + fmt.Fprintln(os.Stdout, convertedRule) } switch ruleType { //nolint:gocritic case appsec_rule.ModsecurityRuleType: for _, rule := range appsecRule.SecLangRules { - fmt.Println(rule) + fmt.Fprintln(os.Stdout, rule) } } } @@ -222,10 +227,11 @@ cscli appsec-rules upgrade crowdsecurity/crs -i cscli appsec-rules upgrade crowdsecurity/crs --interactive`, }, inspectHelp: cliHelp{ - example: `# Display metadata, state, metrics and ancestor collections of appsec-rules (installed or not). + example: `# Display metadata, state, ancestor collections of appsec-rules (installed or not). cscli appsec-rules inspect crowdsecurity/crs -# Don't collect metrics (avoid error if crowdsec is not running). +# If the rule is installed, its metrics are collected and shown as well (with an error if crowdsec is not running). +# To avoid this, use --no-metrics. cscli appsec-configs inspect crowdsecurity/crs --no-metrics # Display difference between a tainted item and the latest one. 
diff --git a/cmd/crowdsec-cli/cliitem/hubcollection.go b/cmd/crowdsec-cli/cliitem/hubcollection.go index c0e590ce5dd..cc2740ef321 100644 --- a/cmd/crowdsec-cli/cliitem/hubcollection.go +++ b/cmd/crowdsec-cli/cliitem/hubcollection.go @@ -76,10 +76,11 @@ cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables -i cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --interactive`, }, inspectHelp: cliHelp{ - example: `# Display metadata, state, metrics and dependencies of collections (installed or not). + example: `# Display metadata, state, and dependencies of collections (installed or not). cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables -# Don't collect metrics (avoid error if crowdsec is not running). +# If the collection is installed, its metrics are collected and shown as well (with an error if crowdsec is not running). +# To avoid this, use --no-metrics. cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables --no-metrics # Display difference between a tainted item and the latest one, or the reason for the taint if it's a dependency. diff --git a/cmd/crowdsec-cli/cliitem/hubparser.go b/cmd/crowdsec-cli/cliitem/hubparser.go index 79491baa705..6aa7aa3dfa4 100644 --- a/cmd/crowdsec-cli/cliitem/hubparser.go +++ b/cmd/crowdsec-cli/cliitem/hubparser.go @@ -76,10 +76,11 @@ cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs -i cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --interactive`, }, inspectHelp: cliHelp{ - example: `# Display metadata, state, metrics and ancestor collections of parsers (installed or not). + example: `# Display metadata, state and ancestor collections of parsers (installed or not). cscli parsers inspect crowdsecurity/httpd-logs crowdsecurity/sshd-logs -# Don't collect metrics (avoid error if crowdsec is not running). 
+# If the parser is installed, its metrics are collected and shown as well (with an error if crowdsec is not running). +# To avoid this, use --no-metrics. cscli parsers inspect crowdsecurity/httpd-logs --no-metrics # Display difference between a tainted item and the latest one. diff --git a/cmd/crowdsec-cli/cliitem/hubscenario.go b/cmd/crowdsec-cli/cliitem/hubscenario.go index ae56e16ccff..f2b93a836fd 100644 --- a/cmd/crowdsec-cli/cliitem/hubscenario.go +++ b/cmd/crowdsec-cli/cliitem/hubscenario.go @@ -76,10 +76,11 @@ cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing -i cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --interactive`, }, inspectHelp: cliHelp{ - example: `# Display metadata, state, metrics and ancestor collections of scenarios (installed or not). + example: `# Display metadata, state and ancestor collections of scenarios (installed or not). cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/http-probing -# Don't collect metrics (avoid error if crowdsec is not running). +# If the scenario is installed, its metrics are collected and shown as well (with an error if crowdsec is not running). +# To avoid this, use --no-metrics. cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics # Display difference between a tainted item and the latest one. diff --git a/test/bats/cscli-hubtype-inspect.bats b/test/bats/cscli-hubtype-inspect.bats index 9c96aadb3ad..7b18503654d 100644 --- a/test/bats/cscli-hubtype-inspect.bats +++ b/test/bats/cscli-hubtype-inspect.bats @@ -42,29 +42,34 @@ teardown() { rune -1 cscli parsers inspect blahblah/blahblah assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - # one item - rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics + # one item. if it's not installed, metrics won't be read. 
+ rune -0 cscli parsers inspect crowdsecurity/sshd-logs assert_line 'type: parsers' assert_line 'name: crowdsecurity/sshd-logs' assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' assert_line 'installed: false' - refute_line --partial 'Current metrics:' + refute_output --partial 'Current metrics:' + + rune -0 cscli parsers install crowdsecurity/sshd-logs + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics + refute_output --partial 'Current metrics:' # one item, with metrics rune -0 cscli parsers inspect crowdsecurity/sshd-logs - assert_line --partial 'Current metrics:' + assert_output --partial 'Current metrics:' # one item, json rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json rune -0 jq -c '[.type, .name, .path, .installed]' <(output) - assert_json '["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false]' + assert_json '["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",true]' # one item, raw rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o raw assert_line 'type: parsers' assert_line 'name: crowdsecurity/sshd-logs' assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' - assert_line 'installed: false' + assert_line 'installed: true' refute_line --partial 'Current metrics:' # multiple items @@ -74,6 +79,8 @@ teardown() { rune -1 grep -c 'Current metrics:' <(output) assert_output "0" + rune -0 cscli parsers install crowdsecurity/whitelists + # multiple items, with metrics rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists rune -0 grep -c 'Current metrics:' <(output) @@ -82,7 +89,7 @@ teardown() { # multiple items, json rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o json rune -0 jq -sc '[.[] | [.type, .name, .path, .installed]]' <(output) - assert_json 
'[["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false],["parsers","crowdsecurity/whitelists","parsers/s02-enrich/crowdsecurity/whitelists.yaml",false]]' + assert_json '[["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",true],["parsers","crowdsecurity/whitelists","parsers/s02-enrich/crowdsecurity/whitelists.yaml",true]]' # multiple items, raw rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o raw From d10067e77213c9c7d36e753733485549d43d4b91 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 2 May 2025 14:12:00 +0200 Subject: [PATCH 511/581] refactor pkg/database/Client.createAlertChunk() (#3585) --- .golangci.yml | 2 +- pkg/database/alerts.go | 337 +++++++++++++++++++++++------------------ 2 files changed, 189 insertions(+), 150 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 93ed9b196f2..c6dac451fec 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -218,7 +218,7 @@ linters: - name: cyclomatic arguments: # lower this after refactoring - - 39 + - 38 - name: defer disabled: true - name: empty-block diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 025cf063083..6f27b5afed9 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -426,160 +426,238 @@ func (c *Client) createDecisionChunk(ctx context.Context, simulated bool, stopAt return ret, nil } -func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) { - alertBuilders := []*ent.AlertCreate{} - alertDecisions := [][]*ent.Decision{} +func parseAlertTimes(alert *models.Alert, logger log.FieldLogger) (time.Time, time.Time) { + now := time.Now().UTC() - for _, alertItem := range alerts { - var ( - metas []*ent.Meta - events []*ent.Event - ) + start, err := time.Parse(time.RFC3339, *alert.StartAt) + if err != nil { + logger.Errorf("creating alert: Failed to 
parse startAtTime '%s', defaulting to now: %s", *alert.StartAt, err) - startAtTime, err := time.Parse(time.RFC3339, *alertItem.StartAt) - if err != nil { - c.Log.Errorf("creating alert: Failed to parse startAtTime '%s', defaulting to now: %s", *alertItem.StartAt, err) + start = now + } - startAtTime = time.Now().UTC() - } + stop, err := time.Parse(time.RFC3339, *alert.StopAt) + if err != nil { + logger.Errorf("creating alert: Failed to parse stopAtTime '%s', defaulting to now: %s", *alert.StopAt, err) + + stop = now + } - stopAtTime, err := time.Parse(time.RFC3339, *alertItem.StopAt) + return start, stop +} + +func buildEventCreates(ctx context.Context, logger log.FieldLogger, client *ent.Client, machineID string, alertItem *models.Alert) ([]*ent.Event, error) { + // let's track when we strip or drop data, notify outside of loop to avoid spam + stripped := false + dropped := false + + if len(alertItem.Events) == 0 { + return nil, nil + } + + eventBulk := make([]*ent.EventCreate, len(alertItem.Events)) + + for i, eventItem := range alertItem.Events { + ts, err := time.Parse(time.RFC3339, *eventItem.Timestamp) if err != nil { - c.Log.Errorf("creating alert: Failed to parse stopAtTime '%s', defaulting to now: %s", *alertItem.StopAt, err) + logger.Errorf("creating alert: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) - stopAtTime = time.Now().UTC() + ts = time.Now().UTC() } - /*display proper alert in logs*/ - for _, disp := range alertItem.FormatAsStrings(machineID, log.StandardLogger()) { - c.Log.Info(disp) + marshallMetas, err := json.Marshal(eventItem.Meta) + if err != nil { + return nil, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err) } - // let's track when we strip or drop data, notify outside of loop to avoid spam - stripped := false - dropped := false - - if len(alertItem.Events) > 0 { - eventBulk := make([]*ent.EventCreate, len(alertItem.Events)) + // the serialized field is too big, let's try 
to progressively strip it + if event.SerializedValidator(string(marshallMetas)) != nil { + stripped = true - for i, eventItem := range alertItem.Events { - ts, err := time.Parse(time.RFC3339, *eventItem.Timestamp) - if err != nil { - c.Log.Errorf("creating alert: Failed to parse event timestamp '%s', defaulting to now: %s", *eventItem.Timestamp, err) + valid := false + stripSize := 2048 - ts = time.Now().UTC() + for !valid && stripSize > 0 { + for _, serializedItem := range eventItem.Meta { + if len(serializedItem.Value) > stripSize*2 { + serializedItem.Value = serializedItem.Value[:stripSize] + "" + } } - marshallMetas, err := json.Marshal(eventItem.Meta) + marshallMetas, err = json.Marshal(eventItem.Meta) if err != nil { return nil, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err) } - // the serialized field is too big, let's try to progressively strip it - if event.SerializedValidator(string(marshallMetas)) != nil { - stripped = true + if event.SerializedValidator(string(marshallMetas)) == nil { + valid = true + } + + stripSize /= 2 + } + + // nothing worked, drop it + if !valid { + dropped = true + stripped = false + marshallMetas = []byte("") + } + } + + eventBulk[i] = client.Event.Create(). + SetTime(ts). 
+ SetSerialized(string(marshallMetas)) + } - valid := false - stripSize := 2048 + if stripped { + logger.Warningf("stripped 'serialized' field (machine %s / scenario %s)", machineID, *alertItem.Scenario) + } - for !valid && stripSize > 0 { - for _, serializedItem := range eventItem.Meta { - if len(serializedItem.Value) > stripSize*2 { - serializedItem.Value = serializedItem.Value[:stripSize] + "" - } - } + if dropped { + logger.Warningf("dropped 'serialized' field (machine %s / scenario %s)", machineID, *alertItem.Scenario) + } - marshallMetas, err = json.Marshal(eventItem.Meta) - if err != nil { - return nil, errors.Wrapf(MarshalFail, "event meta '%v' : %s", eventItem.Meta, err) - } + return client.Event.CreateBulk(eventBulk...).Save(ctx) +} - if event.SerializedValidator(string(marshallMetas)) == nil { - valid = true - } +func buildMetaCreates(ctx context.Context, logger log.FieldLogger, client *ent.Client, alertItem *models.Alert) ([]*ent.Meta, error) { + if len(alertItem.Meta) == 0 { + return nil, nil + } - stripSize /= 2 - } + metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta)) - // nothing worked, drop it - if !valid { - dropped = true - stripped = false - marshallMetas = []byte("") - } - } + for i, metaItem := range alertItem.Meta { + key := metaItem.Key + value := metaItem.Value - eventBulk[i] = c.Ent.Event.Create(). - SetTime(ts). 
- SetSerialized(string(marshallMetas)) - } + if len(metaItem.Value) > 4095 { + logger.Warningf("truncated meta %s: value too long", metaItem.Key) - if stripped { - c.Log.Warningf("stripped 'serialized' field (machine %s / scenario %s)", machineID, *alertItem.Scenario) - } + value = value[:4095] + } - if dropped { - c.Log.Warningf("dropped 'serialized' field (machine %s / scenario %s)", machineID, *alertItem.Scenario) - } + if len(metaItem.Key) > 255 { + logger.Warningf("truncated meta %s: key too long", metaItem.Key) - events, err = c.Ent.Event.CreateBulk(eventBulk...).Save(ctx) - if err != nil { - return nil, errors.Wrapf(BulkError, "creating alert events: %s", err) - } + key = key[:255] } - if len(alertItem.Meta) > 0 { - metaBulk := make([]*ent.MetaCreate, len(alertItem.Meta)) + metaBulk[i] = client.Meta.Create(). + SetKey(key). + SetValue(value) + } - for i, metaItem := range alertItem.Meta { - key := metaItem.Key - value := metaItem.Value + return client.Meta.CreateBulk(metaBulk...).Save(ctx) +} - if len(metaItem.Value) > 4095 { - c.Log.Warningf("truncated meta %s: value too long", metaItem.Key) +func buildDecisions(ctx context.Context, logger log.FieldLogger, client *Client, alertItem *models.Alert, stopAtTime time.Time) ([]*ent.Decision, int, error) { + decisions := []*ent.Decision{} - value = value[:4095] - } + decisionChunks := slicetools.Chunks(alertItem.Decisions, client.decisionBulkSize) + for _, decisionChunk := range decisionChunks { + decisionRet, err := client.createDecisionChunk(ctx, *alertItem.Simulated, stopAtTime, decisionChunk) + if err != nil { + return nil, 0, fmt.Errorf("creating alert decisions: %w", err) + } - if len(metaItem.Key) > 255 { - c.Log.Warningf("truncated meta %s: key too long", metaItem.Key) + decisions = append(decisions, decisionRet...) 
+ } - key = key[:255] - } + discarded := len(alertItem.Decisions) - len(decisions) + if discarded > 0 { + logger.Warningf("discarded %d decisions for %s", discarded, alertItem.UUID) + } - metaBulk[i] = c.Ent.Meta.Create(). - SetKey(key). - SetValue(value) - } + return decisions, discarded, nil +} - metas, err = c.Ent.Meta.CreateBulk(metaBulk...).Save(ctx) - if err != nil { - c.Log.Warningf("error creating alert meta: %s", err) - } +func retryOnBusy(fn func() error) error { + for retry := range maxLockRetries { + err := fn() + if err == nil { + return nil + } + var sqliteErr sqlite3.Error + if errors.As(err, &sqliteErr) && sqliteErr.Code == sqlite3.ErrBusy { + // sqlite3.Error{ + // Code: 5, + // ExtendedCode: 5, + // SystemErrno: 0, + // err: "database is locked", + // } + log.Warningf("while updating decisions, sqlite3.ErrBusy: %s, retry %d of %d", err, retry, maxLockRetries) + time.Sleep(1 * time.Second) + + continue } - decisions := []*ent.Decision{} + return err + } - decisionChunks := slicetools.Chunks(alertItem.Decisions, c.decisionBulkSize) - for _, decisionChunk := range decisionChunks { - decisionRet, err := c.createDecisionChunk(ctx, *alertItem.Simulated, stopAtTime, decisionChunk) - if err != nil { - return nil, fmt.Errorf("creating alert decisions: %w", err) + return fmt.Errorf("exceeded %d busy retries", maxLockRetries) +} + +func saveAlerts(ctx context.Context, c *Client, alertBuilders []*ent.AlertCreate, alertDecisions [][]*ent.Decision) ([]string, error) { + alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(ctx) + if err != nil { + return nil, errors.Wrapf(BulkError, "bulk creating alert : %s", err) + } + + ret := make([]string, len(alertsCreateBulk)) + for i, a := range alertsCreateBulk { + ret[i] = strconv.Itoa(a.ID) + + d := alertDecisions[i] + decisionsChunk := slicetools.Chunks(d, c.decisionBulkSize) + + for _, d2 := range decisionsChunk { + if err := retryOnBusy(func() error { + _, err := 
c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(ctx) + return err + }); err != nil { + return nil, fmt.Errorf("attach decisions to alert %d: %w", a.ID, err) } + } + } + + return ret, nil +} + +func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner *ent.Machine, alerts []*models.Alert) ([]string, error) { + alertBuilders := []*ent.AlertCreate{} + alertDecisions := [][]*ent.Decision{} + + for _, alertItem := range alerts { + var err error - decisions = append(decisions, decisionRet...) + startAtTime, stopAtTime := parseAlertTimes(alertItem, c.Log) + + /*display proper alert in logs*/ + for _, disp := range alertItem.FormatAsStrings(machineID, log.StandardLogger()) { + c.Log.Info(disp) } - discarded := len(alertItem.Decisions) - len(decisions) - if discarded > 0 { - c.Log.Warningf("discarded %d decisions for %s", discarded, alertItem.UUID) + events, err := buildEventCreates(ctx, c.Log, c.Ent, machineID, alertItem) + if err != nil { + return nil, fmt.Errorf("building events for alert %s: %w", alertItem.UUID, err) + } + + metas, err := buildMetaCreates(ctx, c.Log, c.Ent, alertItem) + if err != nil { + c.Log.Warningf("error creating alert meta: %s", err) + } + + decisions, discardCount, err := buildDecisions(ctx, c.Log, c, alertItem, stopAtTime) + if err != nil { + return nil, fmt.Errorf("building decisions for alert %s: %w", alertItem.UUID, err) } // if all decisions were discarded, discard the alert too - if discarded > 0 && len(decisions) == 0 { - c.Log.Warningf("dropping alert %s with invalid decisions", alertItem.UUID) + if discardCount > 0 && len(decisions) == 0 { + c.Log.Warningf("dropping alert %s: all decisions invalid", alertItem.UUID) continue - } + } alertBuilder := c.Ent.Alert. Create(). 
@@ -620,51 +698,12 @@ func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner * return nil, nil } - alertsCreateBulk, err := c.Ent.Alert.CreateBulk(alertBuilders...).Save(ctx) + // Save alerts, then attach decisions with retry logic + ids, err := saveAlerts(ctx, c, alertBuilders, alertDecisions) if err != nil { - return nil, errors.Wrapf(BulkError, "bulk creating alert : %s", err) - } - - ret := make([]string, len(alertsCreateBulk)) - for i, a := range alertsCreateBulk { - ret[i] = strconv.Itoa(a.ID) - - d := alertDecisions[i] - decisionsChunk := slicetools.Chunks(d, c.decisionBulkSize) - - for _, d2 := range decisionsChunk { - retry := 0 - - for retry < maxLockRetries { - // so much for the happy path... but sqlite3 errors work differently - _, err := c.Ent.Alert.Update().Where(alert.IDEQ(a.ID)).AddDecisions(d2...).Save(ctx) - if err == nil { - break - } - - var sqliteErr sqlite3.Error - if errors.As(err, &sqliteErr) { - if sqliteErr.Code == sqlite3.ErrBusy { - // sqlite3.Error{ - // Code: 5, - // ExtendedCode: 5, - // SystemErrno: 0, - // err: "database is locked", - // } - retry++ - log.Warningf("while updating decisions, sqlite3.ErrBusy: %s, retry %d of %d", err, retry, maxLockRetries) - time.Sleep(1 * time.Second) - - continue - } - } - - return nil, fmt.Errorf("error while updating decisions: %w", err) - } - } + return nil, err } - - return ret, nil + return ids, nil } func (c *Client) CreateAlert(ctx context.Context, machineID string, alertList []*models.Alert) ([]string, error) { From f8f0b2a211f05927669ccadaac56ff855dad59ae Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 5 May 2025 15:12:29 +0200 Subject: [PATCH 512/581] improve support for parsing time durations with 'day' units (#3599) * custom duration type for "cscli decisions list", "cscli alerts list" * custom duration type for "cscli allowlist add" * custom duration type for "cscli machines prune" * custom duration type for "cscli 
bouncers prune" * replace old function ParseDuration * use custom duration type in expr helpers * update dependency * lint * test fix * support days in 'metrics_max_age' * DurationWithDays for 'max_age' --- cmd/crowdsec-cli/clialert/alerts.go | 49 ++++-------------- cmd/crowdsec-cli/cliallowlists/allowlists.go | 17 ++---- cmd/crowdsec-cli/clibouncer/prune.go | 15 +++--- cmd/crowdsec-cli/clidecision/decisions.go | 38 +++----------- cmd/crowdsec-cli/climachine/prune.go | 11 ++-- go.mod | 10 ++-- go.sum | 15 +++--- pkg/apiclient/alerts_service.go | 54 ++++++++++---------- pkg/apiserver/apiserver_test.go | 17 +++--- pkg/csconfig/database.go | 9 ++-- pkg/csprofiles/csprofiles.go | 9 ++-- pkg/database/alertfilter.go | 6 ++- pkg/database/alerts.go | 3 +- pkg/database/flush.go | 33 +++++------- pkg/database/utils.go | 28 ---------- pkg/exprhelpers/helpers.go | 4 +- test/bats/10_bouncers.bats | 7 +++ test/bats/30_machines.bats | 7 +++ test/bats/80_alerts.bats | 9 ++++ test/bats/90_decisions.bats | 8 ++- test/bats/cscli-allowlists.bats | 6 +-- 21 files changed, 153 insertions(+), 202 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 3e69acfec04..88e870ee768 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -12,6 +12,7 @@ import ( "strconv" "strings" "text/template" + "time" "github.com/fatih/color" "github.com/go-openapi/strfmt" @@ -19,6 +20,7 @@ import ( "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/go-cs-lib/cstime" "github.com/crowdsecurity/go-cs-lib/maptools" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" @@ -247,34 +249,6 @@ func (cli *cliAlerts) list(ctx context.Context, alertListFilter apiclient.Alerts alertListFilter.Limit = limit } - if *alertListFilter.Until == "" { - alertListFilter.Until = nil - } else if strings.HasSuffix(*alertListFilter.Until, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the 
user's life easier*/ - realDuration := strings.TrimSuffix(*alertListFilter.Until, "d") - - days, err := strconv.Atoi(realDuration) - if err != nil { - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Until) - } - - *alertListFilter.Until = fmt.Sprintf("%d%s", days*24, "h") - } - - if *alertListFilter.Since == "" { - alertListFilter.Since = nil - } else if strings.HasSuffix(*alertListFilter.Since, "d") { - // time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier - realDuration := strings.TrimSuffix(*alertListFilter.Since, "d") - - days, err := strconv.Atoi(realDuration) - if err != nil { - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *alertListFilter.Since) - } - - *alertListFilter.Since = fmt.Sprintf("%d%s", days*24, "h") - } - if *alertListFilter.IncludeCAPI { *alertListFilter.Limit = 0 } @@ -330,8 +304,8 @@ func (cli *cliAlerts) newListCmd() *cobra.Command { ScenarioEquals: new(string), IPEquals: new(string), RangeEquals: new(string), - Since: new(string), - Until: new(string), + Since: cstime.DurationWithDays(0), + Until: cstime.DurationWithDays(0), TypeEquals: new(string), IncludeCAPI: new(bool), OriginEquals: new(string), @@ -362,8 +336,8 @@ cscli alerts list --type ban`, flags := cmd.Flags() flags.SortFlags = false flags.BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") - flags.StringVar(alertListFilter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") - flags.StringVar(alertListFilter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") + flags.Var(&alertListFilter.Until, "until", "restrict to alerts older than until (ie. 4h, 30d)") + flags.Var(&alertListFilter.Since, "since", "restrict to alerts newer than since (ie. 
4h, 30d)") flags.StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") flags.StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") flags.StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value )") @@ -560,10 +534,9 @@ func (cli *cliAlerts) newInspectCmd() *cobra.Command { } func (cli *cliAlerts) newFlushCmd() *cobra.Command { - var ( - maxItems int - maxAge string - ) + var maxItems int + + maxAge := cstime.DurationWithDays(7*24*time.Hour) cmd := &cobra.Command{ Use: `flush`, @@ -584,7 +557,7 @@ func (cli *cliAlerts) newFlushCmd() *cobra.Command { return err } log.Info("Flushing alerts. !! This may take a long time !!") - err = db.FlushAlerts(ctx, maxAge, maxItems) + err = db.FlushAlerts(ctx, time.Duration(maxAge), maxItems) if err != nil { return fmt.Errorf("unable to flush alerts: %w", err) } @@ -596,7 +569,7 @@ func (cli *cliAlerts) newFlushCmd() *cobra.Command { cmd.Flags().SortFlags = false cmd.Flags().IntVar(&maxItems, "max-items", 5000, "Maximum number of alert items to keep in the database") - cmd.Flags().StringVar(&maxAge, "max-age", "7d", "Maximum age of alert items to keep in the database") + cmd.Flags().Var(&maxAge, "max-age", "Maximum age of alert items to keep in the database") return cmd } diff --git a/cmd/crowdsec-cli/cliallowlists/allowlists.go b/cmd/crowdsec-cli/cliallowlists/allowlists.go index be4b966be9f..972ee1b4704 100644 --- a/cmd/crowdsec-cli/cliallowlists/allowlists.go +++ b/cmd/crowdsec-cli/cliallowlists/allowlists.go @@ -399,8 +399,8 @@ func (cli *cliAllowLists) delete(ctx context.Context, db *database.Client, name func (cli *cliAllowLists) newAddCmd() *cobra.Command { var ( - expirationStr string - comment string + expiration cstime.DurationWithDays + comment string ) cmd := &cobra.Command{ @@ -424,25 +424,16 @@ func (cli 
*cliAllowLists) newAddCmd() *cobra.Command { return err } - var expiration time.Duration - - if expirationStr != "" { - expiration, err = cstime.ParseDuration(expirationStr) - if err != nil { - return err - } - } - name := args[0] values := args[1:] - return cli.add(ctx, db, name, values, expiration, comment) + return cli.add(ctx, db, name, values, time.Duration(expiration), comment) }, } flags := cmd.Flags() - flags.StringVarP(&expirationStr, "expiration", "e", "", "expiration duration") + flags.VarP(&expiration, "expiration", "e", "expiration duration") flags.StringVarP(&comment, "comment", "d", "", "comment for the value") return cmd diff --git a/cmd/crowdsec-cli/clibouncer/prune.go b/cmd/crowdsec-cli/clibouncer/prune.go index 3c27efe394d..cf70e63f485 100644 --- a/cmd/crowdsec-cli/clibouncer/prune.go +++ b/cmd/crowdsec-cli/clibouncer/prune.go @@ -9,10 +9,14 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/cstime" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" ) +const defaultPruneDuration = 60 * time.Minute + func (cli *cliBouncers) prune(ctx context.Context, duration time.Duration, force bool) error { if duration < 2*time.Minute { if yes, err := ask.YesNo( @@ -59,12 +63,9 @@ func (cli *cliBouncers) prune(ctx context.Context, duration time.Duration, force } func (cli *cliBouncers) newPruneCmd() *cobra.Command { - var ( - duration time.Duration - force bool - ) + var force bool - const defaultDuration = 60 * time.Minute + duration := cstime.DurationWithDays(defaultPruneDuration) cmd := &cobra.Command{ Use: "prune", @@ -74,12 +75,12 @@ func (cli *cliBouncers) newPruneCmd() *cobra.Command { Example: `cscli bouncers prune -d 45m cscli bouncers prune -d 45m --force`, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.prune(cmd.Context(), duration, force) + return cli.prune(cmd.Context(), time.Duration(duration), force) }, } flags := 
cmd.Flags() - flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since last pull") + flags.VarP(&duration, "duration", "d", "duration of time since last pull") flags.BoolVar(&force, "force", false, "force prune without asking for confirmation") return cmd diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index 91f39f421e5..c141f128791 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -23,6 +23,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" + + "github.com/crowdsecurity/go-cs-lib/cstime" ) type configGetter func() *csconfig.Config @@ -184,34 +186,8 @@ func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOp if noSimu != nil && *noSimu { filter.IncludeSimulated = new(bool) } - /* nullify the empty entries to avoid bad filter */ - if *filter.Until == "" { - filter.Until = nil - } else if strings.HasSuffix(*filter.Until, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*filter.Until, "d") - - days, err := strconv.Atoi(realDuration) - if err != nil { - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Until) - } - - *filter.Until = fmt.Sprintf("%d%s", days*24, "h") - } - if *filter.Since == "" { - filter.Since = nil - } else if strings.HasSuffix(*filter.Since, "d") { - /*time.ParseDuration support hours 'h' as bigger unit, let's make the user's life easier*/ - realDuration := strings.TrimSuffix(*filter.Since, "d") - - days, err := strconv.Atoi(realDuration) - if err != nil { - return fmt.Errorf("can't parse duration %s, valid durations format: 1d, 4h, 4h15m", *filter.Since) - } - - *filter.Since = fmt.Sprintf("%d%s", days*24, "h") - } + /* nullify the empty entries to avoid bad 
filter */ if *filter.IncludeCAPI { *filter.Limit = 0 @@ -270,8 +246,8 @@ func (cli *cliDecisions) newListCmd() *cobra.Command { OriginEquals: new(string), IPEquals: new(string), RangeEquals: new(string), - Since: new(string), - Until: new(string), + Since: cstime.DurationWithDays(0), + Until: cstime.DurationWithDays(0), TypeEquals: new(string), IncludeCAPI: new(bool), Limit: new(int), @@ -300,8 +276,8 @@ cscli decisions list --origin lists --scenario list_name flags := cmd.Flags() flags.SortFlags = false flags.BoolVarP(filter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") - flags.StringVar(filter.Since, "since", "", "restrict to alerts newer than since (ie. 4h, 30d)") - flags.StringVar(filter.Until, "until", "", "restrict to alerts older than until (ie. 4h, 30d)") + flags.Var(&filter.Since, "since", "restrict to alerts newer than since (ie. 4h, 30d)") + flags.Var(&filter.Until, "until", "restrict to alerts older than until (ie. 4h, 30d)") flags.StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") flags.StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. 
ip,range,session)") flags.StringVar(filter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) diff --git a/cmd/crowdsec-cli/climachine/prune.go b/cmd/crowdsec-cli/climachine/prune.go index 4054305f48f..be910242831 100644 --- a/cmd/crowdsec-cli/climachine/prune.go +++ b/cmd/crowdsec-cli/climachine/prune.go @@ -9,11 +9,15 @@ import ( "github.com/fatih/color" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/cstime" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/ask" "github.com/crowdsecurity/crowdsec/pkg/database/ent" ) +const defaultPruneDuration = 10 * time.Minute + func (cli *cliMachines) prune(ctx context.Context, duration time.Duration, notValidOnly bool, force bool) error { if duration < 2*time.Minute && !notValidOnly { if yes, err := ask.YesNo( @@ -67,12 +71,11 @@ func (cli *cliMachines) prune(ctx context.Context, duration time.Duration, notVa func (cli *cliMachines) newPruneCmd() *cobra.Command { var ( - duration time.Duration notValidOnly bool force bool ) - const defaultDuration = 10 * time.Minute + duration := cstime.DurationWithDays(defaultPruneDuration) cmd := &cobra.Command{ Use: "prune", @@ -84,12 +87,12 @@ cscli machines prune --not-validated-only --force`, Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.prune(cmd.Context(), duration, notValidOnly, force) + return cli.prune(cmd.Context(), time.Duration(duration), notValidOnly, force) }, } flags := cmd.Flags() - flags.DurationVarP(&duration, "duration", "d", defaultDuration, "duration of time since validated machine last heartbeat") + flags.VarP(&duration, "duration", "d", "duration of time since validated machine last heartbeat") flags.BoolVar(¬ValidOnly, "not-validated-only", false, "only prune machines that are not validated") flags.BoolVar(&force, "force", false, "force prune 
without asking for confirmation") diff --git a/go.mod b/go.mod index 81c41acc9ed..6c027fec445 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/creack/pty v1.1.21 // indirect github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.18 + github.com/crowdsecurity/go-cs-lib v0.0.19 github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc @@ -87,8 +87,8 @@ require ( github.com/shirou/gopsutil/v3 v3.23.5 github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.16.0 - github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/cobra v1.9.1 + github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/testify v1.10.0 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 @@ -100,7 +100,7 @@ require ( go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/crypto v0.36.0 golang.org/x/mod v0.23.0 - golang.org/x/net v0.38.0 // indirect + golang.org/x/net v0.38.0 golang.org/x/sync v0.12.0 golang.org/x/sys v0.31.0 golang.org/x/text v0.23.0 @@ -131,7 +131,7 @@ require ( github.com/bytedance/sonic/loader v0.2.1 // indirect github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.7 // indirect github.com/gin-contrib/sse v0.1.0 // indirect diff --git a/go.sum b/go.sum index 133e1e9e065..ece7d98e1e5 100644 --- a/go.sum +++ b/go.sum @@ -100,8 +100,8 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 
v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= @@ -111,8 +111,8 @@ github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a h1:2Nyr+47 github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a/go.mod h1:xSaXWOhFMSbrV8qOOfBKAyw3aOqfwaSaOy5BgSF8XlA= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.18 h1:GNyvaag5MXfuapIy4E30pIOvIE5AyHoanJBNSMA1cmE= -github.com/crowdsecurity/go-cs-lib v0.0.18/go.mod h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= +github.com/crowdsecurity/go-cs-lib v0.0.19 h1:wA4O8hGrEntTGn7eZTJqnQ3mrAje5JvQAj8DNbe5IZg= +github.com/crowdsecurity/go-cs-lib v0.0.19/go.mod h1:hz2FOHFXc0vWzH78uxo2VebtPQ9Snkbdzy3TMA20tVQ= github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= @@ -660,11 +660,12 @@ github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= 
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= diff --git a/pkg/apiclient/alerts_service.go b/pkg/apiclient/alerts_service.go index 1f84862a811..b44049af406 100644 --- a/pkg/apiclient/alerts_service.go +++ b/pkg/apiclient/alerts_service.go @@ -7,42 +7,44 @@ import ( qs "github.com/google/go-querystring/query" + "github.com/crowdsecurity/go-cs-lib/cstime" + "github.com/crowdsecurity/crowdsec/pkg/models" ) type AlertsService service type AlertsListOpts struct { - ScopeEquals *string `url:"scope,omitempty"` - ValueEquals *string `url:"value,omitempty"` - ScenarioEquals *string `url:"scenario,omitempty"` - IPEquals *string `url:"ip,omitempty"` - RangeEquals *string `url:"range,omitempty"` - OriginEquals *string `url:"origin,omitempty"` - Since *string `url:"since,omitempty"` - 
TypeEquals *string `url:"decision_type,omitempty"` - Until *string `url:"until,omitempty"` - IncludeSimulated *bool `url:"simulated,omitempty"` - ActiveDecisionEquals *bool `url:"has_active_decision,omitempty"` - IncludeCAPI *bool `url:"include_capi,omitempty"` - Limit *int `url:"limit,omitempty"` - Contains *bool `url:"contains,omitempty"` + ScopeEquals *string `url:"scope,omitempty"` + ValueEquals *string `url:"value,omitempty"` + ScenarioEquals *string `url:"scenario,omitempty"` + IPEquals *string `url:"ip,omitempty"` + RangeEquals *string `url:"range,omitempty"` + OriginEquals *string `url:"origin,omitempty"` + Since cstime.DurationWithDays `url:"since,omitempty"` + TypeEquals *string `url:"decision_type,omitempty"` + Until cstime.DurationWithDays `url:"until,omitempty"` + IncludeSimulated *bool `url:"simulated,omitempty"` + ActiveDecisionEquals *bool `url:"has_active_decision,omitempty"` + IncludeCAPI *bool `url:"include_capi,omitempty"` + Limit *int `url:"limit,omitempty"` + Contains *bool `url:"contains,omitempty"` ListOpts } type AlertsDeleteOpts struct { - ScopeEquals *string `url:"scope,omitempty"` - ValueEquals *string `url:"value,omitempty"` - ScenarioEquals *string `url:"scenario,omitempty"` - IPEquals *string `url:"ip,omitempty"` - RangeEquals *string `url:"range,omitempty"` - Since *string `url:"since,omitempty"` - Until *string `url:"until,omitempty"` - OriginEquals *string `url:"origin,omitempty"` - ActiveDecisionEquals *bool `url:"has_active_decision,omitempty"` - SourceEquals *string `url:"alert_source,omitempty"` - Contains *bool `url:"contains,omitempty"` - Limit *int `url:"limit,omitempty"` + ScopeEquals *string `url:"scope,omitempty"` + ValueEquals *string `url:"value,omitempty"` + ScenarioEquals *string `url:"scenario,omitempty"` + IPEquals *string `url:"ip,omitempty"` + RangeEquals *string `url:"range,omitempty"` + Since cstime.DurationWithDays `url:"since,omitempty"` + Until cstime.DurationWithDays `url:"until,omitempty"` + OriginEquals 
*string `url:"origin,omitempty"` + ActiveDecisionEquals *bool `url:"has_active_decision,omitempty"` + SourceEquals *string `url:"alert_source,omitempty"` + Contains *bool `url:"contains,omitempty"` + Limit *int `url:"limit,omitempty"` ListOpts } diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 01a8588dfd4..6cb0e3d546f 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -18,6 +18,7 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" + "github.com/crowdsecurity/go-cs-lib/cstime" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/go-cs-lib/version" @@ -47,9 +48,9 @@ var ( func LoadTestConfig(t *testing.T) csconfig.Config { config := csconfig.Config{} - maxAge := "1h" + maxAge := cstime.DurationWithDays(1*time.Hour) flushConfig := csconfig.FlushDBCfg{ - MaxAge: &maxAge, + MaxAge: maxAge, } tempDir, _ := os.MkdirTemp("", "crowdsec_tests") @@ -97,9 +98,9 @@ func LoadTestConfig(t *testing.T) csconfig.Config { func LoadTestConfigForwardedFor(t *testing.T) csconfig.Config { config := csconfig.Config{} - maxAge := "1h" + maxAge := cstime.DurationWithDays(1*time.Hour) flushConfig := csconfig.FlushDBCfg{ - MaxAge: &maxAge, + MaxAge: maxAge, } tempDir, _ := os.MkdirTemp("", "crowdsec_tests") @@ -363,9 +364,9 @@ func TestLoggingDebugToFileConfig(t *testing.T) { ctx := t.Context() /*declare settings*/ - maxAge := "1h" + maxAge := cstime.DurationWithDays(1*time.Hour) flushConfig := csconfig.FlushDBCfg{ - MaxAge: &maxAge, + MaxAge: maxAge, } tempDir, _ := os.MkdirTemp("", "crowdsec_tests") @@ -416,9 +417,9 @@ func TestLoggingErrorToFileConfig(t *testing.T) { ctx := t.Context() /*declare settings*/ - maxAge := "1h" + maxAge := cstime.DurationWithDays(1*time.Hour) flushConfig := csconfig.FlushDBCfg{ - MaxAge: &maxAge, + MaxAge: maxAge, } tempDir, _ := os.MkdirTemp("", "crowdsec_tests") diff --git a/pkg/csconfig/database.go b/pkg/csconfig/database.go index 
26150eb2ea4..f34b4088917 100644 --- a/pkg/csconfig/database.go +++ b/pkg/csconfig/database.go @@ -14,6 +14,7 @@ import ( "github.com/go-sql-driver/mysql" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/cstime" "github.com/crowdsecurity/go-cs-lib/ptr" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -58,10 +59,10 @@ type AuthGCCfg struct { type FlushDBCfg struct { MaxItems *int `yaml:"max_items,omitempty"` // We could unmarshal as time.Duration, but alert filters right now are a map of strings - MaxAge *string `yaml:"max_age,omitempty"` - BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` - AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` - MetricsMaxAge *time.Duration `yaml:"metrics_max_age,omitempty"` + MaxAge cstime.DurationWithDays `yaml:"max_age,omitempty"` + BouncersGC *AuthGCCfg `yaml:"bouncers_autodelete,omitempty"` + AgentsGC *AuthGCCfg `yaml:"agents_autodelete,omitempty"` + MetricsMaxAge cstime.DurationWithDays `yaml:"metrics_max_age,omitempty"` } func (c *Config) LoadDBConfig(inCli bool) error { diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index 0b2ae02805d..3081f4d3102 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -7,12 +7,13 @@ import ( "github.com/expr-lang/expr/vm" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/cstime" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/csconfig" - utils "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/ptr" ) type Runtime struct { @@ -84,7 +85,7 @@ func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { duration = defaultDuration } - if _, err := utils.ParseDuration(duration); err != nil { + if _, err := cstime.ParseDurationWithDays(duration); err != nil { return 
nil, fmt.Errorf("error parsing duration '%s' of %s: %w", duration, profile.Name, err) } } @@ -136,7 +137,7 @@ func (profile *Runtime) GenerateDecisionFromProfile(alert *models.Alert) ([]*mod profile.Logger.Warningf("Failed to run duration_expr : %v", err) } else { durationStr := fmt.Sprint(duration) - if _, err := utils.ParseDuration(durationStr); err != nil { + if _, err := cstime.ParseDurationWithDays(durationStr); err != nil { profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration) } else { *decision.Duration = durationStr diff --git a/pkg/database/alertfilter.go b/pkg/database/alertfilter.go index 6ff2ab99a7f..d966901401c 100644 --- a/pkg/database/alertfilter.go +++ b/pkg/database/alertfilter.go @@ -9,6 +9,8 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/cstime" + "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" @@ -40,7 +42,9 @@ func handleScopeFilter(scope string, predicates *[]predicate.Alert) { } func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { - duration, err := ParseDuration(value) + // crowsdec now always sends duration without days, but we allow them for + // compatibility with other tools + duration, err := cstime.ParseDurationWithDays(value) if err != nil { return fmt.Errorf("while parsing duration: %w", err) } diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index 6f27b5afed9..2fcbb8a5f49 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -13,6 +13,7 @@ import ( "github.com/pkg/errors" log "github.com/sirupsen/logrus" + "github.com/crowdsecurity/go-cs-lib/cstime" "github.com/crowdsecurity/go-cs-lib/slicetools" "github.com/crowdsecurity/crowdsec/pkg/database/ent" @@ -382,7 +383,7 @@ func (c *Client) createDecisionChunk(ctx context.Context, simulated bool, stopAt sz int ) - duration, 
err := ParseDuration(*decisionItem.Duration) + duration, err := cstime.ParseDurationWithDays(*decisionItem.Duration) if err != nil { return nil, errors.Wrapf(ParseDurationFail, "decision duration '%+v' : %s", *decisionItem.Duration, err) } diff --git a/pkg/database/flush.go b/pkg/database/flush.go index e1b5f9f4471..e508c53c0c7 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -9,7 +9,7 @@ import ( "github.com/go-co-op/gocron" log "github.com/sirupsen/logrus" - "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/go-cs-lib/cstime" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" @@ -30,7 +30,6 @@ const ( func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.FlushDBCfg) (*gocron.Scheduler, error) { maxItems := 0 - maxAge := "" if config.MaxItems != nil && *config.MaxItems <= 0 { return nil, errors.New("max_items can't be zero or negative") @@ -40,14 +39,10 @@ func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.Flush maxItems = *config.MaxItems } - if config.MaxAge != nil && *config.MaxAge != "" { - maxAge = *config.MaxAge - } - // Init & Start cronjob every minute for alerts scheduler := gocron.NewScheduler(time.UTC) - job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, ctx, maxAge, maxItems) + job, err := scheduler.Every(1).Minute().Do(c.FlushAlerts, ctx, time.Duration(config.MaxAge), maxItems) if err != nil { return nil, fmt.Errorf("while starting FlushAlerts scheduler: %w", err) } @@ -56,7 +51,7 @@ func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.Flush // Init & Start cronjob every hour for bouncers/agents if config.AgentsGC != nil { if config.AgentsGC.Cert != nil { - duration, err := ParseDuration(*config.AgentsGC.Cert) + duration, err := cstime.ParseDurationWithDays(*config.AgentsGC.Cert) if err != nil { return nil, fmt.Errorf("while parsing agents cert auto-delete duration: %w", err) } @@ -65,7 
+60,7 @@ func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.Flush } if config.AgentsGC.LoginPassword != nil { - duration, err := ParseDuration(*config.AgentsGC.LoginPassword) + duration, err := cstime.ParseDurationWithDays(*config.AgentsGC.LoginPassword) if err != nil { return nil, fmt.Errorf("while parsing agents login/password auto-delete duration: %w", err) } @@ -80,7 +75,7 @@ func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.Flush if config.BouncersGC != nil { if config.BouncersGC.Cert != nil { - duration, err := ParseDuration(*config.BouncersGC.Cert) + duration, err := cstime.ParseDurationWithDays(*config.BouncersGC.Cert) if err != nil { return nil, fmt.Errorf("while parsing bouncers cert auto-delete duration: %w", err) } @@ -89,7 +84,7 @@ func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.Flush } if config.BouncersGC.Api != nil { - duration, err := ParseDuration(*config.BouncersGC.Api) + duration, err := cstime.ParseDurationWithDays(*config.BouncersGC.Api) if err != nil { return nil, fmt.Errorf("while parsing bouncers api auto-delete duration: %w", err) } @@ -109,7 +104,7 @@ func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.Flush baJob.SingletonMode() - metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, ctx, config.MetricsMaxAge) + metricsJob, err := scheduler.Every(flushInterval).Do(c.flushMetrics, ctx, time.Duration(config.MetricsMaxAge)) if err != nil { return nil, fmt.Errorf("while starting flushMetrics scheduler: %w", err) } @@ -129,15 +124,15 @@ func (c *Client) StartFlushScheduler(ctx context.Context, config *csconfig.Flush } // flushMetrics deletes metrics older than maxAge, regardless if they have been pushed to CAPI or not -func (c *Client) flushMetrics(ctx context.Context, maxAge *time.Duration) { - if maxAge == nil { - maxAge = ptr.Of(defaultMetricsMaxAge) +func (c *Client) flushMetrics(ctx context.Context, maxAge time.Duration) { + 
if maxAge == 0 { + maxAge = defaultMetricsMaxAge } c.Log.Debugf("flushing metrics older than %s", maxAge) deleted, err := c.Ent.Metric.Delete().Where( - metric.ReceivedAtLTE(time.Now().UTC().Add(-*maxAge)), + metric.ReceivedAtLTE(time.Now().UTC().Add(-maxAge)), ).Exec(ctx) if err != nil { c.Log.Errorf("while flushing metrics: %s", err) @@ -230,7 +225,7 @@ func (c *Client) FlushAgentsAndBouncers(ctx context.Context, agentsCfg *csconfig return nil } -func (c *Client) FlushAlerts(ctx context.Context, maxAge string, maxItems int) error { +func (c *Client) FlushAlerts(ctx context.Context, maxAge time.Duration, maxItems int) error { var ( deletedByAge int deletedByNbItem int @@ -255,9 +250,9 @@ func (c *Client) FlushAlerts(ctx context.Context, maxAge string, maxItems int) e c.Log.Debugf("FlushAlerts (Total alerts): %d", totalAlerts) - if maxAge != "" { + if maxAge != 0 { filter := map[string][]string{ - "created_before": {maxAge}, + "created_before": {maxAge.String()}, } nbDeleted, err := c.DeleteAlertWithFilter(ctx, filter) diff --git a/pkg/database/utils.go b/pkg/database/utils.go index 8148df56f24..9b8d20dcf92 100644 --- a/pkg/database/utils.go +++ b/pkg/database/utils.go @@ -4,9 +4,6 @@ import ( "encoding/binary" "fmt" "net" - "strconv" - "strings" - "time" ) func IP2Int(ip net.IP) uint32 { @@ -69,28 +66,3 @@ func GetIpsFromIpRange(host string) (int64, int64, error) { return ipStart, ipEnd, nil } - -func ParseDuration(d string) (time.Duration, error) { - durationStr := d - - if strings.HasSuffix(d, "d") { - days := strings.Split(d, "d")[0] - if days == "" { - return 0, fmt.Errorf("'%s' can't be parsed as duration", d) - } - - daysInt, err := strconv.Atoi(days) - if err != nil { - return 0, err - } - - durationStr = strconv.Itoa(daysInt*24) + "h" - } - - duration, err := time.ParseDuration(durationStr) - if err != nil { - return 0, err - } - - return duration, nil -} diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 22bca7d0600..6c99c53dd98 
100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -29,6 +29,8 @@ import ( "github.com/umahmood/haversine" "github.com/wasilibs/go-re2" + "github.com/crowdsecurity/go-cs-lib/cstime" + "github.com/crowdsecurity/crowdsec/pkg/cache" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -661,7 +663,7 @@ func GetDecisionsSinceCount(params ...any) (any, error) { return 0, nil } - sinceDuration, err := time.ParseDuration(since) + sinceDuration, err := cstime.ParseDurationWithDays(since) if err != nil { log.Errorf("Failed to parse since parameter '%s' : %s", since, err) return 0, nil diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index 382205796c8..9381a452877 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -133,6 +133,13 @@ teardown() { } @test "cscli bouncers prune" { + rune -1 cscli bouncers prune --duration foobar + assert_stderr 'Error: invalid argument "foobar" for "-d, --duration" flag: time: invalid duration "foobar"' + + # duration takes days as well + rune -0 cscli bouncers prune --duration 1d30m + assert_output 'No bouncers to prune.' + rune -0 cscli bouncers prune assert_output 'No bouncers to prune.' rune -0 cscli bouncers add ciTestBouncer diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 989cd5fe646..97f8584a1f6 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -124,12 +124,19 @@ teardown() { @test "cscli machines prune" { rune -0 cscli metrics + rune -1 cscli machines prune --duration foobar + assert_stderr 'Error: invalid argument "foobar" for "-d, --duration" flag: time: invalid duration "foobar"' + # if the fixture has been created some time ago, # the machines may be old enough to trigger a user prompt. # make sure the prune duration is high enough. rune -0 cscli machines prune --duration 1000000h assert_output 'No machines to prune.' 
+ # duration takes days as well + rune -0 cscli machines prune --duration 1000d30m + assert_output 'No machines to prune.' + rune -0 cscli machines list -o json rune -0 jq -r '.[-1].machineId' <(output) rune -0 cscli machines delete "$output" diff --git a/test/bats/80_alerts.bats b/test/bats/80_alerts.bats index 78c4e67a704..0fadea984d5 100644 --- a/test/bats/80_alerts.bats +++ b/test/bats/80_alerts.bats @@ -43,6 +43,15 @@ teardown() { assert_output --regexp " githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})? " } +@test "cscli alerts list, accept duration parameters with days" { + rune -1 cscli alerts list --until toto + assert_stderr 'Error: invalid argument "toto" for "--until" flag: time: invalid duration "toto"' + rune -0 cscli alerts list --until 2d12h --debug + assert_stderr --partial "until=60h0m0s" + rune -0 cscli alerts list --since 2d12h --debug + assert_stderr --partial "since=60h0m0s" +} + @test "cscli alerts list, human/json/raw" { rune -0 cscli decisions add -i 10.20.30.40 -t ban diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index 54904896573..5ba29659ed0 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -54,9 +54,13 @@ teardown() { assert_output --regexp " githubciXXXXXXXXXXXXXXXXXXXXXXXX([a-zA-Z0-9]{16})? 
" } -@test "cscli decisions list, incorrect parameters" { +@test "cscli decisions list, accept duration parameters with days" { rune -1 cscli decisions list --until toto - assert_stderr 'Error: unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration "toto"' + assert_stderr 'Error: invalid argument "toto" for "--until" flag: time: invalid duration "toto"' + rune -0 cscli decisions list --until 2d12h --debug + assert_stderr --partial "until=60h0m0s" + rune -0 cscli decisions list --since 2d12h --debug + assert_stderr --partial "since=60h0m0s" } @test "cscli decisions import" { diff --git a/test/bats/cscli-allowlists.bats b/test/bats/cscli-allowlists.bats index 24810110dc0..e146e8750dc 100644 --- a/test/bats/cscli-allowlists.bats +++ b/test/bats/cscli-allowlists.bats @@ -119,14 +119,14 @@ teardown() { # comment and expiration are applied to all values rune -1 cscli allowlist add foo 10.10.10.10 10.20.30.40 -d comment -e toto - assert_stderr 'Error: time: invalid duration "toto"' + assert_stderr 'Error: invalid argument "toto" for "-e, --expiration" flag: time: invalid duration "toto"' refute_output rune -1 cscli allowlist add foo 10.10.10.10 10.20.30.40 -d comment -e '1 day' refute_output - assert_stderr 'Error: strconv.Atoi: parsing "1 ": invalid syntax' + assert_stderr 'Error: invalid argument "1 day" for "-e, --expiration" flag: invalid day value in duration "1 day"' - rune -0 cscli allowlist add foo 10.10.10.10 -d comment -e '1d' + rune -0 cscli allowlist add foo 10.10.10.10 -d comment -e '1d12h' assert_output 'added 1 values to allowlist foo' refute_stderr From 959b87211883119cb090b1820544a885448af786 Mon Sep 17 00:00:00 2001 From: blotus Date: Tue, 6 May 2025 14:10:30 +0200 Subject: [PATCH 513/581] allowlists: automatically expire current matching decisions on update (#3601) --- cmd/crowdsec-cli/cliallowlists/allowlists.go | 8 ++ pkg/apiserver/apic.go | 21 ++++- pkg/apiserver/papi_cmd.go | 8 ++ 
pkg/database/allowlists.go | 95 ++++++++++++++++++++ test/bats/cscli-allowlists.bats | 55 ++++++++++++ test/lib/config/config-global | 2 +- test/lib/config/config-local | 2 +- test/run-tests | 2 +- 8 files changed, 186 insertions(+), 7 deletions(-) diff --git a/cmd/crowdsec-cli/cliallowlists/allowlists.go b/cmd/crowdsec-cli/cliallowlists/allowlists.go index 972ee1b4704..bd68d5e53e7 100644 --- a/cmd/crowdsec-cli/cliallowlists/allowlists.go +++ b/cmd/crowdsec-cli/cliallowlists/allowlists.go @@ -488,6 +488,14 @@ func (cli *cliAllowLists) add(ctx context.Context, db *database.Client, name str fmt.Printf("added %d values to allowlist %s\n", added, name) } + deleted, err := db.ApplyAllowlistsToExistingDecisions(ctx) + if err != nil { + return fmt.Errorf("unable to apply allowlists to existing decisions: %w", err) + } + if deleted > 0 { + fmt.Printf("%d decisions deleted by allowlists\n", deleted) + } + return nil } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 0de773ab4d4..3295d8dde93 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -588,6 +588,8 @@ func fillAlertsWithDecisions(alerts []*models.Alert, decisions []*models.Decisio func (a *apic) PullTop(ctx context.Context, forcePull bool) error { var err error + hasPulledAllowlists := false + // A mutex with TryLock would be a bit simpler // But go does not guarantee that TryLock will be able to acquire the lock even if it is available select { @@ -649,7 +651,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { // process deleted decisions nbDeleted, err := a.HandleDeletedDecisionsV3(ctx, data.Deleted, deleteCounters) if err != nil { - return err + log.Errorf("could not delete decisions from CAPI: %s", err) } log.Printf("capi/community-blocklist : %d explicit deletions", nbDeleted) @@ -657,8 +659,9 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { // Update allowlists before processing decisions if data.Links != nil { if 
len(data.Links.Allowlists) > 0 { + hasPulledAllowlists = true if err := a.UpdateAllowlists(ctx, data.Links.Allowlists, forcePull); err != nil { - return fmt.Errorf("while updating allowlists: %w", err) + log.Errorf("could not update allowlists from CAPI: %s", err) } } } @@ -675,7 +678,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { err = a.SaveAlerts(ctx, alertsFromCapi, addCounters, deleteCounters) if err != nil { - return fmt.Errorf("while saving alerts: %w", err) + log.Errorf("could not save alert for CAPI pull: %s", err) } } else { if a.pullCommunity { @@ -689,11 +692,21 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { if data.Links != nil { if len(data.Links.Blocklists) > 0 { if err := a.UpdateBlocklists(ctx, data.Links.Blocklists, addCounters, forcePull); err != nil { - return fmt.Errorf("while updating blocklists: %w", err) + log.Errorf("could not update blocklists from CAPI: %s", err) } } } + if hasPulledAllowlists { + deleted, err := a.dbClient.ApplyAllowlistsToExistingDecisions(ctx) + if err != nil { + log.Errorf("could not apply allowlists to existing decisions: %s", err) + } + if deleted > 0 { + log.Infof("deleted %d decisions from allowlists", deleted) + } + } + return nil } diff --git a/pkg/apiserver/papi_cmd.go b/pkg/apiserver/papi_cmd.go index 2e48ef4eaec..f9ecbf359df 100644 --- a/pkg/apiserver/papi_cmd.go +++ b/pkg/apiserver/papi_cmd.go @@ -264,6 +264,14 @@ func ManagementCmd(message *Message, p *Papi, sync bool) error { if err != nil { return fmt.Errorf("failed to force pull operation: %w", err) } + + deleted, err := p.DBClient.ApplyAllowlistsToExistingDecisions(ctx) + if err != nil { + log.Errorf("could not apply allowlists to existing decisions: %s", err) + } + if deleted > 0 { + log.Infof("deleted %d decisions from allowlists", deleted) + } } case "allowlist_unsubscribe": data, err := json.Marshal(message.Data) diff --git a/pkg/database/allowlists.go b/pkg/database/allowlists.go index 
d14958f612d..a8815a7f62c 100644 --- a/pkg/database/allowlists.go +++ b/pkg/database/allowlists.go @@ -12,6 +12,8 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlist" "github.com/crowdsecurity/crowdsec/pkg/database/ent/allowlistitem" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -389,3 +391,96 @@ func (c *Client) GetAllowlistsContentForAPIC(ctx context.Context) ([]net.IP, []* return ips, nets, nil } + +func (c *Client) ApplyAllowlistsToExistingDecisions(ctx context.Context) (int, error) { + // Soft delete (set expiration to now) all decisions that matches any allowlist + + totalCount := 0 + + // Get all non-expired allowlist items + // We will match them one by one against all decisions + allowlistItems, err := c.Ent.AllowListItem.Query(). + Where( + allowlistitem.Or( + allowlistitem.ExpiresAtGTE(time.Now().UTC()), + allowlistitem.ExpiresAtIsNil(), + ), + ).All(ctx) + if err != nil { + return 0, fmt.Errorf("unable to get allowlist items: %w", err) + } + + now := time.Now().UTC() + + for _, item := range allowlistItems { + updateQuery := c.Ent.Decision.Update().SetUntil(now).Where(decision.UntilGTE(now)) + switch item.IPSize { + case 4: + updateQuery = updateQuery.Where( + decision.And( + decision.IPSizeEQ(4), + decision.Or( + decision.And( + decision.StartIPLTE(item.StartIP), + decision.EndIPGTE(item.EndIP), + ), + decision.And( + decision.StartIPGTE(item.StartIP), + decision.EndIPLTE(item.EndIP), + ), + ))) + case 16: + updateQuery = updateQuery.Where( + decision.And( + decision.IPSizeEQ(16), + decision.Or( + decision.And( + decision.Or( + decision.StartIPLT(item.StartIP), + decision.And( + decision.StartIPEQ(item.StartIP), + decision.StartSuffixLTE(item.StartSuffix), + )), + decision.Or( + decision.EndIPGT(item.EndIP), + decision.And( + decision.EndIPEQ(item.EndIP), + 
decision.EndSuffixGTE(item.EndSuffix), + ), + ), + ), + decision.And( + decision.Or( + decision.StartIPGT(item.StartIP), + decision.And( + decision.StartIPEQ(item.StartIP), + decision.StartSuffixGTE(item.StartSuffix), + )), + decision.Or( + decision.EndIPLT(item.EndIP), + decision.And( + decision.EndIPEQ(item.EndIP), + decision.EndSuffixLTE(item.EndSuffix), + ), + ), + ), + ), + ), + ) + default: + // This should never happen + // But better safe than sorry and just skip it instead of expiring all decisions + c.Log.Errorf("unexpected IP size %d for allowlist item %s", item.IPSize, item.Value) + continue + } + // Update the decisions + count, err := updateQuery.Save(ctx) + if err != nil { + c.Log.Errorf("unable to expire existing decisions: %s", err) + continue + } + totalCount += count + } + + return totalCount, nil +} diff --git a/test/bats/cscli-allowlists.bats b/test/bats/cscli-allowlists.bats index e146e8750dc..d357f29f931 100644 --- a/test/bats/cscli-allowlists.bats +++ b/test/bats/cscli-allowlists.bats @@ -246,3 +246,58 @@ teardown() { rune -0 jq 'del(.created_at) | del(.updated_at) | del(.items.[].created_at) | del(.items.[].expiration)' <(output) assert_json '{"description":"a foo","items":[],"name":"foo"}' } + +@test "allowlists expire active decisions" { + rune -0 cscli decisions add -i 1.2.3.4 + rune -0 cscli decisions add -r 2.3.4.0/24 + rune -0 cscli decisions add -i 5.4.3.42 + rune -0 cscli decisions add -r 6.5.4.0/24 + rune -0 cscli decisions add -r 10.0.0.0/23 + + rune -0 cscli decisions list -o json + rune -0 jq -r 'sort_by(.decisions[].value) | .[].decisions[0].value' <(output) + assert_output - <<-EOT + 1.2.3.4 + 10.0.0.0/23 + 2.3.4.0/24 + 5.4.3.42 + 6.5.4.0/24 + EOT + + rune -0 cscli allowlists create foo -d "foo" + + # add an allowlist that matches exactly + rune -0 cscli allowlists add foo 1.2.3.4 + if is_db_mysql; then sleep 2; fi + # it should not be here anymore + rune -0 cscli decisions list -o json + rune -0 jq -e 'any(.[].decisions[]; 
.value == "1.2.3.4") | not' <(output) + + # allowlist an IP belonging to a range + rune -0 cscli allowlist add foo 2.3.4.42 + if is_db_mysql; then sleep 2; fi + rune -0 cscli decisions list -o json + rune -0 jq -e 'any(.[].decisions[]; .value == "2.3.4.0/24") | not' <(output) + + # allowlist a range with an active decision inside + rune -0 cscli allowlist add foo 5.4.3.0/24 + if is_db_mysql; then sleep 2; fi + rune -0 cscli decisions list -o json + rune -0 jq -e 'any(.[].decisions[]; .value == "5.4.3.42") | not' <(output) + + # allowlist a range inside a range for which we have a decision + rune -0 cscli allowlist add foo 6.5.4.0/25 + if is_db_mysql; then sleep 2; fi + rune -0 cscli decisions list -o json + rune -0 jq -e 'any(.[].decisions[]; .value == "6.5.4.0/24") | not' <(output) + + # allowlist a range bigger than a range for which we have a decision + rune -0 cscli allowlist add foo 10.0.0.0/24 + if is_db_mysql; then sleep 2; fi + rune -0 cscli decisions list -o json + rune -0 jq -e 'any(.[].decisions[]; .value == "10.0.0.0/24") | not' <(output) + + # sanity check no more active decisions + rune -0 cscli decisions list -o json + assert_json [] +} diff --git a/test/lib/config/config-global b/test/lib/config/config-global index 83d95e68e29..014b7cc1972 100755 --- a/test/lib/config/config-global +++ b/test/lib/config/config-global @@ -116,7 +116,7 @@ load_init_data() { dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" if [[ "${DB_BACKEND}" != "${dump_backend}" ]]; then - die "Can't run with backend '${DB_BACKEND}' because the test data was built with '${dump_backend}'" + die "Can't run with backend '${DB_BACKEND}' because 'make bats-fixture' was ran with '${dump_backend}'" fi remove_init_data diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 54ac8550c5f..e6588c61cb8 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -168,7 +168,7 @@ load_init_data() { dump_backend="$(cat "${LOCAL_INIT_DIR}/.backend")" if 
[[ "${DB_BACKEND}" != "${dump_backend}" ]]; then - die "Can't run with backend '${DB_BACKEND}' because the test data was built with '${dump_backend}'" + die "Can't run with backend '${DB_BACKEND}' because 'make bats-fixture' was ran with '${dump_backend}'" fi remove_init_data diff --git a/test/run-tests b/test/run-tests index e7609188c37..ed9d6f553da 100755 --- a/test/run-tests +++ b/test/run-tests @@ -26,7 +26,7 @@ fi dump_backend="$(cat "$LOCAL_INIT_DIR/.backend")" if [[ "$DB_BACKEND" != "$dump_backend" ]]; then - die "Can't run with backend '$DB_BACKEND' because the test data was build with '$dump_backend'" + die "Can't run with backend '$DB_BACKEND' because 'make bats-fixture' was ran with '$dump_backend'" fi if [[ $# -ge 1 ]]; then From 4527ad0fa8fcece1ad404824b28f13706326b2e5 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 6 May 2025 15:15:31 +0200 Subject: [PATCH 514/581] CI: update lint complexity thresholds (#3608) --- .golangci.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index c6dac451fec..e9e426cb6e3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -184,7 +184,7 @@ linters: maintidx: # raise this after refactoring - under: 15 + under: 18 misspell: locale: US @@ -210,7 +210,7 @@ linters: - name: cognitive-complexity arguments: # lower this after refactoring - - 119 + - 113 - name: comment-spacings disabled: true - name: confusing-results @@ -235,8 +235,8 @@ linters: - name: function-length arguments: # lower this after refactoring - - 111 - - 238 + - 87 + - 198 - name: get-return disabled: true - name: increment-decrement @@ -294,9 +294,7 @@ linters: - -ST1003 - -ST1005 - -ST1012 - - -ST1022 - -QF1003 - - -QF1008 - -QF1012 wsl: From 0e8b557402d66a0c564bf1c4c047bb18d05ed44e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 6 May 2025 15:34:50 +0200 Subject: [PATCH 515/581] refact alert, decision filters: 
remove unnecessary pointers (#3607) --- cmd/crowdsec-cli/clialert/alerts.go | 122 ++++++----------- cmd/crowdsec-cli/cliallowlists/allowlists.go | 13 +- cmd/crowdsec-cli/clidecision/decisions.go | 130 ++++++------------- pkg/apiclient/alerts_service.go | 28 ++-- pkg/apiclient/alerts_service_test.go | 8 +- pkg/apiclient/auth_key_test.go | 10 +- pkg/apiclient/decisions_service.go | 28 ++-- pkg/apiclient/decisions_service_test.go | 16 +-- 8 files changed, 128 insertions(+), 227 deletions(-) diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 88e870ee768..cd9e636ee13 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -104,15 +104,15 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi if *alerts == nil { // avoid returning "null" in json // could be cleaner if we used slice of alerts directly - fmt.Println("[]") + fmt.Fprintln(os.Stdout, "[]") return nil } x, _ := json.MarshalIndent(alerts, "", " ") - fmt.Print(string(x)) + fmt.Fprint(os.Stdout, string(x)) case "human": if len(*alerts) == 0 { - fmt.Println("No active alerts") + fmt.Fprintln(os.Stdout, "No active alerts") return nil } @@ -156,7 +156,7 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro alertDecisionsTable(color.Output, cfg.Cscli.Color, alert) if len(alert.Meta) > 0 { - fmt.Printf("\n - Context :\n") + fmt.Fprintf(os.Stdout, "\n - Context :\n") sort.Slice(alert.Meta, func(i, j int) bool { return alert.Meta[i].Key < alert.Meta[j].Key }) @@ -183,7 +183,7 @@ func (cli *cliAlerts) displayOneAlert(alert *models.Alert, withDetail bool) erro } if withDetail { - fmt.Printf("\n - Events :\n") + fmt.Fprintf(os.Stdout, "\n - Events :\n") for _, event := range alert.Events { alertEventTable(color.Output, cfg.Cscli.Color, event) @@ -240,7 +240,7 @@ func (cli *cliAlerts) NewCommand() *cobra.Command { func (cli *cliAlerts) list(ctx context.Context, alertListFilter 
apiclient.AlertsListOpts, limit *int, contained *bool, printMachine bool) error { var err error - *alertListFilter.ScopeEquals, err = SanitizeScope(*alertListFilter.ScopeEquals, *alertListFilter.IPEquals, *alertListFilter.RangeEquals) + alertListFilter.ScopeEquals, err = SanitizeScope(alertListFilter.ScopeEquals, alertListFilter.IPEquals, alertListFilter.RangeEquals) if err != nil { return err } @@ -253,34 +253,6 @@ func (cli *cliAlerts) list(ctx context.Context, alertListFilter apiclient.Alerts *alertListFilter.Limit = 0 } - if *alertListFilter.TypeEquals == "" { - alertListFilter.TypeEquals = nil - } - - if *alertListFilter.ScopeEquals == "" { - alertListFilter.ScopeEquals = nil - } - - if *alertListFilter.ValueEquals == "" { - alertListFilter.ValueEquals = nil - } - - if *alertListFilter.ScenarioEquals == "" { - alertListFilter.ScenarioEquals = nil - } - - if *alertListFilter.IPEquals == "" { - alertListFilter.IPEquals = nil - } - - if *alertListFilter.RangeEquals == "" { - alertListFilter.RangeEquals = nil - } - - if *alertListFilter.OriginEquals == "" { - alertListFilter.OriginEquals = nil - } - if contained != nil && *contained { alertListFilter.Contains = new(bool) } @@ -299,16 +271,16 @@ func (cli *cliAlerts) list(ctx context.Context, alertListFilter apiclient.Alerts func (cli *cliAlerts) newListCmd() *cobra.Command { alertListFilter := apiclient.AlertsListOpts{ - ScopeEquals: new(string), - ValueEquals: new(string), - ScenarioEquals: new(string), - IPEquals: new(string), - RangeEquals: new(string), + ScopeEquals: "", + ValueEquals: "", + ScenarioEquals: "", + IPEquals: "", + RangeEquals: "", Since: cstime.DurationWithDays(0), Until: cstime.DurationWithDays(0), - TypeEquals: new(string), + TypeEquals: "", IncludeCAPI: new(bool), - OriginEquals: new(string), + OriginEquals: "", } limit := new(int) @@ -338,13 +310,13 @@ cscli alerts list --type ban`, flags.BoolVarP(alertListFilter.IncludeCAPI, "all", "a", false, "Include decisions from Central API") 
flags.Var(&alertListFilter.Until, "until", "restrict to alerts older than until (ie. 4h, 30d)") flags.Var(&alertListFilter.Since, "since", "restrict to alerts newer than since (ie. 4h, 30d)") - flags.StringVarP(alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") - flags.StringVarP(alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") - flags.StringVarP(alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value )") - flags.StringVar(alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)") - flags.StringVar(alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. ip,range)") - flags.StringVarP(alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - flags.StringVar(alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + flags.StringVarP(&alertListFilter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") + flags.StringVarP(&alertListFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") + flags.StringVarP(&alertListFilter.RangeEquals, "range", "r", "", "restrict to alerts from this range (shorthand for --scope range --value )") + flags.StringVar(&alertListFilter.TypeEquals, "type", "", "restrict to alerts with given decision type (ie. ban, captcha)") + flags.StringVar(&alertListFilter.ScopeEquals, "scope", "", "restrict to alerts of this scope (ie. 
ip,range)") + flags.StringVarP(&alertListFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVar(&alertListFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) flags.BoolVar(contained, "contained", false, "query decisions contained by range") flags.BoolVarP(&printMachine, "machine", "m", false, "print machines that sent alerts") flags.IntVarP(limit, "limit", "l", 50, "limit size of alerts list table (0 to view all alerts)") @@ -356,7 +328,7 @@ func (cli *cliAlerts) delete(ctx context.Context, delFilter apiclient.AlertsDele var err error if !deleteAll { - *delFilter.ScopeEquals, err = SanitizeScope(*delFilter.ScopeEquals, *delFilter.IPEquals, *delFilter.RangeEquals) + delFilter.ScopeEquals, err = SanitizeScope(delFilter.ScopeEquals, delFilter.IPEquals, delFilter.RangeEquals) if err != nil { return err } @@ -365,26 +337,6 @@ func (cli *cliAlerts) delete(ctx context.Context, delFilter apiclient.AlertsDele delFilter.ActiveDecisionEquals = activeDecision } - if *delFilter.ScopeEquals == "" { - delFilter.ScopeEquals = nil - } - - if *delFilter.ValueEquals == "" { - delFilter.ValueEquals = nil - } - - if *delFilter.ScenarioEquals == "" { - delFilter.ScenarioEquals = nil - } - - if *delFilter.IPEquals == "" { - delFilter.IPEquals = nil - } - - if *delFilter.RangeEquals == "" { - delFilter.RangeEquals = nil - } - if contained != nil && *contained { delFilter.Contains = new(bool) } @@ -422,11 +374,11 @@ func (cli *cliAlerts) newDeleteCmd() *cobra.Command { ) delFilter := apiclient.AlertsDeleteOpts{ - ScopeEquals: new(string), - ValueEquals: new(string), - ScenarioEquals: new(string), - IPEquals: new(string), - RangeEquals: new(string), + ScopeEquals: "", + ValueEquals: "", + ScenarioEquals: "", + IPEquals: "", + RangeEquals: "", } contained := new(bool) @@ -445,9 +397,9 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, if deleteAll { 
return nil } - if *delFilter.ScopeEquals == "" && *delFilter.ValueEquals == "" && - *delFilter.ScenarioEquals == "" && *delFilter.IPEquals == "" && - *delFilter.RangeEquals == "" && delAlertByID == "" { + if delFilter.ScopeEquals == "" && delFilter.ValueEquals == "" && + delFilter.ScenarioEquals == "" && delFilter.IPEquals == "" && + delFilter.RangeEquals == "" && delAlertByID == "" { _ = cmd.Usage() return errors.New("at least one filter or --all must be specified") } @@ -461,11 +413,11 @@ cscli alerts delete -s crowdsecurity/ssh-bf"`, flags := cmd.Flags() flags.SortFlags = false - flags.StringVar(delFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") - flags.StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - flags.StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. crowdsecurity/ssh-bf)") - flags.StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - flags.StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVar(&delFilter.ScopeEquals, "scope", "", "the scope (ie. ip,range)") + flags.StringVarP(&delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(&delFilter.ScenarioEquals, "scenario", "s", "", "the scenario (ie. 
crowdsecurity/ssh-bf)") + flags.StringVarP(&delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(&delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") flags.StringVar(&delAlertByID, "id", "", "alert ID") flags.BoolVarP(&deleteAll, "all", "a", false, "delete all alerts") flags.BoolVar(contained, "contained", false, "query decisions contained by range") @@ -499,14 +451,14 @@ func (cli *cliAlerts) inspect(ctx context.Context, details bool, alertIDs ...str return fmt.Errorf("unable to serialize alert with id %s: %w", alertID, err) } - fmt.Printf("%s\n", string(data)) + fmt.Fprintln(os.Stdout, string(data)) case "raw": data, err := yaml.Marshal(alert) if err != nil { return fmt.Errorf("unable to serialize alert with id %s: %w", alertID, err) } - fmt.Println(string(data)) + fmt.Fprintln(os.Stdout, string(data)) } } @@ -536,7 +488,7 @@ func (cli *cliAlerts) newInspectCmd() *cobra.Command { func (cli *cliAlerts) newFlushCmd() *cobra.Command { var maxItems int - maxAge := cstime.DurationWithDays(7*24*time.Hour) + maxAge := cstime.DurationWithDays(7 * 24 * time.Hour) cmd := &cobra.Command{ Use: `flush`, diff --git a/cmd/crowdsec-cli/cliallowlists/allowlists.go b/cmd/crowdsec-cli/cliallowlists/allowlists.go index bd68d5e53e7..c28ab5c70bd 100644 --- a/cmd/crowdsec-cli/cliallowlists/allowlists.go +++ b/cmd/crowdsec-cli/cliallowlists/allowlists.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "net/url" + "os" "slices" "strconv" "strings" @@ -283,7 +284,7 @@ func (cli *cliAllowLists) create(ctx context.Context, db *database.Client, name return err } - fmt.Printf("allowlist '%s' created successfully\n", name) + fmt.Fprintf(os.Stdout, "allowlist '%s' created successfully\n", name) return nil } @@ -392,7 +393,7 @@ func (cli *cliAllowLists) delete(ctx context.Context, db *database.Client, name return err } - fmt.Printf("allowlist '%s' deleted successfully\n", name) + fmt.Fprintf(os.Stdout, 
"allowlist '%s' deleted successfully\n", name) return nil } @@ -475,7 +476,7 @@ func (cli *cliAllowLists) add(ctx context.Context, db *database.Client, name str } if len(toAdd) == 0 { - fmt.Println("no new values for allowlist") + fmt.Fprintln(os.Stdout, "no new values for allowlist") return nil } @@ -485,7 +486,7 @@ func (cli *cliAllowLists) add(ctx context.Context, db *database.Client, name str } if added > 0 { - fmt.Printf("added %d values to allowlist %s\n", added, name) + fmt.Fprintf(os.Stdout, "added %d values to allowlist %s\n", added, name) } deleted, err := db.ApplyAllowlistsToExistingDecisions(ctx) @@ -622,7 +623,7 @@ func (cli *cliAllowLists) remove(ctx context.Context, db *database.Client, name } if len(toRemove) == 0 { - fmt.Println("no value to remove from allowlist") + fmt.Fprintln(os.Stdout, "no value to remove from allowlist") return nil } @@ -632,7 +633,7 @@ func (cli *cliAllowLists) remove(ctx context.Context, db *database.Client, name } if deleted > 0 { - fmt.Printf("removed %d values from allowlist %s", deleted, name) + fmt.Fprintf(os.Stdout, "removed %d values from allowlist %s", deleted, name) } return nil diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index c141f128791..c5a67582d53 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -103,22 +103,22 @@ func (cli *cliDecisions) decisionsToTable(alerts *models.GetAlertsResponse, prin if *alerts == nil { // avoid returning "null" in `json" // could be cleaner if we used slice of alerts directly - fmt.Println("[]") + fmt.Fprintln(os.Stdout, "[]") return nil } x, _ := json.MarshalIndent(alerts, "", " ") - fmt.Printf("%s", string(x)) + fmt.Fprintln(os.Stdout, string(x)) case "human": if len(*alerts) == 0 { - fmt.Println("No active decisions") + fmt.Fprintln(os.Stdout, "No active decisions") return nil } cli.decisionsTable(color.Output, alerts, printMachine) if skipped > 0 { - fmt.Printf("%d 
duplicated entries skipped\n", skipped) + fmt.Fprintf(os.Stdout, "%d duplicated entries skipped\n", skipped) } } @@ -175,7 +175,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOpts, noSimu *bool, contained *bool, printMachine bool) error { var err error - *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, *filter.IPEquals, *filter.RangeEquals) + filter.ScopeEquals, err = clialert.SanitizeScope(filter.ScopeEquals, filter.IPEquals, filter.RangeEquals) if err != nil { return err } @@ -193,34 +193,6 @@ func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOp *filter.Limit = 0 } - if *filter.TypeEquals == "" { - filter.TypeEquals = nil - } - - if *filter.ValueEquals == "" { - filter.ValueEquals = nil - } - - if *filter.ScopeEquals == "" { - filter.ScopeEquals = nil - } - - if *filter.ScenarioEquals == "" { - filter.ScenarioEquals = nil - } - - if *filter.IPEquals == "" { - filter.IPEquals = nil - } - - if *filter.RangeEquals == "" { - filter.RangeEquals = nil - } - - if *filter.OriginEquals == "" { - filter.OriginEquals = nil - } - if contained != nil && *contained { filter.Contains = new(bool) } @@ -240,15 +212,15 @@ func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOp func (cli *cliDecisions) newListCmd() *cobra.Command { filter := apiclient.AlertsListOpts{ - ValueEquals: new(string), - ScopeEquals: new(string), - ScenarioEquals: new(string), - OriginEquals: new(string), - IPEquals: new(string), - RangeEquals: new(string), + ValueEquals: "", + ScopeEquals: "", + ScenarioEquals: "", + OriginEquals: "", + IPEquals: "", + RangeEquals: "", Since: cstime.DurationWithDays(0), Until: cstime.DurationWithDays(0), - TypeEquals: new(string), + TypeEquals: "", IncludeCAPI: new(bool), Limit: new(int), } @@ -278,13 +250,13 @@ cscli decisions list --origin lists --scenario list_name flags.BoolVarP(filter.IncludeCAPI, "all", 
"a", false, "Include decisions from Central API") flags.Var(&filter.Since, "since", "restrict to alerts newer than since (ie. 4h, 30d)") flags.Var(&filter.Until, "until", "restrict to alerts older than until (ie. 4h, 30d)") - flags.StringVarP(filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") - flags.StringVar(filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)") - flags.StringVar(filter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) - flags.StringVarP(filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)") - flags.StringVarP(filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. crowdsecurity/ssh-bf)") - flags.StringVarP(filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") - flags.StringVarP(filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value )") + flags.StringVarP(&filter.TypeEquals, "type", "t", "", "restrict to this decision type (ie. ban,captcha)") + flags.StringVar(&filter.ScopeEquals, "scope", "", "restrict to this scope (ie. ip,range,session)") + flags.StringVar(&filter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + flags.StringVarP(&filter.ValueEquals, "value", "v", "", "restrict to this value (ie. 1.2.3.4,userName)") + flags.StringVarP(&filter.ScenarioEquals, "scenario", "s", "", "restrict to this scenario (ie. 
crowdsecurity/ssh-bf)") + flags.StringVarP(&filter.IPEquals, "ip", "i", "", "restrict to alerts from this source ip (shorthand for --scope ip --value )") + flags.StringVarP(&filter.RangeEquals, "range", "r", "", "restrict to alerts from this source range (shorthand for --scope range --value )") flags.IntVarP(filter.Limit, "limit", "l", 100, "number of alerts to get (use 0 to remove the limit)") flags.BoolVar(NoSimu, "no-simu", false, "exclude decisions in simulation mode") flags.BoolVarP(&printMachine, "machine", "m", false, "print machines that triggered decisions") @@ -428,39 +400,11 @@ func (cli *cliDecisions) delete(ctx context.Context, delFilter apiclient.Decisio var err error /*take care of shorthand options*/ - *delFilter.ScopeEquals, err = clialert.SanitizeScope(*delFilter.ScopeEquals, *delFilter.IPEquals, *delFilter.RangeEquals) + delFilter.ScopeEquals, err = clialert.SanitizeScope(delFilter.ScopeEquals, delFilter.IPEquals, delFilter.RangeEquals) if err != nil { return err } - if *delFilter.ScopeEquals == "" { - delFilter.ScopeEquals = nil - } - - if *delFilter.OriginEquals == "" { - delFilter.OriginEquals = nil - } - - if *delFilter.ValueEquals == "" { - delFilter.ValueEquals = nil - } - - if *delFilter.ScenarioEquals == "" { - delFilter.ScenarioEquals = nil - } - - if *delFilter.TypeEquals == "" { - delFilter.TypeEquals = nil - } - - if *delFilter.IPEquals == "" { - delFilter.IPEquals = nil - } - - if *delFilter.RangeEquals == "" { - delFilter.RangeEquals = nil - } - if contained != nil && *contained { delFilter.Contains = new(bool) } @@ -490,13 +434,13 @@ func (cli *cliDecisions) delete(ctx context.Context, delFilter apiclient.Decisio func (cli *cliDecisions) newDeleteCmd() *cobra.Command { delFilter := apiclient.DecisionsDeleteOpts{ - ScopeEquals: new(string), - ValueEquals: new(string), - TypeEquals: new(string), - IPEquals: new(string), - RangeEquals: new(string), - ScenarioEquals: new(string), - OriginEquals: new(string), + ScopeEquals: "", + 
ValueEquals: "", + TypeEquals: "", + IPEquals: "", + RangeEquals: "", + ScenarioEquals: "", + OriginEquals: "", } var delDecisionID string @@ -522,10 +466,10 @@ cscli decisions delete --origin lists --scenario list_name if delDecisionAll { return nil } - if *delFilter.ScopeEquals == "" && *delFilter.ValueEquals == "" && - *delFilter.TypeEquals == "" && *delFilter.IPEquals == "" && - *delFilter.RangeEquals == "" && *delFilter.ScenarioEquals == "" && - *delFilter.OriginEquals == "" && delDecisionID == "" { + if delFilter.ScopeEquals == "" && delFilter.ValueEquals == "" && + delFilter.TypeEquals == "" && delFilter.IPEquals == "" && + delFilter.RangeEquals == "" && delFilter.ScenarioEquals == "" && + delFilter.OriginEquals == "" && delDecisionID == "" { _ = cmd.Usage() return errors.New("at least one filter or --all must be specified") } @@ -539,12 +483,12 @@ cscli decisions delete --origin lists --scenario list_name flags := cmd.Flags() flags.SortFlags = false - flags.StringVarP(delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") - flags.StringVarP(delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") - flags.StringVarP(delFilter.TypeEquals, "type", "t", "", "the decision type (ie. ban,captcha)") - flags.StringVarP(delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") - flags.StringVarP(delFilter.ScenarioEquals, "scenario", "s", "", "the scenario name (ie. crowdsecurity/ssh-bf)") - flags.StringVar(delFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) + flags.StringVarP(&delFilter.IPEquals, "ip", "i", "", "Source ip (shorthand for --scope ip --value )") + flags.StringVarP(&delFilter.RangeEquals, "range", "r", "", "Range source ip (shorthand for --scope range --value )") + flags.StringVarP(&delFilter.TypeEquals, "type", "t", "", "the decision type (ie. 
ban,captcha)") + flags.StringVarP(&delFilter.ValueEquals, "value", "v", "", "the value to match for in the specified scope") + flags.StringVarP(&delFilter.ScenarioEquals, "scenario", "s", "", "the scenario name (ie. crowdsecurity/ssh-bf)") + flags.StringVar(&delFilter.OriginEquals, "origin", "", fmt.Sprintf("the value to match for the specified origin (%s ...)", strings.Join(types.GetOrigins(), ","))) flags.StringVar(&delDecisionID, "id", "", "decision id") flags.BoolVar(&delDecisionAll, "all", false, "delete all decisions") diff --git a/pkg/apiclient/alerts_service.go b/pkg/apiclient/alerts_service.go index b44049af406..4d30e5976ae 100644 --- a/pkg/apiclient/alerts_service.go +++ b/pkg/apiclient/alerts_service.go @@ -15,14 +15,14 @@ import ( type AlertsService service type AlertsListOpts struct { - ScopeEquals *string `url:"scope,omitempty"` - ValueEquals *string `url:"value,omitempty"` - ScenarioEquals *string `url:"scenario,omitempty"` - IPEquals *string `url:"ip,omitempty"` - RangeEquals *string `url:"range,omitempty"` - OriginEquals *string `url:"origin,omitempty"` + ScopeEquals string `url:"scope,omitempty"` + ValueEquals string `url:"value,omitempty"` + ScenarioEquals string `url:"scenario,omitempty"` + IPEquals string `url:"ip,omitempty"` + RangeEquals string `url:"range,omitempty"` + OriginEquals string `url:"origin,omitempty"` Since cstime.DurationWithDays `url:"since,omitempty"` - TypeEquals *string `url:"decision_type,omitempty"` + TypeEquals string `url:"decision_type,omitempty"` Until cstime.DurationWithDays `url:"until,omitempty"` IncludeSimulated *bool `url:"simulated,omitempty"` ActiveDecisionEquals *bool `url:"has_active_decision,omitempty"` @@ -33,16 +33,16 @@ type AlertsListOpts struct { } type AlertsDeleteOpts struct { - ScopeEquals *string `url:"scope,omitempty"` - ValueEquals *string `url:"value,omitempty"` - ScenarioEquals *string `url:"scenario,omitempty"` - IPEquals *string `url:"ip,omitempty"` - RangeEquals *string `url:"range,omitempty"` 
+ ScopeEquals string `url:"scope,omitempty"` + ValueEquals string `url:"value,omitempty"` + ScenarioEquals string `url:"scenario,omitempty"` + IPEquals string `url:"ip,omitempty"` + RangeEquals string `url:"range,omitempty"` Since cstime.DurationWithDays `url:"since,omitempty"` Until cstime.DurationWithDays `url:"until,omitempty"` - OriginEquals *string `url:"origin,omitempty"` + OriginEquals string `url:"origin,omitempty"` ActiveDecisionEquals *bool `url:"has_active_decision,omitempty"` - SourceEquals *string `url:"alert_source,omitempty"` + SourceEquals string `url:"alert_source,omitempty"` Contains *bool `url:"contains,omitempty"` Limit *int `url:"limit,omitempty"` ListOpts diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 24b66937f20..eed1598c83a 100644 --- a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -18,6 +18,7 @@ import ( func TestAlertsListAsMachine(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -189,7 +190,7 @@ func TestAlertsListAsMachine(t *testing.T) { assert.Equal(t, expected, *alerts) // this one doesn't - filter := AlertsListOpts{IPEquals: ptr.Of("1.2.3.4")} + filter := AlertsListOpts{IPEquals: "1.2.3.4"} alerts, resp, err = client.Alerts.List(ctx, filter) require.NoError(t, err) @@ -199,6 +200,7 @@ func TestAlertsListAsMachine(t *testing.T) { func TestAlertsGetAsMachine(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -367,6 +369,7 @@ func TestAlertsGetAsMachine(t *testing.T) { func TestAlertsCreateAsMachine(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -410,6 +413,7 @@ func TestAlertsCreateAsMachine(t *testing.T) { func TestAlertsDeleteAsMachine(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -442,7 +446,7 @@ func TestAlertsDeleteAsMachine(t *testing.T) { 
defer teardown() - alert := AlertsDeleteOpts{IPEquals: ptr.Of("1.2.3.4")} + alert := AlertsDeleteOpts{IPEquals: "1.2.3.4"} alerts, resp, err := client.Alerts.Delete(ctx, alert) require.NoError(t, err) diff --git a/pkg/apiclient/auth_key_test.go b/pkg/apiclient/auth_key_test.go index aa92e03bbae..1e5f95accbc 100644 --- a/pkg/apiclient/auth_key_test.go +++ b/pkg/apiclient/auth_key_test.go @@ -10,11 +10,11 @@ import ( "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" - "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestApiAuth(t *testing.T) { ctx := t.Context() + log.SetLevel(log.TraceLevel) mux, urlx, teardown := setup() @@ -40,7 +40,7 @@ func TestApiAuth(t *testing.T) { defer teardown() - //ok no answer + // ok no answer auth := &APIKeyTransport{ APIKey: "ixu", } @@ -48,12 +48,12 @@ func TestApiAuth(t *testing.T) { newcli, err := NewDefaultClient(apiURL, "v1", "toto", auth.Client()) require.NoError(t, err) - alert := DecisionsListOpts{IPEquals: ptr.Of("1.2.3.4")} + alert := DecisionsListOpts{IPEquals: "1.2.3.4"} _, resp, err := newcli.Decisions.List(ctx, alert) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) - //ko bad token + // ko bad token auth = &APIKeyTransport{ APIKey: "bad", } @@ -69,7 +69,7 @@ func TestApiAuth(t *testing.T) { cstest.RequireErrorMessage(t, err, "API error: access forbidden") - //ko empty token + // ko empty token auth = &APIKeyTransport{} newcli, err = NewDefaultClient(apiURL, "v1", "toto", auth.Client()) diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go index a1810e831ec..c222e2ddbb8 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -20,12 +20,12 @@ import ( type DecisionsService service type DecisionsListOpts struct { - ScopeEquals *string `url:"scope,omitempty"` - ValueEquals *string `url:"value,omitempty"` - TypeEquals *string `url:"type,omitempty"` - IPEquals *string `url:"ip,omitempty"` - 
RangeEquals *string `url:"range,omitempty"` - Contains *bool `url:"contains,omitempty"` + ScopeEquals string `url:"scope,omitempty"` + ValueEquals string `url:"value,omitempty"` + TypeEquals string `url:"type,omitempty"` + IPEquals string `url:"ip,omitempty"` + RangeEquals string `url:"range,omitempty"` + Contains *bool `url:"contains,omitempty"` ListOpts } @@ -60,15 +60,15 @@ func (o *DecisionsStreamOpts) addQueryParamsToURL(url string) (string, error) { } type DecisionsDeleteOpts struct { - ScopeEquals *string `url:"scope,omitempty"` - ValueEquals *string `url:"value,omitempty"` - TypeEquals *string `url:"type,omitempty"` - IPEquals *string `url:"ip,omitempty"` - RangeEquals *string `url:"range,omitempty"` - Contains *bool `url:"contains,omitempty"` - OriginEquals *string `url:"origin,omitempty"` + ScopeEquals string `url:"scope,omitempty"` + ValueEquals string `url:"value,omitempty"` + TypeEquals string `url:"type,omitempty"` + IPEquals string `url:"ip,omitempty"` + RangeEquals string `url:"range,omitempty"` + Contains *bool `url:"contains,omitempty"` + OriginEquals string `url:"origin,omitempty"` // - ScenarioEquals *string `url:"scenario,omitempty"` + ScenarioEquals string `url:"scenario,omitempty"` ListOpts } diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index c16abed6448..c9e555e9293 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -19,6 +19,7 @@ import ( func TestDecisionsList(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -64,15 +65,13 @@ func TestDecisionsList(t *testing.T) { } // OK decisions - decisionsFilter := DecisionsListOpts{IPEquals: ptr.Of("1.2.3.4")} - decisions, resp, err := newcli.Decisions.List(ctx, decisionsFilter) + decisions, resp, err := newcli.Decisions.List(ctx, DecisionsListOpts{IPEquals: "1.2.3.4"}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) 
assert.Equal(t, *expected, *decisions) // Empty return - decisionsFilter = DecisionsListOpts{IPEquals: ptr.Of("1.2.3.5")} - decisions, resp, err = newcli.Decisions.List(ctx, decisionsFilter) + decisions, resp, err = newcli.Decisions.List(ctx, DecisionsListOpts{IPEquals: "1.2.3.5"}) require.NoError(t, err) assert.Equal(t, http.StatusOK, resp.Response.StatusCode) assert.Empty(t, *decisions) @@ -80,6 +79,7 @@ func TestDecisionsList(t *testing.T) { func TestDecisionsStream(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -156,6 +156,7 @@ func TestDecisionsStream(t *testing.T) { func TestDecisionsStreamV3Compatibility(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setupWithPrefix("v3") @@ -224,6 +225,7 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { func TestDecisionsStreamV3(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setupWithPrefix("v3") @@ -297,6 +299,7 @@ func TestDecisionsStreamV3(t *testing.T) { func TestDecisionsFromBlocklist(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setupWithPrefix("v3") @@ -429,10 +432,7 @@ func TestDeleteDecisions(t *testing.T) { }) require.NoError(t, err) - filters := DecisionsDeleteOpts{IPEquals: new(string)} - *filters.IPEquals = "1.2.3.4" - - deleted, _, err := client.Decisions.Delete(ctx, filters) + deleted, _, err := client.Decisions.Delete(ctx, DecisionsDeleteOpts{IPEquals: "1.2.3.4"}) require.NoError(t, err) assert.Equal(t, "1", deleted.NbDeleted) } From 15dcbdeec91846852d5e9cd8994cbc7caf53c147 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 6 May 2025 16:10:18 +0200 Subject: [PATCH 516/581] refact: remove unused metod DeleteDecisionsWithFilter() (#3605) --- pkg/apiserver/apic.go | 10 +-- pkg/apiserver/controllers/v1/decisions.go | 2 +- pkg/database/decisions.go | 95 ++++------------------- 3 files 
changed, 21 insertions(+), 86 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 3295d8dde93..07d2f9fcba5 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -11,7 +11,6 @@ import ( "net/http" "net/url" "slices" - "strconv" "strings" "sync" "time" @@ -439,16 +438,11 @@ func (a *apic) HandleDeletedDecisionsV3(ctx context.Context, deletedDecisions [] filter["scopes"] = []string{*scope} } - dbCliRet, _, err := a.dbClient.ExpireDecisionsWithFilter(ctx, filter) + dbCliDel, _, err := a.dbClient.ExpireDecisionsWithFilter(ctx, filter) if err != nil { return 0, fmt.Errorf("expiring decisions error: %w", err) } - dbCliDel, err := strconv.Atoi(dbCliRet) - if err != nil { - return 0, fmt.Errorf("converting db ret %d: %w", dbCliDel, err) - } - updateCounterForDecision(deleteCounters, ptr.Of(types.CAPIOrigin), nil, dbCliDel) nbDeleted += dbCliDel } @@ -660,6 +654,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { if data.Links != nil { if len(data.Links.Allowlists) > 0 { hasPulledAllowlists = true + if err := a.UpdateAllowlists(ctx, data.Links.Allowlists, forcePull); err != nil { log.Errorf("could not update allowlists from CAPI: %s", err) } @@ -702,6 +697,7 @@ func (a *apic) PullTop(ctx context.Context, forcePull bool) error { if err != nil { log.Errorf("could not apply allowlists to existing decisions: %s", err) } + if deleted > 0 { log.Infof("deleted %d decisions from allowlists", deleted) } diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index 6a316d8a2e4..86dd9845071 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -134,7 +134,7 @@ func (c *Controller) DeleteDecisions(gctx *gin.Context) { } deleteDecisionResp := models.DeleteDecisionResponse{ - NbDeleted: nbDeleted, + NbDeleted: strconv.Itoa(nbDeleted), } gctx.JSON(http.StatusOK, deleteDecisionResp) diff --git a/pkg/database/decisions.go 
b/pkg/database/decisions.go index 94b8a54b792..52d0e341c48 100644 --- a/pkg/database/decisions.go +++ b/pkg/database/decisions.go @@ -29,9 +29,9 @@ type DecisionsByScenario struct { func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string][]string) (*ent.DecisionQuery, error) { var ( - err error + err error start_ip, start_sfx, end_ip, end_sfx int64 - ip_sz int + ip_sz int ) contains := true @@ -100,18 +100,21 @@ func BuildDecisionRequestWithFilter(query *ent.DecisionQuery, filter map[string] if err != nil { return nil, errors.Wrapf(InvalidFilter, "invalid limit value : %s", err) } + query = query.Limit(limit) case "offset": offset, err := strconv.Atoi(value[0]) if err != nil { return nil, errors.Wrapf(InvalidFilter, "invalid offset value : %s", err) } + query = query.Offset(offset) case "id_gt": id, err := strconv.Atoi(value[0]) if err != nil { return nil, errors.Wrapf(InvalidFilter, "invalid id_gt value : %s", err) } + query = query.Where(decision.IDGT(id)) } } @@ -201,7 +204,7 @@ func (c *Client) QueryDecisionCountByScenario(ctx context.Context) ([]*Decisions func (c *Client) QueryDecisionWithFilter(ctx context.Context, filter map[string][]string) ([]*ent.Decision, error) { var ( - err error + err error data []*ent.Decision ) @@ -322,70 +325,12 @@ func (c *Client) QueryNewDecisionsSinceWithFilters(ctx context.Context, since *t return data, nil } -func (c *Client) DeleteDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { - var ( - err error - start_ip, start_sfx, end_ip, end_sfx int64 - ip_sz int - ) - - contains := true - /*if contains is true, return bans that *contains* the given value (value is the inner) - else, return bans that are *contained* by the given value (value is the outer) */ - - decisions := c.Ent.Decision.Query() - - for param, value := range filter { - switch param { - case "contains": - contains, err = strconv.ParseBool(value[0]) - if err != nil { - return "0", nil, 
errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) - } - case "scope": - decisions = decisions.Where(decision.ScopeEQ(value[0])) - case "value": - decisions = decisions.Where(decision.ValueEQ(value[0])) - case "type": - decisions = decisions.Where(decision.TypeEQ(value[0])) - case "ip", "range": - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) - if err != nil { - return "0", nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) - } - case "scenario": - decisions = decisions.Where(decision.ScenarioEQ(value[0])) - default: - return "0", nil, errors.Wrap(InvalidFilter, fmt.Sprintf("'%s' doesn't exist", param)) - } - } - - decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) - if err != nil { - return "0", nil, err - } - - toDelete, err := decisions.All(ctx) - if err != nil { - c.Log.Warningf("DeleteDecisionsWithFilter : %s", err) - return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") - } - - count, err := c.DeleteDecisions(ctx, toDelete) - if err != nil { - c.Log.Warningf("While deleting decisions : %s", err) - return "0", nil, errors.Wrap(DeleteFail, "decisions with provided filter") - } - - return strconv.Itoa(count), toDelete, nil -} - // ExpireDecisionsWithFilter updates the expiration time to now() for the decisions matching the filter, and returns the updated items -func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[string][]string) (string, []*ent.Decision, error) { +func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[string][]string) (int, []*ent.Decision, error) { var ( - err error + err error start_ip, start_sfx, end_ip, end_sfx int64 - ip_sz int + ip_sz int ) contains := true @@ -398,7 +343,7 @@ func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[strin case "contains": contains, err = strconv.ParseBool(value[0]) if err != nil { - return "0", 
nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) + return 0, nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) } case "scopes": decisions = decisions.Where(decision.ScopeEQ(value[0])) @@ -413,32 +358,32 @@ func (c *Client) ExpireDecisionsWithFilter(ctx context.Context, filter map[strin case "ip", "range": ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) if err != nil { - return "0", nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) + return 0, nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) } case "scenario": decisions = decisions.Where(decision.ScenarioEQ(value[0])) default: - return "0", nil, errors.Wrapf(InvalidFilter, "'%s' doesn't exist", param) + return 0, nil, errors.Wrapf(InvalidFilter, "'%s' doesn't exist", param) } } decisions, err = decisionIPFilter(decisions, contains, ip_sz, start_ip, start_sfx, end_ip, end_sfx) if err != nil { - return "0", nil, err + return 0, nil, err } DecisionsToDelete, err := decisions.All(ctx) if err != nil { c.Log.Warningf("ExpireDecisionsWithFilter : %s", err) - return "0", nil, errors.Wrap(DeleteFail, "expire decisions with provided filter") + return 0, nil, errors.Wrap(DeleteFail, "expire decisions with provided filter") } count, err := c.ExpireDecisions(ctx, DecisionsToDelete) if err != nil { - return "0", nil, errors.Wrapf(DeleteFail, "expire decisions with provided filter : %s", err) + return 0, nil, errors.Wrapf(DeleteFail, "expire decisions with provided filter : %s", err) } - return strconv.Itoa(count), DecisionsToDelete, err + return count, DecisionsToDelete, err } func decisionIDs(decisions []*ent.Decision) []int { @@ -564,13 +509,7 @@ func (c *Client) CountDecisionsByValue(ctx context.Context, value string, since } func (c *Client) GetActiveDecisionsTimeLeftByValue(ctx context.Context, decisionValue string) (time.Duration, error) { - var ( - err error - start_ip, start_sfx, 
end_ip, end_sfx int64 - ip_sz int - ) - - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(decisionValue) + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err := types.Addr2Ints(decisionValue) if err != nil { return 0, fmt.Errorf("unable to convert '%s' to int: %w", decisionValue, err) } From 3b9130469cb54299e6a3949df55123bd2538ddf7 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 6 May 2025 16:11:24 +0200 Subject: [PATCH 517/581] refact pkg/parser: extract method, avoid calling defer in loop (#3564) --- pkg/parser/stage.go | 181 +++++++++++++++++++++++++------------------- 1 file changed, 105 insertions(+), 76 deletions(-) diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index ddc07ca7f1d..22be68fdb68 100644 --- a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -37,103 +37,132 @@ type Stagefile struct { } func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ([]Node, error) { - var nodes []Node - tmpstages := make(map[string]bool) + var allNodes []Node + + tmpStages := make(map[string]bool) pctx.Stages = []string{} - for _, stageFile := range stageFiles { - if !strings.HasSuffix(stageFile.Filename, ".yaml") && !strings.HasSuffix(stageFile.Filename, ".yml") { - log.Warningf("skip non yaml : %s", stageFile.Filename) + for _, sf := range stageFiles { + nodes, err := processStageFile(sf, pctx, ectx) + if err != nil { + return nil, err + } + + for _, n := range nodes { //nolint:gocritic // rangeValCopy + allNodes = append(allNodes, n) + tmpStages[n.Stage] = true + } + } + + for k := range tmpStages { + pctx.Stages = append(pctx.Stages, k) + } + + sort.Strings(pctx.Stages) + log.Infof("Loaded %d nodes from %d stages", len(allNodes), len(pctx.Stages)) + + return allNodes, nil +} + +func processStageFile(stageFile Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ([]Node, error) { + if !strings.HasSuffix(stageFile.Filename, ".yaml") && !strings.HasSuffix(stageFile.Filename, ".yml") { 
+ log.Warningf("skip non yaml : %s", stageFile.Filename) + return nil, nil + } + + log.Debugf("loading parser file '%s'", stageFile) + + st, err := os.Stat(stageFile.Filename) + if err != nil { + return nil, fmt.Errorf("failed to stat %s : %v", stageFile, err) + } + + if st.IsDir() { + return nil, nil + } + + yamlFile, err := os.Open(stageFile.Filename) + if err != nil { + return nil, fmt.Errorf("can't access parsing configuration file %s : %s", stageFile.Filename, err) + } + defer yamlFile.Close() + // process the yaml + dec := yaml.NewDecoder(yamlFile) + dec.SetStrict(true) + + var nodes []Node + + nodesCount := 0 + + for { + node := Node{} + node.OnSuccess = "continue" // default behavior is to continue + + if err = dec.Decode(&node); err != nil { + if errors.Is(err, io.EOF) { + log.Tracef("End of yaml file") + break + } + + return nil, fmt.Errorf("error decoding parsing configuration file '%s': %v", stageFile.Filename, err) + } + + // check for empty bucket + if node.Name == "" && node.Description == "" && node.Author == "" { + log.Infof("Node in %s has no name, author or description. 
Skipping.", stageFile.Filename) continue } - log.Debugf("loading parser file '%s'", stageFile) - st, err := os.Stat(stageFile.Filename) + + // check compat + if node.FormatVersion == "" { + log.Tracef("no version in %s, assuming '1.0'", node.Name) + node.FormatVersion = "1.0" + } + + ok, err := constraint.Satisfies(node.FormatVersion, constraint.Parser) if err != nil { - return nil, fmt.Errorf("failed to stat %s : %v", stageFile, err) + return nil, fmt.Errorf("failed to check version : %s", err) } - if st.IsDir() { + + if !ok { + log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, constraint.Parser) continue } - yamlFile, err := os.Open(stageFile.Filename) + + node.Stage = stageFile.Stage + // compile the node : grok pattern and expression + + err = node.compile(pctx, ectx) if err != nil { - return nil, fmt.Errorf("can't access parsing configuration file %s : %s", stageFile.Filename, err) - } - defer yamlFile.Close() - //process the yaml - dec := yaml.NewDecoder(yamlFile) - dec.SetStrict(true) - nodesCount := 0 - for { - node := Node{} - node.OnSuccess = "continue" //default behavior is to continue - err = dec.Decode(&node) - if err != nil { - if errors.Is(err, io.EOF) { - log.Tracef("End of yaml file") - break - } - return nil, fmt.Errorf("error decoding parsing configuration file '%s': %v", stageFile.Filename, err) + if node.Name != "" { + return nil, fmt.Errorf("failed to compile node '%s' in '%s' : %s", node.Name, stageFile.Filename, err) } - //check for empty bucket - if node.Name == "" && node.Description == "" && node.Author == "" { - log.Infof("Node in %s has no name, author or description. 
Skipping.", stageFile.Filename) - continue - } - //check compat - if node.FormatVersion == "" { - log.Tracef("no version in %s, assuming '1.0'", node.Name) - node.FormatVersion = "1.0" - } - ok, err := constraint.Satisfies(node.FormatVersion, constraint.Parser) - if err != nil { - return nil, fmt.Errorf("failed to check version : %s", err) - } - if !ok { - log.Errorf("%s : %s doesn't satisfy parser format %s, skip", node.Name, node.FormatVersion, constraint.Parser) - continue - } + return nil, fmt.Errorf("failed to compile node in '%s' : %s", stageFile.Filename, err) + } + /* if the stage is empty, the node is empty, it's a trailing entry in users yaml file */ + if node.Stage == "" { + continue + } - node.Stage = stageFile.Stage - if _, ok := tmpstages[stageFile.Stage]; !ok { - tmpstages[stageFile.Stage] = true - } - //compile the node : grok pattern and expression - err = node.compile(pctx, ectx) + for _, data := range node.Data { + err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type) if err != nil { - if node.Name != "" { - return nil, fmt.Errorf("failed to compile node '%s' in '%s' : %s", node.Name, stageFile.Filename, err) - } - return nil, fmt.Errorf("failed to compile node in '%s' : %s", stageFile.Filename, err) - } - /* if the stage is empty, the node is empty, it's a trailing entry in users yaml file */ - if node.Stage == "" { - continue + log.Error(err.Error()) } - for _, data := range node.Data { - err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type) - if err != nil { + if data.Type == "regexp" { // cache only makes sense for regexp + if err = exprhelpers.RegexpCacheInit(data.DestPath, *data); err != nil { log.Error(err.Error()) } - if data.Type == "regexp" { //cache only makes sense for regexp - if err = exprhelpers.RegexpCacheInit(data.DestPath, *data); err != nil { - log.Error(err.Error()) - } - } } - - nodes = append(nodes, node) - nodesCount++ } - log.WithFields(log.Fields{"file": stageFile.Filename, "stage": 
stageFile.Stage}).Infof("Loaded %d parser nodes", nodesCount) - } - for k := range tmpstages { - pctx.Stages = append(pctx.Stages, k) + nodes = append(nodes, node) + nodesCount++ } - sort.Strings(pctx.Stages) - log.Infof("Loaded %d nodes from %d stages", len(nodes), len(pctx.Stages)) + + log.WithFields(log.Fields{"file": stageFile.Filename, "stage": stageFile.Stage}).Infof("Loaded %d parser nodes", nodesCount) return nodes, nil } From e6b85b641cfa924b1b163f46b1a84d4572d8de20 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Tue, 6 May 2025 16:23:56 +0200 Subject: [PATCH 518/581] feat(apic): add ApicAuth client and token re-authentication logic (#3522) --- pkg/apiserver/apic.go | 99 ++++++++++++++++++++++++++---- pkg/database/ent/migrate/schema.go | 2 +- pkg/database/ent/schema/config.go | 6 +- 3 files changed, 92 insertions(+), 15 deletions(-) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 07d2f9fcba5..8e92dc67487 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -17,6 +17,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/go-openapi/strfmt" + "github.com/golang-jwt/jwt/v4" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -213,8 +214,6 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient shareSignals: *config.Sharing, } - password := strfmt.Password(config.Credentials.Password) - apiURL, err := url.Parse(config.Credentials.URL) if err != nil { return nil, fmt.Errorf("while parsing '%s': %w", config.Credentials.URL, err) @@ -232,7 +231,7 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ MachineID: config.Credentials.Login, - Password: password, + Password: strfmt.Password(config.Credentials.Password), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v3", @@ -243,29 +242,103 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient return nil, fmt.Errorf("while creating api 
client: %w", err) } - // The watcher will be authenticated by the RoundTripper the first time it will call CAPI - // Explicit authentication will provoke a useless supplementary call to CAPI - scenarios, err := ret.FetchScenariosListFromDB(ctx) + err = ret.Authenticate(ctx, config) + return ret, err +} + +// loadAPICToken attempts to retrieve and validate a JWT token from the local database. +// It returns the token string, its expiration time, and a boolean indicating whether the token is valid. +// +// A token is considered valid if: +// - it exists in the database, +// - it is a properly formatted JWT with an "exp" claim, +// - it is not expired or near expiry. +func loadAPICToken(ctx context.Context, db *database.Client) (string, time.Time, bool) { + token, err := db.GetConfigItem(ctx, "apic_token") + if err != nil { + log.Debugf("error fetching token from DB: %s", err) + return "", time.Time{}, false + } + + if token == nil { + log.Debug("no token found in DB") + return "", time.Time{}, false + } + + parser := new(jwt.Parser) + tok, _, err := parser.ParseUnverified(*token, jwt.MapClaims{}) + if err != nil { + log.Debugf("error parsing token: %s", err) + return "", time.Time{}, false + } + + claims, ok := tok.Claims.(jwt.MapClaims) + if !ok { + log.Debugf("error parsing token claims: %s", err) + return "", time.Time{}, false + } + + expFloat, ok := claims["exp"].(float64) + if !ok { + log.Debug("token missing 'exp' claim") + return "", time.Time{}, false + } + + exp := time.Unix(int64(expFloat), 0) + if time.Now().UTC().After(exp.Add(-1*time.Minute)) { + log.Debug("auth token expired") + return "", time.Time{}, false + } + + return *token, exp, true +} + +// saveAPICToken stores the given JWT token in the local database under the "apic_token" config item. 
+func saveAPICToken(ctx context.Context, db *database.Client, token string) error { + if err := db.SetConfigItem(ctx, "apic_token", token); err != nil { + return fmt.Errorf("saving token to db: %w", err) + } + + return nil +} + +// Authenticate ensures the API client is authorized to communicate with the CAPI. +// It attempts to reuse a previously saved JWT token from the database, falling back to +// an authentication request if the token is missing, invalid, or expired. +// +// If a new token is obtained, it is saved back to the database for caching. +func (a *apic) Authenticate(ctx context.Context, config *csconfig.OnlineApiClientCfg) error { + if token, exp, valid := loadAPICToken(ctx, a.dbClient); valid { + log.Debug("using valid token from DB") + a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Token = token + a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Expiration = exp + } + + log.Debug("No token found, authenticating") + + scenarios, err := a.FetchScenariosListFromDB(ctx) if err != nil { - return ret, fmt.Errorf("get scenario in db: %w", err) + return fmt.Errorf("get scenario in db: %w", err) } - authResp, _, err := ret.apiClient.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ + password := strfmt.Password(config.Credentials.Password) + + authResp, _, err := a.apiClient.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &config.Credentials.Login, Password: &password, Scenarios: scenarios, }) if err != nil { - return ret, fmt.Errorf("authenticate watcher (%s): %w", config.Credentials.Login, err) + return fmt.Errorf("authenticate watcher (%s): %w", config.Credentials.Login, err) } - if err = ret.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Expiration.UnmarshalText([]byte(authResp.Expire)); err != nil { - return ret, fmt.Errorf("unable to parse jwt expiration: %w", err) + if err = a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Expiration.UnmarshalText([]byte(authResp.Expire)); err 
!= nil { + return fmt.Errorf("unable to parse jwt expiration: %w", err) } - ret.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token + a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token - return ret, err + return saveAPICToken(ctx, a.dbClient, authResp.Token) } // keep track of all alerts in cache and push it to CAPI every PushInterval. diff --git a/pkg/database/ent/migrate/schema.go b/pkg/database/ent/migrate/schema.go index 932c27dd7a6..571c04af833 100644 --- a/pkg/database/ent/migrate/schema.go +++ b/pkg/database/ent/migrate/schema.go @@ -148,7 +148,7 @@ var ( {Name: "created_at", Type: field.TypeTime}, {Name: "updated_at", Type: field.TypeTime}, {Name: "name", Type: field.TypeString, Unique: true}, - {Name: "value", Type: field.TypeString}, + {Name: "value", Type: field.TypeString, SchemaType: map[string]string{"mysql": "longtext", "postgres": "text"}}, } // ConfigItemsTable holds the schema information for the "config_items" table. ConfigItemsTable = &schema.Table{ diff --git a/pkg/database/ent/schema/config.go b/pkg/database/ent/schema/config.go index d526db25a8d..2f12f449153 100644 --- a/pkg/database/ent/schema/config.go +++ b/pkg/database/ent/schema/config.go @@ -2,6 +2,7 @@ package schema import ( "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/schema/field" "github.com/crowdsecurity/crowdsec/pkg/types" @@ -22,7 +23,10 @@ func (ConfigItem) Fields() []ent.Field { Default(types.UtcNow). 
UpdateDefault(types.UtcNow).StructTag(`json:"updated_at"`), field.String("name").Unique().StructTag(`json:"name"`).Immutable(), - field.String("value").StructTag(`json:"value"`), // a json object + field.String("value").SchemaType(map[string]string{ + dialect.MySQL: "longtext", + dialect.Postgres: "text", + }).StructTag(`json:"value"`), // a json object } } From 73a423034fc3d8629be1858aa73c3978349b91fc Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 7 May 2025 10:45:34 +0200 Subject: [PATCH 519/581] CI: update action for generating docker description (#3559) --- .github/workflows/update_docker_hub_doc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update_docker_hub_doc.yml b/.github/workflows/update_docker_hub_doc.yml index 77d0b13e190..3e2dfa50b0e 100644 --- a/.github/workflows/update_docker_hub_doc.yml +++ b/.github/workflows/update_docker_hub_doc.yml @@ -25,5 +25,5 @@ jobs: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} repository: crowdsecurity/crowdsec - short-description: ${{ github.event.repository.description }} + short-description: "Crowdsec - An open-source, lightweight agent to detect and respond to bad behaviours." 
readme-filepath: "./docker/README.md" From 31b914512a327ac5a5ecf324d10f248f10915d4b Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 7 May 2025 11:12:27 +0200 Subject: [PATCH 520/581] refact pkg/database: unnecessary pointers (#3611) * refact pkg/database: unnecessary pointers * lint --- cmd/crowdsec-cli/clipapi/papi.go | 10 ++++------ pkg/apiclient/decisions_service.go | 10 +++++----- pkg/apiclient/decisions_service_test.go | 6 +++--- pkg/apiserver/apic.go | 17 ++++++++++------- pkg/apiserver/apic_test.go | 6 +++--- pkg/apiserver/papi.go | 13 +++++++------ pkg/database/config.go | 23 +++++++++++++---------- 7 files changed, 45 insertions(+), 40 deletions(-) diff --git a/cmd/crowdsec-cli/clipapi/papi.go b/cmd/crowdsec-cli/clipapi/papi.go index c2be87f8a04..b48685bd18b 100644 --- a/cmd/crowdsec-cli/clipapi/papi.go +++ b/cmd/crowdsec-cli/clipapi/papi.go @@ -11,8 +11,6 @@ import ( "github.com/spf13/cobra" "gopkg.in/tomb.v2" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/args" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/apiserver" @@ -76,17 +74,17 @@ func (cli *cliPapi) Status(ctx context.Context, out io.Writer, db *database.Clie lastTimestampStr, err := db.GetConfigItem(ctx, apiserver.PapiPullKey) if err != nil { - lastTimestampStr = ptr.Of("never") + lastTimestampStr = "never" } // both can and did happen - if lastTimestampStr == nil || *lastTimestampStr == "0001-01-01T00:00:00Z" { - lastTimestampStr = ptr.Of("never") + if lastTimestampStr == "" || lastTimestampStr == "0001-01-01T00:00:00Z" { + lastTimestampStr = "never" } fmt.Fprint(out, "You can successfully interact with Polling API (PAPI)\n") fmt.Fprintf(out, "Console plan: %s\n", perms.Plan) - fmt.Fprintf(out, "Last order received: %s\n", *lastTimestampStr) + fmt.Fprintf(out, "Last order received: %s\n", lastTimestampStr) fmt.Fprint(out, "PAPI subscriptions:\n") for 
_, sub := range perms.Categories { diff --git a/pkg/apiclient/decisions_service.go b/pkg/apiclient/decisions_service.go index c222e2ddbb8..47a37773e09 100644 --- a/pkg/apiclient/decisions_service.go +++ b/pkg/apiclient/decisions_service.go @@ -174,7 +174,7 @@ func (s *DecisionsService) FetchV3Decisions(ctx context.Context, url string) (*m return &v2Decisions, resp, nil } -func (s *DecisionsService) GetDecisionsFromBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink, lastPullTimestamp *string) ([]*models.Decision, bool, error) { +func (s *DecisionsService) GetDecisionsFromBlocklist(ctx context.Context, blocklist *modelscapi.BlocklistLink, lastPullTimestamp string) ([]*models.Decision, bool, error) { if blocklist.URL == nil { return nil, false, errors.New("blocklist URL is nil") } @@ -188,8 +188,8 @@ func (s *DecisionsService) GetDecisionsFromBlocklist(ctx context.Context, blockl return nil, false, err } - if lastPullTimestamp != nil { - req.Header.Set("If-Modified-Since", *lastPullTimestamp) + if lastPullTimestamp != "" { + req.Header.Set("If-Modified-Since", lastPullTimestamp) } log.Debugf("[URL] %s %s", req.Method, req.URL) @@ -217,8 +217,8 @@ func (s *DecisionsService) GetDecisionsFromBlocklist(ctx context.Context, blockl } if resp.StatusCode == http.StatusNotModified { - if lastPullTimestamp != nil { - log.Debugf("Blocklist %s has not been modified since %s", *blocklist.URL, *lastPullTimestamp) + if lastPullTimestamp != "" { + log.Debugf("Blocklist %s has not been modified since %s", *blocklist.URL, lastPullTimestamp) } else { log.Debugf("Blocklist %s has not been modified (decisions about to expire)", *blocklist.URL) } diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index c9e555e9293..4bc65587698 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -362,7 +362,7 @@ func TestDecisionsFromBlocklist(t *testing.T) { Remediation: &tremediationBlocklist, 
Name: &tnameBlocklist, Duration: &tdurationBlocklist, - }, nil) + }, "") require.NoError(t, err) assert.True(t, isModified) @@ -381,7 +381,7 @@ func TestDecisionsFromBlocklist(t *testing.T) { Remediation: &tremediationBlocklist, Name: &tnameBlocklist, Duration: &tdurationBlocklist, - }, ptr.Of("Sun, 01 Jan 2023 01:01:01 GMT")) + }, "Sun, 01 Jan 2023 01:01:01 GMT") require.NoError(t, err) assert.False(t, isModified) @@ -392,7 +392,7 @@ func TestDecisionsFromBlocklist(t *testing.T) { Remediation: &tremediationBlocklist, Name: &tnameBlocklist, Duration: &tdurationBlocklist, - }, ptr.Of("Mon, 02 Jan 2023 01:01:01 GMT")) + }, "Mon, 02 Jan 2023 01:01:01 GMT") require.NoError(t, err) assert.True(t, isModified) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 8e92dc67487..3ab75f5ec05 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -243,6 +243,7 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient } err = ret.Authenticate(ctx, config) + return ret, err } @@ -260,13 +261,14 @@ func loadAPICToken(ctx context.Context, db *database.Client) (string, time.Time, return "", time.Time{}, false } - if token == nil { + if token == "" { log.Debug("no token found in DB") return "", time.Time{}, false } parser := new(jwt.Parser) - tok, _, err := parser.ParseUnverified(*token, jwt.MapClaims{}) + + tok, _, err := parser.ParseUnverified(token, jwt.MapClaims{}) if err != nil { log.Debugf("error parsing token: %s", err) return "", time.Time{}, false @@ -285,12 +287,12 @@ func loadAPICToken(ctx context.Context, db *database.Client) (string, time.Time, } exp := time.Unix(int64(expFloat), 0) - if time.Now().UTC().After(exp.Add(-1*time.Minute)) { + if time.Now().UTC().After(exp.Add(-1 * time.Minute)) { log.Debug("auth token expired") return "", time.Time{}, false } - return *token, exp, true + return token, exp, true } // saveAPICToken stores the given JWT token in the local database under the "apic_token" config item. 
@@ -310,6 +312,7 @@ func saveAPICToken(ctx context.Context, db *database.Client, token string) error func (a *apic) Authenticate(ctx context.Context, config *csconfig.OnlineApiClientCfg) error { if token, exp, valid := loadAPICToken(ctx, a.dbClient); valid { log.Debug("using valid token from DB") + a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Token = token a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Expiration = exp } @@ -1043,7 +1046,7 @@ func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, blocklistConfigItemName := fmt.Sprintf("blocklist:%s:last_pull", *blocklist.Name) var ( - lastPullTimestamp *string + lastPullTimestamp string err error ) @@ -1060,10 +1063,10 @@ func (a *apic) updateBlocklist(ctx context.Context, client *apiclient.ApiClient, } if !hasChanged { - if lastPullTimestamp == nil { + if lastPullTimestamp == "" { log.Infof("blocklist %s hasn't been modified or there was an error reading it, skipping", *blocklist.Name) } else { - log.Infof("blocklist %s hasn't been modified since %s, skipping", *blocklist.Name, *lastPullTimestamp) + log.Infof("blocklist %s hasn't been modified since %s, skipping", *blocklist.Name, lastPullTimestamp) } return nil diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index fc4e290877d..a9931875513 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -99,7 +99,7 @@ func assertTotalValidDecisionCount(t *testing.T, dbClient *database.Client, coun assert.Len(t, d, count) } -func jsonMarshalX(v interface{}) []byte { +func jsonMarshalX(v any) []byte { data, err := json.Marshal(v) if err != nil { panic(err) @@ -932,7 +932,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { blocklistConfigItemName := "blocklist:blocklist1:last_pull" lastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) - assert.NotEmpty(t, *lastPullTimestamp) + assert.NotEmpty(t, lastPullTimestamp) // new 
call should return 304 and should not change lastPullTimestamp httpmock.RegisterResponder("GET", "http://api.crowdsec.net/blocklist1", func(req *http.Request) (*http.Response, error) { @@ -944,7 +944,7 @@ func TestAPICPullTopBLCacheFirstCall(t *testing.T) { require.NoError(t, err) secondLastPullTimestamp, err := api.dbClient.GetConfigItem(ctx, blocklistConfigItemName) require.NoError(t, err) - assert.Equal(t, *lastPullTimestamp, *secondLastPullTimestamp) + assert.Equal(t, lastPullTimestamp, secondLastPullTimestamp) } func TestAPICPullTopBLCacheForceCall(t *testing.T) { diff --git a/pkg/apiserver/papi.go b/pkg/apiserver/papi.go index 442c5729554..501f09e8f5c 100644 --- a/pkg/apiserver/papi.go +++ b/pkg/apiserver/papi.go @@ -22,9 +22,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const SyncInterval = time.Second * 10 - -const PapiPullKey = "papi:last_pull" +const ( + SyncInterval = time.Second * 10 + PapiPullKey = "papi:last_pull" +) var operationMap = map[string]func(*Message, *Papi, bool) error{ "decision": DecisionCmd, @@ -48,7 +49,7 @@ type Source struct { type Message struct { Header *Header - Data interface{} `json:"data"` + Data any `json:"data"` } type OperationChannels struct { @@ -240,7 +241,7 @@ func (p *Papi) Pull(ctx context.Context) error { } // value doesn't exist, it's first time we're pulling - if lastTimestampStr == nil { + if lastTimestampStr == "" { binTime, err := lastTimestamp.MarshalText() if err != nil { return fmt.Errorf("failed to serialize last timestamp: %w", err) @@ -252,7 +253,7 @@ func (p *Papi) Pull(ctx context.Context) error { p.Logger.Debugf("config item '%s' set in database with value '%s'", PapiPullKey, string(binTime)) } } else { - if err := lastTimestamp.UnmarshalText([]byte(*lastTimestampStr)); err != nil { + if err := lastTimestamp.UnmarshalText([]byte(lastTimestampStr)); err != nil { return fmt.Errorf("failed to parse last timestamp: %w", err) } } diff --git a/pkg/database/config.go b/pkg/database/config.go 
index 89ccb1e1b28..2f262965078 100644 --- a/pkg/database/config.go +++ b/pkg/database/config.go @@ -9,27 +9,30 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" ) -func (c *Client) GetConfigItem(ctx context.Context, key string) (*string, error) { +func (c *Client) GetConfigItem(ctx context.Context, key string) (string, error) { result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(ctx) - if err != nil && ent.IsNotFound(err) { - return nil, nil - } - if err != nil { - return nil, errors.Wrapf(QueryFail, "select config item: %s", err) + switch { + case ent.IsNotFound(err): + return "", nil + case err != nil: + return "", errors.Wrapf(QueryFail, "select config item: %s", err) + default: + return result.Value, nil } - - return &result.Value, nil } func (c *Client) SetConfigItem(ctx context.Context, key string, value string) error { nbUpdated, err := c.Ent.ConfigItem.Update().SetValue(value).Where(configitem.NameEQ(key)).Save(ctx) - if (err != nil && ent.IsNotFound(err)) || nbUpdated == 0 { // not found, create + + switch { + case ent.IsNotFound(err) || nbUpdated == 0: + // not found, create err := c.Ent.ConfigItem.Create().SetName(key).SetValue(value).Exec(ctx) if err != nil { return errors.Wrapf(QueryFail, "insert config item: %s", err) } - } else if err != nil { + case err != nil: return errors.Wrapf(QueryFail, "update config item: %s", err) } From 5484cc20443f126be010b0be3fbc391e1ce1f3f6 Mon Sep 17 00:00:00 2001 From: Micha Cassola Date: Wed, 7 May 2025 14:15:54 +0200 Subject: [PATCH 521/581] Fix cp -n (#3483) * Fix cp -n * Don't rely on cp for file existence check * Busybox cp can be used now * test -s -> test -f * test -f -> test -e --------- Co-authored-by: marco --- Dockerfile | 10 ---------- Dockerfile.debian | 10 ---------- wizard.sh | 15 ++++++--------- 3 files changed, 6 insertions(+), 29 deletions(-) diff --git a/Dockerfile b/Dockerfile index 45b9c2bedd1..0226f759750 100644 --- a/Dockerfile +++ b/Dockerfile @@ 
-49,14 +49,4 @@ ENTRYPOINT ["/bin/bash", "/docker_start.sh"] FROM slim AS full -# Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp -# The files are here for reference, as users will need to mount a new version to be actually able to use notifications -COPY --from=build \ - /go/src/crowdsec/cmd/notification-email/email.yaml \ - /go/src/crowdsec/cmd/notification-http/http.yaml \ - /go/src/crowdsec/cmd/notification-slack/slack.yaml \ - /go/src/crowdsec/cmd/notification-splunk/splunk.yaml \ - /go/src/crowdsec/cmd/notification-sentinel/sentinel.yaml \ - /staging/etc/crowdsec/notifications/ - COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins diff --git a/Dockerfile.debian b/Dockerfile.debian index 0e99ade7890..70714f624f9 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -65,16 +65,6 @@ ENTRYPOINT ["/bin/bash", "docker_start.sh"] FROM slim AS plugins -# Due to the wizard using cp -n, we have to copy the config files directly from the source as -n does not exist in busybox cp -# The files are here for reference, as users will need to mount a new version to be actually able to use notifications -COPY --from=build \ - /go/src/crowdsec/cmd/notification-email/email.yaml \ - /go/src/crowdsec/cmd/notification-http/http.yaml \ - /go/src/crowdsec/cmd/notification-slack/slack.yaml \ - /go/src/crowdsec/cmd/notification-splunk/splunk.yaml \ - /go/src/crowdsec/cmd/notification-sentinel/sentinel.yaml \ - /staging/etc/crowdsec/notifications/ - COPY --from=build /usr/local/lib/crowdsec/plugins /usr/local/lib/crowdsec/plugins FROM slim AS geoip diff --git a/wizard.sh b/wizard.sh index d9b8caf2f06..5418143ed37 100755 --- a/wizard.sh +++ b/wizard.sh @@ -497,7 +497,7 @@ delete_plugins() { rm -rf ${CROWDSEC_PLUGIN_DIR} } -install_plugins(){ +install_plugins() { mkdir -p ${CROWDSEC_PLUGIN_DIR} mkdir -p /etc/crowdsec/notifications @@ -508,14 +508,11 @@ install_plugins(){ cp 
${SENTINEL_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} cp ${FILE_PLUGIN_BINARY} ${CROWDSEC_PLUGIN_DIR} - if [[ ${DOCKER_MODE} == "false" ]]; then - cp -n ${SLACK_PLUGIN_CONFIG} /etc/crowdsec/notifications/ - cp -n ${SPLUNK_PLUGIN_CONFIG} /etc/crowdsec/notifications/ - cp -n ${HTTP_PLUGIN_CONFIG} /etc/crowdsec/notifications/ - cp -n ${EMAIL_PLUGIN_CONFIG} /etc/crowdsec/notifications/ - cp -n ${SENTINEL_PLUGIN_CONFIG} /etc/crowdsec/notifications/ - cp -n ${FILE_PLUGIN_CONFIG} /etc/crowdsec/notifications/ - fi + for yaml_conf in ${SLACK_PLUGIN_CONFIG} ${SPLUNK_PLUGIN_CONFIG} ${HTTP_PLUGIN_CONFIG} ${EMAIL_PLUGIN_CONFIG} ${SENTINEL_PLUGIN_CONFIG} ${FILE_PLUGIN_CONFIG}; do + if [[ ! -e /etc/crowdsec/notifications/"$(basename "$yaml_conf")" ]]; then + cp "$yaml_conf" /etc/crowdsec/notifications/ + fi + done } check_running_bouncers() { From 341e816a5c09a9c383b915a79a7ce15681d5f633 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Wed, 7 May 2025 17:02:04 +0200 Subject: [PATCH 522/581] fix(apiserver): ensure nil is returned after setting token and expiration and before we reauthenticate (#3613) --- pkg/apiserver/apic.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 3ab75f5ec05..0a618b0b44e 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -315,6 +315,8 @@ func (a *apic) Authenticate(ctx context.Context, config *csconfig.OnlineApiClien a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Token = token a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Expiration = exp + + return nil } log.Debug("No token found, authenticating") From ec553b34addaee80582b0e64f1b956cfd1c58e2d Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 May 2025 10:26:38 +0200 Subject: [PATCH 523/581] use go 1.24.3 (#3612) --- azure-pipelines.yml | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 
3faea6cbdcb..f2525027aaa 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -21,7 +21,7 @@ stages: - task: GoTool@0 displayName: "Install Go" inputs: - version: '1.24.1' + version: '1.24.3' - pwsh: | choco install -y make diff --git a/go.mod b/go.mod index 6c027fec445..771b058b8de 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.24.1 +go 1.24.3 require ( entgo.io/ent v0.14.2 From ce6018fbbfec19ce0ec16e559c3c286d79adab5c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 May 2025 10:58:12 +0200 Subject: [PATCH 524/581] config.yaml: make config_dir and notification_dir optional (#3606) --- cmd/crowdsec-cli/cliconfig/show.go | 11 ++++++----- .../clinotifications/notifications.go | 16 ++++++++-------- cmd/crowdsec-cli/require/require.go | 8 -------- cmd/crowdsec/api.go | 4 ---- pkg/csconfig/config_paths.go | 10 ++++++++++ test/bats/01_cscli.bats | 5 +++++ test/bats/72_plugin_badconfig.bats | 10 +++++----- 7 files changed, 34 insertions(+), 30 deletions(-) diff --git a/cmd/crowdsec-cli/cliconfig/show.go b/cmd/crowdsec-cli/cliconfig/show.go index cff214e49e4..b5830ceec92 100644 --- a/cmd/crowdsec-cli/cliconfig/show.go +++ b/cmd/crowdsec-cli/cliconfig/show.go @@ -25,7 +25,7 @@ func (cli *cliConfig) showKey(key string) error { } opts := []expr.Option{} - opts = append(opts, exprhelpers.GetExprOptions(map[string]interface{}{})...) + opts = append(opts, exprhelpers.GetExprOptions(map[string]any{})...) opts = append(opts, expr.Env(Env{})) program, err := expr.Compile(key, opts...) 
@@ -44,7 +44,7 @@ func (cli *cliConfig) showKey(key string) error { // that would break compatibility with previous versions switch output.(type) { case string: - fmt.Println(output) + fmt.Fprintln(os.Stdout, output) default: litter.Dump(output) } @@ -54,7 +54,7 @@ func (cli *cliConfig) showKey(key string) error { return fmt.Errorf("failed to serialize configuration: %w", err) } - fmt.Println(string(data)) + fmt.Fprintln(os.Stdout, string(data)) } return nil @@ -67,6 +67,7 @@ func (cli *cliConfig) template() string { - Configuration Folder : {{.ConfigPaths.ConfigDir}} - Data Folder : {{.ConfigPaths.DataDir}} - Hub Folder : {{.ConfigPaths.HubDir}} + - Notification Folder : {{.ConfigPaths.NotificationDir}} - Simulation File : {{.ConfigPaths.SimulationFilePath}} {{- end }} @@ -216,14 +217,14 @@ func (cli *cliConfig) show() error { return fmt.Errorf("failed to serialize configuration: %w", err) } - fmt.Println(string(data)) + fmt.Fprintln(os.Stdout, string(data)) case "raw": data, err := yaml.Marshal(cfg) if err != nil { return fmt.Errorf("failed to serialize configuration: %w", err) } - fmt.Println(string(data)) + fmt.Fprintln(os.Stdout, string(data)) } return nil diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index 7856c89ff9e..9031a2d93e1 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -70,7 +70,7 @@ func (cli *cliNotifications) NewCommand() *cobra.Command { return fmt.Errorf("loading api client: %w", err) } - return require.Notifications(cfg) + return nil }, } @@ -176,7 +176,7 @@ func (cli *cliNotifications) newListCmd() *cobra.Command { if err != nil { return fmt.Errorf("failed to serialize notification configuration: %w", err) } - fmt.Printf("%s", string(x)) + fmt.Fprint(os.Stdout, string(x)) } else if cfg.Cscli.Output == "raw" { csvwriter := csv.NewWriter(os.Stdout) err := csvwriter.Write([]string{"Name", "Type", 
"Profile name"}) @@ -223,19 +223,19 @@ func (cli *cliNotifications) newInspectCmd() *cobra.Command { return fmt.Errorf("plugin '%s' does not exist or is not active", args[0]) } if cfg.Cscli.Output == "human" || cfg.Cscli.Output == "raw" { - fmt.Printf(" - %15s: %15s\n", "Type", ncfg.Config.Type) - fmt.Printf(" - %15s: %15s\n", "Name", ncfg.Config.Name) - fmt.Printf(" - %15s: %15s\n", "Timeout", ncfg.Config.TimeOut) - fmt.Printf(" - %15s: %15s\n", "Format", ncfg.Config.Format) + fmt.Fprintf(os.Stdout, " - %15s: %15s\n", "Type", ncfg.Config.Type) + fmt.Fprintf(os.Stdout, " - %15s: %15s\n", "Name", ncfg.Config.Name) + fmt.Fprintf(os.Stdout, " - %15s: %15s\n", "Timeout", ncfg.Config.TimeOut) + fmt.Fprintf(os.Stdout, " - %15s: %15s\n", "Format", ncfg.Config.Format) for k, v := range ncfg.Config.Config { - fmt.Printf(" - %15s: %15v\n", k, v) + fmt.Fprintf(os.Stdout, " - %15s: %15v\n", k, v) } } else if cfg.Cscli.Output == "json" { x, err := json.MarshalIndent(cfg, "", " ") if err != nil { return fmt.Errorf("failed to serialize notification configuration: %w", err) } - fmt.Printf("%s", string(x)) + fmt.Fprint(os.Stdout, string(x)) } return nil diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index beffa29f3eb..098218263a9 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -74,14 +74,6 @@ func DB(c *csconfig.Config) error { return nil } -func Notifications(c *csconfig.Config) error { - if c.ConfigPaths.NotificationDir == "" { - return errors.New("config_paths.notification_dir is not set in crowdsec config") - } - - return nil -} - func HubDownloader(ctx context.Context, c *csconfig.Config) *cwhub.Downloader { // set branch in config, and log if necessary branch := HubBranch(ctx, c) diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index ccb0acf0209..1b14d2f6945 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -32,10 +32,6 @@ func initAPIServer(ctx context.Context, cConfig 
*csconfig.Config) (*apiserver.AP return nil, errors.New("plugins are enabled, but the plugin_config section is missing in the configuration") } - if cConfig.ConfigPaths.NotificationDir == "" { - return nil, errors.New("plugins are enabled, but config_paths.notification_dir is not defined") - } - if cConfig.ConfigPaths.PluginDir == "" { return nil, errors.New("plugins are enabled, but config_paths.plugin_dir is not defined") } diff --git a/pkg/csconfig/config_paths.go b/pkg/csconfig/config_paths.go index a8d39a664f3..5fdf1d94bda 100644 --- a/pkg/csconfig/config_paths.go +++ b/pkg/csconfig/config_paths.go @@ -19,10 +19,15 @@ type ConfigurationPaths struct { func (c *Config) loadConfigurationPaths() error { var err error + if c.ConfigPaths == nil { return errors.New("no configuration paths provided") } + if c.ConfigPaths.ConfigDir == "" { + c.ConfigPaths.ConfigDir = filepath.Dir(c.FilePath) + } + if c.ConfigPaths.DataDir == "" { return errors.New("please provide a data directory with the 'data_dir' directive in the 'config_paths' section") } @@ -35,6 +40,10 @@ func (c *Config) loadConfigurationPaths() error { c.ConfigPaths.HubIndexFile = filepath.Join(c.ConfigPaths.HubDir, ".index.json") } + if c.ConfigPaths.NotificationDir == "" { + c.ConfigPaths.NotificationDir = filepath.Join(c.ConfigPaths.ConfigDir, "notifications") + } + if c.ConfigPaths.PatternDir == "" { c.ConfigPaths.PatternDir = filepath.Join(c.ConfigPaths.ConfigDir, "patterns") } @@ -53,6 +62,7 @@ func (c *Config) loadConfigurationPaths() error { if *k == "" { continue } + *k, err = filepath.Abs(*k) if err != nil { return fmt.Errorf("failed to get absolute path of '%s': %w", *k, err) diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 77c128568c9..df4ece8d7f2 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -147,6 +147,11 @@ teardown() { # defaults + config_set 'del(.config_paths.config_dir)' + rune -0 cscli config show --key Config.ConfigPaths.ConfigDir + assert_output 
"$configdir" + echo "$config" > "$CONFIG_YAML" + config_set 'del(.config_paths.hub_dir)' rune -0 cscli hub list rune -0 cscli config show --key Config.ConfigPaths.HubDir diff --git a/test/bats/72_plugin_badconfig.bats b/test/bats/72_plugin_badconfig.bats index 216b29f4db0..ab8ceecc56b 100644 --- a/test/bats/72_plugin_badconfig.bats +++ b/test/bats/72_plugin_badconfig.bats @@ -12,6 +12,9 @@ setup_file() { PROFILES_PATH=$(config_get '.api.server.profiles_path') export PROFILES_PATH + + CONFIG_DIR=$(dirname "$CONFIG_YAML") + export CONFIG_DIR } teardown_file() { @@ -72,7 +75,6 @@ teardown() { } @test "duplicate notification config" { - CONFIG_DIR=$(dirname "$CONFIG_YAML") # email_default has two configurations rune -0 yq -i '.name="email_default"' "$CONFIG_DIR/notifications/http.yaml" # enable a notification, otherwise plugins are ignored @@ -110,10 +112,8 @@ teardown() { @test "config.yaml: missing config_paths.notification_dir" { config_set 'del(.config_paths.notification_dir)' - config_set "$PROFILES_PATH" '.notifications=["http_default"]' - rune -0 wait-for \ - --err "api server init: plugins are enabled, but config_paths.notification_dir is not defined" \ - "$CROWDSEC" + rune -0 cscli config show --key Config.ConfigPaths.NotificationDir + assert_output "$CONFIG_DIR/notifications" } @test "config.yaml: missing config_paths.plugin_dir" { From 95101467809401682ca4f9fd0d4a6027b5ea0058 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 May 2025 12:55:01 +0200 Subject: [PATCH 525/581] update expr to 1.17.2 (#3519) * update expr to 1.17.2 * add if test --------- Co-authored-by: Sebastien Blot --- go.mod | 2 +- go.sum | 4 ++-- pkg/exprhelpers/debugger_test.go | 15 +++++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 771b058b8de..931ca9e7e6f 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/docker/docker v27.3.1+incompatible github.com/docker/go-connections 
v0.5.0 github.com/docker/go-units v0.5.0 // indirect - github.com/expr-lang/expr v1.16.9 + github.com/expr-lang/expr v1.17.2 github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.7.0 github.com/gin-gonic/gin v1.10.0 diff --git a/go.sum b/go.sum index ece7d98e1e5..e9a0c8b4959 100644 --- a/go.sum +++ b/go.sum @@ -134,8 +134,8 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI= -github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= +github.com/expr-lang/expr v1.17.2 h1:o0A99O/Px+/DTjEnQiodAgOIK9PPxL8DtXhBRKC+Iso= +github.com/expr-lang/expr v1.17.2/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 0852d7ab2de..7a6b70cc78a 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -1,4 +1,5 @@ //go:build expr_debug + package exprhelpers import ( @@ -273,6 +274,20 @@ func TestBaseDbg(t *testing.T) { {Code: "and", CodeDepth: 0, JumpIf: true, IfFalse: true, StrConditionResult: "false", ConditionResult: boolPtr(false), Finalized: true}, }, }, + { + Name: "if", + Expr: `if base_int != 42 { + true +} else { + false +}`, + Env: defaultEnv, + ExpectedOutputs: []OpOutput{ + {Code: "!= 42 {", CodeDepth: 0, Negated: true, Comparison: true, Left: "42", Right: "42", StrConditionResult: "[true]", 
ConditionResult: boolPtr(true), Finalized: true}, + {Code: "if base_int != 42 {", CodeDepth: 0, ConditionResult: boolPtr(false), Finalized: false, IfFalse: true, JumpIf: true, StrConditionResult: "false"}, + {Code: "false }", CodeDepth: 0, StrConditionResult: "false", Condition: true, ConditionResult: boolPtr(false), Finalized: true}, + }, + }, } logger := log.WithField("test", "exprhelpers") From 627eb352f1ad78d958a1909581779c1a1f534f0e Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Fri, 9 May 2025 13:58:58 +0200 Subject: [PATCH 526/581] CI: update codecov list and fix workflow (#3617) --- .github/codecov.yml | 282 +++++++++++++++++--------------- .github/generate-codecov-yml.sh | 4 +- .github/workflows/go-tests.yml | 2 +- 3 files changed, 155 insertions(+), 133 deletions(-) diff --git a/.github/codecov.yml b/.github/codecov.yml index e3a81070324..35bd1a32e5f 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -15,150 +15,172 @@ coverage: # if a directory is ignored, there is no way to un-ignore files like pkg/models/helpers.go # so we make a full list ignore: - - "./pkg/modelscapi/success_response.go" - - "./pkg/modelscapi/get_decisions_stream_response_deleted.go" - - "./pkg/modelscapi/login_request.go" - - "./pkg/modelscapi/get_decisions_stream_response_links.go" - - "./pkg/modelscapi/login_response.go" - - "./pkg/modelscapi/add_signals_request_item.go" - - "./pkg/modelscapi/blocklist_link.go" - - "./pkg/modelscapi/get_decisions_stream_response_deleted_item.go" - - "./pkg/modelscapi/decisions_sync_request.go" - - "./pkg/modelscapi/get_decisions_stream_response.go" - - "./pkg/modelscapi/metrics_request_machines_item.go" - - "./pkg/modelscapi/metrics_request.go" - - "./pkg/modelscapi/get_decisions_stream_response_new.go" - - "./pkg/modelscapi/add_signals_request_item_decisions_item.go" - - "./pkg/modelscapi/metrics_request_bouncers_item.go" - - "./pkg/modelscapi/decisions_sync_request_item_decisions_item.go" - - 
"./pkg/modelscapi/decisions_delete_request_item.go" - - "./pkg/modelscapi/get_decisions_stream_response_new_item.go" - - "./pkg/modelscapi/decisions_sync_request_item.go" - - "./pkg/modelscapi/add_signals_request.go" - - "./pkg/modelscapi/reset_password_request.go" - - "./pkg/modelscapi/add_signals_request_item_decisions.go" - - "./pkg/modelscapi/decisions_sync_request_item_source.go" - - "./pkg/modelscapi/error_response.go" - - "./pkg/modelscapi/decisions_delete_request.go" - - "./pkg/modelscapi/decisions_sync_request_item_decisions.go" - - "./pkg/modelscapi/enroll_request.go" - - "./pkg/modelscapi/register_request.go" - - "./pkg/modelscapi/add_signals_request_item_source.go" - - "./pkg/models/success_response.go" - - "./pkg/models/hub_items.go" - - "./pkg/models/alert.go" - - "./pkg/models/metrics_bouncer_info.go" - - "./pkg/models/add_signals_request_item.go" - - "./pkg/models/metrics_meta.go" - - "./pkg/models/metrics_detail_item.go" - - "./pkg/models/add_signals_request_item_decisions_item.go" - - "./pkg/models/hub_item.go" - - "./pkg/models/get_alerts_response.go" - - "./pkg/models/metrics_labels.go" - - "./pkg/models/watcher_auth_request.go" - - "./pkg/models/add_alerts_request.go" - - "./pkg/models/event.go" - - "./pkg/models/decisions_delete_request_item.go" - - "./pkg/models/meta.go" - - "./pkg/models/detailed_metrics.go" - - "./pkg/models/delete_alerts_response.go" - - "./pkg/models/remediation_components_metrics.go" - - "./pkg/models/console_options.go" - - "./pkg/models/topx_response.go" - - "./pkg/models/add_signals_request.go" - - "./pkg/models/delete_decision_response.go" - - "./pkg/models/get_decisions_response.go" - - "./pkg/models/add_signals_request_item_decisions.go" - - "./pkg/models/source.go" - - "./pkg/models/decisions_stream_response.go" - - "./pkg/models/error_response.go" - - "./pkg/models/all_metrics.go" - - "./pkg/models/o_sversion.go" - - "./pkg/models/decision.go" - - "./pkg/models/decisions_delete_request.go" - - 
"./pkg/models/flush_decision_response.go" - - "./pkg/models/watcher_auth_response.go" - - "./pkg/models/lapi_metrics.go" - - "./pkg/models/watcher_registration_request.go" - - "./pkg/models/metrics_agent_info.go" - - "./pkg/models/log_processors_metrics.go" - - "./pkg/models/add_signals_request_item_source.go" - - "./pkg/models/base_metrics.go" - - "./pkg/models/add_alerts_response.go" - - "./pkg/models/metrics.go" - - "./pkg/protobufs/notifier.pb.go" - - "./pkg/protobufs/notifier_grpc.pb.go" - - "./pkg/database/ent/metric_update.go" - - "./pkg/database/ent/machine_delete.go" - - "./pkg/database/ent/decision_query.go" - - "./pkg/database/ent/meta_query.go" - - "./pkg/database/ent/metric/where.go" - - "./pkg/database/ent/metric/metric.go" - - "./pkg/database/ent/machine_create.go" - - "./pkg/database/ent/alert.go" - - "./pkg/database/ent/event_update.go" + - "./pkg/database/ent/alert/alert.go" - "./pkg/database/ent/alert_create.go" + - "./pkg/database/ent/alert_delete.go" + - "./pkg/database/ent/alert.go" - "./pkg/database/ent/alert_query.go" - - "./pkg/database/ent/metric_delete.go" - - "./pkg/database/ent/lock_create.go" - - "./pkg/database/ent/bouncer_update.go" - - "./pkg/database/ent/meta_update.go" - - "./pkg/database/ent/decision_create.go" - - "./pkg/database/ent/configitem_update.go" - - "./pkg/database/ent/machine_query.go" - - "./pkg/database/ent/client.go" - - "./pkg/database/ent/predicate/predicate.go" - - "./pkg/database/ent/lock/where.go" - - "./pkg/database/ent/lock/lock.go" - - "./pkg/database/ent/mutation.go" - - "./pkg/database/ent/migrate/migrate.go" - - "./pkg/database/ent/migrate/schema.go" - - "./pkg/database/ent/configitem.go" - - "./pkg/database/ent/metric_query.go" - - "./pkg/database/ent/event.go" - - "./pkg/database/ent/event_query.go" - - "./pkg/database/ent/lock_update.go" - - "./pkg/database/ent/meta.go" - - "./pkg/database/ent/configitem_query.go" - - "./pkg/database/ent/bouncer.go" - "./pkg/database/ent/alert_update.go" - - 
"./pkg/database/ent/meta/meta.go" - - "./pkg/database/ent/meta/where.go" - - "./pkg/database/ent/decision_update.go" - - "./pkg/database/ent/alert_delete.go" - - "./pkg/database/ent/lock.go" - - "./pkg/database/ent/runtime/runtime.go" - - "./pkg/database/ent/alert/alert.go" - "./pkg/database/ent/alert/where.go" - - "./pkg/database/ent/runtime.go" + - "./pkg/database/ent/allowlist/allowlist.go" + - "./pkg/database/ent/allowlist_create.go" + - "./pkg/database/ent/allowlist_delete.go" + - "./pkg/database/ent/allowlist.go" + - "./pkg/database/ent/allowlistitem/allowlistitem.go" + - "./pkg/database/ent/allowlistitem_create.go" + - "./pkg/database/ent/allowlistitem_delete.go" + - "./pkg/database/ent/allowlistitem.go" + - "./pkg/database/ent/allowlistitem_query.go" + - "./pkg/database/ent/allowlistitem_update.go" + - "./pkg/database/ent/allowlistitem/where.go" + - "./pkg/database/ent/allowlist_query.go" + - "./pkg/database/ent/allowlist_update.go" + - "./pkg/database/ent/allowlist/where.go" - "./pkg/database/ent/bouncer/bouncer.go" + - "./pkg/database/ent/bouncer_create.go" + - "./pkg/database/ent/bouncer_delete.go" + - "./pkg/database/ent/bouncer.go" + - "./pkg/database/ent/bouncer_query.go" + - "./pkg/database/ent/bouncer_update.go" - "./pkg/database/ent/bouncer/where.go" - - "./pkg/database/ent/hook/hook.go" - - "./pkg/database/ent/metric.go" + - "./pkg/database/ent/client.go" + - "./pkg/database/ent/configitem/configitem.go" - "./pkg/database/ent/configitem_create.go" - "./pkg/database/ent/configitem_delete.go" - - "./pkg/database/ent/tx.go" - - "./pkg/database/ent/decision.go" - - "./pkg/database/ent/lock_delete.go" + - "./pkg/database/ent/configitem.go" + - "./pkg/database/ent/configitem_query.go" + - "./pkg/database/ent/configitem_update.go" + - "./pkg/database/ent/configitem/where.go" + - "./pkg/database/ent/decision_create.go" + - "./pkg/database/ent/decision/decision.go" - "./pkg/database/ent/decision_delete.go" - - "./pkg/database/ent/machine/where.go" - - 
"./pkg/database/ent/machine/machine.go" - - "./pkg/database/ent/event_create.go" - - "./pkg/database/ent/metric_create.go" + - "./pkg/database/ent/decision.go" + - "./pkg/database/ent/decision_query.go" + - "./pkg/database/ent/decision_update.go" - "./pkg/database/ent/decision/where.go" - - "./pkg/database/ent/decision/decision.go" + - "./pkg/database/ent/ent.go" - "./pkg/database/ent/enttest/enttest.go" - - "./pkg/database/ent/lock_query.go" - - "./pkg/database/ent/bouncer_create.go" + - "./pkg/database/ent/event_create.go" - "./pkg/database/ent/event_delete.go" - - "./pkg/database/ent/bouncer_delete.go" - "./pkg/database/ent/event/event.go" + - "./pkg/database/ent/event.go" + - "./pkg/database/ent/event_query.go" + - "./pkg/database/ent/event_update.go" - "./pkg/database/ent/event/where.go" + - "./pkg/database/ent/hook/hook.go" + - "./pkg/database/ent/lock_create.go" + - "./pkg/database/ent/lock_delete.go" + - "./pkg/database/ent/lock.go" + - "./pkg/database/ent/lock/lock.go" + - "./pkg/database/ent/lock_query.go" + - "./pkg/database/ent/lock_update.go" + - "./pkg/database/ent/lock/where.go" + - "./pkg/database/ent/machine_create.go" + - "./pkg/database/ent/machine_delete.go" - "./pkg/database/ent/machine.go" - - "./pkg/database/ent/ent.go" + - "./pkg/database/ent/machine/machine.go" + - "./pkg/database/ent/machine_query.go" + - "./pkg/database/ent/machine_update.go" + - "./pkg/database/ent/machine/where.go" - "./pkg/database/ent/meta_create.go" - - "./pkg/database/ent/bouncer_query.go" - "./pkg/database/ent/meta_delete.go" - - "./pkg/database/ent/machine_update.go" - - "./pkg/database/ent/configitem/configitem.go" - - "./pkg/database/ent/configitem/where.go" + - "./pkg/database/ent/meta.go" + - "./pkg/database/ent/meta/meta.go" + - "./pkg/database/ent/meta_query.go" + - "./pkg/database/ent/meta_update.go" + - "./pkg/database/ent/meta/where.go" + - "./pkg/database/ent/metric_create.go" + - "./pkg/database/ent/metric_delete.go" + - "./pkg/database/ent/metric.go" + 
- "./pkg/database/ent/metric/metric.go" + - "./pkg/database/ent/metric_query.go" + - "./pkg/database/ent/metric_update.go" + - "./pkg/database/ent/metric/where.go" + - "./pkg/database/ent/migrate/migrate.go" + - "./pkg/database/ent/migrate/schema.go" + - "./pkg/database/ent/mutation.go" + - "./pkg/database/ent/predicate/predicate.go" + - "./pkg/database/ent/runtime.go" + - "./pkg/database/ent/runtime/runtime.go" + - "./pkg/database/ent/tx.go" + - "./pkg/models/add_alerts_request.go" + - "./pkg/models/add_alerts_response.go" + - "./pkg/models/add_signals_request.go" + - "./pkg/models/add_signals_request_item_decisions.go" + - "./pkg/models/add_signals_request_item_decisions_item.go" + - "./pkg/models/add_signals_request_item.go" + - "./pkg/models/add_signals_request_item_source.go" + - "./pkg/models/alert.go" + - "./pkg/models/all_metrics.go" + - "./pkg/models/allowlist_item.go" + - "./pkg/models/base_metrics.go" + - "./pkg/models/bulk_check_allowlist_request.go" + - "./pkg/models/bulk_check_allowlist_response.go" + - "./pkg/models/bulk_check_allowlist_result.go" + - "./pkg/modelscapi/add_signals_request.go" + - "./pkg/modelscapi/add_signals_request_item_decisions.go" + - "./pkg/modelscapi/add_signals_request_item_decisions_item.go" + - "./pkg/modelscapi/add_signals_request_item.go" + - "./pkg/modelscapi/add_signals_request_item_source.go" + - "./pkg/modelscapi/allowlist_link.go" + - "./pkg/modelscapi/blocklist_link.go" + - "./pkg/modelscapi/decisions_delete_request.go" + - "./pkg/modelscapi/decisions_delete_request_item.go" + - "./pkg/modelscapi/decisions_sync_request.go" + - "./pkg/modelscapi/decisions_sync_request_item_decisions.go" + - "./pkg/modelscapi/decisions_sync_request_item_decisions_item.go" + - "./pkg/modelscapi/decisions_sync_request_item.go" + - "./pkg/modelscapi/decisions_sync_request_item_source.go" + - "./pkg/modelscapi/enroll_request.go" + - "./pkg/modelscapi/error_response.go" + - "./pkg/modelscapi/get_decisions_stream_response_deleted.go" + - 
"./pkg/modelscapi/get_decisions_stream_response_deleted_item.go" + - "./pkg/modelscapi/get_decisions_stream_response.go" + - "./pkg/modelscapi/get_decisions_stream_response_links.go" + - "./pkg/modelscapi/get_decisions_stream_response_new.go" + - "./pkg/modelscapi/get_decisions_stream_response_new_item.go" + - "./pkg/modelscapi/login_request.go" + - "./pkg/modelscapi/login_response.go" + - "./pkg/modelscapi/metrics_request_bouncers_item.go" + - "./pkg/modelscapi/metrics_request.go" + - "./pkg/modelscapi/metrics_request_machines_item.go" + - "./pkg/modelscapi/register_request.go" + - "./pkg/modelscapi/reset_password_request.go" + - "./pkg/modelscapi/success_response.go" + - "./pkg/models/check_allowlist_response.go" + - "./pkg/models/console_options.go" + - "./pkg/models/decision.go" + - "./pkg/models/decisions_delete_request.go" + - "./pkg/models/decisions_delete_request_item.go" + - "./pkg/models/decisions_stream_response.go" + - "./pkg/models/delete_alerts_response.go" + - "./pkg/models/delete_decision_response.go" + - "./pkg/models/detailed_metrics.go" + - "./pkg/models/error_response.go" + - "./pkg/models/event.go" + - "./pkg/models/flush_decision_response.go" + - "./pkg/models/get_alerts_response.go" + - "./pkg/models/get_allowlist_response.go" + - "./pkg/models/get_allowlists_response.go" + - "./pkg/models/get_decisions_response.go" + - "./pkg/models/hub_item.go" + - "./pkg/models/hub_items.go" + - "./pkg/models/lapi_metrics.go" + - "./pkg/models/log_processors_metrics.go" + - "./pkg/models/meta.go" + - "./pkg/models/metrics_agent_info.go" + - "./pkg/models/metrics_bouncer_info.go" + - "./pkg/models/metrics_detail_item.go" + - "./pkg/models/metrics.go" + - "./pkg/models/metrics_labels.go" + - "./pkg/models/metrics_meta.go" + - "./pkg/models/o_sversion.go" + - "./pkg/models/remediation_components_metrics.go" + - "./pkg/models/source.go" + - "./pkg/models/success_response.go" + - "./pkg/models/topx_response.go" + - "./pkg/models/watcher_auth_request.go" + - 
"./pkg/models/watcher_auth_response.go" + - "./pkg/models/watcher_registration_request.go" + - "./pkg/protobufs/notifier_grpc.pb.go" + - "./pkg/protobufs/notifier.pb.go" diff --git a/.github/generate-codecov-yml.sh b/.github/generate-codecov-yml.sh index ddb60d0ce80..7cd1aed1747 100755 --- a/.github/generate-codecov-yml.sh +++ b/.github/generate-codecov-yml.sh @@ -2,7 +2,7 @@ # Run this from the repository root: # -# .github/generate-codecov-yml.sh >> .github/codecov.yml +# .github/generate-codecov-yml.sh > .github/codecov.yml cat <> .github/codecov.yml + .github/generate-codecov-yml.sh > .github/codecov.yml - name: Ensure we can do a dynamic build run: | From 505ad36dfdd391261e86e505efbb642dbfc438ce Mon Sep 17 00:00:00 2001 From: robigan <35210888+robigan@users.noreply.github.com> Date: Fri, 9 May 2025 16:43:15 +0200 Subject: [PATCH 527/581] Fix spelling mistake in metrics.go (#3618) --- cmd/crowdsec-cli/climetrics/metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crowdsec-cli/climetrics/metrics.go b/cmd/crowdsec-cli/climetrics/metrics.go index 15a852d0a41..892eb1123d8 100644 --- a/cmd/crowdsec-cli/climetrics/metrics.go +++ b/cmd/crowdsec-cli/climetrics/metrics.go @@ -28,7 +28,7 @@ func (cli *cliMetrics) NewCommand() *cobra.Command { Use: "metrics", Short: "Display crowdsec prometheus metrics.", Long: `Fetch metrics from a Local API server and display them`, - Example: `# Show all Metrics, skip empty tables (same as "cecli metrics show") + Example: `# Show all Metrics, skip empty tables (same as "cscli metrics show") cscli metrics # Show only some metrics, connect to a different url From 073a35deac313130095b599edcdc805a921afab1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 12 May 2025 14:08:21 +0200 Subject: [PATCH 528/581] refact cmd/crowdsec: remove login code obsoleted by 16d06779 (#3620) --- cmd/crowdsec/lapiclient.go | 65 -------------------------------------- 1 file changed, 65 
deletions(-) delete mode 100644 cmd/crowdsec/lapiclient.go diff --git a/cmd/crowdsec/lapiclient.go b/cmd/crowdsec/lapiclient.go deleted file mode 100644 index 6656ba6b4c2..00000000000 --- a/cmd/crowdsec/lapiclient.go +++ /dev/null @@ -1,65 +0,0 @@ -package main - -import ( - "context" - "fmt" - "net/url" - "time" - - "github.com/go-openapi/strfmt" - - "github.com/crowdsecurity/crowdsec/pkg/apiclient" - "github.com/crowdsecurity/crowdsec/pkg/csconfig" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" - "github.com/crowdsecurity/crowdsec/pkg/models" -) - -func AuthenticatedLAPIClient(ctx context.Context, credentials csconfig.ApiCredentialsCfg, hub *cwhub.Hub) (*apiclient.ApiClient, error) { - apiURL, err := url.Parse(credentials.URL) - if err != nil { - return nil, fmt.Errorf("parsing api url ('%s'): %w", credentials.URL, err) - } - - papiURL, err := url.Parse(credentials.PapiURL) - if err != nil { - return nil, fmt.Errorf("parsing polling api url ('%s'): %w", credentials.PapiURL, err) - } - - password := strfmt.Password(credentials.Password) - - itemsForAPI := hub.GetInstalledListForAPI() - - client, err := apiclient.NewClient(&apiclient.Config{ - MachineID: credentials.Login, - Password: password, - Scenarios: itemsForAPI, - URL: apiURL, - PapiURL: papiURL, - VersionPrefix: "v1", - UpdateScenario: func(_ context.Context) ([]string, error) { - return itemsForAPI, nil - }, - }) - if err != nil { - return nil, fmt.Errorf("new client api: %w", err) - } - - authResp, _, err := client.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ - MachineID: &credentials.Login, - Password: &password, - Scenarios: itemsForAPI, - }) - if err != nil { - return nil, fmt.Errorf("authenticate watcher (%s): %w", credentials.Login, err) - } - - var expiration time.Time - if err := expiration.UnmarshalText([]byte(authResp.Expire)); err != nil { - return nil, fmt.Errorf("unable to parse jwt expiration: %w", err) - } - - client.GetClient().Transport.(*apiclient.JWTTransport).Token = 
authResp.Token - client.GetClient().Transport.(*apiclient.JWTTransport).Expiration = expiration - - return client, nil -} From 14a1725efe3b0044a17b0038a60958ac2cca6f7c Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 14 May 2025 16:31:56 +0200 Subject: [PATCH 529/581] cscli capi status: save auth token, add tests (#3623) * "cscli capi status": save auth token * CI: test auth token cache * test that capi status saves the token too * fix postgres test --- cmd/crowdsec-cli/clicapi/capi.go | 27 ++++++++--- cmd/crowdsec-cli/clisupport/support.go | 6 +-- pkg/apiserver/apic.go | 62 +------------------------ pkg/database/config.go | 63 ++++++++++++++++++++++++++ test/bats/04_capi.bats | 45 +++++++++++++++++- test/bats/sql.bats | 24 ++++++++++ test/lib/db/instance-postgres | 2 + 7 files changed, 158 insertions(+), 71 deletions(-) create mode 100644 test/bats/sql.bats diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 864da56e8a4..290e01fd52b 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -21,6 +21,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -120,7 +121,7 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF log.Infof("Central API credentials written to '%s'", dumpFile) } else { - fmt.Println(string(apiConfigDump)) + fmt.Fprintln(os.Stdout, string(apiConfigDump)) } if msg := reload.UserMessage(); msg != "" { @@ -154,8 +155,8 @@ func (cli *cliCapi) newRegisterCmd() *cobra.Command { return cmd } -// queryCAPIStatus checks if the Central API is reachable, and if the credentials are correct. It then checks if the instance is enrolle in the console. 
-func queryCAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { +// queryCAPIStatus checks if the Central API is reachable, and if the credentials are correct. It then checks if the instance is enrolled in the console. +func queryCAPIStatus(ctx context.Context, db *database.Client, hub *cwhub.Hub, credURL string, login string, password string) (bool, bool, error) { apiURL, err := url.Parse(credURL) if err != nil { return false, false, err @@ -198,6 +199,10 @@ func queryCAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login return false, false, err } + if err := db.SaveAPICToken(ctx, authResp.Token); err != nil { + return false, false, err + } + client.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token if client.IsEnrolled() { @@ -207,7 +212,7 @@ func queryCAPIStatus(ctx context.Context, hub *cwhub.Hub, credURL string, login return true, false, nil } -func (cli *cliCapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) error { +func (cli *cliCapi) Status(ctx context.Context, db *database.Client, out io.Writer, hub *cwhub.Hub) error { cfg := cli.cfg() if err := require.CAPIRegistered(cfg); err != nil { @@ -219,7 +224,7 @@ func (cli *cliCapi) Status(ctx context.Context, out io.Writer, hub *cwhub.Hub) e fmt.Fprintf(out, "Loaded credentials from %s\n", cfg.API.Server.OnlineClient.CredentialsFilePath) fmt.Fprintf(out, "Trying to authenticate with username %s on %s\n", cred.Login, cred.URL) - auth, enrolled, err := queryCAPIStatus(ctx, hub, cred.URL, cred.Login, cred.Password) + auth, enrolled, err := queryCAPIStatus(ctx, db, hub, cred.URL, cred.Login, cred.Password) if err != nil { return fmt.Errorf("failed to authenticate to Central API (CAPI): %w", err) } @@ -263,12 +268,20 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Args: args.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil) 
+ cfg := cli.cfg() + ctx := cmd.Context() + + hub, err := require.Hub(cfg, nil) + if err != nil { + return err + } + + db, err := require.DBClient(ctx, cfg.DbConfig) if err != nil { return err } - return cli.Status(cmd.Context(), color.Output, hub) + return cli.Status(ctx, db, color.Output, hub) }, } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 03545d15b85..2f47b9a8b55 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -256,13 +256,13 @@ func (cli *cliSupport) dumpLAPIStatus(ctx context.Context, zw *zip.Writer, hub * return nil } -func (cli *cliSupport) dumpCAPIStatus(ctx context.Context, zw *zip.Writer, hub *cwhub.Hub) error { +func (cli *cliSupport) dumpCAPIStatus(ctx context.Context, zw *zip.Writer, hub *cwhub.Hub, db *database.Client) error { log.Info("Collecting CAPI status") out := new(bytes.Buffer) cc := clicapi.New(cli.cfg) - err := cc.Status(ctx, out, hub) + err := cc.Status(ctx, db, out, hub) if err != nil { fmt.Fprintf(out, "%s\n", err) } @@ -534,7 +534,7 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { } if !skipCAPI { - if err = cli.dumpCAPIStatus(ctx, zipWriter, hub); err != nil { + if err = cli.dumpCAPIStatus(ctx, zipWriter, hub, db); err != nil { log.Warnf("could not collect CAPI status: %s", err) } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 0a618b0b44e..e16197b7fed 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -17,7 +17,6 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/go-openapi/strfmt" - "github.com/golang-jwt/jwt/v4" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" @@ -247,70 +246,13 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient return ret, err } -// loadAPICToken attempts to retrieve and validate a JWT token from the local database. 
-// It returns the token string, its expiration time, and a boolean indicating whether the token is valid. -// -// A token is considered valid if: -// - it exists in the database, -// - it is a properly formatted JWT with an "exp" claim, -// - it is not expired or near expiry. -func loadAPICToken(ctx context.Context, db *database.Client) (string, time.Time, bool) { - token, err := db.GetConfigItem(ctx, "apic_token") - if err != nil { - log.Debugf("error fetching token from DB: %s", err) - return "", time.Time{}, false - } - - if token == "" { - log.Debug("no token found in DB") - return "", time.Time{}, false - } - - parser := new(jwt.Parser) - - tok, _, err := parser.ParseUnverified(token, jwt.MapClaims{}) - if err != nil { - log.Debugf("error parsing token: %s", err) - return "", time.Time{}, false - } - - claims, ok := tok.Claims.(jwt.MapClaims) - if !ok { - log.Debugf("error parsing token claims: %s", err) - return "", time.Time{}, false - } - - expFloat, ok := claims["exp"].(float64) - if !ok { - log.Debug("token missing 'exp' claim") - return "", time.Time{}, false - } - - exp := time.Unix(int64(expFloat), 0) - if time.Now().UTC().After(exp.Add(-1 * time.Minute)) { - log.Debug("auth token expired") - return "", time.Time{}, false - } - - return token, exp, true -} - -// saveAPICToken stores the given JWT token in the local database under the "apic_token" config item. -func saveAPICToken(ctx context.Context, db *database.Client, token string) error { - if err := db.SetConfigItem(ctx, "apic_token", token); err != nil { - return fmt.Errorf("saving token to db: %w", err) - } - - return nil -} - // Authenticate ensures the API client is authorized to communicate with the CAPI. // It attempts to reuse a previously saved JWT token from the database, falling back to // an authentication request if the token is missing, invalid, or expired. // // If a new token is obtained, it is saved back to the database for caching. 
func (a *apic) Authenticate(ctx context.Context, config *csconfig.OnlineApiClientCfg) error { - if token, exp, valid := loadAPICToken(ctx, a.dbClient); valid { + if token, exp, valid := a.dbClient.LoadAPICToken(ctx, log.StandardLogger()); valid { log.Debug("using valid token from DB") a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Token = token @@ -343,7 +285,7 @@ func (a *apic) Authenticate(ctx context.Context, config *csconfig.OnlineApiClien a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token - return saveAPICToken(ctx, a.dbClient, authResp.Token) + return a.dbClient.SaveAPICToken(ctx, authResp.Token) } // keep track of all alerts in cache and push it to CAPI every PushInterval. diff --git a/pkg/database/config.go b/pkg/database/config.go index 2f262965078..0223efc45ee 100644 --- a/pkg/database/config.go +++ b/pkg/database/config.go @@ -2,13 +2,19 @@ package database import ( "context" + "fmt" + "time" + "github.com/golang-jwt/jwt/v4" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" ) +const apicTokenKey = "apic_token" + func (c *Client) GetConfigItem(ctx context.Context, key string) (string, error) { result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(ctx) @@ -38,3 +44,60 @@ func (c *Client) SetConfigItem(ctx context.Context, key string, value string) er return nil } + +// LoadAPICToken attempts to retrieve and validate a JWT token from the local database. +// It returns the token string, its expiration time, and a boolean indicating whether the token is valid. +// +// A token is considered valid if: +// - it exists in the database, +// - it is a properly formatted JWT with an "exp" claim, +// - it is not expired or near expiry. 
+func (c *Client) LoadAPICToken(ctx context.Context, logger logrus.FieldLogger) (string, time.Time, bool) { + token, err := c.GetConfigItem(ctx, apicTokenKey) + if err != nil { + logger.Debugf("error fetching token from DB: %s", err) + return "", time.Time{}, false + } + + if token == "" { + logger.Debug("no token found in DB") + return "", time.Time{}, false + } + + parser := new(jwt.Parser) + + tok, _, err := parser.ParseUnverified(token, jwt.MapClaims{}) + if err != nil { + logger.Debugf("error parsing token: %s", err) + return "", time.Time{}, false + } + + claims, ok := tok.Claims.(jwt.MapClaims) + if !ok { + logger.Debugf("error parsing token claims: %s", err) + return "", time.Time{}, false + } + + expFloat, ok := claims["exp"].(float64) + if !ok { + logger.Debug("token missing 'exp' claim") + return "", time.Time{}, false + } + + exp := time.Unix(int64(expFloat), 0) + if time.Now().UTC().After(exp.Add(-1 * time.Minute)) { + logger.Debug("auth token expired") + return "", time.Time{}, false + } + + return token, exp, true +} + +// SaveAPICToken stores the given JWT token in the local database under the appropriate config item. 
+func (c *Client) SaveAPICToken(ctx context.Context, token string) error { + if err := c.SetConfigItem(ctx, apicTokenKey, token); err != nil { + return fmt.Errorf("saving token to db: %w", err) + } + + return nil +} diff --git a/test/bats/04_capi.bats b/test/bats/04_capi.bats index 8d0217d39bd..40ed67a80c0 100644 --- a/test/bats/04_capi.bats +++ b/test/bats/04_capi.bats @@ -42,7 +42,7 @@ setup() { config_set 'del(.api.server.online_client)' rune -1 cscli capi status - assert_stderr --regexp "no configuration for Central API \(CAPI\) in '$(echo $CONFIG_YAML|sed s#//#/#g)'" + assert_stderr --regexp "no configuration for Central API \(CAPI\) in '${CONFIG_YAML//\/\//\/}'" } @test "cscli {capi,papi} status" { @@ -100,6 +100,49 @@ setup() { assert_output --partial "You can successfully interact with Central API (CAPI)" } +@test "CAPI login: use cached token from the db" { + ./instance-crowdsec stop + + config_set '.common.log_media="stdout" | .common.log_level="debug"' + + # a correct token was set in the previous test + + rune -0 wait-for \ + --err "CAPI manager configured successfully" \ + "$CROWDSEC" + assert_stderr --partial "using valid token from DB" + refute_stderr --partial "No token found, authenticating" + + # not valid anymore + + rune -0 ./instance-db exec_sql "UPDATE config_items SET VALUE='abc' WHERE name='apic_token'" + + rune -0 wait-for \ + --err "CAPI manager configured successfully" \ + "$CROWDSEC" + refute_stderr --partial "using valid token from DB" + assert_stderr --partial "error parsing token: token contains an invalid number of segments" + assert_stderr --partial "No token found, authenticating" + + # token was re-created + + rune -0 wait-for \ + --err "CAPI manager configured successfully" \ + "$CROWDSEC" + assert_stderr --partial "using valid token from DB" + refute_stderr --partial "No token found, authenticating" + + # "cscli capi status" also saves the token + + rune -0 ./instance-db exec_sql "UPDATE config_items SET VALUE='abc' WHERE 
name='apic_token'" + rune -0 cscli capi status + rune -0 wait-for \ + --err "CAPI manager configured successfully" \ + "$CROWDSEC" + assert_stderr --partial "using valid token from DB" + refute_stderr --partial "No token found, authenticating" +} + @test "capi register must be run from lapi" { config_disable_lapi rune -1 cscli capi register --schmilblick githubciXXXXXXXXXXXXXXXXXXXXXXXX diff --git a/test/bats/sql.bats b/test/bats/sql.bats new file mode 100644 index 00000000000..4ed1f9fb1e6 --- /dev/null +++ b/test/bats/sql.bats @@ -0,0 +1,24 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +#---------- + +@test "sql helper" { + rune -0 ./instance-db exec_sql "SELECT 11235813" + assert_output --partial '11235813' +} diff --git a/test/lib/db/instance-postgres b/test/lib/db/instance-postgres index 918aa63c9df..d47008167da 100755 --- a/test/lib/db/instance-postgres +++ b/test/lib/db/instance-postgres @@ -91,6 +91,8 @@ case "$1" in restore "$@" ;; exec_sql) + PGDATABASE=${PGDATABASE:-crowdsec_test} + export PGDATABASE shift exec_sql "$@" ;; From 7ff6288fe016d11111f3a86f272e094a42742cbb Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Thu, 15 May 2025 15:05:44 +0200 Subject: [PATCH 530/581] Makefile: typo (#3628) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3a04f174cc9..0d3f8d0b5b3 100644 --- a/Makefile +++ b/Makefile @@ -317,7 +317,7 @@ check_golangci-lint: ifeq ($(OS),Windows_NT) @where golangci-lint >nul || (echo "Error: golangci-lint is not installed. Install it from https://github.com/golangci/golangci-lint" && exit 1) else - @command -v galangci-lint > /dev/null 2>&1 || (echo "Error: golangci-lint is not installed. 
Install it from https://github.com/golangci/golangci-lint" && exit 1) + @command -v golangci-lint > /dev/null 2>&1 || (echo "Error: golangci-lint is not installed. Install it from https://github.com/golangci/golangci-lint" && exit 1) endif .PHONY: lint From 981d282de33d511d6d20dfc1bd66047eb6097a6d Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Mon, 19 May 2025 10:16:00 +0100 Subject: [PATCH 531/581] enhance: return err if notification has no plugin type (#3638) * enhance: return err if plugin has no type * fix + reduce error verbosity --------- Co-authored-by: marco --- cmd/crowdsec/api.go | 2 +- pkg/csplugin/broker.go | 16 ++++++++++++--- test/bats/70_plugin_http.bats | 1 + test/bats/71_plugin_dummy.bats | 1 + test/bats/72_plugin_badconfig.bats | 30 ++++++++++++++++++++--------- test/bats/73_plugin_formatting.bats | 1 + 6 files changed, 38 insertions(+), 13 deletions(-) diff --git a/cmd/crowdsec/api.go b/cmd/crowdsec/api.go index 1b14d2f6945..edafc3eeee4 100644 --- a/cmd/crowdsec/api.go +++ b/cmd/crowdsec/api.go @@ -38,7 +38,7 @@ func initAPIServer(ctx context.Context, cConfig *csconfig.Config) (*apiserver.AP err = pluginBroker.Init(ctx, cConfig.PluginConfig, cConfig.API.Server.Profiles, cConfig.ConfigPaths) if err != nil { - return nil, fmt.Errorf("unable to run plugin broker: %w", err) + return nil, fmt.Errorf("plugin broker: %w", err) } log.Info("initiated plugin broker") diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index df78d258da5..e1e151158ac 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -84,11 +84,11 @@ func (pb *PluginBroker) Init(ctx context.Context, pluginCfg *csconfig.PluginCfg, pb.pluginsTypesToDispatch = make(map[string]struct{}) if err := pb.loadConfig(configPaths.NotificationDir); err != nil { - return fmt.Errorf("while loading plugin config: %w", err) + return fmt.Errorf("loading config: %w", err) } if err := pb.loadPlugins(ctx, configPaths.PluginDir); err != nil { - return fmt.Errorf("while loading 
plugin: %w", err) + return fmt.Errorf("loading plugin: %w", err) } pb.watcher = PluginWatcher{} @@ -409,8 +409,12 @@ func ParsePluginConfigFile(path string) ([]PluginConfig, error) { dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) + idx := -1 + for { - pc := PluginConfig{} + var pc PluginConfig + + idx += 1 err = dec.Decode(&pc) if err != nil { @@ -420,11 +424,17 @@ func ParsePluginConfigFile(path string) ([]PluginConfig, error) { return nil, fmt.Errorf("while decoding %s got error %s", path, err) } + // if the yaml document is empty, skip if reflect.DeepEqual(pc, PluginConfig{}) { continue } + if pc.Type == "" { + return nil, fmt.Errorf("field 'type' missing in %s (position %d)", path, idx) + + } + parsedConfigs = append(parsedConfigs, pc) } diff --git a/test/bats/70_plugin_http.bats b/test/bats/70_plugin_http.bats index 917883f3461..6ae7e67521e 100644 --- a/test/bats/70_plugin_http.bats +++ b/test/bats/70_plugin_http.bats @@ -19,6 +19,7 @@ setup_file() { # https://mikefarah.gitbook.io/yq/operators/env-variable-operators config_set "$(config_get '.config_paths.notification_dir')/http.yaml" ' + .type="http" | .url=strenv(MOCK_URL) | .group_wait="5s" | .group_threshold=2 diff --git a/test/bats/71_plugin_dummy.bats b/test/bats/71_plugin_dummy.bats index 632aa689ee8..35a05463e46 100644 --- a/test/bats/71_plugin_dummy.bats +++ b/test/bats/71_plugin_dummy.bats @@ -17,6 +17,7 @@ setup_file() { DUMMY_YAML="$(config_get '.config_paths.notification_dir')/dummy.yaml" config_set "$DUMMY_YAML" ' + .type="dummy" | .group_wait="5s" | .group_threshold=2 | .output_file=strenv(tempfile) | diff --git a/test/bats/72_plugin_badconfig.bats b/test/bats/72_plugin_badconfig.bats index ab8ceecc56b..8570b1c8a0b 100644 --- a/test/bats/72_plugin_badconfig.bats +++ b/test/bats/72_plugin_badconfig.bats @@ -38,7 +38,7 @@ teardown() { config_set '.plugin_config.user="" | .plugin_config.group="nogroup"' config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ - --err 
"api server init: unable to run plugin broker: while loading plugin: while getting process attributes: both plugin user and group must be set" \ + --err "api server init: plugin broker: loading plugin: while getting process attributes: both plugin user and group must be set" \ "$CROWDSEC" } @@ -46,7 +46,7 @@ teardown() { config_set '(.plugin_config.user="nobody") | (.plugin_config.group="")' config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ - --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: both plugin user and group must be set" \ + --err "api server init: plugin broker: loading plugin: while getting process attributes: both plugin user and group must be set" \ "$CROWDSEC" } @@ -54,7 +54,7 @@ teardown() { config_set '(.plugin_config.user="userdoesnotexist") | (.plugin_config.group="groupdoesnotexist")' config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ - --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: user: unknown user userdoesnotexist" \ + --err "api server init: plugin broker: loading plugin: while getting process attributes: user: unknown user userdoesnotexist" \ "$CROWDSEC" } @@ -62,7 +62,7 @@ teardown() { config_set '(.plugin_config.user=strenv(USER)) | (.plugin_config.group="groupdoesnotexist")' config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ - --err "api server init: unable to run plugin broker: while loading plugin: while getting process attributes: group: unknown group groupdoesnotexist" \ + --err "api server init: plugin broker: loading plugin: while getting process attributes: group: unknown group groupdoesnotexist" \ "$CROWDSEC" } @@ -70,7 +70,7 @@ teardown() { config_set "$PROFILES_PATH" '.notifications=["http_default"]' cp "$PLUGIN_DIR"/notification-http "$PLUGIN_DIR"/badname rune -0 wait-for \ - --err "api server init: unable to run 
plugin broker: while loading plugin: plugin name ${PLUGIN_DIR}/badname is invalid. Name should be like {type-name}" \ + --err "api server init: plugin broker: loading plugin: plugin name ${PLUGIN_DIR}/badname is invalid. Name should be like {type-name}" \ "$CROWDSEC" } @@ -90,7 +90,7 @@ teardown() { config_set "$PROFILES_PATH" '.notifications=["http_default"]' chmod g+w "$PLUGIN_DIR"/notification-http rune -0 wait-for \ - --err "api server init: unable to run plugin broker: while loading plugin: plugin at ${PLUGIN_DIR}/notification-http is group writable, group writable plugins are invalid" \ + --err "api server init: plugin broker: loading plugin: plugin at ${PLUGIN_DIR}/notification-http is group writable, group writable plugins are invalid" \ "$CROWDSEC" } @@ -98,7 +98,7 @@ teardown() { config_set "$PROFILES_PATH" '.notifications=["http_default"]' chmod o+w "$PLUGIN_DIR"/notification-http rune -0 wait-for \ - --err "api server init: unable to run plugin broker: while loading plugin: plugin at ${PLUGIN_DIR}/notification-http is world writable, world writable plugins are invalid" \ + --err "api server init: plugin broker: loading plugin: plugin at ${PLUGIN_DIR}/notification-http is world writable, world writable plugins are invalid" \ "$CROWDSEC" } @@ -124,10 +124,22 @@ teardown() { "$CROWDSEC" } -@test "unable to run plugin broker: while reading plugin config" { +@test "plugin broker: missing notification dir" { config_set '.config_paths.notification_dir="/this/path/does/not/exist"' config_set "$PROFILES_PATH" '.notifications=["http_default"]' rune -0 wait-for \ - --err "api server init: unable to run plugin broker: while loading plugin config: open /this/path/does/not/exist: no such file or directory" \ + --err "api server init: plugin broker: loading config: open /this/path/does/not/exist: no such file or directory" \ "$CROWDSEC" } + +@test "misconfigured notification: missing plugin type" { + rune -0 yq -i 'del(.type)' "$CONFIG_DIR/notifications/http.yaml" + # 
enable a notification, otherwise plugins are ignored + config_set "$PROFILES_PATH" '.notifications=["http_default"]' + # the slack plugin may fail or not, but we just need the logs + config_set '.common.log_media="stdout"' + rune wait-for \ + --err "api server init: plugin broker: loading plugin config" \ + "$CROWDSEC" + assert_stderr --partial "field 'type' missing in $CONFIG_DIR/notifications/http.yaml (position 0)" +} diff --git a/test/bats/73_plugin_formatting.bats b/test/bats/73_plugin_formatting.bats index 5153946b061..7b528d0d690 100644 --- a/test/bats/73_plugin_formatting.bats +++ b/test/bats/73_plugin_formatting.bats @@ -17,6 +17,7 @@ setup_file() { # the $alert is not a shell variable # shellcheck disable=SC2016 config_set "$DUMMY_YAML" ' + .type="dummy" | .group_wait="5s" | .group_threshold=2 | .output_file=strenv(tempfile) | From 3b2517fe678b93b009711aeeadd0fca55f320065 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 19 May 2025 13:22:42 +0200 Subject: [PATCH 532/581] refact: cleanup bats helper (#3636) --- test/bin/wait-for | 64 ++++++++++++++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 23 deletions(-) diff --git a/test/bin/wait-for b/test/bin/wait-for index b226783d44b..798e2a09b34 100755 --- a/test/bin/wait-for +++ b/test/bin/wait-for @@ -15,22 +15,31 @@ DEFAULT_TIMEOUT = 30 # TODO: print unmatched patterns -async def terminate(p): - # Terminate the process group (shell, crowdsec plugins) +async def terminate_group(p: asyncio.subprocess.Process): + """ + Terminate the process group (shell, crowdsec plugins) + """ try: os.killpg(os.getpgid(p.pid), signal.SIGTERM) except ProcessLookupError: pass -async def monitor(cmd, args, want_out, want_err, timeout): - """Monitor a process and terminate it if a pattern is matched in stdout or stderr. 
+async def monitor( + cmd: str, + args: list[str], + out_regex: re.Pattern[str] | None, + err_regex: re.Pattern[str] | None, + timeout: float +) -> int: + """ + Run a subprocess, monitor its stdout/stderr for matches, and handle timeouts or pattern hits. Args: cmd: The command to run. args: A list of arguments to pass to the command. - stdout: A regular expression pattern to search for in stdout. - stderr: A regular expression pattern to search for in stderr. + out_regex: A compiled regular expression to search for in stdout. + err_regex: A compiled regular expression to search for in stderr. timeout: The maximum number of seconds to wait for the process to terminate. Returns: @@ -39,17 +48,18 @@ async def monitor(cmd, args, want_out, want_err, timeout): status = None - async def read_stream(stream, outstream, pattern): + async def read_stream(stream: asyncio.StreamReader | None, out, pattern: re.Pattern[str] | None): nonlocal status if stream is None: return + while True: line = await stream.readline() if line: line = line.decode('utf-8') - outstream.write(line) + out.write(line) if pattern and pattern.search(line): - await terminate(process) + await terminate_group(process) # this is nasty. 
# if we timeout, we want to return a different exit code # in case of a match, so that the caller can tell @@ -76,9 +86,6 @@ async def monitor(cmd, args, want_out, want_err, timeout): # (required to kill child processes when cmd is a shell) preexec_fn=os.setsid) - out_regex = re.compile(want_out) if want_out else None - err_regex = re.compile(want_err) if want_err else None - # Apply a timeout try: await asyncio.wait_for( @@ -90,27 +97,38 @@ async def monitor(cmd, args, want_out, want_err, timeout): if status is None: status = process.returncode except asyncio.TimeoutError: - await terminate(process) + await terminate_group(process) status = 241 # Return the same exit code, stdout and stderr as the spawned process - return status + return status or 0 + + +class Args(argparse.Namespace): + cmd: str = '' + args: list[str] = [] + out: str = '' + err: str = '' + timeout: float = DEFAULT_TIMEOUT async def main(): parser = argparse.ArgumentParser( description='Monitor a process and terminate it if a pattern is matched in stdout or stderr.') - parser.add_argument('cmd', help='The command to run.') - parser.add_argument('args', nargs=argparse.REMAINDER, help='A list of arguments to pass to the command.') - parser.add_argument('--out', default='', help='A regular expression pattern to search for in stdout.') - parser.add_argument('--err', default='', help='A regular expression pattern to search for in stderr.') - parser.add_argument('--timeout', type=float, default=DEFAULT_TIMEOUT) - args = parser.parse_args() + _ = parser.add_argument('cmd', help='The command to run.') + _ = parser.add_argument('args', nargs=argparse.REMAINDER, help='A list of arguments to pass to the command.') + _ = parser.add_argument('--out', help='A regular expression pattern to search for in stdout.') + _ = parser.add_argument('--err', help='A regular expression pattern to search for in stderr.') + _ = parser.add_argument('--timeout', type=float, default=DEFAULT_TIMEOUT) + args: Args = 
parser.parse_args(namespace=Args()) + + out_regex = re.compile(args.out) if args.out else None + err_regex = re.compile(args.err) if args.err else None - exit_code = await monitor(args.cmd, args.args, args.out, args.err, args.timeout) + exit_code = await monitor(args.cmd, args.args, out_regex, err_regex, args.timeout) - sys.exit(exit_code) + return exit_code if __name__ == '__main__': - asyncio.run(main()) + sys.exit(asyncio.run(main())) From 3664d6f40ab01d39028d28b7f96d32070bf9714a Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 19 May 2025 13:23:18 +0200 Subject: [PATCH 533/581] CI: release-drafter configuration: permissions, skip-changelog label (#3631) --- .github/release-drafter.yml | 6 ++++-- .github/workflows/ci_release-drafter.yml | 10 ++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index f56c03d49df..b87bc857dab 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -23,6 +23,8 @@ categories: - 'kind/dependencies' - 'kind/chore' tag-template: "- $TITLE @$AUTHOR (#$NUMBER)" +exclude-labels: + - 'skip-changelog' template: | ## Changes @@ -31,8 +33,8 @@ template: | ## Geolite2 notice This product includes GeoLite2 data created by MaxMind, available from https://www.maxmind.com. - + ## Installation Take a look at the [installation instructions](https://doc.crowdsec.net/docs/getting_started/install_crowdsec). 
- + diff --git a/.github/workflows/ci_release-drafter.yml b/.github/workflows/ci_release-drafter.yml index 0b8c9b386e6..b4404bced5e 100644 --- a/.github/workflows/ci_release-drafter.yml +++ b/.github/workflows/ci_release-drafter.yml @@ -4,18 +4,24 @@ on: push: # branches to consider in the event; optional, defaults to all branches: + - main - master - releases/** +permissions: + contents: read + jobs: update_release_draft: + permissions: + contents: write + pull-requests: read runs-on: ubuntu-latest steps: # Drafts your next Release notes as Pull Requests are merged into "master" - uses: release-drafter/release-drafter@v6 with: - config-name: release-drafter.yml # (Optional) specify config name to use, relative to .github/. Default: release-drafter.yml - # config-name: my-config.yml + config-name: release-drafter.yml env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From bbc8d7ffb4ee0e609c3a203e458f9b8b29c75b60 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Mon, 19 May 2025 17:09:53 +0200 Subject: [PATCH 534/581] refact apiclient.Config: remove field Scenarios (#3622) --- cmd/crowdsec-cli/clicapi/capi.go | 1 - cmd/crowdsec-cli/cliconsole/console.go | 10 +++-- pkg/apiclient/auth_service_test.go | 52 ++++++++++++++++++-------- pkg/apiclient/client.go | 2 - pkg/apiclient/config.go | 1 - pkg/apiserver/apic.go | 8 ---- pkg/apiserver/apic_test.go | 1 - 7 files changed, 43 insertions(+), 32 deletions(-) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 290e01fd52b..64ab46b7c3f 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -173,7 +173,6 @@ func queryCAPIStatus(ctx context.Context, db *database.Client, hub *cwhub.Hub, c client, err := apiclient.NewClient(&apiclient.Config{ MachineID: login, Password: passwd, - Scenarios: itemsForAPI, URL: apiURL, // I don't believe papi is neede to check enrollement // PapiURL: papiURL, diff --git 
a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 2360e854e6a..e9fc0ebab60 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -9,8 +9,8 @@ import ( "net/http" "net/url" "os" - "strconv" "slices" + "strconv" "strings" "github.com/fatih/color" @@ -85,9 +85,11 @@ func (cli *cliConsole) enroll(ctx context.Context, key string, name string, over c, _ := apiclient.NewClient(&apiclient.Config{ MachineID: cli.cfg().API.Server.OnlineClient.Credentials.Login, Password: password, - Scenarios: hub.GetInstalledListForAPI(), URL: apiURL, VersionPrefix: "v3", + UpdateScenario: func(_ context.Context) ([]string, error) { + return hub.GetInstalledListForAPI(), nil + }, }) resp, err := c.Auth.EnrollWatcher(ctx, key, name, tags, overwrite) @@ -157,12 +159,14 @@ func optionFilterDisable(opts []string, disableOpts []string) ([]string, error) // discard all elements == opt j := 0 + for _, o := range opts { if o != opt { opts[j] = o j++ } } + opts = opts[:j] } @@ -323,7 +327,7 @@ func (cli *cliConsole) newStatusCmd() *cobra.Command { if err != nil { return fmt.Errorf("failed to serialize configuration: %w", err) } - fmt.Println(string(data)) + fmt.Fprintln(os.Stdout, string(data)) case "raw": csvwriter := csv.NewWriter(os.Stdout) err := csvwriter.Write([]string{"option", "enabled"}) diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index 11bc68fb7ff..c49cd0d94c0 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -2,6 +2,7 @@ package apiclient import ( "bytes" + "context" "encoding/json" "fmt" "io" @@ -72,6 +73,7 @@ func initBasicMuxMock(t *testing.T, mux *http.ServeMux, path string) { */ func TestWatcherRegister(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -110,6 +112,7 @@ func TestWatcherRegister(t *testing.T) { func TestWatcherAuth(t *testing.T) { ctx := t.Context() + 
log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -122,22 +125,29 @@ func TestWatcherAuth(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) + updateScenario := func(_ context.Context) ([]string, error) { + return []string{"crowdsecurity/test"}, nil + } + // ok auth clientConfig := &Config{ - MachineID: "test_login", - Password: "test_password", - URL: apiURL, - VersionPrefix: "v1", - Scenarios: []string{"crowdsecurity/test"}, + MachineID: "test_login", + Password: "test_password", + URL: apiURL, + VersionPrefix: "v1", + UpdateScenario: updateScenario, } client, err := NewClient(clientConfig) require.NoError(t, err) + scenarios, err := clientConfig.UpdateScenario(ctx) + require.NoError(t, err) + _, _, err = client.Auth.AuthenticateWatcher(ctx, models.WatcherAuthRequest{ MachineID: &clientConfig.MachineID, Password: &clientConfig.Password, - Scenarios: clientConfig.Scenarios, + Scenarios: scenarios, }) require.NoError(t, err) @@ -171,6 +181,7 @@ func TestWatcherAuth(t *testing.T) { func TestWatcherUnregister(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -205,12 +216,16 @@ func TestWatcherUnregister(t *testing.T) { apiURL, err := url.Parse(urlx + "/") require.NoError(t, err) + updateScenario := func(_ context.Context) ([]string, error) { + return []string{"crowdsecurity/test"}, nil + } + mycfg := &Config{ - MachineID: "test_login", - Password: "test_password", - URL: apiURL, - VersionPrefix: "v1", - Scenarios: []string{"crowdsecurity/test"}, + MachineID: "test_login", + Password: "test_password", + URL: apiURL, + VersionPrefix: "v1", + UpdateScenario: updateScenario, } client, err := NewClient(mycfg) @@ -224,6 +239,7 @@ func TestWatcherUnregister(t *testing.T) { func TestWatcherEnroll(t *testing.T) { ctx := t.Context() + log.SetLevel(log.DebugLevel) mux, urlx, teardown := setup() @@ -260,12 +276,16 @@ func TestWatcherEnroll(t *testing.T) { apiURL, err := url.Parse(urlx + "/") 
require.NoError(t, err) + updateScenario := func(_ context.Context) ([]string, error) { + return []string{"crowdsecurity/test"}, nil + } + mycfg := &Config{ - MachineID: "test_login", - Password: "test_password", - URL: apiURL, - VersionPrefix: "v1", - Scenarios: []string{"crowdsecurity/test"}, + MachineID: "test_login", + Password: "test_password", + URL: apiURL, + VersionPrefix: "v1", + UpdateScenario: updateScenario, } client, err := NewClient(mycfg) diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 9192bf095a1..3c5d477da32 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -87,7 +87,6 @@ func InitLAPIClient(ctx context.Context, apiUrl string, papiUrl string, login st client, err := NewClient(&Config{ MachineID: login, Password: pwd, - Scenarios: scenarios, URL: apiURL, PapiURL: papiURL, VersionPrefix: "v1", @@ -138,7 +137,6 @@ func NewClient(config *Config) (*ApiClient, error) { t := &JWTTransport{ MachineID: &config.MachineID, Password: &config.Password, - Scenarios: config.Scenarios, UserAgent: userAgent, VersionPrefix: config.VersionPrefix, UpdateScenario: config.UpdateScenario, diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go index 29a8acf185e..9c0a853b1bc 100644 --- a/pkg/apiclient/config.go +++ b/pkg/apiclient/config.go @@ -10,7 +10,6 @@ import ( type Config struct { MachineID string Password strfmt.Password - Scenarios []string URL *url.URL PapiURL *url.URL VersionPrefix string diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index e16197b7fed..12aa630457e 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -67,7 +67,6 @@ type apic struct { metricsTomb tomb.Tomb startup bool credentials *csconfig.ApiCredentialsCfg - scenarioList []string consoleConfig *csconfig.ConsoleConfig isPulling chan bool whitelists *csconfig.CapiWhitelist @@ -196,7 +195,6 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient pullTomb: tomb.Tomb{}, pushTomb: tomb.Tomb{}, 
metricsTomb: tomb.Tomb{}, - scenarioList: make([]string, 0), consoleConfig: consoleConfig, pullInterval: pullIntervalDefault, pullIntervalFirst: randomDuration(pullIntervalDefault, pullIntervalDelta), @@ -223,18 +221,12 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient return nil, fmt.Errorf("while parsing '%s': %w", config.Credentials.PapiURL, err) } - ret.scenarioList, err = ret.FetchScenariosListFromDB(ctx) - if err != nil { - return nil, fmt.Errorf("while fetching scenarios from db: %w", err) - } - ret.apiClient, err = apiclient.NewClient(&apiclient.Config{ MachineID: config.Credentials.Login, Password: strfmt.Password(config.Credentials.Password), URL: apiURL, PapiURL: papiURL, VersionPrefix: "v3", - Scenarios: ret.scenarioList, UpdateScenario: ret.FetchScenariosListFromDB, }) if err != nil { diff --git a/pkg/apiserver/apic_test.go b/pkg/apiserver/apic_test.go index a9931875513..e8c049f1a99 100644 --- a/pkg/apiserver/apic_test.go +++ b/pkg/apiserver/apic_test.go @@ -63,7 +63,6 @@ func getAPIC(t *testing.T, ctx context.Context) *apic { pullTomb: tomb.Tomb{}, pushTomb: tomb.Tomb{}, metricsTomb: tomb.Tomb{}, - scenarioList: make([]string, 0), consoleConfig: &csconfig.ConsoleConfig{ ShareManualDecisions: ptr.Of(false), ShareTaintedScenarios: ptr.Of(false), From 1b2eec9aacdfca9f8f6d60913c3e9c6ee1b3ffd2 Mon Sep 17 00:00:00 2001 From: Manuel Sabban Date: Wed, 21 May 2025 10:54:21 +0200 Subject: [PATCH 535/581] feat(apiclient): add token save functionality (#3639) --- cmd/crowdsec-cli/clicapi/capi.go | 2 +- pkg/apiclient/auth_jwt.go | 7 +++++++ pkg/apiclient/client.go | 3 +++ pkg/apiclient/config.go | 3 +++ pkg/apiserver/apic.go | 7 ++++++- pkg/database/config.go | 21 ++++++++++++++++----- 6 files changed, 36 insertions(+), 7 deletions(-) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 64ab46b7c3f..41670d17f91 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ 
-198,7 +198,7 @@ func queryCAPIStatus(ctx context.Context, db *database.Client, hub *cwhub.Hub, c return false, false, err } - if err := db.SaveAPICToken(ctx, authResp.Token); err != nil { + if err := db.SaveAPICToken(ctx, apiclient.TokenDBField, authResp.Token); err != nil { return false, false, err } diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 54dafb615ba..4557258233c 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -33,6 +33,7 @@ type JWTTransport struct { Transport http.RoundTripper UpdateScenario func(context.Context) ([]string, error) refreshTokenMutex sync.Mutex + TokenSave TokenSave } func (t *JWTTransport) refreshJwtToken() error { @@ -134,6 +135,12 @@ func (t *JWTTransport) refreshJwtToken() error { t.Token = response.Token + if t.TokenSave != nil { + err = t.TokenSave(ctx, TokenDBField, t.Token) + if err != nil { + log.Errorf("unable to save token: %s", err) + } + } log.Debugf("token %s will expire on %s", t.Token, t.Expiration.String()) return nil diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 3c5d477da32..371406c15c2 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -26,6 +26,8 @@ var ( lapiClient *ApiClient ) +type TokenSave func(ctx context.Context, tokenKey string, token string) error + type ApiClient struct { /*The http client used to make requests*/ client *http.Client @@ -147,6 +149,7 @@ func NewClient(config *Config) (*ApiClient, error) { WithStatusCodeConfig(http.StatusServiceUnavailable, 5, true, false), WithStatusCodeConfig(http.StatusGatewayTimeout, 5, true, false), ), + TokenSave: config.TokenSave, } transport, baseURL := createTransport(config.URL) diff --git a/pkg/apiclient/config.go b/pkg/apiclient/config.go index 9c0a853b1bc..e8be8541c29 100644 --- a/pkg/apiclient/config.go +++ b/pkg/apiclient/config.go @@ -7,6 +7,8 @@ import ( "github.com/go-openapi/strfmt" ) +const TokenDBField = "apic_token" + type Config struct { MachineID string Password 
strfmt.Password @@ -16,4 +18,5 @@ type Config struct { UserAgent string RegistrationToken string UpdateScenario func(context.Context) ([]string, error) + TokenSave func(context.Context, string, string) error } diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 12aa630457e..5687f399f28 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -74,6 +74,8 @@ type apic struct { pullBlocklists bool pullCommunity bool shareSignals bool + + TokenSave apiclient.TokenSave } // randomDuration returns a duration value between d-delta and d+delta @@ -228,6 +230,9 @@ func NewAPIC(ctx context.Context, config *csconfig.OnlineApiClientCfg, dbClient PapiURL: papiURL, VersionPrefix: "v3", UpdateScenario: ret.FetchScenariosListFromDB, + TokenSave: func(ctx context.Context, tokenKey string, token string) error { + return dbClient.SaveAPICToken(ctx, tokenKey, token) + }, }) if err != nil { return nil, fmt.Errorf("while creating api client: %w", err) @@ -277,7 +282,7 @@ func (a *apic) Authenticate(ctx context.Context, config *csconfig.OnlineApiClien a.apiClient.GetClient().Transport.(*apiclient.JWTTransport).Token = authResp.Token - return a.dbClient.SaveAPICToken(ctx, authResp.Token) + return a.dbClient.SaveAPICToken(ctx, apiclient.TokenDBField, authResp.Token) } // keep track of all alerts in cache and push it to CAPI every PushInterval. 
diff --git a/pkg/database/config.go b/pkg/database/config.go index 0223efc45ee..3c51505bc19 100644 --- a/pkg/database/config.go +++ b/pkg/database/config.go @@ -9,12 +9,11 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/database/ent" "github.com/crowdsecurity/crowdsec/pkg/database/ent/configitem" ) -const apicTokenKey = "apic_token" - func (c *Client) GetConfigItem(ctx context.Context, key string) (string, error) { result, err := c.Ent.ConfigItem.Query().Where(configitem.NameEQ(key)).First(ctx) @@ -53,7 +52,7 @@ func (c *Client) SetConfigItem(ctx context.Context, key string, value string) er // - it is a properly formatted JWT with an "exp" claim, // - it is not expired or near expiry. func (c *Client) LoadAPICToken(ctx context.Context, logger logrus.FieldLogger) (string, time.Time, bool) { - token, err := c.GetConfigItem(ctx, apicTokenKey) + token, err := c.GetConfigItem(ctx, apiclient.TokenDBField) // TokenKey is a constant string representing the key for the token in the database if err != nil { logger.Debugf("error fetching token from DB: %s", err) return "", time.Time{}, false @@ -78,6 +77,18 @@ func (c *Client) LoadAPICToken(ctx context.Context, logger logrus.FieldLogger) ( return "", time.Time{}, false } + iatFloat, ok := claims["iat"].(float64) + if !ok { + logger.Debug("token missing 'iat' claim") + return "", time.Time{}, false + } + + iat := time.Unix(int64(iatFloat), 0) + if time.Now().UTC().After(iat.Add(1 * time.Minute)) { + logger.Debug("token is more than 1 minute old, not using it") + return "", time.Time{}, false + } + expFloat, ok := claims["exp"].(float64) if !ok { logger.Debug("token missing 'exp' claim") @@ -94,8 +105,8 @@ func (c *Client) LoadAPICToken(ctx context.Context, logger logrus.FieldLogger) ( } // SaveAPICToken stores the given JWT token in the local database under the appropriate config item. 
-func (c *Client) SaveAPICToken(ctx context.Context, token string) error { - if err := c.SetConfigItem(ctx, apicTokenKey, token); err != nil { +func (c *Client) SaveAPICToken(ctx context.Context, tokenKey string, token string) error { + if err := c.SetConfigItem(ctx, tokenKey, token); err != nil { return fmt.Errorf("saving token to db: %w", err) } From 5fe038df192cd58762fa56976ec8cbd36c9efad2 Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 21 May 2025 10:55:10 +0200 Subject: [PATCH 536/581] kafka: expose batching configuration (#3621) --- pkg/acquisition/modules/kafka/kafka.go | 37 +++++++++++++++++++++----- 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index 2fa01d1c2b4..ee182cc2301 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -33,12 +33,13 @@ var linesRead = prometheus.NewCounterVec( []string{"topic"}) type KafkaConfiguration struct { - Brokers []string `yaml:"brokers"` - Topic string `yaml:"topic"` - GroupID string `yaml:"group_id"` - Partition int `yaml:"partition"` - Timeout string `yaml:"timeout"` - TLS *TLSConfig `yaml:"tls"` + Brokers []string `yaml:"brokers"` + Topic string `yaml:"topic"` + GroupID string `yaml:"group_id"` + Partition int `yaml:"partition"` + Timeout string `yaml:"timeout"` + TLS *TLSConfig `yaml:"tls"` + BatchConfiguration KafkaBatchConfiguration `yaml:"batch"` configuration.DataSourceCommonCfg `yaml:",inline"` } @@ -49,6 +50,14 @@ type TLSConfig struct { CaCert string `yaml:"ca_cert"` } +type KafkaBatchConfiguration struct { + BatchMinBytes int `yaml:"min_bytes"` + BatchMaxBytes int `yaml:"max_bytes"` + BatchMaxWait time.Duration `yaml:"max_wait"` + BatchQueueSize int `yaml:"queue_size"` + CommitInterval time.Duration `yaml:"commit_interval"` +} + type KafkaSource struct { metricsLevel int Config KafkaConfiguration @@ -294,6 +303,22 @@ func (kc *KafkaConfiguration) NewReader(dialer 
*kafka.Dialer, logger *log.Entry) logger.Warnf("no group_id specified, crowdsec will only read from the 1st partition of the topic") } + if kc.BatchConfiguration.BatchMinBytes != 0 { + rConf.MinBytes = kc.BatchConfiguration.BatchMinBytes + } + if kc.BatchConfiguration.BatchMaxBytes != 0 { + rConf.MaxBytes = kc.BatchConfiguration.BatchMaxBytes + } + if kc.BatchConfiguration.BatchMaxWait != 0 { + rConf.MaxWait = kc.BatchConfiguration.BatchMaxWait + } + if kc.BatchConfiguration.BatchQueueSize != 0 { + rConf.QueueCapacity = kc.BatchConfiguration.BatchQueueSize + } + if kc.BatchConfiguration.CommitInterval != 0 { + rConf.CommitInterval = kc.BatchConfiguration.CommitInterval + } + if err := rConf.Validate(); err != nil { return &kafka.Reader{}, fmt.Errorf("while validating reader configuration: %w", err) } From 818974e7fc2fe961eaf5a3baa055c9b0d17213f9 Mon Sep 17 00:00:00 2001 From: Laurence Jones Date: Thu, 22 May 2025 16:17:18 +0100 Subject: [PATCH 537/581] enhance: Remove docker acquis internal timer use docker events (#3598) * enhance: Remove docker acquis internal timer use docker events * enhance: split code and add events to mock client * enhance: fixes * enhance: add check_interval test and add deprecation warning * enhance: warnf -> warn * enhance: Add a retry loop to reconnect to docker events when docker is down * enhance: remove max retries seconds as we have a max count instead * enhance: mr linter mad * enhance: keep trying until we hit max timer * enhance: After a reconnect we always check the containers to attempt to resurrect or else we wait until a event comes in which it may not * enhance: Move info outside for loop * enhance: Move info to reconnect goto * enhance: mr linter pls be happy * extract reusable backoff code; apply it to the initial connection, rename method etc. 
* log messages * connection error --------- Co-authored-by: blotus Co-authored-by: marco --- pkg/acquisition/modules/docker/docker.go | 194 +++++++++++++----- pkg/acquisition/modules/docker/docker_test.go | 18 ++ 2 files changed, 165 insertions(+), 47 deletions(-) diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index 582da3d53a1..89700dfebc2 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -13,6 +13,8 @@ import ( dockerTypes "github.com/docker/docker/api/types" dockerContainer "github.com/docker/docker/api/types/container" + dockerTypesEvents "github.com/docker/docker/api/types/events" + dockerFilter "github.com/docker/docker/api/types/filters" "github.com/docker/docker/client" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" @@ -53,7 +55,6 @@ type DockerSource struct { runningContainerState map[string]*ContainerConfig compiledContainerName []*regexp.Regexp compiledContainerID []*regexp.Regexp - CheckIntervalDuration time.Duration logger *log.Entry Client client.CommonAPIClient t *tomb.Tomb @@ -75,9 +76,8 @@ func (d *DockerSource) GetUuid() string { func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { d.Config = DockerConfiguration{ - FollowStdout: true, // default - FollowStdErr: true, // default - CheckInterval: "1s", // default + FollowStdout: true, // default + FollowStdErr: true, // default } err := yaml.UnmarshalStrict(yamlConfig, &d.Config) @@ -97,9 +97,8 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { return errors.New("use_container_labels and container_name, container_id, container_id_regexp, container_name_regexp are mutually exclusive") } - d.CheckIntervalDuration, err = time.ParseDuration(d.Config.CheckInterval) - if err != nil { - return fmt.Errorf("parsing 'check_interval' parameters: %s", d.CheckIntervalDuration) + if d.Config.CheckInterval != "" { + d.logger.Warn("check_interval 
is deprecated, it will be removed in a future version") } if d.Config.Mode == "" { @@ -495,63 +494,164 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. return nil } +func (d *DockerSource) checkContainers(ctx context.Context, monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { + // to track for garbage collection + runningContainersID := make(map[string]bool) + + runningContainers, err := d.Client.ContainerList(ctx, dockerContainer.ListOptions{}) + if err != nil { + if strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon at") { + for idx, container := range d.runningContainerState { + if d.runningContainerState[idx].t.Alive() { + d.logger.Infof("killing tail for container %s", container.Name) + d.runningContainerState[idx].t.Kill(nil) + + if err := d.runningContainerState[idx].t.Wait(); err != nil { + d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) + } + } + + delete(d.runningContainerState, idx) + } + } else { + log.Errorf("container list err: %s", err) + } + + return err + } + + for _, container := range runningContainers { + runningContainersID[container.ID] = true + + // don't need to re eval an already monitored container + if _, ok := d.runningContainerState[container.ID]; ok { + continue + } + + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { + monitChan <- containerConfig + } + } + + for containerStateID, containerConfig := range d.runningContainerState { + if _, ok := runningContainersID[containerStateID]; !ok { + deleteChan <- containerConfig + } + } + + d.logger.Tracef("Reading logs from %d containers", len(d.runningContainerState)) + return nil +} + +// subscribeEvents will loop until it can successfully call d.Client.Events() +// without immediately receiving an error. It applies exponential backoff on failures. +// Returns the new (eventsChan, errChan) pair or an error if context/tomb is done. 
+func (d *DockerSource) subscribeEvents(ctx context.Context) (<-chan dockerTypesEvents.Message, <-chan error, error) { + const ( + initialBackoff = 2 * time.Second + backoffFactor = 2 + maxBackoff = 60 * time.Second + ) + + f := dockerFilter.NewArgs() + f.Add("type", "container") + + options := dockerTypesEvents.ListOptions{ + Filters: f, + } + + backoff := initialBackoff + retries := 0 + + d.logger.Infof("Subscribing to Docker events") + + for { + // bail out immediately if the context is canceled + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-d.t.Dying(): + return nil, nil, errors.New("connection aborted, shutting down docker watcher") + default: + } + + // Try to reconnect + eventsChan, errChan := d.Client.Events(ctx, options) + + // Retry if the connection is immediately broken + select { + case err := <-errChan: + d.logger.Errorf("Connection to Docker failed (attempt %d): %v", retries+1, err) + + retries++ + + d.logger.Infof("Sleeping %s before next retry", backoff) + + // Wait for 'backoff', but still allow cancellation + select { + case <-time.After(backoff): + // Continue after backoff + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-d.t.Dying(): + return nil, nil, errors.New("connection aborted, shutting down docker watcher") + } + + backoff = max(backoff*backoffFactor, maxBackoff) + + continue + default: + // great success! 
+ return eventsChan, errChan, nil + } + } +} + func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { - ticker := time.NewTicker(d.CheckIntervalDuration) - d.logger.Infof("Container watcher started, interval: %s", d.CheckIntervalDuration.String()) + err := d.checkContainers(ctx, monitChan, deleteChan) + if err != nil { + return err + } + + eventsChan, errChan, err := d.subscribeEvents(ctx) + if err != nil { + return err + } for { select { case <-d.t.Dying(): d.logger.Infof("stopping container watcher") return nil - case <-ticker.C: - // to track for garbage collection - runningContainersID := make(map[string]bool) - runningContainers, err := d.Client.ContainerList(ctx, dockerContainer.ListOptions{}) - if err != nil { - if strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon at") { - for idx, container := range d.runningContainerState { - if d.runningContainerState[idx].t.Alive() { - d.logger.Infof("killing tail for container %s", container.Name) - d.runningContainerState[idx].t.Kill(nil) - - if err := d.runningContainerState[idx].t.Wait(); err != nil { - d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) - } - } - - delete(d.runningContainerState, idx) - } - } else { - log.Errorf("container list err: %s", err) + case event := <-eventsChan: + if event.Action == dockerTypesEvents.ActionStart || event.Action == dockerTypesEvents.ActionDie { + if err := d.checkContainers(ctx, monitChan, deleteChan); err != nil { + d.logger.Warnf("Failed to check containers: %v", err) } + } + case err := <-errChan: + if err == nil { continue } - for _, container := range runningContainers { - runningContainersID[container.ID] = true + d.logger.Errorf("Docker events error: %v", err) - // don't need to re eval an already monitored container - if _, ok := d.runningContainerState[container.ID]; ok { - continue - } - - if containerConfig := 
d.EvalContainer(ctx, container); containerConfig != nil { - monitChan <- containerConfig - } + // try to reconnect, replacing our channels on success. They are never nil if err is nil. + newEvents, newErr, recErr := d.subscribeEvents(ctx) + if recErr != nil { + return recErr } - for containerStateID, containerConfig := range d.runningContainerState { - if _, ok := runningContainersID[containerStateID]; !ok { - deleteChan <- containerConfig - } - } + eventsChan, errChan = newEvents, newErr - d.logger.Tracef("Reading logs from %d containers", len(d.runningContainerState)) - - ticker.Reset(d.CheckIntervalDuration) + d.logger.Info("Successfully reconnected to Docker events") + // We check containers after a reconnection because the docker daemon might have restarted + // and the container tombs may have self deleted + if err := d.checkContainers(ctx, monitChan, deleteChan); err != nil { + d.logger.Warnf("Failed to check containers: %v", err) + } } } } diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index e44963b58a3..24816346106 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -13,6 +13,7 @@ import ( dockerTypes "github.com/docker/docker/api/types" dockerContainer "github.com/docker/docker/api/types/container" + dockerTypesEvents "github.com/docker/docker/api/types/events" "github.com/docker/docker/client" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -50,6 +51,15 @@ source: docker`, config: ` mode: cat source: docker +container_name: + - toto`, + expectedErr: "", + }, + { + config: ` +mode: cat +source: docker +check_interval: 10s container_name: - toto`, expectedErr: "", @@ -273,6 +283,14 @@ func (cli *mockDockerCli) ContainerInspect(ctx context.Context, c string) (docke return r, nil } +// Since we are mocking the docker client, we return channels that will never be used +func (cli *mockDockerCli) Events(ctx 
context.Context, options dockerTypesEvents.ListOptions) (<-chan dockerTypesEvents.Message, <-chan error) { + eventsChan := make(chan dockerTypesEvents.Message) + errChan := make(chan error) + + return eventsChan, errChan +} + func TestOneShot(t *testing.T) { ctx := t.Context() From 52ebcc3e6211dc76299bea79b85dce891c6df705 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 27 May 2025 15:18:14 +0200 Subject: [PATCH 538/581] deprecate option 'daemonize' (#3648) --- cmd/crowdsec/main.go | 16 ++++------------ cmd/crowdsec/serve.go | 2 +- config/config_win.yaml | 1 - config/config_win_no_lapi.yaml | 1 - config/dev.yaml | 1 - config/user.yaml | 1 - docker/config.yaml | 1 - pkg/csconfig/common.go | 6 +++++- pkg/csconfig/config.go | 5 ++--- pkg/csconfig/testdata/config.yaml | 1 - test/bats/40_cold-logs.bats | 2 +- test/lib/config/config-local | 1 - 12 files changed, 13 insertions(+), 25 deletions(-) diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 188116fb2f2..8bab12e53b4 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -144,7 +144,7 @@ func (l *labelsMap) String() string { } func (l *labelsMap) Set(label string) error { - for _, pair := range strings.Split(label, ",") { + for pair := range strings.SplitSeq(label, ",") { split := strings.Split(pair, ":") if len(split) != 2 { return fmt.Errorf("invalid format for label '%s', must be key:value", pair) @@ -233,7 +233,9 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if err := trace.Init(filepath.Join(cConfig.ConfigPaths.DataDir, "trace")); err != nil { return nil, fmt.Errorf("while setting up trace directory: %w", err) } + var logLevelViaFlag bool + cConfig.Common.LogLevel, logLevelViaFlag = newLogLevel(cConfig.Common.LogLevel, flags) if dumpFolder != "" { @@ -301,24 +303,14 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if cConfig.API != nil && cConfig.API.Server != nil { 
cConfig.API.Server.OnlineClient = nil } - // if the api is disabled as well, just read file and exit, don't daemonize - if cConfig.DisableAPI { - cConfig.Common.Daemonize = false - } - log.Infof("single file mode : log_media=%s daemonize=%t", cConfig.Common.LogMedia, cConfig.Common.Daemonize) + log.Infof("single file mode : log_media=%s", cConfig.Common.LogMedia) } if cConfig.Common.PidDir != "" { log.Warn("Deprecation warning: the pid_dir config can be safely removed and is not required") } - if cConfig.Common.Daemonize && runtime.GOOS == "windows" { - log.Debug("Daemonization is not supported on Windows, disabling") - - cConfig.Common.Daemonize = false - } - // recap of the enabled feature flags, because logging // was not enabled when we set them from envvars if fflist := csconfig.ListFeatureFlags(); fflist != "" { diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 642ac46b8fa..62e128453d5 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -418,7 +418,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { return nil } - if cConfig.Common != nil && cConfig.Common.Daemonize { + if cConfig.Common != nil && !flags.haveTimeMachine() { _ = csdaemon.Notify(csdaemon.Ready, log.StandardLogger()) // wait for signals return HandleSignals(cConfig) diff --git a/config/config_win.yaml b/config/config_win.yaml index 5c34c69a2c0..7274c04d8e4 100644 --- a/config/config_win.yaml +++ b/config/config_win.yaml @@ -1,5 +1,4 @@ common: - daemonize: false log_media: file log_level: info log_dir: C:\ProgramData\CrowdSec\log\ diff --git a/config/config_win_no_lapi.yaml b/config/config_win_no_lapi.yaml index af240228bb5..acc782bdb2e 100644 --- a/config/config_win_no_lapi.yaml +++ b/config/config_win_no_lapi.yaml @@ -1,5 +1,4 @@ common: - daemonize: true log_media: file log_level: info log_dir: C:\ProgramData\CrowdSec\log\ diff --git a/config/dev.yaml b/config/dev.yaml index ca1f35f32ff..a124dc540d8 100644 --- a/config/dev.yaml +++ 
b/config/dev.yaml @@ -1,5 +1,4 @@ common: - daemonize: true log_media: stdout log_level: info config_paths: diff --git a/config/user.yaml b/config/user.yaml index a1047dcd0f7..a2fac5ca722 100644 --- a/config/user.yaml +++ b/config/user.yaml @@ -1,5 +1,4 @@ common: - daemonize: false log_media: stdout log_level: info log_dir: /var/log/ diff --git a/docker/config.yaml b/docker/config.yaml index 6811329099a..9572a573c9c 100644 --- a/docker/config.yaml +++ b/docker/config.yaml @@ -1,5 +1,4 @@ common: - daemonize: false log_media: stdout log_level: info log_dir: /var/log/ diff --git a/pkg/csconfig/common.go b/pkg/csconfig/common.go index e312756ce20..d792928a194 100644 --- a/pkg/csconfig/common.go +++ b/pkg/csconfig/common.go @@ -9,7 +9,7 @@ import ( /*daemonization/service related stuff*/ type CommonCfg struct { - Daemonize bool + Daemonize string // TODO: This is just for backward compat. Remove this later PidDir string `yaml:"pid_dir,omitempty"` // TODO: This is just for backward compat. Remove this later LogMedia string `yaml:"log_media"` LogDir string `yaml:"log_dir,omitempty"` // if LogMedia = file @@ -30,6 +30,10 @@ func (c *Config) loadCommon() error { c.Common = &CommonCfg{} } + if c.Common.Daemonize != "" { + log.Debug("the option 'daemonize' is deprecated and ignored") + } + if c.Common.LogMedia == "" { c.Common.LogMedia = "stdout" } diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index b0784e5e6f3..8ec356d6696 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -115,9 +115,8 @@ func GetConfig() Config { func NewDefaultConfig() *Config { logLevel := log.InfoLevel commonCfg := CommonCfg{ - Daemonize: false, - LogMedia: "stdout", - LogLevel: &logLevel, + LogMedia: "stdout", + LogLevel: &logLevel, } prometheus := PrometheusCfg{ Enabled: true, diff --git a/pkg/csconfig/testdata/config.yaml b/pkg/csconfig/testdata/config.yaml index 17975b10501..705f5fcc535 100644 --- a/pkg/csconfig/testdata/config.yaml +++ 
b/pkg/csconfig/testdata/config.yaml @@ -1,5 +1,4 @@ common: - daemonize: false log_media: stdout log_level: info prometheus: diff --git a/test/bats/40_cold-logs.bats b/test/bats/40_cold-logs.bats index 52605054242..f3e8cd14ffe 100644 --- a/test/bats/40_cold-logs.bats +++ b/test/bats/40_cold-logs.bats @@ -40,7 +40,7 @@ setup() { @test "the one-shot mode works" { rune -0 "$CROWDSEC" -dsn file://<(fake_log) -type syslog -no-api refute_output - assert_stderr --partial "single file mode : log_media=stdout daemonize=false" + assert_stderr --partial "single file mode : log_media=stdout" assert_stderr --regexp "Adding file .* to filelist" assert_stderr --regexp "reading .* at once" assert_stderr --partial "Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 0s)" diff --git a/test/lib/config/config-local b/test/lib/config/config-local index e6588c61cb8..0acb325b272 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -77,7 +77,6 @@ config_generate() { "${CONFIG_DIR}/notifications/" yq e ' - .common.daemonize=true | del(.common.pid_dir) | .common.log_level="info" | .common.log_dir=strenv(LOG_DIR) | From 983c5c3e56c6fd4e6518a2d00288071d474470fa Mon Sep 17 00:00:00 2001 From: blotus Date: Wed, 28 May 2025 10:02:10 +0200 Subject: [PATCH 539/581] allow watcher to self-delete on shutdown (#3565) * allow watcher to self-delete on shutdown * rename lapi endpoing * rename config option * test * fix tests * docker: $UNREGISTER_ON_EXIT * docker test * update test dependencies, with custom "stop_timeout" * update url in client * meh * wip * url --------- Co-authored-by: Thibault "bui" Koechlin Co-authored-by: marco --- cmd/crowdsec/serve.go | 13 + docker/README.md | 1 + docker/docker_start.sh | 2 + docker/test/pyproject.toml | 2 +- docker/test/tests/test_agent_only.py | 50 +++ docker/test/uv.lock | 472 ++++++++++++----------- pkg/apiclient/auth_service.go | 2 +- pkg/apiclient/auth_service_test.go | 2 +- pkg/apiserver/controllers/controller.go | 1 + 
pkg/apiserver/controllers/v1/machines.go | 24 ++ pkg/csconfig/api.go | 1 + pkg/models/localapi_swagger.yaml | 17 + test/bats/30_machines.bats | 21 +- 13 files changed, 377 insertions(+), 231 deletions(-) diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 62e128453d5..b5eaa3f8a56 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/go-cs-lib/csdaemon" "github.com/crowdsecurity/go-cs-lib/trace" + "github.com/crowdsecurity/crowdsec/pkg/apiclient" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" "github.com/crowdsecurity/crowdsec/pkg/database" @@ -322,6 +323,18 @@ func HandleSignals(cConfig *csconfig.Config) error { if err == nil { log.Warning("Crowdsec service shutting down") } + if cConfig.API != nil && cConfig.API.Client != nil && cConfig.API.Client.UnregisterOnExit { + log.Warning("Unregistering watcher") + lapiClient, err := apiclient.GetLAPIClient() + if err != nil { + return err + } + _, err = lapiClient.Auth.UnregisterWatcher(context.TODO()) + if err != nil { + return fmt.Errorf("failed to unregister watcher: %w", err) + } + log.Warning("Watcher unregistered") + } return err } diff --git a/docker/README.md b/docker/README.md index ad31d10aed6..0f505f727cb 100644 --- a/docker/README.md +++ b/docker/README.md @@ -289,6 +289,7 @@ config.yaml) each time the container is run. | __Agent__ | | (these don't work with DISABLE_AGENT) | | `TYPE` | | [`Labels.type`](https://docs.crowdsec.net/Crowdsec/v1/references/acquisition/) for file in time-machine: `-e TYPE=""` | | `DSN` | | Process a single source in time-machine: `-e DSN="file:///var/log/toto.log"` or `-e DSN="cloudwatch:///your/group/path:stream_name?profile=dev&backlog=16h"` or `-e DSN="journalctl://filters=_SYSTEMD_UNIT=ssh.service"` | +| `UNREGISTER_ON_EXIT` | | Remove the agent from the LAPI when its container is stopped. 
| | | | | | __Bouncers__ | | | | `BOUNCER_KEY_` | | Register a bouncer with the name `` and a key equal to the value of the environment variable. | diff --git a/docker/docker_start.sh b/docker/docker_start.sh index fb87c1eff9b..d2e93621002 100755 --- a/docker/docker_start.sh +++ b/docker/docker_start.sh @@ -474,6 +474,8 @@ else conf_set '.api.server.enable=true' fi +conf_set_if "$UNREGISTER_ON_EXIT" '.api.client.unregister_on_exit=env(UNREGISTER_ON_EXIT)' + ARGS="" if [ "$CONFIG_FILE" != "" ]; then ARGS="-c $CONFIG_FILE" diff --git a/docker/test/pyproject.toml b/docker/test/pyproject.toml index 5ec0c5a7f01..dc075c4c65f 100644 --- a/docker/test/pyproject.toml +++ b/docker/test/pyproject.toml @@ -6,7 +6,7 @@ readme = "README.md" requires-python = ">=3.12" dependencies = [ "pytest>=8.3.4", - "pytest-cs", + "pytest-cs>=0.7.22", "pytest-dotenv>=0.5.2", "pytest-xdist>=3.6.1", ] diff --git a/docker/test/tests/test_agent_only.py b/docker/test/tests/test_agent_only.py index 1da659d7ee7..b3e8eba2441 100644 --- a/docker/test/tests/test_agent_only.py +++ b/docker/test/tests/test_agent_only.py @@ -1,4 +1,5 @@ import secrets +import time from http import HTTPStatus import pytest @@ -35,3 +36,52 @@ def test_split_lapi_agent(crowdsec, flavor: str) -> None: assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout + + +def test_unregister_on_exit(crowdsec, flavor: str) -> None: + rand = str(secrets.randbelow(10000)) + lapiname = f"lapi-{rand}" + agentname = f"agent-{rand}" + + lapi_env = { + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", + } + + agent_env = { + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", + "DISABLE_LOCAL_API": "true", + "LOCAL_API_URL": f"http://{lapiname}:8080", + "UNREGISTER_ON_EXIT": "true", + } + + cs_lapi = crowdsec(name=lapiname, environment=lapi_env, flavor=flavor) + cs_agent = crowdsec(name=agentname, environment=agent_env, flavor=flavor, 
stop_timeout=5) + + with cs_lapi as lapi: + lapi.wait_for_log("*CrowdSec Local API listening on *:8080*") + lapi.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + + res = lapi.cont.exec_run("cscli machines list") + assert res.exit_code == 0 + # the machine is created in the lapi entrypoint + assert "testagent" in res.output.decode() + + with cs_agent as agent: + agent.wait_for_log("*Starting processing data*") + res = agent.cont.exec_run("cscli lapi status") + assert res.exit_code == 0 + stdout = res.output.decode() + assert "You can successfully interact with Local API (LAPI)" in stdout + + res = lapi.cont.exec_run("cscli machines list") + assert res.exit_code == 0 + assert "testagent" in res.output.decode() + + time.sleep(2) + + res = lapi.cont.exec_run("cscli machines list") + assert res.exit_code == 0 + # and it's not there anymore + assert "testagent" not in res.output.decode() diff --git a/docker/test/uv.lock b/docker/test/uv.lock index a930db9cd2f..a7679e8e362 100644 --- a/docker/test/uv.lock +++ b/docker/test/uv.lock @@ -1,34 +1,35 @@ version = 1 +revision = 2 requires-python = ">=3.12" [[package]] name = "asttokens" version = "3.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978, upload-time = "2024-11-30T04:30:14.439Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = 
"sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, + { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918, upload-time = "2024-11-30T04:30:10.946Z" }, ] [[package]] name = "basedpyright" -version = "1.26.0" +version = "1.29.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodejs-wheel-binaries" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/18/c2/5685d040d4f2598788d42bfd2db5f808e9aa2eaee77fcae3c2fbe4ea0e7c/basedpyright-1.26.0.tar.gz", hash = "sha256:5e01f6eb9290a09ef39672106cf1a02924fdc8970e521838bc502ccf0676f32f", size = 24932771 } +sdist = { url = "https://files.pythonhosted.org/packages/0c/db/fcfced6e89a49b52694d51078c2cf626b3fe9d097c1145bb8424337d7ae6/basedpyright-1.29.2.tar.gz", hash = "sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6", size = 21966851, upload-time = "2025-05-21T11:45:43.03Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8e/72/65308f45bb73efc93075426cac5f37eea937ae364aa675785521cb3512c7/basedpyright-1.26.0-py3-none-any.whl", hash = "sha256:5a6a17f2c389ec313dd2c3644f40e8221bc90252164802e626055341c0a37381", size = 11504579 }, + { url = "https://files.pythonhosted.org/packages/98/b7/8307208ab517ac6e5d0331ad7347624fe8b250fb6e659ba3bd081d82c890/basedpyright-1.29.2-py3-none-any.whl", hash = "sha256:f389e2997de33d038c5065fd85bff351fbdc62fa6d6371c7b947fc3bce8d437d", size = 11472099, upload-time = "2025-05-21T11:45:38.785Z" }, ] [[package]] name = "certifi" -version = "2025.1.31" +version = "2025.4.26" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1c/ab/c9f1e32b7b1bf505bf26f0ef697775960db7932abeb7b516de930ba2705f/certifi-2025.1.31.tar.gz", hash = 
"sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651", size = 167577 } +sdist = { url = "https://files.pythonhosted.org/packages/e8/9e/c05b3920a3b7d20d3d3310465f50348e5b3694f4f88c6daf736eef3024c4/certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6", size = 160705, upload-time = "2025-04-26T02:12:29.51Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/38/fc/bce832fd4fd99766c04d1ee0eead6b0ec6486fb100ae5e74c1d91292b982/certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe", size = 166393 }, + { url = "https://files.pythonhosted.org/packages/4a/7e/3db2bd1b1f9e95f7cddca6d6e75e2f2bd9f51b1246e546d88addca0106bd/certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3", size = 159618, upload-time = "2025-04-26T02:12:27.662Z" }, ] [[package]] @@ -38,74 +39,74 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pycparser" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 }, - { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 }, - { url = 
"https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 }, - { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 }, - { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 }, - { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 }, - { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 }, - { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 }, - { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", 
size = 488736 }, - { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, - { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, - { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, - { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, - { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, - { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, - { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, - { url = 
"https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, - { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, - { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, - { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, - { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, - { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = 
"https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, + { 
url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, ] [[package]] name = "charset-normalizer" -version = "3.4.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 }, - { url = 
"https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 }, - { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 }, - { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 }, - { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 }, - { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 }, - { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 }, - { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", 
hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 }, - { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 }, - { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 }, - { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, - { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, - { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, - { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, - { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, - { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, - { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, - { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, - { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, - { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, - { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, - { url = 
"https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, - { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, - { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, - { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, - { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, - { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, + { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, + { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", 
size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, + { url = "https://files.pythonhosted.org/packages/ea/12/a93df3366ed32db1d907d7593a94f1fe6293903e3e92967bebd6950ed12c/charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0", size = 199622, upload-time = "2025-05-02T08:32:56.363Z" }, + { url = "https://files.pythonhosted.org/packages/04/93/bf204e6f344c39d9937d3c13c8cd5bbfc266472e51fc8c07cb7f64fcd2de/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf", size = 143435, upload-time = "2025-05-02T08:32:58.551Z" }, + { url = "https://files.pythonhosted.org/packages/22/2a/ea8a2095b0bafa6c5b5a55ffdc2f924455233ee7b91c69b7edfcc9e02284/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e", size = 153653, upload-time = "2025-05-02T08:33:00.342Z" }, + { url = "https://files.pythonhosted.org/packages/b6/57/1b090ff183d13cef485dfbe272e2fe57622a76694061353c59da52c9a659/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1", size = 146231, upload-time = "2025-05-02T08:33:02.081Z" }, + { url = "https://files.pythonhosted.org/packages/e2/28/ffc026b26f441fc67bd21ab7f03b313ab3fe46714a14b516f931abe1a2d8/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c", size = 148243, upload-time = "2025-05-02T08:33:04.063Z" }, + { url = "https://files.pythonhosted.org/packages/c0/0f/9abe9bd191629c33e69e47c6ef45ef99773320e9ad8e9cb08b8ab4a8d4cb/charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691", size = 150442, upload-time = "2025-05-02T08:33:06.418Z" }, + { url = "https://files.pythonhosted.org/packages/67/7c/a123bbcedca91d5916c056407f89a7f5e8fdfce12ba825d7d6b9954a1a3c/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0", size = 145147, upload-time = "2025-05-02T08:33:08.183Z" }, + { url = "https://files.pythonhosted.org/packages/ec/fe/1ac556fa4899d967b83e9893788e86b6af4d83e4726511eaaad035e36595/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b", size = 153057, upload-time = "2025-05-02T08:33:09.986Z" }, + { url = "https://files.pythonhosted.org/packages/2b/ff/acfc0b0a70b19e3e54febdd5301a98b72fa07635e56f24f60502e954c461/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff", size = 156454, upload-time = "2025-05-02T08:33:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/92/08/95b458ce9c740d0645feb0e96cea1f5ec946ea9c580a94adfe0b617f3573/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b", size = 154174, upload-time = "2025-05-02T08:33:13.707Z" }, + { url = "https://files.pythonhosted.org/packages/78/be/8392efc43487ac051eee6c36d5fbd63032d78f7728cb37aebcc98191f1ff/charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148", size = 149166, upload-time = "2025-05-02T08:33:15.458Z" }, + { url = "https://files.pythonhosted.org/packages/44/96/392abd49b094d30b91d9fbda6a69519e95802250b777841cf3bda8fe136c/charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7", size = 98064, upload-time = "2025-05-02T08:33:17.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/b0/0200da600134e001d91851ddc797809e2fe0ea72de90e09bec5a2fbdaccb/charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980", size = 105641, upload-time = "2025-05-02T08:33:18.753Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, ] [[package]] name = "colorama" version = "0.4.6" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, + { url = 
"https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, ] [[package]] @@ -143,42 +144,46 @@ dev = [ [[package]] name = "cryptography" -version = "44.0.0" +version = "45.0.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/91/4c/45dfa6829acffa344e3967d6006ee4ae8be57af746ae2eba1c431949b32c/cryptography-44.0.0.tar.gz", hash = "sha256:cd4e834f340b4293430701e772ec543b0fbe6c2dea510a5286fe0acabe153a02", size = 710657 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/55/09/8cc67f9b84730ad330b3b72cf867150744bf07ff113cda21a15a1c6d2c7c/cryptography-44.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:84111ad4ff3f6253820e6d3e58be2cc2a00adb29335d4cacb5ab4d4d34f2a123", size = 6541833 }, - { url = "https://files.pythonhosted.org/packages/7e/5b/3759e30a103144e29632e7cb72aec28cedc79e514b2ea8896bb17163c19b/cryptography-44.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15492a11f9e1b62ba9d73c210e2416724633167de94607ec6069ef724fad092", size = 3922710 }, - { url = "https://files.pythonhosted.org/packages/5f/58/3b14bf39f1a0cfd679e753e8647ada56cddbf5acebffe7db90e184c76168/cryptography-44.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831c3c4d0774e488fdc83a1923b49b9957d33287de923d58ebd3cec47a0ae43f", size = 4137546 }, - { url = "https://files.pythonhosted.org/packages/98/65/13d9e76ca19b0ba5603d71ac8424b5694415b348e719db277b5edc985ff5/cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb", size = 3915420 }, - { url = 
"https://files.pythonhosted.org/packages/b1/07/40fe09ce96b91fc9276a9ad272832ead0fddedcba87f1190372af8e3039c/cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b", size = 4154498 }, - { url = "https://files.pythonhosted.org/packages/75/ea/af65619c800ec0a7e4034207aec543acdf248d9bffba0533342d1bd435e1/cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543", size = 3932569 }, - { url = "https://files.pythonhosted.org/packages/c7/af/d1deb0c04d59612e3d5e54203159e284d3e7a6921e565bb0eeb6269bdd8a/cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e", size = 4016721 }, - { url = "https://files.pythonhosted.org/packages/bd/69/7ca326c55698d0688db867795134bdfac87136b80ef373aaa42b225d6dd5/cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e", size = 4240915 }, - { url = "https://files.pythonhosted.org/packages/ef/d4/cae11bf68c0f981e0413906c6dd03ae7fa864347ed5fac40021df1ef467c/cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053", size = 2757925 }, - { url = "https://files.pythonhosted.org/packages/64/b1/50d7739254d2002acae64eed4fc43b24ac0cc44bf0a0d388d1ca06ec5bb1/cryptography-44.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:abc998e0c0eee3c8a1904221d3f67dcfa76422b23620173e28c11d3e626c21bd", size = 3202055 }, - { url = "https://files.pythonhosted.org/packages/11/18/61e52a3d28fc1514a43b0ac291177acd1b4de00e9301aaf7ef867076ff8a/cryptography-44.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:660cb7312a08bc38be15b696462fa7cc7cd85c3ed9c576e81f4dc4d8b2b31591", size = 6542801 }, - { url = 
"https://files.pythonhosted.org/packages/1a/07/5f165b6c65696ef75601b781a280fc3b33f1e0cd6aa5a92d9fb96c410e97/cryptography-44.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1923cb251c04be85eec9fda837661c67c1049063305d6be5721643c22dd4e2b7", size = 3922613 }, - { url = "https://files.pythonhosted.org/packages/28/34/6b3ac1d80fc174812486561cf25194338151780f27e438526f9c64e16869/cryptography-44.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404fdc66ee5f83a1388be54300ae978b2efd538018de18556dde92575e05defc", size = 4137925 }, - { url = "https://files.pythonhosted.org/packages/d0/c7/c656eb08fd22255d21bc3129625ed9cd5ee305f33752ef2278711b3fa98b/cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289", size = 3915417 }, - { url = "https://files.pythonhosted.org/packages/ef/82/72403624f197af0db6bac4e58153bc9ac0e6020e57234115db9596eee85d/cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7", size = 4155160 }, - { url = "https://files.pythonhosted.org/packages/a2/cd/2f3c440913d4329ade49b146d74f2e9766422e1732613f57097fea61f344/cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c", size = 3932331 }, - { url = "https://files.pythonhosted.org/packages/7f/df/8be88797f0a1cca6e255189a57bb49237402b1880d6e8721690c5603ac23/cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64", size = 4017372 }, - { url = "https://files.pythonhosted.org/packages/af/36/5ccc376f025a834e72b8e52e18746b927f34e4520487098e283a719c205e/cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285", size = 4239657 }, - { url = 
"https://files.pythonhosted.org/packages/46/b0/f4f7d0d0bcfbc8dd6296c1449be326d04217c57afb8b2594f017eed95533/cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417", size = 2758672 }, - { url = "https://files.pythonhosted.org/packages/97/9b/443270b9210f13f6ef240eff73fd32e02d381e7103969dc66ce8e89ee901/cryptography-44.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:708ee5f1bafe76d041b53a4f95eb28cdeb8d18da17e597d46d7833ee59b97ede", size = 3202071 }, +sdist = { url = "https://files.pythonhosted.org/packages/f6/47/92a8914716f2405f33f1814b97353e3cfa223cd94a77104075d42de3099e/cryptography-45.0.2.tar.gz", hash = "sha256:d784d57b958ffd07e9e226d17272f9af0c41572557604ca7554214def32c26bf", size = 743865, upload-time = "2025-05-18T02:46:34.986Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/2f/46b9e715157643ad16f039ec3c3c47d174da6f825bf5034b1c5f692ab9e2/cryptography-45.0.2-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:61a8b1bbddd9332917485b2453d1de49f142e6334ce1d97b7916d5a85d179c84", size = 7043448, upload-time = "2025-05-18T02:45:12.495Z" }, + { url = "https://files.pythonhosted.org/packages/90/52/49e6c86278e1b5ec226e96b62322538ccc466306517bf9aad8854116a088/cryptography-45.0.2-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cc31c66411e14dd70e2f384a9204a859dc25b05e1f303df0f5326691061b839", size = 4201098, upload-time = "2025-05-18T02:45:15.178Z" }, + { url = "https://files.pythonhosted.org/packages/7b/3a/201272539ac5b66b4cb1af89021e423fc0bfacb73498950280c51695fb78/cryptography-45.0.2-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:463096533acd5097f8751115bc600b0b64620c4aafcac10c6d0041e6e68f88fe", size = 4429839, upload-time = "2025-05-18T02:45:17.614Z" }, + { url = "https://files.pythonhosted.org/packages/99/89/fa1a84832b8f8f3917875cb15324bba98def5a70175a889df7d21a45dc75/cryptography-45.0.2-cp311-abi3-manylinux_2_28_aarch64.whl", 
hash = "sha256:cdafb86eb673c3211accffbffdb3cdffa3aaafacd14819e0898d23696d18e4d3", size = 4205154, upload-time = "2025-05-18T02:45:19.874Z" }, + { url = "https://files.pythonhosted.org/packages/1c/c5/5225d5230d538ab461725711cf5220560a813d1eb68bafcfb00131b8f631/cryptography-45.0.2-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:05c2385b1f5c89a17df19900cfb1345115a77168f5ed44bdf6fd3de1ce5cc65b", size = 3897145, upload-time = "2025-05-18T02:45:22.209Z" }, + { url = "https://files.pythonhosted.org/packages/fe/24/f19aae32526cc55ae17d473bc4588b1234af2979483d99cbfc57e55ffea6/cryptography-45.0.2-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:e9e4bdcd70216b08801e267c0b563316b787f957a46e215249921f99288456f9", size = 4462192, upload-time = "2025-05-18T02:45:24.773Z" }, + { url = "https://files.pythonhosted.org/packages/19/18/4a69ac95b0b3f03355970baa6c3f9502bbfc54e7df81fdb179654a00f48e/cryptography-45.0.2-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b2de529027579e43b6dc1f805f467b102fb7d13c1e54c334f1403ee2b37d0059", size = 4208093, upload-time = "2025-05-18T02:45:27.028Z" }, + { url = "https://files.pythonhosted.org/packages/7c/54/2dea55ccc9558b8fa14f67156250b6ee231e31765601524e4757d0b5db6b/cryptography-45.0.2-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10d68763892a7b19c22508ab57799c4423c7c8cd61d7eee4c5a6a55a46511949", size = 4461819, upload-time = "2025-05-18T02:45:29.39Z" }, + { url = "https://files.pythonhosted.org/packages/37/f1/1b220fcd5ef4b1f0ff3e59e733b61597505e47f945606cc877adab2c1a17/cryptography-45.0.2-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2a90ce2f0f5b695e4785ac07c19a58244092f3c85d57db6d8eb1a2b26d2aad6", size = 4329202, upload-time = "2025-05-18T02:45:31.925Z" }, + { url = "https://files.pythonhosted.org/packages/6d/e0/51d1dc4f96f819a56db70f0b4039b4185055bbb8616135884c3c3acc4c6d/cryptography-45.0.2-cp311-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:59c0c8f043dd376bbd9d4f636223836aed50431af4c5a467ed9bf61520294627", size = 4570412, upload-time = "2025-05-18T02:45:34.348Z" }, + { url = "https://files.pythonhosted.org/packages/dc/44/88efb40a3600d15277a77cdc69eeeab45a98532078d2a36cffd9325d3b3f/cryptography-45.0.2-cp311-abi3-win32.whl", hash = "sha256:80303ee6a02ef38c4253160446cbeb5c400c07e01d4ddbd4ff722a89b736d95a", size = 2933584, upload-time = "2025-05-18T02:45:36.198Z" }, + { url = "https://files.pythonhosted.org/packages/d9/a1/bc9f82ba08760442cc8346d1b4e7b769b86d197193c45b42b3595d231e84/cryptography-45.0.2-cp311-abi3-win_amd64.whl", hash = "sha256:7429936146063bd1b2cfc54f0e04016b90ee9b1c908a7bed0800049cbace70eb", size = 3408537, upload-time = "2025-05-18T02:45:38.184Z" }, + { url = "https://files.pythonhosted.org/packages/59/bc/1b6acb1dca366f9c0b3880888ecd7fcfb68023930d57df854847c6da1d10/cryptography-45.0.2-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:e86c8d54cd19a13e9081898b3c24351683fd39d726ecf8e774aaa9d8d96f5f3a", size = 7025581, upload-time = "2025-05-18T02:45:40.632Z" }, + { url = "https://files.pythonhosted.org/packages/31/a3/a3e4a298d3db4a04085728f5ae6c8cda157e49c5bb784886d463b9fbff70/cryptography-45.0.2-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e328357b6bbf79928363dbf13f4635b7aac0306afb7e5ad24d21d0c5761c3253", size = 4189148, upload-time = "2025-05-18T02:45:42.538Z" }, + { url = "https://files.pythonhosted.org/packages/53/90/100dfadd4663b389cb56972541ec1103490a19ebad0132af284114ba0868/cryptography-45.0.2-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49af56491473231159c98c2c26f1a8f3799a60e5cf0e872d00745b858ddac9d2", size = 4424113, upload-time = "2025-05-18T02:45:44.316Z" }, + { url = "https://files.pythonhosted.org/packages/0d/40/e2b9177dbed6f3fcbbf1942e1acea2fd15b17007204b79d675540dd053af/cryptography-45.0.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = 
"sha256:f169469d04a23282de9d0be349499cb6683b6ff1b68901210faacac9b0c24b7d", size = 4189696, upload-time = "2025-05-18T02:45:46.622Z" }, + { url = "https://files.pythonhosted.org/packages/70/ae/ec29c79f481e1767c2ff916424ba36f3cf7774de93bbd60428a3c52d1357/cryptography-45.0.2-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9cfd1399064b13043082c660ddd97a0358e41c8b0dc7b77c1243e013d305c344", size = 3881498, upload-time = "2025-05-18T02:45:48.884Z" }, + { url = "https://files.pythonhosted.org/packages/5f/4a/72937090e5637a232b2f73801c9361cd08404a2d4e620ca4ec58c7ea4b70/cryptography-45.0.2-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:18f8084b7ca3ce1b8d38bdfe33c48116edf9a08b4d056ef4a96dceaa36d8d965", size = 4451678, upload-time = "2025-05-18T02:45:50.706Z" }, + { url = "https://files.pythonhosted.org/packages/d3/fa/1377fced81fd67a4a27514248261bb0d45c3c1e02169411fe231583088c8/cryptography-45.0.2-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:2cb03a944a1a412724d15a7c051d50e63a868031f26b6a312f2016965b661942", size = 4192296, upload-time = "2025-05-18T02:45:52.422Z" }, + { url = "https://files.pythonhosted.org/packages/d1/cf/b6fe837c83a08b9df81e63299d75fc5b3c6d82cf24b3e1e0e331050e9e5c/cryptography-45.0.2-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a9727a21957d3327cf6b7eb5ffc9e4b663909a25fea158e3fcbc49d4cdd7881b", size = 4451749, upload-time = "2025-05-18T02:45:55.025Z" }, + { url = "https://files.pythonhosted.org/packages/af/d8/5a655675cc635c7190bfc8cffb84bcdc44fc62ce945ad1d844adaa884252/cryptography-45.0.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ddb8d01aa900b741d6b7cc585a97aff787175f160ab975e21f880e89d810781a", size = 4317601, upload-time = "2025-05-18T02:45:56.911Z" }, + { url = "https://files.pythonhosted.org/packages/b9/d4/75d2375a20d80aa262a8adee77bf56950e9292929e394b9fae2481803f11/cryptography-45.0.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:c0c000c1a09f069632d8a9eb3b610ac029fcc682f1d69b758e625d6ee713f4ed", size = 4560535, upload-time = "2025-05-18T02:45:59.33Z" }, + { url = "https://files.pythonhosted.org/packages/aa/18/c3a94474987ebcfb88692036b2ec44880d243fefa73794bdcbf748679a6e/cryptography-45.0.2-cp37-abi3-win32.whl", hash = "sha256:08281de408e7eb71ba3cd5098709a356bfdf65eebd7ee7633c3610f0aa80d79b", size = 2922045, upload-time = "2025-05-18T02:46:01.012Z" }, + { url = "https://files.pythonhosted.org/packages/63/63/fb28b30c144182fd44ce93d13ab859791adbf923e43bdfb610024bfecda1/cryptography-45.0.2-cp37-abi3-win_amd64.whl", hash = "sha256:48caa55c528617fa6db1a9c3bf2e37ccb31b73e098ac2b71408d1f2db551dde4", size = 3393321, upload-time = "2025-05-18T02:46:03.441Z" }, ] [[package]] name = "decorator" -version = "5.1.1" +version = "5.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, + { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, ] [[package]] @@ -190,45 +195,45 @@ 
dependencies = [ { name = "requests" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834 } +sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834, upload-time = "2024-05-23T11:13:57.216Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774 }, + { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774, upload-time = "2024-05-23T11:13:55.01Z" }, ] [[package]] name = "execnet" version = "2.1.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524 } +sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524, upload-time = "2024-04-08T09:04:19.245Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = 
"sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612 }, + { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612, upload-time = "2024-04-08T09:04:17.414Z" }, ] [[package]] name = "executing" version = "2.2.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693 } +sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693, upload-time = "2025-01-22T15:41:29.403Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 }, + { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702, upload-time = "2025-01-22T15:41:25.929Z" }, ] [[package]] name = "idna" version = "3.10" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +sdist = { url = 
"https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, ] [[package]] name = "iniconfig" -version = "2.0.0" +version = "2.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = 
"2025-03-19T20:10:01.071Z" }, ] [[package]] @@ -239,18 +244,19 @@ dependencies = [ { name = "decorator" }, { name = "ipython" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/1b/7e07e7b752017f7693a0f4d41c13e5ca29ce8cbcfdcc1fd6c4ad8c0a27a0/ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726", size = 17042 } +sdist = { url = "https://files.pythonhosted.org/packages/3d/1b/7e07e7b752017f7693a0f4d41c13e5ca29ce8cbcfdcc1fd6c4ad8c0a27a0/ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726", size = 17042, upload-time = "2023-03-09T15:40:57.487Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/0c/4c/b075da0092003d9a55cf2ecc1cae9384a1ca4f650d51b00fc59875fe76f6/ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", size = 12130 }, + { url = "https://files.pythonhosted.org/packages/0c/4c/b075da0092003d9a55cf2ecc1cae9384a1ca4f650d51b00fc59875fe76f6/ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", size = 12130, upload-time = "2023-03-09T15:40:55.021Z" }, ] [[package]] name = "ipython" -version = "8.32.0" +version = "9.2.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, { name = "decorator" }, + { name = "ipython-pygments-lexers" }, { name = "jedi" }, { name = "matplotlib-inline" }, { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, @@ -259,9 +265,21 @@ dependencies = [ { name = "stack-data" }, { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/36/80/4d2a072e0db7d250f134bc11676517299264ebe16d62a8619d49a78ced73/ipython-8.32.0.tar.gz", hash = "sha256:be2c91895b0b9ea7ba49d33b23e2040c352b33eb6a519cca7ce6e0c743444251", size = 5507441 } +sdist = { url = 
"https://files.pythonhosted.org/packages/9d/02/63a84444a7409b3c0acd1de9ffe524660e0e5d82ee473e78b45e5bfb64a4/ipython-9.2.0.tar.gz", hash = "sha256:62a9373dbc12f28f9feaf4700d052195bf89806279fc8ca11f3f54017d04751b", size = 4424394, upload-time = "2025-04-25T17:55:40.498Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e7/e1/f4474a7ecdb7745a820f6f6039dc43c66add40f1bcc66485607d93571af6/ipython-8.32.0-py3-none-any.whl", hash = "sha256:cae85b0c61eff1fc48b0a8002de5958b6528fa9c8defb1894da63f42613708aa", size = 825524 }, + { url = "https://files.pythonhosted.org/packages/78/ce/5e897ee51b7d26ab4e47e5105e7368d40ce6cfae2367acdf3165396d50be/ipython-9.2.0-py3-none-any.whl", hash = "sha256:fef5e33c4a1ae0759e0bba5917c9db4eb8c53fee917b6a526bd973e1ca5159f6", size = 604277, upload-time = "2025-04-25T17:55:37.625Z" }, +] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, ] [[package]] @@ -271,9 +289,9 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "parso" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } +sdist = { url = 
"https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, ] [[package]] @@ -283,43 +301,43 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "traitlets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159, upload-time = "2024-04-15T13:44:44.803Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = 
"sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899, upload-time = "2024-04-15T13:44:43.265Z" }, ] [[package]] name = "nodejs-wheel-binaries" -version = "22.13.1" +version = "22.16.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5d/c5/1af2fc54fcc18f4a99426b46f18832a04f755ee340019e1be536187c1e1c/nodejs_wheel_binaries-22.13.1.tar.gz", hash = "sha256:a0c15213c9c3383541be4400a30959883868ce5da9cebb3d63ddc7fe61459308", size = 8053 } +sdist = { url = "https://files.pythonhosted.org/packages/0f/c6/66f36b7b0d528660dfb4a59cb9b8dd6a3f4c0a3939cd49c404a775ea4a63/nodejs_wheel_binaries-22.16.0.tar.gz", hash = "sha256:d695832f026df3a0cf9a089d222225939de9d1b67f8f0a353b79f015aabbe7e2", size = 8061, upload-time = "2025-05-22T07:27:52.149Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/e9/b0dd118e0fd4eabe1ec9c3d9a68df4d811282e8837b811d804f23742e117/nodejs_wheel_binaries-22.13.1-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e4f64d0e26600d51cbdd98a6718a19c2d1b8c7538e9e353e95a634a06a8e1a58", size = 51015650 }, - { url = "https://files.pythonhosted.org/packages/cc/a6/9ba835f5d4f3f6b1f01191e7ac0874871f9743de5c42a5a9a54e67c2e2a6/nodejs_wheel_binaries-22.13.1-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:afcb40484bb02f23137f838014724604ae183fd767b30da95b0be1510a40c06d", size = 51814957 }, - { url = "https://files.pythonhosted.org/packages/0d/2e/a430207e5f22bd3dcffb81acbddf57ee4108b9e2b0f99a5578dc2c1ff7fc/nodejs_wheel_binaries-22.13.1-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fc88c98eebabfc36b5270a4ab974a2682746931567ca76a5ca49c54482bbb51", size = 57148437 }, - { url = "https://files.pythonhosted.org/packages/97/f4/5731b6f0c8af434619b4f1b8fd895bc33fca60168cd68133e52841872114/nodejs_wheel_binaries-22.13.1-py2.py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8b9f75ea8f5e3e5416256fcb00a98cbe14c8d3b6dcaf17da29c4ade5723026d8", size = 57634451 }, - { url = "https://files.pythonhosted.org/packages/49/28/83166f7e39812e9ef99cfa3e722c54e32dd9de6a1290f3216c2e5d1f4957/nodejs_wheel_binaries-22.13.1-py2.py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:94608702ef6c389d32e89ff3b7a925cb5dedaf55b5d98bd0c4fb3450a8b6d1c1", size = 58794510 }, - { url = "https://files.pythonhosted.org/packages/f7/64/4832ec26d0a7ca7a5574df265d85c6832f9a624024511fc34958227ad740/nodejs_wheel_binaries-22.13.1-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:53a40d0269689aa2eaf2e261cbe5ec256644bc56aae0201ef344b7d8f40ccc79", size = 59738596 }, - { url = "https://files.pythonhosted.org/packages/18/cd/def29615dac250cda3d141e1c03b7153b9a027360bde0272a6768c5fae33/nodejs_wheel_binaries-22.13.1-py2.py3-none-win_amd64.whl", hash = "sha256:549371a929a29fbce8d0ab8f1b5410549946d4f1b0376a5ce635b45f6d05298f", size = 40455444 }, - { url = "https://files.pythonhosted.org/packages/15/d7/6de2bc615203bf590ca437a5cac145b2f86d994ce329489125a0a90ba715/nodejs_wheel_binaries-22.13.1-py2.py3-none-win_arm64.whl", hash = "sha256:cf72d50d755f4e5c0709b0449de01768d96b3b1ec7aa531561415b88f179ad8b", size = 36200929 }, + { url = "https://files.pythonhosted.org/packages/d7/dc/417a5c5f99e53a5d2b3be122506312731eb90fb9630c248e327e2e38cc6b/nodejs_wheel_binaries-22.16.0-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:986b715a96ed703f8ce0c15712f76fc42895cf09067d72b6ef29e8b334eccf64", size = 50957501, upload-time = "2025-05-22T07:27:20.132Z" }, + { url = "https://files.pythonhosted.org/packages/0e/dd/d6ce48209ed15f5d1fccb29eeaa111f962557123eaf4fd03a7316c42734c/nodejs_wheel_binaries-22.16.0-py2.py3-none-macosx_11_0_x86_64.whl", hash = "sha256:4ae3cf22138891cb44c3ee952862a257ce082b098b29024d7175684a9a77b0c0", size = 51891634, upload-time = "2025-05-22T07:27:24.029Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/fa/a07e622fd87717eec3e5cff41575f85ad62717e8698884d28ca809266ca1/nodejs_wheel_binaries-22.16.0-py2.py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71f2de4dc0b64ae43e146897ce811f80ac4f9acfbae6ccf814226282bf4ef174", size = 57857862, upload-time = "2025-05-22T07:27:27.933Z" }, + { url = "https://files.pythonhosted.org/packages/1f/80/52736f9570a93f8e6b7942981dc9770eca2bc7aa1d200c1d54198374a6ca/nodejs_wheel_binaries-22.16.0-py2.py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbfccbcd558d2f142ccf66d8c3a098022bf4436db9525b5b8d32169ce185d99e", size = 58395868, upload-time = "2025-05-22T07:27:32.088Z" }, + { url = "https://files.pythonhosted.org/packages/0f/0e/53616a5ed8fc1fbe9e48bf132862da5a9abf5cc7f8483dab1722ec257187/nodejs_wheel_binaries-22.16.0-py2.py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:447ad796850eb52ca20356ad39b2d296ed8fef3f214921f84a1ccdad49f2eba1", size = 59712469, upload-time = "2025-05-22T07:27:37.193Z" }, + { url = "https://files.pythonhosted.org/packages/4a/cd/e2b5083df581fc1d08eb93feb6f8fbd3d56b113cef9b59d8e0fb7d4dd4f3/nodejs_wheel_binaries-22.16.0-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7f526ca6a132b0caf633566a2a78c6985fe92857e7bfdb37380f76205a10b808", size = 60763005, upload-time = "2025-05-22T07:27:41.39Z" }, + { url = "https://files.pythonhosted.org/packages/71/8d/57112b49214e8bd636f3cc3386eba6be4d23552ec8a0f6efbe814013caa7/nodejs_wheel_binaries-22.16.0-py2.py3-none-win_amd64.whl", hash = "sha256:2fffb4bf1066fb5f660da20819d754f1b424bca1b234ba0f4fa901c52e3975fb", size = 41313324, upload-time = "2025-05-22T07:27:45.293Z" }, + { url = "https://files.pythonhosted.org/packages/91/03/a852711aec73dfb965844592dfe226024c0da28e37d1ee54083342e38f57/nodejs_wheel_binaries-22.16.0-py2.py3-none-win_arm64.whl", hash = "sha256:2728972d336d436d39ee45988978d8b5d963509e06f063e80fe41b203ee80b28", size = 38828154, upload-time = "2025-05-22T07:27:48.606Z" }, 
] [[package]] name = "packaging" -version = "24.2" +version = "25.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] [[package]] name = "parso" version = "0.8.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609, upload-time = "2024-04-05T09:43:55.897Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", 
hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650, upload-time = "2024-04-05T09:43:53.299Z" }, ] [[package]] @@ -329,86 +347,86 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "ptyprocess" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, ] [[package]] name = "pluggy" -version = "1.5.0" +version = "1.6.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +sdist = { url = 
"https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, ] [[package]] name = "prompt-toolkit" -version = "3.0.50" +version = "3.0.51" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "wcwidth" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087 } +sdist = { url = "https://files.pythonhosted.org/packages/bb/6e/9d084c929dfe9e3bfe0c6a47e31f78a25c54627d64a66e884a8bf5474f1c/prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed", size = 428940, upload-time = "2025-04-15T09:18:47.731Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816 }, + { url = "https://files.pythonhosted.org/packages/ce/4f/5249960887b1fbe561d9ff265496d170b55a735b76724f10ef19f9e40716/prompt_toolkit-3.0.51-py3-none-any.whl", hash = 
"sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07", size = 387810, upload-time = "2025-04-15T09:18:44.753Z" }, ] [[package]] name = "psutil" -version = "6.1.1" +version = "7.0.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } +sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003, upload-time = "2025-02-13T21:54:07.946Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, - { url = "https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, - { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, - { url = "https://files.pythonhosted.org/packages/9c/39/0f88a830a1c8a3aba27fededc642da37613c57cbff143412e3536f89784f/psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160", size = 287477 }, - { url = 
"https://files.pythonhosted.org/packages/47/da/99f4345d4ddf2845cb5b5bd0d93d554e84542d116934fde07a0c50bd4e9f/psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3", size = 289017 }, - { url = "https://files.pythonhosted.org/packages/38/53/bd755c2896f4461fd4f36fa6a6dcb66a88a9e4b9fd4e5b66a77cf9d4a584/psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53", size = 250602 }, - { url = "https://files.pythonhosted.org/packages/7b/d7/7831438e6c3ebbfa6e01a927127a6cb42ad3ab844247f3c5b96bea25d73d/psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649", size = 254444 }, + { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051, upload-time = "2025-02-13T21:54:12.36Z" }, + { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535, upload-time = "2025-02-13T21:54:16.07Z" }, + { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004, upload-time = "2025-02-13T21:54:18.662Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986, upload-time = "2025-02-13T21:54:21.811Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544, upload-time = "2025-02-13T21:54:24.68Z" }, + { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053, upload-time = "2025-02-13T21:54:34.31Z" }, + { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" }, ] [[package]] name = "ptyprocess" version = "0.7.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, + { url = 
"https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, ] [[package]] name = "pure-eval" version = "0.2.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, ] [[package]] name = "pycparser" version = "2.22" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = 
"sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, ] [[package]] name = "pygments" version = "2.19.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581, upload-time = "2025-01-06T17:26:30.443Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293, upload-time = "2025-01-06T17:26:25.553Z" }, ] [[package]] name = "pytest" -version = "8.3.4" +version = "8.3.5" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, @@ -416,15 +434,15 @@ dependencies = [ { name = "packaging" }, { name = "pluggy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, + { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" }, ] [[package]] name = "pytest-cs" -version = "0.7.21" -source = { git = "https://github.com/crowdsecurity/pytest-cs#1eb949d7befa6fe172bf459616b267d4ffc01179" } +version = "0.7.23" +source = { git = "https://github.com/crowdsecurity/pytest-cs#0ba706aaa34401a55b4e35b4d4252bc953242570" } dependencies = [ { name = "docker" }, { name = "psutil" }, @@ -438,14 +456,14 @@ dependencies = [ [[package]] name = "pytest-datadir" -version = "1.5.0" +version = "1.6.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "pytest" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/aa/97/a93900d82635aa3f419c3cd2059b4de7d7fe44e415eaf00c298854582dcc/pytest-datadir-1.5.0.tar.gz", hash = 
"sha256:1617ed92f9afda0c877e4eac91904b5f779d24ba8f5e438752e3ae39d8d2ee3f", size = 8821 } +sdist = { url = "https://files.pythonhosted.org/packages/bb/0e/63301415b9233f0131339799d49ce0c0e8804d82f3f12615056a70e563c5/pytest_datadir-1.6.1.tar.gz", hash = "sha256:4d204cf93cfe62ddc37b19922df6c8c0f133c2899c224bd339b24920e84e7fd3", size = 9391, upload-time = "2025-02-07T18:29:55.226Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7c/90/96b9474cddda5ef9e10e6f1871c0fadfa153b605e0e749ba30437bfb62a0/pytest_datadir-1.5.0-py3-none-any.whl", hash = "sha256:34adf361bcc7b37961bbc1dfa8d25a4829e778bab461703c38a5c50ca9c36dc8", size = 5095 }, + { url = "https://files.pythonhosted.org/packages/55/f1/5e4d95ce96c03332726d7fd87b7b500f178994b638ca6a88b4ed3ca64438/pytest_datadir-1.6.1-py3-none-any.whl", hash = "sha256:aa427f6218d3fc7481129d59c892bd7adfb8822613a2726ffc97f51968879cdb", size = 5156, upload-time = "2025-02-07T18:29:53.788Z" }, ] [[package]] @@ -456,9 +474,9 @@ dependencies = [ { name = "pytest" }, { name = "python-dotenv" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cd/b0/cafee9c627c1bae228eb07c9977f679b3a7cb111b488307ab9594ba9e4da/pytest-dotenv-0.5.2.tar.gz", hash = "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732", size = 3782 } +sdist = { url = "https://files.pythonhosted.org/packages/cd/b0/cafee9c627c1bae228eb07c9977f679b3a7cb111b488307ab9594ba9e4da/pytest-dotenv-0.5.2.tar.gz", hash = "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732", size = 3782, upload-time = "2020-06-16T12:38:03.4Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/da/9da67c67b3d0963160e3d2cbc7c38b6fae342670cc8e6d5936644b2cf944/pytest_dotenv-0.5.2-py3-none-any.whl", hash = "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f", size = 3993 }, + { url = 
"https://files.pythonhosted.org/packages/d0/da/9da67c67b3d0963160e3d2cbc7c38b6fae342670cc8e6d5936644b2cf944/pytest_dotenv-0.5.2-py3-none-any.whl", hash = "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f", size = 3993, upload-time = "2020-06-16T12:38:01.139Z" }, ] [[package]] @@ -469,57 +487,57 @@ dependencies = [ { name = "execnet" }, { name = "pytest" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/41/c4/3c310a19bc1f1e9ef50075582652673ef2bfc8cd62afef9585683821902f/pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d", size = 84060 } +sdist = { url = "https://files.pythonhosted.org/packages/41/c4/3c310a19bc1f1e9ef50075582652673ef2bfc8cd62afef9585683821902f/pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d", size = 84060, upload-time = "2024-04-28T19:29:54.414Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6d/82/1d96bf03ee4c0fdc3c0cbe61470070e659ca78dc0086fb88b66c185e2449/pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7", size = 46108 }, + { url = "https://files.pythonhosted.org/packages/6d/82/1d96bf03ee4c0fdc3c0cbe61470070e659ca78dc0086fb88b66c185e2449/pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7", size = 46108, upload-time = "2024-04-28T19:29:52.813Z" }, ] [[package]] name = "python-dotenv" -version = "1.0.1" +version = "1.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 } +sdist = { url = 
"https://files.pythonhosted.org/packages/88/2c/7bb1416c5620485aa793f2de31d3df393d3686aa8a8506d11e10e13c5baf/python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5", size = 39920, upload-time = "2025-03-25T10:14:56.835Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 }, + { url = "https://files.pythonhosted.org/packages/1e/18/98a99ad95133c6a6e2005fe89faedf294a748bd5dc803008059409ac9b1e/python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d", size = 20256, upload-time = "2025-03-25T10:14:55.034Z" }, ] [[package]] name = "pywin32" -version = "308" +version = "310" source = { registry = "https://pypi.org/simple" } wheels = [ - { url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size = 5939729 }, - { url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 }, - { url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 }, - { url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 }, - { url = 
"https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 }, - { url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 }, + { url = "https://files.pythonhosted.org/packages/6b/ec/4fdbe47932f671d6e348474ea35ed94227fb5df56a7c30cbbb42cd396ed0/pywin32-310-cp312-cp312-win32.whl", hash = "sha256:8a75a5cc3893e83a108c05d82198880704c44bbaee4d06e442e471d3c9ea4f3d", size = 8796239, upload-time = "2025-03-17T00:55:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/e3/e5/b0627f8bb84e06991bea89ad8153a9e50ace40b2e1195d68e9dff6b03d0f/pywin32-310-cp312-cp312-win_amd64.whl", hash = "sha256:bf5c397c9a9a19a6f62f3fb821fbf36cac08f03770056711f765ec1503972060", size = 9503839, upload-time = "2025-03-17T00:56:00.8Z" }, + { url = "https://files.pythonhosted.org/packages/1f/32/9ccf53748df72301a89713936645a664ec001abd35ecc8578beda593d37d/pywin32-310-cp312-cp312-win_arm64.whl", hash = "sha256:2349cc906eae872d0663d4d6290d13b90621eaf78964bb1578632ff20e152966", size = 8459470, upload-time = "2025-03-17T00:56:02.601Z" }, + { url = "https://files.pythonhosted.org/packages/1c/09/9c1b978ffc4ae53999e89c19c77ba882d9fce476729f23ef55211ea1c034/pywin32-310-cp313-cp313-win32.whl", hash = "sha256:5d241a659c496ada3253cd01cfaa779b048e90ce4b2b38cd44168ad555ce74ab", size = 8794384, upload-time = "2025-03-17T00:56:04.383Z" }, + { url = "https://files.pythonhosted.org/packages/45/3c/b4640f740ffebadd5d34df35fecba0e1cfef8fde9f3e594df91c28ad9b50/pywin32-310-cp313-cp313-win_amd64.whl", hash = "sha256:667827eb3a90208ddbdcc9e860c81bde63a135710e21e4cb3348968e4bd5249e", size = 9503039, upload-time = "2025-03-17T00:56:06.207Z" }, + { 
url = "https://files.pythonhosted.org/packages/b4/f4/f785020090fb050e7fb6d34b780f2231f302609dc964672f72bfaeb59a28/pywin32-310-cp313-cp313-win_arm64.whl", hash = "sha256:e308f831de771482b7cf692a1f308f8fca701b2d8f9dde6cc440c7da17e47b33", size = 8458152, upload-time = "2025-03-17T00:56:07.819Z" }, ] [[package]] name = "pyyaml" version = "6.0.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, - { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, - { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, - { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, - { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, - { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, - { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, - { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, - { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, - { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, - { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, - { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, - { url = 
"https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, - { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, - { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, - { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, - { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, - { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, ] [[package]] @@ -532,34 +550,34 @@ dependencies = [ { name = "idna" }, { name = "urllib3" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218, upload-time = "2024-05-29T15:37:49.536Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928, upload-time = "2024-05-29T15:37:47.027Z" }, ] [[package]] name = "ruff" -version = "0.9.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c0/17/529e78f49fc6f8076f50d985edd9a2cf011d1dbadb1cdeacc1d12afc1d26/ruff-0.9.4.tar.gz", hash = "sha256:6907ee3529244bb0ed066683e075f09285b38dd5b4039370df6ff06041ca19e7", size = 3599458 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b6/f8/3fafb7804d82e0699a122101b5bee5f0d6e17c3a806dcbc527bb7d3f5b7a/ruff-0.9.4-py3-none-linux_armv6l.whl", hash = "sha256:64e73d25b954f71ff100bb70f39f1ee09e880728efb4250c632ceed4e4cdf706", size = 11668400 }, - { url = "https://files.pythonhosted.org/packages/2e/a6/2efa772d335da48a70ab2c6bb41a096c8517ca43c086ea672d51079e3d1f/ruff-0.9.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6ce6743ed64d9afab4fafeaea70d3631b4d4b28b592db21a5c2d1f0ef52934bf", size = 11628395 }, - { url = 
"https://files.pythonhosted.org/packages/dc/d7/cd822437561082f1c9d7225cc0d0fbb4bad117ad7ac3c41cd5d7f0fa948c/ruff-0.9.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:54499fb08408e32b57360f6f9de7157a5fec24ad79cb3f42ef2c3f3f728dfe2b", size = 11090052 }, - { url = "https://files.pythonhosted.org/packages/9e/67/3660d58e893d470abb9a13f679223368ff1684a4ef40f254a0157f51b448/ruff-0.9.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37c892540108314a6f01f105040b5106aeb829fa5fb0561d2dcaf71485021137", size = 11882221 }, - { url = "https://files.pythonhosted.org/packages/79/d1/757559995c8ba5f14dfec4459ef2dd3fcea82ac43bc4e7c7bf47484180c0/ruff-0.9.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de9edf2ce4b9ddf43fd93e20ef635a900e25f622f87ed6e3047a664d0e8f810e", size = 11424862 }, - { url = "https://files.pythonhosted.org/packages/c0/96/7915a7c6877bb734caa6a2af424045baf6419f685632469643dbd8eb2958/ruff-0.9.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c90c32357c74f11deb7fbb065126d91771b207bf9bfaaee01277ca59b574ec", size = 12626735 }, - { url = "https://files.pythonhosted.org/packages/0e/cc/dadb9b35473d7cb17c7ffe4737b4377aeec519a446ee8514123ff4a26091/ruff-0.9.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:56acd6c694da3695a7461cc55775f3a409c3815ac467279dfa126061d84b314b", size = 13255976 }, - { url = "https://files.pythonhosted.org/packages/5f/c3/ad2dd59d3cabbc12df308cced780f9c14367f0321e7800ca0fe52849da4c/ruff-0.9.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0c93e7d47ed951b9394cf352d6695b31498e68fd5782d6cbc282425655f687a", size = 12752262 }, - { url = "https://files.pythonhosted.org/packages/c7/17/5f1971e54bd71604da6788efd84d66d789362b1105e17e5ccc53bba0289b/ruff-0.9.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4c8772670aecf037d1bf7a07c39106574d143b26cfe5ed1787d2f31e800214", size = 14401648 }, - { url = 
"https://files.pythonhosted.org/packages/30/24/6200b13ea611b83260501b6955b764bb320e23b2b75884c60ee7d3f0b68e/ruff-0.9.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfc5f1d7afeda8d5d37660eeca6d389b142d7f2b5a1ab659d9214ebd0e025231", size = 12414702 }, - { url = "https://files.pythonhosted.org/packages/34/cb/f5d50d0c4ecdcc7670e348bd0b11878154bc4617f3fdd1e8ad5297c0d0ba/ruff-0.9.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:faa935fc00ae854d8b638c16a5f1ce881bc3f67446957dd6f2af440a5fc8526b", size = 11859608 }, - { url = "https://files.pythonhosted.org/packages/d6/f4/9c8499ae8426da48363bbb78d081b817b0f64a9305f9b7f87eab2a8fb2c1/ruff-0.9.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a6c634fc6f5a0ceae1ab3e13c58183978185d131a29c425e4eaa9f40afe1e6d6", size = 11485702 }, - { url = "https://files.pythonhosted.org/packages/18/59/30490e483e804ccaa8147dd78c52e44ff96e1c30b5a95d69a63163cdb15b/ruff-0.9.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:433dedf6ddfdec7f1ac7575ec1eb9844fa60c4c8c2f8887a070672b8d353d34c", size = 12067782 }, - { url = "https://files.pythonhosted.org/packages/3d/8c/893fa9551760b2f8eb2a351b603e96f15af167ceaf27e27ad873570bc04c/ruff-0.9.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d612dbd0f3a919a8cc1d12037168bfa536862066808960e0cc901404b77968f0", size = 12483087 }, - { url = "https://files.pythonhosted.org/packages/23/15/f6751c07c21ca10e3f4a51ea495ca975ad936d780c347d9808bcedbd7182/ruff-0.9.4-py3-none-win32.whl", hash = "sha256:db1192ddda2200671f9ef61d9597fcef89d934f5d1705e571a93a67fb13a4402", size = 9852302 }, - { url = "https://files.pythonhosted.org/packages/12/41/2d2d2c6a72e62566f730e49254f602dfed23019c33b5b21ea8f8917315a1/ruff-0.9.4-py3-none-win_amd64.whl", hash = "sha256:05bebf4cdbe3ef75430d26c375773978950bbf4ee3c95ccb5448940dc092408e", size = 10850051 }, - { url = "https://files.pythonhosted.org/packages/c6/e6/3d6ec3bc3d254e7f005c543a661a41c3e788976d0e52a1ada195bd664344/ruff-0.9.4-py3-none-win_arm64.whl", 
hash = "sha256:585792f1e81509e38ac5123492f8875fbc36f3ede8185af0a26df348e5154f41", size = 10078251 }, +version = "0.11.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/4c/4a3c5a97faaae6b428b336dcca81d03ad04779f8072c267ad2bd860126bf/ruff-0.11.10.tar.gz", hash = "sha256:d522fb204b4959909ecac47da02830daec102eeb100fb50ea9554818d47a5fa6", size = 4165632, upload-time = "2025-05-15T14:08:56.76Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/9f/596c628f8824a2ce4cd12b0f0b4c0629a62dfffc5d0f742c19a1d71be108/ruff-0.11.10-py3-none-linux_armv6l.whl", hash = "sha256:859a7bfa7bc8888abbea31ef8a2b411714e6a80f0d173c2a82f9041ed6b50f58", size = 10316243, upload-time = "2025-05-15T14:08:12.884Z" }, + { url = "https://files.pythonhosted.org/packages/3c/38/c1e0b77ab58b426f8c332c1d1d3432d9fc9a9ea622806e208220cb133c9e/ruff-0.11.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:968220a57e09ea5e4fd48ed1c646419961a0570727c7e069842edd018ee8afed", size = 11083636, upload-time = "2025-05-15T14:08:16.551Z" }, + { url = "https://files.pythonhosted.org/packages/23/41/b75e15961d6047d7fe1b13886e56e8413be8467a4e1be0a07f3b303cd65a/ruff-0.11.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:1067245bad978e7aa7b22f67113ecc6eb241dca0d9b696144256c3a879663bca", size = 10441624, upload-time = "2025-05-15T14:08:19.032Z" }, + { url = "https://files.pythonhosted.org/packages/b6/2c/e396b6703f131406db1811ea3d746f29d91b41bbd43ad572fea30da1435d/ruff-0.11.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4854fd09c7aed5b1590e996a81aeff0c9ff51378b084eb5a0b9cd9518e6cff2", size = 10624358, upload-time = "2025-05-15T14:08:21.542Z" }, + { url = "https://files.pythonhosted.org/packages/bd/8c/ee6cca8bdaf0f9a3704796022851a33cd37d1340bceaf4f6e991eb164e2e/ruff-0.11.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b4564e9f99168c0f9195a0fd5fa5928004b33b377137f978055e40008a082c5", size 
= 10176850, upload-time = "2025-05-15T14:08:23.682Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ce/4e27e131a434321b3b7c66512c3ee7505b446eb1c8a80777c023f7e876e6/ruff-0.11.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b6a9cc5b62c03cc1fea0044ed8576379dbaf751d5503d718c973d5418483641", size = 11759787, upload-time = "2025-05-15T14:08:25.733Z" }, + { url = "https://files.pythonhosted.org/packages/58/de/1e2e77fc72adc7cf5b5123fd04a59ed329651d3eab9825674a9e640b100b/ruff-0.11.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:607ecbb6f03e44c9e0a93aedacb17b4eb4f3563d00e8b474298a201622677947", size = 12430479, upload-time = "2025-05-15T14:08:28.013Z" }, + { url = "https://files.pythonhosted.org/packages/07/ed/af0f2340f33b70d50121628ef175523cc4c37619e98d98748c85764c8d88/ruff-0.11.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b3a522fa389402cd2137df9ddefe848f727250535c70dafa840badffb56b7a4", size = 11919760, upload-time = "2025-05-15T14:08:30.956Z" }, + { url = "https://files.pythonhosted.org/packages/24/09/d7b3d3226d535cb89234390f418d10e00a157b6c4a06dfbe723e9322cb7d/ruff-0.11.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f071b0deed7e9245d5820dac235cbdd4ef99d7b12ff04c330a241ad3534319f", size = 14041747, upload-time = "2025-05-15T14:08:33.297Z" }, + { url = "https://files.pythonhosted.org/packages/62/b3/a63b4e91850e3f47f78795e6630ee9266cb6963de8f0191600289c2bb8f4/ruff-0.11.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a60e3a0a617eafba1f2e4186d827759d65348fa53708ca547e384db28406a0b", size = 11550657, upload-time = "2025-05-15T14:08:35.639Z" }, + { url = "https://files.pythonhosted.org/packages/46/63/a4f95c241d79402ccdbdb1d823d156c89fbb36ebfc4289dce092e6c0aa8f/ruff-0.11.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:da8ec977eaa4b7bf75470fb575bea2cb41a0e07c7ea9d5a0a97d13dbca697bf2", size = 10489671, upload-time = 
"2025-05-15T14:08:38.437Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/c2238bfebf1e473495659c523d50b1685258b6345d5ab0b418ca3f010cd7/ruff-0.11.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ddf8967e08227d1bd95cc0851ef80d2ad9c7c0c5aab1eba31db49cf0a7b99523", size = 10160135, upload-time = "2025-05-15T14:08:41.247Z" }, + { url = "https://files.pythonhosted.org/packages/ba/ef/ba7251dd15206688dbfba7d413c0312e94df3b31b08f5d695580b755a899/ruff-0.11.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5a94acf798a82db188f6f36575d80609072b032105d114b0f98661e1679c9125", size = 11170179, upload-time = "2025-05-15T14:08:43.762Z" }, + { url = "https://files.pythonhosted.org/packages/73/9f/5c336717293203ba275dbfa2ea16e49b29a9fd9a0ea8b6febfc17e133577/ruff-0.11.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3afead355f1d16d95630df28d4ba17fb2cb9c8dfac8d21ced14984121f639bad", size = 11626021, upload-time = "2025-05-15T14:08:46.451Z" }, + { url = "https://files.pythonhosted.org/packages/d9/2b/162fa86d2639076667c9aa59196c020dc6d7023ac8f342416c2f5ec4bda0/ruff-0.11.10-py3-none-win32.whl", hash = "sha256:dc061a98d32a97211af7e7f3fa1d4ca2fcf919fb96c28f39551f35fc55bdbc19", size = 10494958, upload-time = "2025-05-15T14:08:49.601Z" }, + { url = "https://files.pythonhosted.org/packages/24/f3/66643d8f32f50a4b0d09a4832b7d919145ee2b944d43e604fbd7c144d175/ruff-0.11.10-py3-none-win_amd64.whl", hash = "sha256:5cc725fbb4d25b0f185cb42df07ab6b76c4489b4bfb740a175f3a59c70e8a224", size = 11650285, upload-time = "2025-05-15T14:08:52.392Z" }, + { url = "https://files.pythonhosted.org/packages/95/3a/2e8704d19f376c799748ff9cb041225c1d59f3e7711bc5596c8cfdc24925/ruff-0.11.10-py3-none-win_arm64.whl", hash = "sha256:ef69637b35fb8b210743926778d0e45e1bffa850a7c61e428c6b971549b5f5d1", size = 10765278, upload-time = "2025-05-15T14:08:54.56Z" }, ] [[package]] @@ -571,18 +589,18 @@ dependencies = [ { name = "executing" }, { name = "pure-eval" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } +sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" }, ] [[package]] name = "traitlets" version = "5.14.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, + { url = 
"https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, ] [[package]] @@ -593,25 +611,25 @@ dependencies = [ { name = "cryptography" }, { name = "idna" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/4b/c5/931476f4cf1cd9e736f32651005078061a50dc164a2569fb874e00eb2786/trustme-1.2.1.tar.gz", hash = "sha256:6528ba2bbc7f2db41f33825c8dd13e3e3eb9d334ba0f909713c8c3139f4ae47f", size = 26844 } +sdist = { url = "https://files.pythonhosted.org/packages/4b/c5/931476f4cf1cd9e736f32651005078061a50dc164a2569fb874e00eb2786/trustme-1.2.1.tar.gz", hash = "sha256:6528ba2bbc7f2db41f33825c8dd13e3e3eb9d334ba0f909713c8c3139f4ae47f", size = 26844, upload-time = "2025-01-02T01:55:32.632Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b5/f3/c34dbabf6da5eda56fe923226769d40e11806952cd7f46655dd06e10f018/trustme-1.2.1-py3-none-any.whl", hash = "sha256:d768e5fc57c86dfc5ec9365102e9b092541cd6954b35d8c1eea01a84f35a762a", size = 16530 }, + { url = "https://files.pythonhosted.org/packages/b5/f3/c34dbabf6da5eda56fe923226769d40e11806952cd7f46655dd06e10f018/trustme-1.2.1-py3-none-any.whl", hash = "sha256:d768e5fc57c86dfc5ec9365102e9b092541cd6954b35d8c1eea01a84f35a762a", size = 16530, upload-time = "2025-01-02T01:55:30.181Z" }, ] [[package]] name = "urllib3" -version = "2.3.0" +version = "2.4.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } +sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = 
"sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, + { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" }, ] [[package]] name = "wcwidth" version = "0.2.13" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" }, ] diff --git a/pkg/apiclient/auth_service.go b/pkg/apiclient/auth_service.go index 1dfdeb23c5e..736bd56e9a6 100644 
--- a/pkg/apiclient/auth_service.go +++ b/pkg/apiclient/auth_service.go @@ -19,7 +19,7 @@ type enrollRequest struct { } func (s *AuthService) UnregisterWatcher(ctx context.Context) (*Response, error) { - u := fmt.Sprintf("%s/watchers", s.client.URLPrefix) + u := fmt.Sprintf("%s/watchers/self", s.client.URLPrefix) req, err := s.client.PrepareRequest(ctx, http.MethodDelete, u, nil) if err != nil { diff --git a/pkg/apiclient/auth_service_test.go b/pkg/apiclient/auth_service_test.go index c49cd0d94c0..67c07d9a125 100644 --- a/pkg/apiclient/auth_service_test.go +++ b/pkg/apiclient/auth_service_test.go @@ -188,7 +188,7 @@ func TestWatcherUnregister(t *testing.T) { defer teardown() // body: models.WatcherRegistrationRequest{MachineID: &config.MachineID, Password: &config.Password} - mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/watchers/self", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") assert.Equal(t, int64(0), r.ContentLength) w.WriteHeader(http.StatusOK) diff --git a/pkg/apiserver/controllers/controller.go b/pkg/apiserver/controllers/controller.go index c7503c9ef5e..60c101aa67a 100644 --- a/pkg/apiserver/controllers/controller.go +++ b/pkg/apiserver/controllers/controller.go @@ -130,6 +130,7 @@ func (c *Controller) NewV1() error { jwtAuth.GET("/allowlists/check/:ip_or_range", c.HandlerV1.CheckInAllowlist) jwtAuth.HEAD("/allowlists/check/:ip_or_range", c.HandlerV1.CheckInAllowlist) jwtAuth.POST("/allowlists/check", c.HandlerV1.CheckInAllowlistBulk) + jwtAuth.DELETE("/watchers/self", c.HandlerV1.DeleteMachine) } apiKeyAuth := groupV1.Group("") diff --git a/pkg/apiserver/controllers/v1/machines.go b/pkg/apiserver/controllers/v1/machines.go index ff59e389cb1..8c799fa5114 100644 --- a/pkg/apiserver/controllers/v1/machines.go +++ b/pkg/apiserver/controllers/v1/machines.go @@ -80,3 +80,27 @@ func (c *Controller) CreateMachine(gctx *gin.Context) { gctx.Status(http.StatusCreated) } } + +func (c 
*Controller) DeleteMachine(gctx *gin.Context) { + ctx := gctx.Request.Context() + + machineID, err := getMachineIDFromContext(gctx) + + if err != nil { + gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) + return + } + if machineID == "" { + gctx.JSON(http.StatusBadRequest, gin.H{"message": "machineID not found in claims"}) + return + } + + if err := c.DBClient.DeleteWatcher(ctx, machineID); err != nil { + c.HandleDBErrors(gctx, err) + return + } + + log.WithFields(log.Fields{"ip": gctx.ClientIP(), "machine_id": machineID}).Info("Deleted machine") + + gctx.Status(http.StatusNoContent) +} diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 8732523094a..75c8b57eb32 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -56,6 +56,7 @@ type LocalApiClientCfg struct { CredentialsFilePath string `yaml:"credentials_path,omitempty"` // credz will be edited by software, store in diff file Credentials *ApiCredentialsCfg `yaml:"-"` InsecureSkipVerify *bool `yaml:"insecure_skip_verify"` // check if api certificate is bad or not + UnregisterOnExit bool `yaml:"unregister_on_exit,omitempty"` } type CTICfg struct { diff --git a/pkg/models/localapi_swagger.yaml b/pkg/models/localapi_swagger.yaml index adbb2ef8227..fda6bce31e8 100644 --- a/pkg/models/localapi_swagger.yaml +++ b/pkg/models/localapi_swagger.yaml @@ -319,6 +319,23 @@ paths: description: "400 response" schema: $ref: "#/definitions/ErrorResponse" + /watchers/self: + delete: + description: watcher self-delete + summary: DeleteWatcher + tags: + - watchers + operationId: DeleteWatcher + deprecated: false + responses: + '204': + description: Watcher Deleted + '403': + description: "403 response" + schema: + $ref: "#/definitions/ErrorResponse" + security: + - JWTAuthorizer: [] /watchers/login: post: description: Authenticate current to get session ID diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index 97f8584a1f6..4376313fedd 100644 --- a/test/bats/30_machines.bats +++ 
b/test/bats/30_machines.bats @@ -13,7 +13,6 @@ teardown_file() { setup() { load "../lib/setup.sh" ./instance-data load - ./instance-crowdsec start } teardown() { @@ -93,6 +92,7 @@ teardown() { } @test "register, validate and then remove a machine" { + ./instance-crowdsec start rune -0 cscli lapi register --machine CiTestMachineRegister -f /dev/null -o human assert_stderr --partial "Successfully registered to Local API (LAPI)" assert_stderr --partial "Local API credentials written to '/dev/null'" @@ -119,6 +119,7 @@ teardown() { rune -0 cscli machines list -o json rune -0 jq '. | length' <(output) assert_output 1 + ./instance-crowdsec stop } @test "cscli machines prune" { @@ -144,3 +145,21 @@ teardown() { rune -0 cscli machines prune assert_output 'No machines to prune.' } + +#### THIS TEST MUST BE LAST + +#@test "machine auto-delete" { +# config_set '.api.client.unregister_on_exit = true' +# +# # Stop and start crowdsec to make the machine delete itself +# ./instance-crowdsec start +# +# ./instance-crowdsec stop +# +# # we have 0 machines now +# rune -0 cscli machines list -o json +# rune -0 jq '. 
| length' <(output) +# assert_output 0 +#} + +#### From 7314a4988ea58a90732da7fb9dcefecbf51d2a08 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 28 May 2025 15:22:43 +0200 Subject: [PATCH 540/581] CI: ensure tests don't alter the repository (#3616) --- .github/workflows/go-tests.yml | 11 ++++ pkg/acquisition/acquisition_test.go | 32 ++++++------ pkg/acquisition/modules/file/file_test.go | 48 ++++++++++-------- .../file/{test_files => testdata}/bad.gz | 0 .../file/{test_files => testdata}/test.log | 0 .../file/{test_files => testdata}/test.log.gz | Bin .../modules/journalctl/journalctl_test.go | 3 +- .../{test_files => testdata}/journalctl | 22 ++++---- .../{test_files => testdata}/Setup.evtx | Bin .../wineventlog/wineventlog_windows_test.go | 6 +-- .../backward_compat.yaml | 0 .../bad_filetype.yaml | 0 .../{test_files => testdata}/bad_source.yaml | 0 .../{test_files => testdata}/badyaml.yaml | 0 .../basic_filemode.yaml | 0 .../{test_files => testdata}/emptyitem.yaml | 0 .../{test_files => testdata}/env.yaml | 0 .../missing_labels.yaml | 0 pkg/csplugin/broker_suite_test.go | 6 ++- pkg/csplugin/broker_test.go | 28 +++++----- pkg/csplugin/broker_win_test.go | 8 +-- pkg/csplugin/testdata/dummy.yaml | 2 +- 22 files changed, 91 insertions(+), 75 deletions(-) rename pkg/acquisition/modules/file/{test_files => testdata}/bad.gz (100%) rename pkg/acquisition/modules/file/{test_files => testdata}/test.log (100%) rename pkg/acquisition/modules/file/{test_files => testdata}/test.log.gz (100%) rename pkg/acquisition/modules/journalctl/{test_files => testdata}/journalctl (78%) rename pkg/acquisition/modules/wineventlog/{test_files => testdata}/Setup.evtx (100%) rename pkg/acquisition/{test_files => testdata}/backward_compat.yaml (100%) rename pkg/acquisition/{test_files => testdata}/bad_filetype.yaml (100%) rename pkg/acquisition/{test_files => testdata}/bad_source.yaml (100%) rename pkg/acquisition/{test_files => testdata}/badyaml.yaml 
(100%) rename pkg/acquisition/{test_files => testdata}/basic_filemode.yaml (100%) rename pkg/acquisition/{test_files => testdata}/emptyitem.yaml (100%) rename pkg/acquisition/{test_files => testdata}/env.yaml (100%) rename pkg/acquisition/{test_files => testdata}/missing_labels.yaml (100%) diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index b0348c8422c..687a236d47f 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -181,7 +181,18 @@ jobs: - name: Unit tests run: | go install gotest.tools/gotestsum@v1.12.1 + # make the repo read-only, with the exception of coverage output + touch coverage.out + chmod -R a-w . + chmod u+w coverage.out make testcover + # ignore changes to codecov.yml + if [[ $(git status --porcelain -- . ":(exclude).github/codecov.yml") ]]; then + echo "Error: Unit tests should not create or alter files inside the repository. Please use the appropriate testing helpers or otherwise temporary locations." + git diff --name-only + exit 1 + fi + chmod -R u+w . 
# check if some component stubs are missing - name: "Build profile: minimal" diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index 6247c876040..c0927c6d8fe 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -68,7 +68,7 @@ func (f *MockSource) StreamingAcquisition(context.Context, chan types.Event, *to func (f *MockSource) CanRun() error { return nil } func (f *MockSource) GetMetrics() []prometheus.Collector { return nil } func (f *MockSource) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockSource) Dump() interface{} { return f } +func (f *MockSource) Dump() any { return f } func (f *MockSource) GetName() string { return "mock" } func (f *MockSource) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { return errors.New("not supported") @@ -236,57 +236,57 @@ func TestLoadAcquisitionFromFile(t *testing.T) { { TestName: "invalid_yaml_file", Config: csconfig.CrowdsecServiceCfg{ - AcquisitionFiles: []string{"test_files/badyaml.yaml"}, + AcquisitionFiles: []string{"testdata/badyaml.yaml"}, }, - ExpectedError: "failed to parse test_files/badyaml.yaml: yaml: unmarshal errors", + ExpectedError: "failed to parse testdata/badyaml.yaml: yaml: unmarshal errors", ExpectedLen: 0, }, { TestName: "invalid_empty_yaml", Config: csconfig.CrowdsecServiceCfg{ - AcquisitionFiles: []string{"test_files/emptyitem.yaml"}, + AcquisitionFiles: []string{"testdata/emptyitem.yaml"}, }, ExpectedLen: 0, }, { TestName: "basic_valid", Config: csconfig.CrowdsecServiceCfg{ - AcquisitionFiles: []string{"test_files/basic_filemode.yaml"}, + AcquisitionFiles: []string{"testdata/basic_filemode.yaml"}, }, ExpectedLen: 2, }, { TestName: "missing_labels", Config: csconfig.CrowdsecServiceCfg{ - AcquisitionFiles: []string{"test_files/missing_labels.yaml"}, + AcquisitionFiles: []string{"testdata/missing_labels.yaml"}, }, - ExpectedError: "missing labels in test_files/missing_labels.yaml", + 
ExpectedError: "missing labels in testdata/missing_labels.yaml", }, { TestName: "backward_compat", Config: csconfig.CrowdsecServiceCfg{ - AcquisitionFiles: []string{"test_files/backward_compat.yaml"}, + AcquisitionFiles: []string{"testdata/backward_compat.yaml"}, }, ExpectedLen: 2, }, { TestName: "bad_type", Config: csconfig.CrowdsecServiceCfg{ - AcquisitionFiles: []string{"test_files/bad_source.yaml"}, + AcquisitionFiles: []string{"testdata/bad_source.yaml"}, }, - ExpectedError: "in file test_files/bad_source.yaml (position 0) - unknown data source does_not_exist", + ExpectedError: "in file testdata/bad_source.yaml (position 0) - unknown data source does_not_exist", }, { TestName: "invalid_filetype_config", Config: csconfig.CrowdsecServiceCfg{ - AcquisitionFiles: []string{"test_files/bad_filetype.yaml"}, + AcquisitionFiles: []string{"testdata/bad_filetype.yaml"}, }, - ExpectedError: "while configuring datasource of type file from test_files/bad_filetype.yaml", + ExpectedError: "while configuring datasource of type file from testdata/bad_filetype.yaml", }, { TestName: "from_env", Config: csconfig.CrowdsecServiceCfg{ - AcquisitionFiles: []string{"test_files/env.yaml"}, + AcquisitionFiles: []string{"testdata/env.yaml"}, }, ExpectedLen: 1, }, @@ -356,7 +356,7 @@ func (f *MockCat) StreamingAcquisition(context.Context, chan types.Event, *tomb. 
func (f *MockCat) CanRun() error { return nil } func (f *MockCat) GetMetrics() []prometheus.Collector { return nil } func (f *MockCat) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockCat) Dump() interface{} { return f } +func (f *MockCat) Dump() any { return f } func (f *MockCat) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { return errors.New("not supported") } @@ -403,7 +403,7 @@ func (f *MockTail) StreamingAcquisition(ctx context.Context, out chan types.Even func (f *MockTail) CanRun() error { return nil } func (f *MockTail) GetMetrics() []prometheus.Collector { return nil } func (f *MockTail) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockTail) Dump() interface{} { return f } +func (f *MockTail) Dump() any { return f } func (f *MockTail) ConfigureByDSN(string, map[string]string, *log.Entry, string) error { return errors.New("not supported") } @@ -538,7 +538,7 @@ func (f *MockSourceByDSN) StreamingAcquisition(context.Context, chan types.Event func (f *MockSourceByDSN) CanRun() error { return nil } func (f *MockSourceByDSN) GetMetrics() []prometheus.Collector { return nil } func (f *MockSourceByDSN) GetAggregMetrics() []prometheus.Collector { return nil } -func (f *MockSourceByDSN) Dump() interface{} { return f } +func (f *MockSourceByDSN) Dump() any { return f } func (f *MockSourceByDSN) GetName() string { return "mockdsn" } func (f *MockSourceByDSN) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { dsn = strings.TrimPrefix(dsn, "mockdsn://") diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index 4264ab0ca93..7c6b5191883 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -103,6 +103,8 @@ func TestConfigureDSN(t *testing.T) { func TestOneShot(t *testing.T) { ctx := t.Context() + tmpDir := t.TempDir() + deletedFile := filepath.Join(tmpDir, 
"test_delete.log") permDeniedFile := "/etc/shadow" permDeniedError := "failed opening /etc/shadow: open /etc/shadow: permission denied" @@ -166,7 +168,7 @@ filename: /do/not/exist`, name: "test.log", config: ` mode: cat -filename: test_files/test.log`, +filename: testdata/test.log`, expectedLines: 5, logLevel: log.WarnLevel, }, @@ -174,7 +176,7 @@ filename: test_files/test.log`, name: "test.log.gz", config: ` mode: cat -filename: test_files/test.log.gz`, +filename: testdata/test.log.gz`, expectedLines: 5, logLevel: log.WarnLevel, }, @@ -182,24 +184,24 @@ filename: test_files/test.log.gz`, name: "unexpected end of gzip stream", config: ` mode: cat -filename: test_files/bad.gz`, - expectedErr: "failed to read gz test_files/bad.gz: unexpected EOF", +filename: testdata/bad.gz`, + expectedErr: "failed to read gz testdata/bad.gz: unexpected EOF", expectedLines: 0, logLevel: log.WarnLevel, }, { name: "deleted file", - config: ` + config: fmt.Sprintf(` mode: cat -filename: test_files/test_delete.log`, +filename: %s`, deletedFile), setup: func() { - f, _ := os.Create("test_files/test_delete.log") + f, _ := os.Create(deletedFile) f.Close() }, afterConfigure: func() { - os.Remove("test_files/test_delete.log") + os.Remove(deletedFile) }, - expectedErr: "could not stat file test_files/test_delete.log", + expectedErr: "could not stat file " + deletedFile, }, } @@ -252,14 +254,14 @@ func TestLiveAcquisition(t *testing.T) { ctx := t.Context() permDeniedFile := "/etc/shadow" permDeniedError := "unable to read /etc/shadow : open /etc/shadow: permission denied" - testPattern := "test_files/*.log" + tmpDir := t.TempDir() + testPattern := filepath.Join(tmpDir, "*.log") if runtime.GOOS == "windows" { // Technically, this is not a permission denied error, but we just want to test what happens // if we do not have access to the file permDeniedFile = `C:\Windows\System32\config\SAM` permDeniedError = `unable to read C:\Windows\System32\config\SAM : open C:\Windows\System32\config\SAM: The 
process cannot access the file because it is being used by another process` - testPattern = `test_files\*.log` } tests := []struct { @@ -320,10 +322,10 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "GlobInotify", afterConfigure: func() { - f, _ := os.Create("test_files/a.log") + f, _ := os.Create(filepath.Join(tmpDir, "a.log")) f.Close() time.Sleep(1 * time.Second) - os.Remove("test_files/a.log") + os.Remove(f.Name()) }, }, { @@ -336,18 +338,18 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "GlobInotifyChmod", afterConfigure: func() { - f, err := os.Create("test_files/a.log") + f, err := os.Create(filepath.Join(tmpDir, "a.log")) require.NoError(t, err) err = f.Close() require.NoError(t, err) time.Sleep(1 * time.Second) - err = os.Chmod("test_files/a.log", 0o000) + err = os.Chmod(f.Name(), 0o000) require.NoError(t, err) }, teardown: func() { - err := os.Chmod("test_files/a.log", 0o644) + err := os.Chmod(filepath.Join(tmpDir, "a.log"), 0o644) require.NoError(t, err) - err = os.Remove("test_files/a.log") + err = os.Remove(filepath.Join(tmpDir, "a.log")) require.NoError(t, err) }, }, @@ -361,11 +363,11 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "InotifyMkDir", afterConfigure: func() { - err := os.Mkdir("test_files/pouet/", 0o700) + err := os.Mkdir(filepath.Join(tmpDir, "pouet"), 0o700) require.NoError(t, err) }, teardown: func() { - os.Remove("test_files/pouet/") + os.Remove(filepath.Join(tmpDir, "pouet")) }, }, } @@ -398,6 +400,7 @@ force_inotify: true`, testPattern), if tc.expectedLines != 0 { var stopReading bool defer func() { stopReading = true }() + go func() { for { select { @@ -419,7 +422,7 @@ force_inotify: true`, testPattern), if tc.expectedLines != 0 { // f.IsTailing is path delimiter sensitive - streamLogFile := filepath.Join("test_files", "stream.log") + streamLogFile := filepath.Join(tmpDir, "stream.log") fd, err := os.Create(streamLogFile) require.NoError(t, err, "could not 
create test file") @@ -435,6 +438,7 @@ force_inotify: true`, testPattern), time.Sleep(50 * time.Millisecond) continue } + waitingForTail = false } } @@ -475,7 +479,7 @@ force_inotify: true`, testPattern), } func TestExclusion(t *testing.T) { - config := `filenames: ["test_files/*.log*"] + config := `filenames: ["testdata/*.log*"] exclude_regexps: ["\\.gz$"]` logger, hook := test.NewNullLogger() // logger.SetLevel(ts.logLevel) @@ -487,7 +491,7 @@ exclude_regexps: ["\\.gz$"]` require.NotNil(t, hook.LastEntry()) assert.Contains(t, hook.LastEntry().Message, `Skipping file: matches exclude regex "\\.gz`) - assert.Equal(t, filepath.Join("test_files", "test.log.gz"), hook.LastEntry().Data["file"]) + assert.Equal(t, filepath.Join("testdata", "test.log.gz"), hook.LastEntry().Data["file"]) hook.Reset() } diff --git a/pkg/acquisition/modules/file/test_files/bad.gz b/pkg/acquisition/modules/file/testdata/bad.gz similarity index 100% rename from pkg/acquisition/modules/file/test_files/bad.gz rename to pkg/acquisition/modules/file/testdata/bad.gz diff --git a/pkg/acquisition/modules/file/test_files/test.log b/pkg/acquisition/modules/file/testdata/test.log similarity index 100% rename from pkg/acquisition/modules/file/test_files/test.log rename to pkg/acquisition/modules/file/testdata/test.log diff --git a/pkg/acquisition/modules/file/test_files/test.log.gz b/pkg/acquisition/modules/file/testdata/test.log.gz similarity index 100% rename from pkg/acquisition/modules/file/test_files/test.log.gz rename to pkg/acquisition/modules/file/testdata/test.log.gz diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 424612d8bfc..36b20c34d8d 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -280,8 +280,7 @@ journalctl_filter: func TestMain(m *testing.M) { if os.Getenv("USE_SYSTEM_JOURNALCTL") == "" { - currentDir, _ := os.Getwd() - fullPath 
:= filepath.Join(currentDir, "test_files") + fullPath, _ := filepath.Abs("./testdata") os.Setenv("PATH", fullPath+":"+os.Getenv("PATH")) } diff --git a/pkg/acquisition/modules/journalctl/test_files/journalctl b/pkg/acquisition/modules/journalctl/testdata/journalctl similarity index 78% rename from pkg/acquisition/modules/journalctl/test_files/journalctl rename to pkg/acquisition/modules/journalctl/testdata/journalctl index 83c9ad1719a..a40bc8a004e 100755 --- a/pkg/acquisition/modules/journalctl/test_files/journalctl +++ b/pkg/acquisition/modules/journalctl/testdata/journalctl @@ -4,17 +4,17 @@ import argparse import time import sys + class CustomParser(argparse.ArgumentParser): - #small hack to make argparse errors the same as journalctl + # small hack to make argparse errors the same as journalctl def error(self, message): if 'unrecognized arguments:' in message: - sys.stderr.write("journalctl: invalid option -- '_'\n") - sys.stderr.flush() - exit(1) + _ = sys.stderr.write("journalctl: invalid option -- '_'\n") else: - sys.stderr.write(message) - sys.stderr.flush() - exit(1) + _ = sys.stderr.write(message) + _ = sys.stderr.flush() + exit(1) + LOGS = """-- Logs begin at Fri 2019-07-26 17:13:13 CEST, end at Mon 2020-11-23 09:17:34 CET. 
-- Nov 22 11:22:19 zeroed sshd[1480]: Invalid user wqeqwe from 127.0.0.1 port 55818 @@ -32,9 +32,9 @@ Nov 22 11:23:27 zeroed sshd[1791]: Invalid user wqeqwe5 from 127.0.0.1 port 5583 Nov 22 11:23:27 zeroed sshd[1791]: Failed password for invalid user wqeqwe5 from 127.0.0.1 port 55834 ssh2""" parser = CustomParser() -parser.add_argument('filter', metavar='FILTER', type=str, nargs='?') -parser.add_argument('-n', dest='n', type=int) -parser.add_argument('--follow', dest='follow', action='store_true', default=False) +_ = parser.add_argument('filter', metavar='FILTER', type=str, nargs='?') +_ = parser.add_argument('-n', dest='n', type=int) +_ = parser.add_argument('--follow', dest='follow', action='store_true', default=False) args = parser.parse_args() @@ -42,4 +42,4 @@ for line in LOGS.split('\n'): print(line) if args.follow: - time.sleep(9999) \ No newline at end of file + time.sleep(9999) diff --git a/pkg/acquisition/modules/wineventlog/test_files/Setup.evtx b/pkg/acquisition/modules/wineventlog/testdata/Setup.evtx similarity index 100% rename from pkg/acquisition/modules/wineventlog/test_files/Setup.evtx rename to pkg/acquisition/modules/wineventlog/testdata/Setup.evtx diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index 1b2420bc941..ef7aecda41d 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -258,18 +258,18 @@ func TestOneShotAcquisition(t *testing.T) { }, { name: "existing file", - dsn: `wineventlog://test_files/Setup.evtx`, + dsn: `wineventlog://testdata/Setup.evtx`, expectedCount: 24, expectedErr: "", }, { name: "filter on event_id", - dsn: `wineventlog://test_files/Setup.evtx?event_id=2`, + dsn: `wineventlog://testdata/Setup.evtx?event_id=2`, expectedCount: 1, }, { name: "filter on event_id", - dsn: `wineventlog://test_files/Setup.evtx?event_id=2&event_id=3`, + dsn: 
`wineventlog://testdata/Setup.evtx?event_id=2&event_id=3`, expectedCount: 24, }, } diff --git a/pkg/acquisition/test_files/backward_compat.yaml b/pkg/acquisition/testdata/backward_compat.yaml similarity index 100% rename from pkg/acquisition/test_files/backward_compat.yaml rename to pkg/acquisition/testdata/backward_compat.yaml diff --git a/pkg/acquisition/test_files/bad_filetype.yaml b/pkg/acquisition/testdata/bad_filetype.yaml similarity index 100% rename from pkg/acquisition/test_files/bad_filetype.yaml rename to pkg/acquisition/testdata/bad_filetype.yaml diff --git a/pkg/acquisition/test_files/bad_source.yaml b/pkg/acquisition/testdata/bad_source.yaml similarity index 100% rename from pkg/acquisition/test_files/bad_source.yaml rename to pkg/acquisition/testdata/bad_source.yaml diff --git a/pkg/acquisition/test_files/badyaml.yaml b/pkg/acquisition/testdata/badyaml.yaml similarity index 100% rename from pkg/acquisition/test_files/badyaml.yaml rename to pkg/acquisition/testdata/badyaml.yaml diff --git a/pkg/acquisition/test_files/basic_filemode.yaml b/pkg/acquisition/testdata/basic_filemode.yaml similarity index 100% rename from pkg/acquisition/test_files/basic_filemode.yaml rename to pkg/acquisition/testdata/basic_filemode.yaml diff --git a/pkg/acquisition/test_files/emptyitem.yaml b/pkg/acquisition/testdata/emptyitem.yaml similarity index 100% rename from pkg/acquisition/test_files/emptyitem.yaml rename to pkg/acquisition/testdata/emptyitem.yaml diff --git a/pkg/acquisition/test_files/env.yaml b/pkg/acquisition/testdata/env.yaml similarity index 100% rename from pkg/acquisition/test_files/env.yaml rename to pkg/acquisition/testdata/env.yaml diff --git a/pkg/acquisition/test_files/missing_labels.yaml b/pkg/acquisition/testdata/missing_labels.yaml similarity index 100% rename from pkg/acquisition/test_files/missing_labels.yaml rename to pkg/acquisition/testdata/missing_labels.yaml diff --git a/pkg/csplugin/broker_suite_test.go b/pkg/csplugin/broker_suite_test.go 
index 1210c67058a..768c0acd094 100644 --- a/pkg/csplugin/broker_suite_test.go +++ b/pkg/csplugin/broker_suite_test.go @@ -28,6 +28,7 @@ type PluginSuite struct { notifDir string // (config_paths.notification_dir) pluginBinary string // full path to the plugin binary (unique for each test) pluginConfig string // full path to the notification config (unique for each test) + outFile string // full path to the output file (unique for each test) pluginBroker *PluginBroker } @@ -125,6 +126,9 @@ func (s *PluginSuite) SetupSubTest() { s.pluginConfig = filepath.Join(s.notifDir, "dummy.yaml") err = copyFile("testdata/dummy.yaml", s.pluginConfig) require.NoError(t, err, "while copying plugin config") + + s.outFile = filepath.Join(t.TempDir(), "out") + t.Setenv("OUTFILE", s.outFile) } func (s *PluginSuite) TearDownSubTest() { @@ -139,8 +143,6 @@ func (s *PluginSuite) TearDownSubTest() { if runtime.GOOS != "windows" { require.NoError(t, err) } - - os.Remove("./out") } func (s *PluginSuite) InitBroker(ctx context.Context, procCfg *csconfig.PluginCfg) (*PluginBroker, error) { diff --git a/pkg/csplugin/broker_test.go b/pkg/csplugin/broker_test.go index 63ba65d2ddf..3da37b4269c 100644 --- a/pkg/csplugin/broker_test.go +++ b/pkg/csplugin/broker_test.go @@ -163,7 +163,7 @@ func (s *PluginSuite) TestBrokerNoThreshold() { time.Sleep(200 * time.Millisecond) // we expect one now - content, err := os.ReadFile("./out") + content, err := os.ReadFile(s.outFile) require.NoError(t, err, "Error reading file") err = json.Unmarshal(content, &alerts) @@ -171,7 +171,7 @@ func (s *PluginSuite) TestBrokerNoThreshold() { assert.Len(t, alerts, 1) // remove it - os.Remove("./out") + os.Remove(s.outFile) // and another one log.Printf("second send") @@ -180,7 +180,7 @@ func (s *PluginSuite) TestBrokerNoThreshold() { time.Sleep(200 * time.Millisecond) // we expect one again, as we cleaned the file - content, err = os.ReadFile("./out") + content, err = os.ReadFile(s.outFile) require.NoError(t, err, "Error 
reading file") err = json.Unmarshal(content, &alerts) @@ -216,10 +216,10 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_TimeFirst() { time.Sleep(500 * time.Millisecond) // because of group threshold, we shouldn't have data yet - assert.NoFileExists(t, "./out") + assert.NoFileExists(t, s.outFile) time.Sleep(1 * time.Second) // after 1 seconds, we should have data - content, err := os.ReadFile("./out") + content, err := os.ReadFile(s.outFile) require.NoError(t, err) var alerts []models.Alert @@ -254,13 +254,13 @@ func (s *PluginSuite) TestBrokerRunGroupAndTimeThreshold_CountFirst() { time.Sleep(100 * time.Millisecond) // because of group threshold, we shouldn't have data yet - assert.NoFileExists(t, "./out") + assert.NoFileExists(t, s.outFile) pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} time.Sleep(100 * time.Millisecond) // and now we should - content, err := os.ReadFile("./out") + content, err := os.ReadFile(s.outFile) require.NoError(t, err, "Error reading file") var alerts []models.Alert @@ -295,7 +295,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { time.Sleep(time.Second) // because of group threshold, we shouldn't have data yet - assert.NoFileExists(t, "./out") + assert.NoFileExists(t, s.outFile) pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} @@ -303,7 +303,7 @@ func (s *PluginSuite) TestBrokerRunGroupThreshold() { time.Sleep(time.Second) // and now we should - content, err := os.ReadFile("./out") + content, err := os.ReadFile(s.outFile) require.NoError(t, err, "Error reading file") decoder := json.NewDecoder(bytes.NewReader(content)) @@ -347,11 +347,11 @@ func (s *PluginSuite) TestBrokerRunTimeThreshold() { time.Sleep(200 * time.Millisecond) // we shouldn't have data yet - assert.NoFileExists(t, "./out") + 
assert.NoFileExists(t, s.outFile) time.Sleep(1 * time.Second) // and now we should - content, err := os.ReadFile("./out") + content, err := os.ReadFile(s.outFile) require.NoError(t, err, "Error reading file") var alerts []models.Alert @@ -372,16 +372,16 @@ func (s *PluginSuite) TestBrokerRunSimple() { tomb := tomb.Tomb{} go pb.Run(&tomb) - assert.NoFileExists(t, "./out") + assert.NoFileExists(t, s.outFile) - defer os.Remove("./out") + defer os.Remove(s.outFile) pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} // make it wait a bit, CI can be slow time.Sleep(time.Second) - content, err := os.ReadFile("./out") + content, err := os.ReadFile(s.outFile) require.NoError(t, err, "Error reading file") decoder := json.NewDecoder(bytes.NewReader(content)) diff --git a/pkg/csplugin/broker_win_test.go b/pkg/csplugin/broker_win_test.go index e28056ae626..e7ea584b01e 100644 --- a/pkg/csplugin/broker_win_test.go +++ b/pkg/csplugin/broker_win_test.go @@ -76,16 +76,16 @@ func (s *PluginSuite) TestBrokerRun() { tomb := tomb.Tomb{} go pb.Run(&tomb) - assert.NoFileExists(t, "./out") - defer os.Remove("./out") + assert.NoFileExists(t, s.outFile) + defer os.Remove(s.outFile) pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} pb.PluginChannel <- ProfileAlert{ProfileID: uint(0), Alert: &models.Alert{}} time.Sleep(time.Second * 4) - assert.FileExists(t, ".\\out") + assert.FileExists(t, s.outFile) - content, err := os.ReadFile("./out") + content, err := os.ReadFile(s.outFile) require.NoError(t, err, "Error reading file") decoder := json.NewDecoder(bytes.NewReader(content)) diff --git a/pkg/csplugin/testdata/dummy.yaml b/pkg/csplugin/testdata/dummy.yaml index 1b883af4b08..5f50e7c6cbc 100644 --- a/pkg/csplugin/testdata/dummy.yaml +++ b/pkg/csplugin/testdata/dummy.yaml @@ -18,5 +18,5 @@ format: | {{.|toJson}} # -output_file: ./out # notifications will be appended 
here. optional +output_file: ${OUTFILE} From 1cede239d8465b0549d10a3a44aab7b96a2cd743 Mon Sep 17 00:00:00 2001 From: blotus Date: Mon, 2 Jun 2025 10:31:42 +0200 Subject: [PATCH 541/581] update coraza (#3657) --- go.mod | 20 ++++++++++---------- go.sum | 36 ++++++++++++++++++------------------ 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/go.mod b/go.mod index 931ca9e7e6f..11d15429a1a 100644 --- a/go.mod +++ b/go.mod @@ -98,15 +98,15 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect - golang.org/x/crypto v0.36.0 + golang.org/x/crypto v0.38.0 golang.org/x/mod v0.23.0 - golang.org/x/net v0.38.0 - golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 - golang.org/x/text v0.23.0 + golang.org/x/net v0.40.0 + golang.org/x/sync v0.14.0 + golang.org/x/sys v0.33.0 + golang.org/x/text v0.25.0 golang.org/x/time v0.6.0 // indirect google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.36.3 + google.golang.org/protobuf v1.36.6 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 @@ -116,7 +116,7 @@ require ( ) -require github.com/corazawaf/coraza/v3 v3.3.2 +require github.com/corazawaf/coraza/v3 v3.3.3 require ( ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 // indirect @@ -185,7 +185,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/oklog/run v1.0.0 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect - github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 // indirect + github.com/petar-dambovaliev/aho-corasick v0.0.0-20250424160509-463d218d4745 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect @@ -218,7 +218,7 @@ 
require ( go.opentelemetry.io/otel/metric v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.12.0 // indirect - golang.org/x/term v0.30.0 // indirect + golang.org/x/term v0.32.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect @@ -235,4 +235,4 @@ require ( replace golang.org/x/time/rate => github.com/crowdsecurity/crowdsec/pkg/time/rate v0.0.0 -replace github.com/corazawaf/coraza/v3 => github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a +replace github.com/corazawaf/coraza/v3 => github.com/crowdsecurity/coraza/v3 v3.0.0-20250601201637-67ae05361dc0 diff --git a/go.sum b/go.sum index e9a0c8b4959..4c4ef3cfd54 100644 --- a/go.sum +++ b/go.sum @@ -107,8 +107,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a h1:2Nyr+47Y/K68wohQWCrE7jKRIOpp6hJ29XCEQO3FhOw= -github.com/crowdsecurity/coraza/v3 v3.0.0-20250320231801-749b8bded21a/go.mod h1:xSaXWOhFMSbrV8qOOfBKAyw3aOqfwaSaOy5BgSF8XlA= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250601201637-67ae05361dc0 h1:27dh+bYI98DiHMUJ4uZ8q+rT6/Llwymn597YBHd1ixw= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250601201637-67ae05361dc0/go.mod h1:L6CEXtl7VKyEuL6evak4IViv9M5glswuF+UZx0FX/Tg= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= github.com/crowdsecurity/go-cs-lib v0.0.19 
h1:wA4O8hGrEntTGn7eZTJqnQ3mrAje5JvQAj8DNbe5IZg= @@ -576,8 +576,8 @@ github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUr github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= -github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 h1:1Kw2vDBXmjop+LclnzCb/fFy+sgb3gYARwfmoUcQe6o= -github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= +github.com/petar-dambovaliev/aho-corasick v0.0.0-20250424160509-463d218d4745 h1:Vpr4VgAizEgEZsaMohpw6JYDP+i9Of9dmdY4ufNP6HI= +github.com/petar-dambovaliev/aho-corasick v0.0.0-20250424160509-463d218d4745/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -805,8 +805,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -838,8 +838,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -849,8 +849,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -886,8 +886,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -895,8 +895,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= +golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -909,8 +909,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -956,8 +956,8 @@ google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 37c35ee86ffc8333fff758e506b71f623cbb24d1 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 3 Jun 2025 12:38:38 +0200 Subject: [PATCH 542/581] go.mod/sum cleanup (#3661) --- go.mod | 52 +++++++++++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 11d15429a1a..d1f9c342b2c 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 - github.com/Microsoft/go-winio v0.6.2 // indirect github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 github.com/appleboy/gin-jwt/v2 v2.10.1 @@ -19,20 +18,16 @@ require ( github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.8 github.com/cespare/xxhash/v2 v2.3.0 - github.com/containerd/log v0.1.0 // indirect + github.com/corazawaf/coraza/v3 v3.3.3 github.com/corazawaf/libinjection-go v0.2.2 - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/creack/pty v1.1.21 // indirect github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 github.com/crowdsecurity/go-cs-lib v0.0.19 github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/dghubble/sling v1.4.2 - github.com/distribution/reference v0.6.0 // indirect github.com/docker/docker v27.3.1+incompatible github.com/docker/go-connections v0.5.0 - github.com/docker/go-units v0.5.0 // indirect github.com/expr-lang/expr v1.17.2 github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.7.0 @@ -44,10 +39,7 @@ require ( github.com/go-openapi/validate v0.20.0 github.com/go-sql-driver/mysql v1.6.0 github.com/goccy/go-yaml v1.11.0 - 
github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 github.com/google/uuid v1.6.0 github.com/google/winops v0.0.0-20230712152054-af9b550d0601 @@ -62,18 +54,11 @@ require ( github.com/jarcoal/httpmock v1.1.0 github.com/jedib0t/go-pretty/v6 v6.5.9 github.com/jszwec/csvutil v1.5.1 - github.com/klauspost/compress v1.17.9 // indirect github.com/lithammer/dedent v1.1.0 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.24 - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/term v0.5.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 - github.com/morikuni/aec v1.0.0 // indirect github.com/nxadm/tail v1.4.11 - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect github.com/oschwald/geoip2-golang v1.9.0 github.com/oschwald/maxminddb-golang v1.12.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 @@ -88,39 +73,29 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.16.0 github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/testify v1.10.0 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect golang.org/x/crypto v0.38.0 golang.org/x/mod v0.23.0 golang.org/x/net v0.40.0 golang.org/x/sync v0.14.0 golang.org/x/sys v0.33.0 golang.org/x/text v0.25.0 - golang.org/x/time 
v0.6.0 // indirect google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.36.6 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - gotest.tools/v3 v3.5.1 // indirect k8s.io/apiserver v0.28.4 - ) -require github.com/corazawaf/coraza/v3 v3.3.3 - require ( ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 // indirect github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -131,7 +106,12 @@ require ( github.com/bytedance/sonic/loader v0.2.1 // indirect github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect + github.com/creack/pty v1.1.21 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.7 // indirect github.com/gin-contrib/sse v0.1.0 // indirect @@ -150,7 +130,10 @@ require ( github.com/go-playground/validator/v10 v10.23.0 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.4 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.4 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect @@ -168,6 +151,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect 
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect @@ -177,13 +161,19 @@ require ( github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/oklog/run v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/petar-dambovaliev/aho-corasick v0.0.0-20250424160509-463d218d4745 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect @@ -199,6 +189,7 @@ require ( github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/tetratelabs/wazero v1.8.0 // indirect github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.1.1 // indirect @@ -215,15 +206,22 @@ require ( github.com/zclconf/go-cty v1.14.4 // indirect github.com/zclconf/go-cty-yaml v1.1.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.46.1 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.12.0 // indirect golang.org/x/term v0.32.0 // indirect + golang.org/x/time v0.6.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gotest.tools/v3 v3.5.1 // indirect k8s.io/api v0.28.4 // indirect k8s.io/apimachinery v0.28.4 // indirect k8s.io/klog/v2 v2.100.1 // indirect From 373bf669f1d508c1955d585108ef6d100f55a8da Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Tue, 3 Jun 2025 12:43:05 +0200 Subject: [PATCH 543/581] update test/README.md (#3652) * update test/README.md * make targets: bats-mysql, bats-postgres --- test/README.md | 176 +++++++++++++++++------------------- test/bats.mk | 22 ++++- test/bin/check-requirements | 7 -- 3 files changed, 101 insertions(+), 104 deletions(-) diff --git a/test/README.md b/test/README.md index b1489f1b7eb..a63fc06ddf8 100644 --- a/test/README.md +++ b/test/README.md @@ -1,87 +1,86 @@ # What is this? -This directory contains scripts for functional testing. The tests are run with -the [bats-core](https://github.com/bats-core/bats-core) framework, which is an -active fork of the older BATS (Bash Automated Testing System). +This directory contains scripts for functional testing. They complement +the unit tests and provide an environment that can be used during development +as well. 
+ +The tests are run with the [bats-core](https://github.com/bats-core/bats-core) +framework, which is provided as git submodules in the crowdsec repository. With the addition of [the ansible playbooks](ansible/README.md) it is possible to use VMs to test the binary packages, service management and other CPU architectures. -### cscli - -| Feature | Covered | Notes | -| :-------------------- | :----------------- | :------------------------- | -| `cscli alerts` | - | | -| `cscli bouncers` | `10_bouncers` | | -| `cscli capi` | `01_base` | `status` only | -| `cscli collections` | `20_collections` | | -| `cscli config` | `01_base` | minimal testing (no crash) | -| `cscli dashboard` | - | docker inside docker 😞 | -| `cscli decisions` | `9[78]_ipv[46]*` | | -| `cscli hub` | `dyn_bats/99_hub` | | -| `cscli lapi` | `01_base` | | -| `cscli machines` | `30_machines` | | -| `cscli metrics` | - | | -| `cscli parsers` | - | | -| `cscli postoverflows` | - | | -| `cscli scenarios` | - | | -| `cscli simulation` | `50_simulation` | | -| `cscli version` | `01_base` | | - -### crowdsec - -| Feature | Covered | Notes | -| :----------------------------- | :------------- | :----------------------------------------- | -| `systemctl` start/stop/restart | - | | -| agent behavior | `40_live-ban` | minimal testing (simple ssh-bf detection) | -| forensic mode | `40_cold-logs` | minimal testing (simple ssh-bf detection) | -| starting without LAPI | `02_nolapi` | | -| starting without agent | `03_noagent` | | -| starting without CAPI | `04_nocapi` | | -| prometheus testing | - | | - -### API - -| Feature | Covered | Notes | -| :----------------- | :--------------- | :----------- | -| alerts GET/POST | `9[78]_ipv[46]*` | | -| decisions GET/POST | `9[78]_ipv[46]*` | | -| stream mode | `99_lapi-stream-mode | | - - # How to use it ## pre-requisites - - `git submodule init; git submodule update` - - `base64` - - `bash>=4.4` - - `curl` - - `daemonize` - - `jq` - - `python3` +(for building crowdsec) + 
+- `go` +- `pkg-config` +- `re2` + +On Ubuntu/Debian, you can install the above with: + +```console +$ sudo apt install pkgconf golang-go libre2-dev +... +``` + +(for the test suite) + +- `git submodule init; git submodule update` +- `bash>=4.4` +- `curl` +- `daemonize` +- `jq` +- `python3` + +These can be installed with + +```console +$ sudo apt install bash curl daemonize jq python3 +... +``` + +Additional dependencies are required if you need to test with mysql or postgres +(see below). -## Running all tests +## Running the tests Run `make clean bats-all` to perform a test build + run. -To repeat test runs without rebuilding crowdsec, use `make bats-test`. +If a test is failing, you can run only the script that contains it with + +```console +$ ./test/run-tests test/bats/